problem_id: string (lengths 18-22)
source: string (1 class)
task_type: string (1 class)
in_source_id: string (lengths 13-58)
prompt: string (lengths 1.1k-25.4k)
golden_diff: string (lengths 145-5.13k)
verification_info: string (lengths 582-39.1k)
num_tokens: int64 (271-4.1k)
num_tokens_diff: int64 (47-1.02k)
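The records below follow this schema. As a minimal sketch of how one might load and inspect a record, assuming the dump corresponds to the "rasdani/github-patches" dataset named in the `source` column and that a `train` split exists (both are assumptions, not confirmed by this dump):

```python
# Minimal sketch; the hub path and split name are assumptions.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])

# `verification_info` is stored as a JSON string; the keys used here
# (golden_diff, issue, before_files, after_files) match the records shown below.
info = json.loads(row["verification_info"])
print(sorted(info))
print(info["before_files"][0]["path"])
```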
problem_id: gh_patches_debug_23904
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__torchmetrics-847
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TweedieDevianceScore producing nan results when setting power=1.0 ## 🐛 Bug <!-- A clear and concise description of what the bug is. --> ### To Reproduce ```>>> td=TweedieDevianceScore(power=1.0) >>> targets=torch.tensor([0,1,0,1]) >>> preds=torch.tensor([0.1,0.1,0.1,0.1]) >>> td(preds,targets) tensor(nan) >>> td=TweedieDevianceScore(power=1.0001) >>> td(preds,targets) tensor(1.5038)``` I assume this is a bug since the documentation says targets >= 0 (obviously Poisson data will often be 0). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/functional/regression/tweedie_deviance.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Tuple 15 16 import torch 17 from torch import Tensor 18 19 from torchmetrics.utilities.checks import _check_same_shape 20 21 22 def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]: 23 """Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape 24 of input tensors. 25 26 Args: 27 preds: Predicted tensor 28 targets: Ground truth tensor 29 power: see :func:`tweedie_deviance_score` 30 31 Example: 32 >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0]) 33 >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0]) 34 >>> _tweedie_deviance_score_update(preds, targets, power=2) 35 (tensor(4.8333), tensor(4)) 36 """ 37 _check_same_shape(preds, targets) 38 39 zero_tensor = torch.zeros(preds.shape, device=preds.device) 40 41 if 0 < power < 1: 42 raise ValueError(f"Deviance Score is not defined for power={power}.") 43 44 if power == 0: 45 deviance_score = torch.pow(targets - preds, exponent=2) 46 elif power == 1: 47 # Poisson distribution 48 if torch.any(preds <= 0) or torch.any(targets < 0): 49 raise ValueError( 50 f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative." 51 ) 52 53 deviance_score = 2 * (targets * torch.log(targets / preds) + preds - targets) 54 elif power == 2: 55 # Gamma distribution 56 if torch.any(preds <= 0) or torch.any(targets <= 0): 57 raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.") 58 59 deviance_score = 2 * (torch.log(preds / targets) + (targets / preds) - 1) 60 else: 61 if power < 0: 62 if torch.any(preds <= 0): 63 raise ValueError(f"For power={power}, 'preds' has to be strictly positive.") 64 elif 1 < power < 2: 65 if torch.any(preds <= 0) or torch.any(targets < 0): 66 raise ValueError( 67 f"For power={power}, 'targets' has to be strictly positive and 'preds' cannot be negative." 
68 ) 69 else: 70 if torch.any(preds <= 0) or torch.any(targets <= 0): 71 raise ValueError(f"For power={power}, both 'preds' and 'targets' have to be strictly positive.") 72 73 term_1 = torch.pow(torch.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power)) 74 term_2 = targets * torch.pow(preds, 1 - power) / (1 - power) 75 term_3 = torch.pow(preds, 2 - power) / (2 - power) 76 deviance_score = 2 * (term_1 - term_2 + term_3) 77 78 sum_deviance_score = torch.sum(deviance_score) 79 num_observations = torch.tensor(torch.numel(deviance_score), device=preds.device) 80 81 return sum_deviance_score, num_observations 82 83 84 def _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor: 85 """Computes Deviance Score. 86 87 Args: 88 sum_deviance_score: Sum of deviance scores accumalated until now. 89 num_observations: Number of observations encountered until now. 90 91 Example: 92 >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0]) 93 >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0]) 94 >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2) 95 >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations) 96 tensor(1.2083) 97 """ 98 99 return sum_deviance_score / num_observations 100 101 102 def tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor: 103 r""" 104 Computes the `Tweedie Deviance Score`_ between targets and predictions: 105 106 .. math:: 107 deviance\_score(\hat{y},y) = 108 \begin{cases} 109 (\hat{y} - y)^2, & \text{for }power=0\\ 110 2 * (y * log(\frac{y}{\hat{y}}) + \hat{y} - y), & \text{for }power=1\\ 111 2 * (log(\frac{\hat{y}}{y}) + \frac{y}{\hat{y}} - 1), & \text{for }power=2\\ 112 2 * (\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \frac{y(\hat{y})^{1 - power}}{1 - power} + \frac{(\hat{y}) 113 ^{2 - power}}{2 - power}), & \text{otherwise} 114 \end{cases} 115 116 where :math:`y` is a tensor of targets values, and :math:`\hat{y}` is a tensor of predictions. 117 118 Args: 119 preds: Predicted tensor with shape ``(N,...)`` 120 targets: Ground truth tensor with shape ``(N,...)`` 121 power: 122 - power < 0 : Extreme stable distribution. (Requires: preds > 0.) 123 - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.) 124 - power = 1 : Poisson distribution. (Requires: targets >= 0 and y_pred > 0.) 125 - 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.) 126 - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.) 127 - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.) 128 - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.) 129 130 Example: 131 >>> from torchmetrics.functional import tweedie_deviance_score 132 >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0]) 133 >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0]) 134 >>> tweedie_deviance_score(preds, targets, power=2) 135 tensor(1.2083) 136 137 """ 138 sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power) 139 return _tweedie_deviance_score_compute(sum_deviance_score, num_observations) 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchmetrics/functional/regression/tweedie_deviance.py b/torchmetrics/functional/regression/tweedie_deviance.py --- a/torchmetrics/functional/regression/tweedie_deviance.py +++ b/torchmetrics/functional/regression/tweedie_deviance.py @@ -19,6 +19,13 @@ from torchmetrics.utilities.checks import _check_same_shape +def xlogy(x: Tensor, y: Tensor) -> Tensor: + """Computes x * log(y). Returns 0 if x=0""" + res = x * torch.log(y) + res[x == 0] = 0.0 + return res + + def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]: """Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape of input tensors. @@ -50,7 +57,7 @@ f"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative." ) - deviance_score = 2 * (targets * torch.log(targets / preds) + preds - targets) + deviance_score = 2 * (xlogy(targets, targets / preds) + preds - targets) elif power == 2: # Gamma distribution if torch.any(preds <= 0) or torch.any(targets <= 0):
{"golden_diff": "diff --git a/torchmetrics/functional/regression/tweedie_deviance.py b/torchmetrics/functional/regression/tweedie_deviance.py\n--- a/torchmetrics/functional/regression/tweedie_deviance.py\n+++ b/torchmetrics/functional/regression/tweedie_deviance.py\n@@ -19,6 +19,13 @@\n from torchmetrics.utilities.checks import _check_same_shape\n \n \n+def xlogy(x: Tensor, y: Tensor) -> Tensor:\n+ \"\"\"Computes x * log(y). Returns 0 if x=0\"\"\"\n+ res = x * torch.log(y)\n+ res[x == 0] = 0.0\n+ return res\n+\n+\n def _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:\n \"\"\"Updates and returns variables required to compute Deviance Score for the given power. Checks for same shape\n of input tensors.\n@@ -50,7 +57,7 @@\n f\"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative.\"\n )\n \n- deviance_score = 2 * (targets * torch.log(targets / preds) + preds - targets)\n+ deviance_score = 2 * (xlogy(targets, targets / preds) + preds - targets)\n elif power == 2:\n # Gamma distribution\n if torch.any(preds <= 0) or torch.any(targets <= 0):\n", "issue": "TweedieDevianceScore producing nan results when setting power=1.0\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\n```>>> td=TweedieDevianceScore(power=1.0)\r\n>>> targets=torch.tensor([0,1,0,1])\r\n>>> preds=torch.tensor([0.1,0.1,0.1,0.1])\r\n>>> td(preds,targets)\r\ntensor(nan)\r\n>>> td=TweedieDevianceScore(power=1.0001)\r\n>>> td(preds,targets)\r\ntensor(1.5038)```\r\n\r\nI assume this is a bug since the documentation says targets >= 0 (obviously Poisson data will often be 0). \n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:\n \"\"\"Updates and returns variables required to compute Deviance Score for the given power. 
Checks for same shape\n of input tensors.\n\n Args:\n preds: Predicted tensor\n targets: Ground truth tensor\n power: see :func:`tweedie_deviance_score`\n\n Example:\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> _tweedie_deviance_score_update(preds, targets, power=2)\n (tensor(4.8333), tensor(4))\n \"\"\"\n _check_same_shape(preds, targets)\n\n zero_tensor = torch.zeros(preds.shape, device=preds.device)\n\n if 0 < power < 1:\n raise ValueError(f\"Deviance Score is not defined for power={power}.\")\n\n if power == 0:\n deviance_score = torch.pow(targets - preds, exponent=2)\n elif power == 1:\n # Poisson distribution\n if torch.any(preds <= 0) or torch.any(targets < 0):\n raise ValueError(\n f\"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative.\"\n )\n\n deviance_score = 2 * (targets * torch.log(targets / preds) + preds - targets)\n elif power == 2:\n # Gamma distribution\n if torch.any(preds <= 0) or torch.any(targets <= 0):\n raise ValueError(f\"For power={power}, both 'preds' and 'targets' have to be strictly positive.\")\n\n deviance_score = 2 * (torch.log(preds / targets) + (targets / preds) - 1)\n else:\n if power < 0:\n if torch.any(preds <= 0):\n raise ValueError(f\"For power={power}, 'preds' has to be strictly positive.\")\n elif 1 < power < 2:\n if torch.any(preds <= 0) or torch.any(targets < 0):\n raise ValueError(\n f\"For power={power}, 'targets' has to be strictly positive and 'preds' cannot be negative.\"\n )\n else:\n if torch.any(preds <= 0) or torch.any(targets <= 0):\n raise ValueError(f\"For power={power}, both 'preds' and 'targets' have to be strictly positive.\")\n\n term_1 = torch.pow(torch.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))\n term_2 = targets * torch.pow(preds, 1 - power) / (1 - power)\n term_3 = torch.pow(preds, 2 - power) / (2 - power)\n deviance_score = 2 * (term_1 - term_2 + term_3)\n\n sum_deviance_score = torch.sum(deviance_score)\n num_observations = torch.tensor(torch.numel(deviance_score), device=preds.device)\n\n return sum_deviance_score, num_observations\n\n\ndef _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:\n \"\"\"Computes Deviance Score.\n\n Args:\n sum_deviance_score: Sum of deviance scores accumalated until now.\n num_observations: Number of observations encountered until now.\n\n Example:\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)\n >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)\n tensor(1.2083)\n \"\"\"\n\n return sum_deviance_score / num_observations\n\n\ndef tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:\n r\"\"\"\n Computes the `Tweedie Deviance Score`_ between targets and predictions:\n\n .. 
math::\n deviance\\_score(\\hat{y},y) =\n \\begin{cases}\n (\\hat{y} - y)^2, & \\text{for }power=0\\\\\n 2 * (y * log(\\frac{y}{\\hat{y}}) + \\hat{y} - y), & \\text{for }power=1\\\\\n 2 * (log(\\frac{\\hat{y}}{y}) + \\frac{y}{\\hat{y}} - 1), & \\text{for }power=2\\\\\n 2 * (\\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \\frac{y(\\hat{y})^{1 - power}}{1 - power} + \\frac{(\\hat{y})\n ^{2 - power}}{2 - power}), & \\text{otherwise}\n \\end{cases}\n\n where :math:`y` is a tensor of targets values, and :math:`\\hat{y}` is a tensor of predictions.\n\n Args:\n preds: Predicted tensor with shape ``(N,...)``\n targets: Ground truth tensor with shape ``(N,...)``\n power:\n - power < 0 : Extreme stable distribution. (Requires: preds > 0.)\n - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)\n - power = 1 : Poisson distribution. (Requires: targets >= 0 and y_pred > 0.)\n - 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)\n - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)\n - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)\n - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)\n\n Example:\n >>> from torchmetrics.functional import tweedie_deviance_score\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> tweedie_deviance_score(preds, targets, power=2)\n tensor(1.2083)\n\n \"\"\"\n sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power)\n return _tweedie_deviance_score_compute(sum_deviance_score, num_observations)\n", "path": "torchmetrics/functional/regression/tweedie_deviance.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef xlogy(x: Tensor, y: Tensor) -> Tensor:\n \"\"\"Computes x * log(y). Returns 0 if x=0\"\"\"\n res = x * torch.log(y)\n res[x == 0] = 0.0\n return res\n\n\ndef _tweedie_deviance_score_update(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tuple[Tensor, Tensor]:\n \"\"\"Updates and returns variables required to compute Deviance Score for the given power. 
Checks for same shape\n of input tensors.\n\n Args:\n preds: Predicted tensor\n targets: Ground truth tensor\n power: see :func:`tweedie_deviance_score`\n\n Example:\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> _tweedie_deviance_score_update(preds, targets, power=2)\n (tensor(4.8333), tensor(4))\n \"\"\"\n _check_same_shape(preds, targets)\n\n zero_tensor = torch.zeros(preds.shape, device=preds.device)\n\n if 0 < power < 1:\n raise ValueError(f\"Deviance Score is not defined for power={power}.\")\n\n if power == 0:\n deviance_score = torch.pow(targets - preds, exponent=2)\n elif power == 1:\n # Poisson distribution\n if torch.any(preds <= 0) or torch.any(targets < 0):\n raise ValueError(\n f\"For power={power}, 'preds' has to be strictly positive and 'targets' cannot be negative.\"\n )\n\n deviance_score = 2 * (xlogy(targets, targets / preds) + preds - targets)\n elif power == 2:\n # Gamma distribution\n if torch.any(preds <= 0) or torch.any(targets <= 0):\n raise ValueError(f\"For power={power}, both 'preds' and 'targets' have to be strictly positive.\")\n\n deviance_score = 2 * (torch.log(preds / targets) + (targets / preds) - 1)\n else:\n if power < 0:\n if torch.any(preds <= 0):\n raise ValueError(f\"For power={power}, 'preds' has to be strictly positive.\")\n elif 1 < power < 2:\n if torch.any(preds <= 0) or torch.any(targets < 0):\n raise ValueError(\n f\"For power={power}, 'targets' has to be strictly positive and 'preds' cannot be negative.\"\n )\n else:\n if torch.any(preds <= 0) or torch.any(targets <= 0):\n raise ValueError(f\"For power={power}, both 'preds' and 'targets' have to be strictly positive.\")\n\n term_1 = torch.pow(torch.max(targets, zero_tensor), 2 - power) / ((1 - power) * (2 - power))\n term_2 = targets * torch.pow(preds, 1 - power) / (1 - power)\n term_3 = torch.pow(preds, 2 - power) / (2 - power)\n deviance_score = 2 * (term_1 - term_2 + term_3)\n\n sum_deviance_score = torch.sum(deviance_score)\n num_observations = torch.tensor(torch.numel(deviance_score), device=preds.device)\n\n return sum_deviance_score, num_observations\n\n\ndef _tweedie_deviance_score_compute(sum_deviance_score: Tensor, num_observations: Tensor) -> Tensor:\n \"\"\"Computes Deviance Score.\n\n Args:\n sum_deviance_score: Sum of deviance scores accumalated until now.\n num_observations: Number of observations encountered until now.\n\n Example:\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=2)\n >>> _tweedie_deviance_score_compute(sum_deviance_score, num_observations)\n tensor(1.2083)\n \"\"\"\n\n return sum_deviance_score / num_observations\n\n\ndef tweedie_deviance_score(preds: Tensor, targets: Tensor, power: float = 0.0) -> Tensor:\n r\"\"\"\n Computes the `Tweedie Deviance Score`_ between targets and predictions:\n\n .. 
math::\n deviance\\_score(\\hat{y},y) =\n \\begin{cases}\n (\\hat{y} - y)^2, & \\text{for }power=0\\\\\n 2 * (y * log(\\frac{y}{\\hat{y}}) + \\hat{y} - y), & \\text{for }power=1\\\\\n 2 * (log(\\frac{\\hat{y}}{y}) + \\frac{y}{\\hat{y}} - 1), & \\text{for }power=2\\\\\n 2 * (\\frac{(max(y,0))^{2}}{(1 - power)(2 - power)} - \\frac{y(\\hat{y})^{1 - power}}{1 - power} + \\frac{(\\hat{y})\n ^{2 - power}}{2 - power}), & \\text{otherwise}\n \\end{cases}\n\n where :math:`y` is a tensor of targets values, and :math:`\\hat{y}` is a tensor of predictions.\n\n Args:\n preds: Predicted tensor with shape ``(N,...)``\n targets: Ground truth tensor with shape ``(N,...)``\n power:\n - power < 0 : Extreme stable distribution. (Requires: preds > 0.)\n - power = 0 : Normal distribution. (Requires: targets and preds can be any real numbers.)\n - power = 1 : Poisson distribution. (Requires: targets >= 0 and y_pred > 0.)\n - 1 < p < 2 : Compound Poisson distribution. (Requires: targets >= 0 and preds > 0.)\n - power = 2 : Gamma distribution. (Requires: targets > 0 and preds > 0.)\n - power = 3 : Inverse Gaussian distribution. (Requires: targets > 0 and preds > 0.)\n - otherwise : Positive stable distribution. (Requires: targets > 0 and preds > 0.)\n\n Example:\n >>> from torchmetrics.functional import tweedie_deviance_score\n >>> targets = torch.tensor([1.0, 2.0, 3.0, 4.0])\n >>> preds = torch.tensor([4.0, 3.0, 2.0, 1.0])\n >>> tweedie_deviance_score(preds, targets, power=2)\n tensor(1.2083)\n\n \"\"\"\n sum_deviance_score, num_observations = _tweedie_deviance_score_update(preds, targets, power=power)\n return _tweedie_deviance_score_compute(sum_deviance_score, num_observations)\n", "path": "torchmetrics/functional/regression/tweedie_deviance.py"}]}
num_tokens: 2,432
num_tokens_diff: 330
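Each record's `golden_diff` is a unified diff against the files listed in `verification_info["before_files"]`. A hypothetical stdlib-only helper (not part of any dataset tooling) that lists the files a patch touches:

```python
# Hypothetical helper: scan the "+++ b/<path>" headers of a record's golden_diff.
def touched_files(record: dict) -> list:
    paths = []
    for line in record["golden_diff"].splitlines():
        if line.startswith("+++ b/"):
            paths.append(line[len("+++ b/"):])
    return paths

# For the torchmetrics record above this yields:
# ["torchmetrics/functional/regression/tweedie_deviance.py"]
```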
problem_id: gh_patches_debug_2684
source: rasdani/github-patches
task_type: git_diff
in_source_id: CTFd__CTFd-1918
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Users in admin scoreboard show user position instead of team position In teams mode on the admin panel, users are shown with their user position on the scoreboard instead of their teams position. We should be showing both. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/admin/users.py` Content: ``` 1 from flask import render_template, request, url_for 2 from sqlalchemy.sql import not_ 3 4 from CTFd.admin import admin 5 from CTFd.models import Challenges, Tracking, Users 6 from CTFd.utils import get_config 7 from CTFd.utils.decorators import admins_only 8 from CTFd.utils.modes import TEAMS_MODE 9 10 11 @admin.route("/admin/users") 12 @admins_only 13 def users_listing(): 14 q = request.args.get("q") 15 field = request.args.get("field") 16 page = abs(request.args.get("page", 1, type=int)) 17 filters = [] 18 users = [] 19 20 if q: 21 # The field exists as an exposed column 22 if Users.__mapper__.has_property(field): 23 filters.append(getattr(Users, field).like("%{}%".format(q))) 24 25 if q and field == "ip": 26 users = ( 27 Users.query.join(Tracking, Users.id == Tracking.user_id) 28 .filter(Tracking.ip.like("%{}%".format(q))) 29 .order_by(Users.id.asc()) 30 .paginate(page=page, per_page=50) 31 ) 32 else: 33 users = ( 34 Users.query.filter(*filters) 35 .order_by(Users.id.asc()) 36 .paginate(page=page, per_page=50) 37 ) 38 39 args = dict(request.args) 40 args.pop("page", 1) 41 42 return render_template( 43 "admin/users/users.html", 44 users=users, 45 prev_page=url_for(request.endpoint, page=users.prev_num, **args), 46 next_page=url_for(request.endpoint, page=users.next_num, **args), 47 q=q, 48 field=field, 49 ) 50 51 52 @admin.route("/admin/users/new") 53 @admins_only 54 def users_new(): 55 return render_template("admin/users/new.html") 56 57 58 @admin.route("/admin/users/<int:user_id>") 59 @admins_only 60 def users_detail(user_id): 61 # Get user object 62 user = Users.query.filter_by(id=user_id).first_or_404() 63 64 # Get the user's solves 65 solves = user.get_solves(admin=True) 66 67 # Get challenges that the user is missing 68 if get_config("user_mode") == TEAMS_MODE: 69 if user.team: 70 all_solves = user.team.get_solves(admin=True) 71 else: 72 all_solves = user.get_solves(admin=True) 73 else: 74 all_solves = user.get_solves(admin=True) 75 76 solve_ids = [s.challenge_id for s in all_solves] 77 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all() 78 79 # Get IP addresses that the User has used 80 addrs = ( 81 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all() 82 ) 83 84 # Get Fails 85 fails = user.get_fails(admin=True) 86 87 # Get Awards 88 awards = user.get_awards(admin=True) 89 90 # Get user properties 91 score = user.get_score(admin=True) 92 place = user.get_place(admin=True) 93 94 return render_template( 95 "admin/users/user.html", 96 solves=solves, 97 user=user, 98 addrs=addrs, 99 score=score, 100 missing=missing, 101 place=place, 102 fails=fails, 103 awards=awards, 104 ) 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py --- a/CTFd/admin/users.py +++ b/CTFd/admin/users.py @@ -88,8 +88,8 @@ awards = user.get_awards(admin=True) # Get user properties - score = user.get_score(admin=True) - place = user.get_place(admin=True) + score = user.account.get_score(admin=True) + place = user.account.get_place(admin=True) return render_template( "admin/users/user.html",
{"golden_diff": "diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -88,8 +88,8 @@\n awards = user.get_awards(admin=True)\n \n # Get user properties\n- score = user.get_score(admin=True)\n- place = user.get_place(admin=True)\n+ score = user.account.get_score(admin=True)\n+ place = user.account.get_place(admin=True)\n \n return render_template(\n \"admin/users/user.html\",\n", "issue": "Users in admin scoreboard show user position instead of team position\nIn teams mode on the admin panel, users are shown with their user position on the scoreboard instead of their teams position. We should be showing both. \n", "before_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.get_score(admin=True)\n place = user.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}], "after_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom 
CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}]}
num_tokens: 1,217
num_tokens_diff: 120
problem_id: gh_patches_debug_37122
source: rasdani/github-patches
task_type: git_diff
in_source_id: graspologic-org__graspologic-559
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- singular_values_ attribute needed for MultipleASE class ## Expected Behavior MultipleASE should have an attribute that stores singular values for ease of generating scree plots, consistent with the other embedding classes. Specifically, it should output the singular values of the concatenated adjacency spectral embeddings, consistent with Figure 10 of the [paper](https://arxiv.org/pdf/1906.10026.pdf) where it is introduced. ## Actual Behavior MultipleASE has no attribute for singular values. ## Template Code N/A ## Full Traceback N/A ## Your Environment * Python version: 3.7.6 * graspologic version: 0.1.0 ## Proposed Labels * Question --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `graspologic/embed/mase.py` Content: ``` 1 # Copyright (c) Microsoft Corporation and contributors. 2 # Licensed under the MIT License. 3 4 import numpy as np 5 from sklearn.utils.validation import check_is_fitted 6 7 from ..utils import import_graph, is_almost_symmetric 8 from .base import BaseEmbedMulti 9 from .svd import select_dimension, selectSVD 10 11 12 class MultipleASE(BaseEmbedMulti): 13 r""" 14 Multiple Adjacency Spectral Embedding (MASE) embeds arbitrary number of input 15 graphs with matched vertex sets. 16 17 For a population of undirected graphs, MASE assumes that the population of graphs 18 is sampled from :math:`VR^{(i)}V^T` where :math:`V \in \mathbb{R}^{n\times d}` and 19 :math:`R^{(i)} \in \mathbb{R}^{d\times d}`. Score matrices, :math:`R^{(i)}`, are 20 allowed to vary for each graph, but are symmetric. All graphs share a common a 21 latent position matrix :math:`V`. 22 23 For a population of directed graphs, MASE assumes that the population is sampled 24 from :math:`UR^{(i)}V^T` where :math:`U \in \mathbb{R}^{n\times d_1}`, 25 :math:`V \in \mathbb{R}^{n\times d_2}`, and 26 :math:`R^{(i)} \in \mathbb{R}^{d_1\times d_2}`. In this case, score matrices 27 :math:`R^{(i)}` can be assymetric and non-square, but all graphs still share a 28 common latent position matrices :math:`U` and :math:`V`. 29 30 Parameters 31 ---------- 32 n_components : int or None, default = None 33 Desired dimensionality of output data. If "full", 34 ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be 35 ``< min(X.shape)``. If None, then optimal dimensions will be chosen by 36 :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument. 37 38 n_elbows : int, optional, default: 2 39 If ``n_components`` is None, then compute the optimal embedding dimension using 40 :func:`~graspologic.embed.select_dimension`. Otherwise, ignored. 41 42 algorithm : {'randomized' (default), 'full', 'truncated'}, optional 43 SVD solver to use: 44 45 - 'randomized' 46 Computes randomized svd using 47 :func:`sklearn.utils.extmath.randomized_svd` 48 - 'full' 49 Computes full svd using :func:`scipy.linalg.svd` 50 - 'truncated' 51 Computes truncated svd using :func:`scipy.sparse.linalg.svds` 52 53 n_iter : int, optional (default = 5) 54 Number of iterations for randomized SVD solver. Not used by 'full' or 55 'truncated'. The default is larger than the default in randomized_svd 56 to handle sparse matrices that may have large slowly decaying spectrum. 57 58 scaled : bool, optional (default=True) 59 Whether to scale individual eigenvectors with eigenvalues in first embedding 60 stage. 
61 62 diag_aug : bool, optional (default = True) 63 Whether to replace the main diagonal of each adjacency matrices with 64 a vector corresponding to the degree (or sum of edge weights for a 65 weighted network) before embedding. 66 67 concat : bool, optional (default False) 68 If graph(s) are directed, whether to concatenate each graph's left and right (out and in) latent positions 69 along axis 1. 70 71 72 Attributes 73 ---------- 74 n_graphs_ : int 75 Number of graphs 76 77 n_vertices_ : int 78 Number of vertices in each graph 79 80 latent_left_ : array, shape (n_samples, n_components) 81 Estimated left latent positions of the graph. 82 83 latent_right_ : array, shape (n_samples, n_components), or None 84 Estimated right latent positions of the graph. Only computed when the an input 85 graph is directed, or adjacency matrix is assymetric. Otherwise, None. 86 87 scores_ : array, shape (n_samples, n_components, n_components) 88 Estimated :math:`\hat{R}` matrices for each input graph. 89 90 91 Notes 92 ----- 93 When an input graph is directed, ``n_components`` of :attr:`latent_left_` may not be equal 94 to ``n_components`` of :attr:`latent_right_`. 95 """ 96 97 def __init__( 98 self, 99 n_components=None, 100 n_elbows=2, 101 algorithm="randomized", 102 n_iter=5, 103 scaled=True, 104 diag_aug=True, 105 concat=False, 106 ): 107 if not isinstance(scaled, bool): 108 msg = "scaled must be a boolean, not {}".format(scaled) 109 raise TypeError(msg) 110 111 super().__init__( 112 n_components=n_components, 113 n_elbows=n_elbows, 114 algorithm=algorithm, 115 n_iter=n_iter, 116 diag_aug=diag_aug, 117 concat=concat, 118 ) 119 self.scaled = scaled 120 121 def _reduce_dim(self, graphs): 122 # first embed into log2(n_vertices) for each graph 123 n_components = int(np.ceil(np.log2(np.min(self.n_vertices_)))) 124 125 # embed individual graphs 126 embeddings = [ 127 selectSVD( 128 graph, 129 n_components=n_components, 130 algorithm=self.algorithm, 131 n_iter=self.n_iter, 132 ) 133 for graph in graphs 134 ] 135 Us, Ds, Vs = zip(*embeddings) 136 137 # Choose the best embedding dimension for each graphs 138 if self.n_components is None: 139 embedding_dimensions = [] 140 for D in Ds: 141 elbows, _ = select_dimension(D, n_elbows=self.n_elbows) 142 embedding_dimensions.append(elbows[-1]) 143 144 # Choose the max of all of best embedding dimension of all graphs 145 best_dimension = int(np.ceil(np.max(embedding_dimensions))) 146 else: 147 best_dimension = self.n_components 148 149 if not self.scaled: 150 Us = np.hstack([U[:, :best_dimension] for U in Us]) 151 Vs = np.hstack([V.T[:, :best_dimension] for V in Vs]) 152 else: 153 # Equivalent to ASE 154 Us = np.hstack( 155 [ 156 U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension])) 157 for U, D in zip(Us, Ds) 158 ] 159 ) 160 Vs = np.hstack( 161 [ 162 V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension])) 163 for V, D in zip(Vs, Ds) 164 ] 165 ) 166 167 # Second SVD for vertices 168 # The notation is slightly different than the paper 169 Uhat, _, _ = selectSVD( 170 Us, 171 n_components=self.n_components, 172 n_elbows=self.n_elbows, 173 algorithm=self.algorithm, 174 n_iter=self.n_iter, 175 ) 176 177 Vhat, _, _ = selectSVD( 178 Vs, 179 n_components=self.n_components, 180 n_elbows=self.n_elbows, 181 algorithm=self.algorithm, 182 n_iter=self.n_iter, 183 ) 184 return Uhat, Vhat 185 186 def fit(self, graphs, y=None): 187 """ 188 Fit the model with graphs. 
189 190 Parameters 191 ---------- 192 graphs : list of nx.Graph or ndarray, or ndarray 193 If list of nx.Graph, each Graph must contain same number of nodes. 194 If list of ndarray, each array must have shape (n_vertices, n_vertices). 195 If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices). 196 197 Returns 198 ------- 199 self : object 200 Returns an instance of self. 201 """ 202 graphs = self._check_input_graphs(graphs) 203 204 # Check if undirected 205 undirected = all(is_almost_symmetric(g) for g in graphs) 206 207 # Diag augment 208 if self.diag_aug: 209 graphs = self._diag_aug(graphs) 210 211 # embed 212 Uhat, Vhat = self._reduce_dim(graphs) 213 self.latent_left_ = Uhat 214 if not undirected: 215 self.latent_right_ = Vhat 216 self.scores_ = Uhat.T @ graphs @ Vhat 217 else: 218 self.latent_right_ = None 219 self.scores_ = Uhat.T @ graphs @ Uhat 220 221 return self 222 223 def fit_transform(self, graphs, y=None): 224 """ 225 Fit the model with graphs and apply the embedding on graphs. 226 n_components is either automatically determined or based on user input. 227 228 Parameters 229 ---------- 230 graphs : list of nx.Graph or ndarray, or ndarray 231 If list of nx.Graph, each Graph must contain same number of nodes. 232 If list of ndarray, each array must have shape (n_vertices, n_vertices). 233 If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices). 234 235 Returns 236 ------- 237 out : np.ndarray or length 2 tuple of np.ndarray. 238 If input graphs were symmetric shape (n_vertices, n_components). 239 If graphs were directed and ``concat`` is False, returns tuple of two arrays (same shape as above). 240 The first corresponds to the left latent positions, and the second to the right latent positions. 241 When ``concat`` is True left and right (out and in) latent positions are concatenated along axis 1. 242 """ 243 return self._fit_transform(graphs) 244 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/graspologic/embed/mase.py b/graspologic/embed/mase.py --- a/graspologic/embed/mase.py +++ b/graspologic/embed/mase.py @@ -87,6 +87,13 @@ scores_ : array, shape (n_samples, n_components, n_components) Estimated :math:`\hat{R}` matrices for each input graph. + singular_values_ : array, shape (n_components) OR length 2 tuple of arrays + If input graph is undirected, equal to the singular values of the concatenated + adjacency spectral embeddings. If input graph is directed, :attr:`singular_values_` + is a tuple of length 2, where :attr:`singular_values_[0]` corresponds to + the singular values of the concatenated left adjacency spectral embeddings, + and :attr:`singular_values_[1]` corresponds to + the singular values of the concatenated right adjacency spectral embeddings. Notes ----- @@ -166,7 +173,7 @@ # Second SVD for vertices # The notation is slightly different than the paper - Uhat, _, _ = selectSVD( + Uhat, sing_vals_left, _ = selectSVD( Us, n_components=self.n_components, n_elbows=self.n_elbows, @@ -174,14 +181,14 @@ n_iter=self.n_iter, ) - Vhat, _, _ = selectSVD( + Vhat, sing_vals_right, _ = selectSVD( Vs, n_components=self.n_components, n_elbows=self.n_elbows, algorithm=self.algorithm, n_iter=self.n_iter, ) - return Uhat, Vhat + return Uhat, Vhat, sing_vals_left, sing_vals_right def fit(self, graphs, y=None): """ @@ -209,14 +216,16 @@ graphs = self._diag_aug(graphs) # embed - Uhat, Vhat = self._reduce_dim(graphs) + Uhat, Vhat, sing_vals_left, sing_vals_right = self._reduce_dim(graphs) self.latent_left_ = Uhat if not undirected: self.latent_right_ = Vhat self.scores_ = Uhat.T @ graphs @ Vhat + self.singular_values_ = (sing_vals_left, sing_vals_right) else: self.latent_right_ = None self.scores_ = Uhat.T @ graphs @ Uhat + self.singular_values_ = sing_vals_left return self
{"golden_diff": "diff --git a/graspologic/embed/mase.py b/graspologic/embed/mase.py\n--- a/graspologic/embed/mase.py\n+++ b/graspologic/embed/mase.py\n@@ -87,6 +87,13 @@\n scores_ : array, shape (n_samples, n_components, n_components)\n Estimated :math:`\\hat{R}` matrices for each input graph.\n \n+ singular_values_ : array, shape (n_components) OR length 2 tuple of arrays\n+ If input graph is undirected, equal to the singular values of the concatenated\n+ adjacency spectral embeddings. If input graph is directed, :attr:`singular_values_`\n+ is a tuple of length 2, where :attr:`singular_values_[0]` corresponds to\n+ the singular values of the concatenated left adjacency spectral embeddings,\n+ and :attr:`singular_values_[1]` corresponds to\n+ the singular values of the concatenated right adjacency spectral embeddings.\n \n Notes\n -----\n@@ -166,7 +173,7 @@\n \n # Second SVD for vertices\n # The notation is slightly different than the paper\n- Uhat, _, _ = selectSVD(\n+ Uhat, sing_vals_left, _ = selectSVD(\n Us,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n@@ -174,14 +181,14 @@\n n_iter=self.n_iter,\n )\n \n- Vhat, _, _ = selectSVD(\n+ Vhat, sing_vals_right, _ = selectSVD(\n Vs,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n- return Uhat, Vhat\n+ return Uhat, Vhat, sing_vals_left, sing_vals_right\n \n def fit(self, graphs, y=None):\n \"\"\"\n@@ -209,14 +216,16 @@\n graphs = self._diag_aug(graphs)\n \n # embed\n- Uhat, Vhat = self._reduce_dim(graphs)\n+ Uhat, Vhat, sing_vals_left, sing_vals_right = self._reduce_dim(graphs)\n self.latent_left_ = Uhat\n if not undirected:\n self.latent_right_ = Vhat\n self.scores_ = Uhat.T @ graphs @ Vhat\n+ self.singular_values_ = (sing_vals_left, sing_vals_right)\n else:\n self.latent_right_ = None\n self.scores_ = Uhat.T @ graphs @ Uhat\n+ self.singular_values_ = sing_vals_left\n \n return self\n", "issue": "singular_values_ attribute needed for MultipleASE class\n## Expected Behavior\r\nMultipleASE should have an attribute that stores singular values for ease of generating scree plots, consistent with the other embedding classes. Specifically, it should output the singular values of the concatenated adjacency spectral embeddings, consistent with Figure 10 of the [paper](https://arxiv.org/pdf/1906.10026.pdf) where it is introduced.\r\n\r\n## Actual Behavior\r\nMultipleASE has no attribute for singular values.\r\n\r\n## Template Code\r\nN/A\r\n\r\n## Full Traceback\r\nN/A\r\n\r\n## Your Environment\r\n* Python version: 3.7.6\r\n* graspologic version: 0.1.0\r\n\r\n## Proposed Labels\r\n * Question\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport numpy as np\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ..utils import import_graph, is_almost_symmetric\nfrom .base import BaseEmbedMulti\nfrom .svd import select_dimension, selectSVD\n\n\nclass MultipleASE(BaseEmbedMulti):\n r\"\"\"\n Multiple Adjacency Spectral Embedding (MASE) embeds arbitrary number of input\n graphs with matched vertex sets.\n\n For a population of undirected graphs, MASE assumes that the population of graphs\n is sampled from :math:`VR^{(i)}V^T` where :math:`V \\in \\mathbb{R}^{n\\times d}` and\n :math:`R^{(i)} \\in \\mathbb{R}^{d\\times d}`. Score matrices, :math:`R^{(i)}`, are\n allowed to vary for each graph, but are symmetric. 
All graphs share a common a\n latent position matrix :math:`V`.\n\n For a population of directed graphs, MASE assumes that the population is sampled\n from :math:`UR^{(i)}V^T` where :math:`U \\in \\mathbb{R}^{n\\times d_1}`,\n :math:`V \\in \\mathbb{R}^{n\\times d_2}`, and\n :math:`R^{(i)} \\in \\mathbb{R}^{d_1\\times d_2}`. In this case, score matrices\n :math:`R^{(i)}` can be assymetric and non-square, but all graphs still share a\n common latent position matrices :math:`U` and :math:`V`.\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\",\n ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be\n ``< min(X.shape)``. If None, then optimal dimensions will be chosen by\n :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.\n\n n_elbows : int, optional, default: 2\n If ``n_components`` is None, then compute the optimal embedding dimension using\n :func:`~graspologic.embed.select_dimension`. Otherwise, ignored.\n\n algorithm : {'randomized' (default), 'full', 'truncated'}, optional\n SVD solver to use:\n\n - 'randomized'\n Computes randomized svd using\n :func:`sklearn.utils.extmath.randomized_svd`\n - 'full'\n Computes full svd using :func:`scipy.linalg.svd`\n - 'truncated'\n Computes truncated svd using :func:`scipy.sparse.linalg.svds`\n\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or\n 'truncated'. The default is larger than the default in randomized_svd\n to handle sparse matrices that may have large slowly decaying spectrum.\n\n scaled : bool, optional (default=True)\n Whether to scale individual eigenvectors with eigenvalues in first embedding\n stage.\n\n diag_aug : bool, optional (default = True)\n Whether to replace the main diagonal of each adjacency matrices with\n a vector corresponding to the degree (or sum of edge weights for a\n weighted network) before embedding.\n\n concat : bool, optional (default False)\n If graph(s) are directed, whether to concatenate each graph's left and right (out and in) latent positions\n along axis 1.\n\n\n Attributes\n ----------\n n_graphs_ : int\n Number of graphs\n\n n_vertices_ : int\n Number of vertices in each graph\n\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph.\n\n latent_right_ : array, shape (n_samples, n_components), or None\n Estimated right latent positions of the graph. Only computed when the an input\n graph is directed, or adjacency matrix is assymetric. 
Otherwise, None.\n\n scores_ : array, shape (n_samples, n_components, n_components)\n Estimated :math:`\\hat{R}` matrices for each input graph.\n\n\n Notes\n -----\n When an input graph is directed, ``n_components`` of :attr:`latent_left_` may not be equal\n to ``n_components`` of :attr:`latent_right_`.\n \"\"\"\n\n def __init__(\n self,\n n_components=None,\n n_elbows=2,\n algorithm=\"randomized\",\n n_iter=5,\n scaled=True,\n diag_aug=True,\n concat=False,\n ):\n if not isinstance(scaled, bool):\n msg = \"scaled must be a boolean, not {}\".format(scaled)\n raise TypeError(msg)\n\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n diag_aug=diag_aug,\n concat=concat,\n )\n self.scaled = scaled\n\n def _reduce_dim(self, graphs):\n # first embed into log2(n_vertices) for each graph\n n_components = int(np.ceil(np.log2(np.min(self.n_vertices_))))\n\n # embed individual graphs\n embeddings = [\n selectSVD(\n graph,\n n_components=n_components,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n for graph in graphs\n ]\n Us, Ds, Vs = zip(*embeddings)\n\n # Choose the best embedding dimension for each graphs\n if self.n_components is None:\n embedding_dimensions = []\n for D in Ds:\n elbows, _ = select_dimension(D, n_elbows=self.n_elbows)\n embedding_dimensions.append(elbows[-1])\n\n # Choose the max of all of best embedding dimension of all graphs\n best_dimension = int(np.ceil(np.max(embedding_dimensions)))\n else:\n best_dimension = self.n_components\n\n if not self.scaled:\n Us = np.hstack([U[:, :best_dimension] for U in Us])\n Vs = np.hstack([V.T[:, :best_dimension] for V in Vs])\n else:\n # Equivalent to ASE\n Us = np.hstack(\n [\n U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))\n for U, D in zip(Us, Ds)\n ]\n )\n Vs = np.hstack(\n [\n V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))\n for V, D in zip(Vs, Ds)\n ]\n )\n\n # Second SVD for vertices\n # The notation is slightly different than the paper\n Uhat, _, _ = selectSVD(\n Us,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n\n Vhat, _, _ = selectSVD(\n Vs,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n return Uhat, Vhat\n\n def fit(self, graphs, y=None):\n \"\"\"\n Fit the model with graphs.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n graphs = self._check_input_graphs(graphs)\n\n # Check if undirected\n undirected = all(is_almost_symmetric(g) for g in graphs)\n\n # Diag augment\n if self.diag_aug:\n graphs = self._diag_aug(graphs)\n\n # embed\n Uhat, Vhat = self._reduce_dim(graphs)\n self.latent_left_ = Uhat\n if not undirected:\n self.latent_right_ = Vhat\n self.scores_ = Uhat.T @ graphs @ Vhat\n else:\n self.latent_right_ = None\n self.scores_ = Uhat.T @ graphs @ Uhat\n\n return self\n\n def fit_transform(self, graphs, y=None):\n \"\"\"\n Fit the model with graphs and apply the embedding on graphs.\n n_components is either automatically determined or based on user input.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph 
must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n Returns\n -------\n out : np.ndarray or length 2 tuple of np.ndarray.\n If input graphs were symmetric shape (n_vertices, n_components).\n If graphs were directed and ``concat`` is False, returns tuple of two arrays (same shape as above).\n The first corresponds to the left latent positions, and the second to the right latent positions.\n When ``concat`` is True left and right (out and in) latent positions are concatenated along axis 1.\n \"\"\"\n return self._fit_transform(graphs)\n", "path": "graspologic/embed/mase.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport numpy as np\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom ..utils import import_graph, is_almost_symmetric\nfrom .base import BaseEmbedMulti\nfrom .svd import select_dimension, selectSVD\n\n\nclass MultipleASE(BaseEmbedMulti):\n r\"\"\"\n Multiple Adjacency Spectral Embedding (MASE) embeds arbitrary number of input\n graphs with matched vertex sets.\n\n For a population of undirected graphs, MASE assumes that the population of graphs\n is sampled from :math:`VR^{(i)}V^T` where :math:`V \\in \\mathbb{R}^{n\\times d}` and\n :math:`R^{(i)} \\in \\mathbb{R}^{d\\times d}`. Score matrices, :math:`R^{(i)}`, are\n allowed to vary for each graph, but are symmetric. All graphs share a common a\n latent position matrix :math:`V`.\n\n For a population of directed graphs, MASE assumes that the population is sampled\n from :math:`UR^{(i)}V^T` where :math:`U \\in \\mathbb{R}^{n\\times d_1}`,\n :math:`V \\in \\mathbb{R}^{n\\times d_2}`, and\n :math:`R^{(i)} \\in \\mathbb{R}^{d_1\\times d_2}`. In this case, score matrices\n :math:`R^{(i)}` can be assymetric and non-square, but all graphs still share a\n common latent position matrices :math:`U` and :math:`V`.\n\n Parameters\n ----------\n n_components : int or None, default = None\n Desired dimensionality of output data. If \"full\",\n ``n_components`` must be ``<= min(X.shape)``. Otherwise, ``n_components`` must be\n ``< min(X.shape)``. If None, then optimal dimensions will be chosen by\n :func:`~graspologic.embed.select_dimension` using ``n_elbows`` argument.\n\n n_elbows : int, optional, default: 2\n If ``n_components`` is None, then compute the optimal embedding dimension using\n :func:`~graspologic.embed.select_dimension`. Otherwise, ignored.\n\n algorithm : {'randomized' (default), 'full', 'truncated'}, optional\n SVD solver to use:\n\n - 'randomized'\n Computes randomized svd using\n :func:`sklearn.utils.extmath.randomized_svd`\n - 'full'\n Computes full svd using :func:`scipy.linalg.svd`\n - 'truncated'\n Computes truncated svd using :func:`scipy.sparse.linalg.svds`\n\n n_iter : int, optional (default = 5)\n Number of iterations for randomized SVD solver. Not used by 'full' or\n 'truncated'. 
The default is larger than the default in randomized_svd\n to handle sparse matrices that may have large slowly decaying spectrum.\n\n scaled : bool, optional (default=True)\n Whether to scale individual eigenvectors with eigenvalues in first embedding\n stage.\n\n diag_aug : bool, optional (default = True)\n Whether to replace the main diagonal of each adjacency matrices with\n a vector corresponding to the degree (or sum of edge weights for a\n weighted network) before embedding.\n\n concat : bool, optional (default False)\n If graph(s) are directed, whether to concatenate each graph's left and right (out and in) latent positions\n along axis 1.\n\n\n Attributes\n ----------\n n_graphs_ : int\n Number of graphs\n\n n_vertices_ : int\n Number of vertices in each graph\n\n latent_left_ : array, shape (n_samples, n_components)\n Estimated left latent positions of the graph.\n\n latent_right_ : array, shape (n_samples, n_components), or None\n Estimated right latent positions of the graph. Only computed when the an input\n graph is directed, or adjacency matrix is assymetric. Otherwise, None.\n\n scores_ : array, shape (n_samples, n_components, n_components)\n Estimated :math:`\\hat{R}` matrices for each input graph.\n\n singular_values_ : array, shape (n_components) OR length 2 tuple of arrays\n If input graph is undirected, equal to the singular values of the concatenated\n adjacency spectral embeddings. If input graph is directed, :attr:`singular_values_`\n is a tuple of length 2, where :attr:`singular_values_[0]` corresponds to\n the singular values of the concatenated left adjacency spectral embeddings,\n and :attr:`singular_values_[1]` corresponds to\n the singular values of the concatenated right adjacency spectral embeddings.\n\n Notes\n -----\n When an input graph is directed, ``n_components`` of :attr:`latent_left_` may not be equal\n to ``n_components`` of :attr:`latent_right_`.\n \"\"\"\n\n def __init__(\n self,\n n_components=None,\n n_elbows=2,\n algorithm=\"randomized\",\n n_iter=5,\n scaled=True,\n diag_aug=True,\n concat=False,\n ):\n if not isinstance(scaled, bool):\n msg = \"scaled must be a boolean, not {}\".format(scaled)\n raise TypeError(msg)\n\n super().__init__(\n n_components=n_components,\n n_elbows=n_elbows,\n algorithm=algorithm,\n n_iter=n_iter,\n diag_aug=diag_aug,\n concat=concat,\n )\n self.scaled = scaled\n\n def _reduce_dim(self, graphs):\n # first embed into log2(n_vertices) for each graph\n n_components = int(np.ceil(np.log2(np.min(self.n_vertices_))))\n\n # embed individual graphs\n embeddings = [\n selectSVD(\n graph,\n n_components=n_components,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n for graph in graphs\n ]\n Us, Ds, Vs = zip(*embeddings)\n\n # Choose the best embedding dimension for each graphs\n if self.n_components is None:\n embedding_dimensions = []\n for D in Ds:\n elbows, _ = select_dimension(D, n_elbows=self.n_elbows)\n embedding_dimensions.append(elbows[-1])\n\n # Choose the max of all of best embedding dimension of all graphs\n best_dimension = int(np.ceil(np.max(embedding_dimensions)))\n else:\n best_dimension = self.n_components\n\n if not self.scaled:\n Us = np.hstack([U[:, :best_dimension] for U in Us])\n Vs = np.hstack([V.T[:, :best_dimension] for V in Vs])\n else:\n # Equivalent to ASE\n Us = np.hstack(\n [\n U[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))\n for U, D in zip(Us, Ds)\n ]\n )\n Vs = np.hstack(\n [\n V.T[:, :best_dimension] @ np.diag(np.sqrt(D[:best_dimension]))\n for V, D in zip(Vs, Ds)\n ]\n 
)\n\n # Second SVD for vertices\n # The notation is slightly different than the paper\n Uhat, sing_vals_left, _ = selectSVD(\n Us,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n\n Vhat, sing_vals_right, _ = selectSVD(\n Vs,\n n_components=self.n_components,\n n_elbows=self.n_elbows,\n algorithm=self.algorithm,\n n_iter=self.n_iter,\n )\n return Uhat, Vhat, sing_vals_left, sing_vals_right\n\n def fit(self, graphs, y=None):\n \"\"\"\n Fit the model with graphs.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n Returns\n -------\n self : object\n Returns an instance of self.\n \"\"\"\n graphs = self._check_input_graphs(graphs)\n\n # Check if undirected\n undirected = all(is_almost_symmetric(g) for g in graphs)\n\n # Diag augment\n if self.diag_aug:\n graphs = self._diag_aug(graphs)\n\n # embed\n Uhat, Vhat, sing_vals_left, sing_vals_right = self._reduce_dim(graphs)\n self.latent_left_ = Uhat\n if not undirected:\n self.latent_right_ = Vhat\n self.scores_ = Uhat.T @ graphs @ Vhat\n self.singular_values_ = (sing_vals_left, sing_vals_right)\n else:\n self.latent_right_ = None\n self.scores_ = Uhat.T @ graphs @ Uhat\n self.singular_values_ = sing_vals_left\n\n return self\n\n def fit_transform(self, graphs, y=None):\n \"\"\"\n Fit the model with graphs and apply the embedding on graphs.\n n_components is either automatically determined or based on user input.\n\n Parameters\n ----------\n graphs : list of nx.Graph or ndarray, or ndarray\n If list of nx.Graph, each Graph must contain same number of nodes.\n If list of ndarray, each array must have shape (n_vertices, n_vertices).\n If ndarray, then array must have shape (n_graphs, n_vertices, n_vertices).\n\n Returns\n -------\n out : np.ndarray or length 2 tuple of np.ndarray.\n If input graphs were symmetric shape (n_vertices, n_components).\n If graphs were directed and ``concat`` is False, returns tuple of two arrays (same shape as above).\n The first corresponds to the left latent positions, and the second to the right latent positions.\n When ``concat`` is True left and right (out and in) latent positions are concatenated along axis 1.\n \"\"\"\n return self._fit_transform(graphs)\n", "path": "graspologic/embed/mase.py"}]}
3,103
589
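A minimal NumPy sketch of the scaled concatenation step from the mase.py record above, assuming toy stand-ins for the per-graph SVD outputs (`Us`, `Ds`, and the dimension `d` are illustrative names, not the estimator's attributes):

```python
import numpy as np

# Toy per-graph SVD results: each U is (n_vertices, k), each D holds singular values.
rng = np.random.default_rng(0)
Us = [np.linalg.qr(rng.normal(size=(10, 3)))[0] for _ in range(2)]
Ds = [np.sort(rng.uniform(1.0, 5.0, size=3))[::-1] for _ in range(2)]

d = 2  # chosen embedding dimension

# Unscaled: keep the leading columns of each graph's U and stack graphs side by side.
unscaled = np.hstack([U[:, :d] for U in Us])

# Scaled (ASE-style): weight the columns by the square roots of the singular values first.
scaled = np.hstack([U[:, :d] @ np.diag(np.sqrt(D[:d])) for U, D in zip(Us, Ds)])

print(unscaled.shape, scaled.shape)  # (10, 4) (10, 4)
```

Either stack then feeds the second SVD that produces the joint vertex embedding.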
gh_patches_debug_19064
rasdani/github-patches
git_diff
pymedusa__Medusa-10565
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Series not found in show ids (Medusa does not download subtiltes) Medusa gives this warning for a few of my shows, for example How I Met Your Father: 2022-02-13 23:36:54 WARNING Thread_0 :: [918cfe7] Series How I Met Your Father not found in show ids It doesn't download subtitles for this show. I couldn't find anything related to this issue. Hope you can help. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `medusa/subtitle_providers/addic7ed.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Custom subliminal addic7ed.com subtitle provider module.""" 3 4 import hashlib 5 import logging 6 import re 7 8 from babelfish import Language 9 10 from guessit import guessit 11 12 from requests import Session 13 14 from subliminal.cache import SHOW_EXPIRATION_TIME, region 15 from subliminal.exceptions import ConfigurationError, DownloadLimitExceeded 16 from subliminal.matches import guess_matches 17 from subliminal.providers import ParserBeautifulSoup, Provider 18 from subliminal.subtitle import Subtitle, fix_line_ending 19 from subliminal.utils import sanitize 20 from subliminal.video import Episode 21 22 logger = logging.getLogger(__name__) 23 24 # language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter') 25 26 # Series cell matching regex 27 show_cells_re = re.compile(b'<td class="vr">.*?</td>', re.DOTALL) 28 29 #: Series header parsing regex 30 series_year_re = re.compile(r'^(?P<series>[ \w\'.:(),*&!?-]+?)(?: \((?P<year>\d{4})\))?$') 31 32 33 class Addic7edSubtitle(Subtitle): 34 """Addic7ed Subtitle.""" 35 36 provider_name = 'addic7ed' 37 38 def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version, 39 download_link): 40 super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link) 41 self.series = series 42 self.season = season 43 self.episode = episode 44 self.title = title 45 self.year = year 46 self.version = version 47 self.download_link = download_link 48 49 @property 50 def id(self): 51 """Get id.""" 52 return self.download_link 53 54 @property 55 def info(self): 56 """Get info.""" 57 return '{series}{yopen}{year}{yclose} s{season:02d}e{episode:02d}{topen}{title}{tclose}{version}'.format( 58 series=self.series, season=self.season, episode=self.episode, title=self.title, year=self.year or '', 59 version=self.version, yopen=' (' if self.year else '', yclose=')' if self.year else '', 60 topen=' - ' if self.title else '', tclose=' - ' if self.version else '' 61 ) 62 63 def get_matches(self, video): 64 """Get matches.""" 65 # series name 66 matches = guess_matches(video, { 67 'title': self.series, 68 'season': self.season, 69 'episode': self.episode, 70 'episode_title': self.title, 71 'year': self.year, 72 'release_group': self.version, 73 }) 74 75 # resolution 76 if video.resolution and self.version and video.resolution in self.version.lower(): 77 matches.add('resolution') 78 # other properties 79 if self.version: 80 matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True) 81 82 return matches 83 84 85 class Addic7edProvider(Provider): 86 """Addic7ed Provider.""" 87 88 languages = {Language('por', 'BR')} | {Language(l) for l in [ 89 'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 
'glg', 90 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus', 91 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho' 92 ]} 93 video_types = (Episode,) 94 server_url = 'http://www.addic7ed.com/' 95 subtitle_class = Addic7edSubtitle 96 97 def __init__(self, username=None, password=None): 98 if any((username, password)) and not all((username, password)): 99 raise ConfigurationError('Username and password must be specified') 100 101 self.username = username 102 self.password = hashlib.md5(password.encode('utf-8')).hexdigest() 103 self.logged_in = False 104 self.cookies = {'wikisubtitlesuser': self.username, 'wikisubtitlespass': self.password} 105 106 def initialize(self): 107 """Initialize Addic7edProvider provider.""" 108 self.session = Session() 109 self.session.headers['User-Agent'] = self.user_agent 110 111 # login 112 if self.username and self.password: 113 logger.debug('Logged in') 114 self.logged_in = True 115 116 def terminate(self): 117 """Terminate.""" 118 # logout 119 logger.debug('Logged out') 120 self.logged_in = False 121 122 @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) 123 def _get_show_ids(self): 124 """Get the ``dict`` of show ids per series by querying the `shows.php` page. 125 126 :return: show id per series, lower case and without quotes. 127 :rtype: dict 128 """ 129 # get the show page 130 logger.info('Getting show ids') 131 r = self.session.get(self.server_url + 'shows.php', timeout=20, cookies=self.cookies) 132 r.raise_for_status() 133 134 # LXML parser seems to fail when parsing Addic7ed.com HTML markup. 135 # Last known version to work properly is 3.6.4 (next version, 3.7.0, fails) 136 # Assuming the site's markup is bad, and stripping it down to only contain what's needed. 137 show_cells = re.findall(show_cells_re, r.content) 138 if show_cells: 139 soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser']) 140 else: 141 # If RegEx fails, fall back to original r.content and use 'html.parser' 142 soup = ParserBeautifulSoup(r.content, ['html.parser']) 143 144 # populate the show ids 145 show_ids = {} 146 for show in soup.select('td.vr > h3 > a[href^="/show/"]'): 147 show_ids[sanitize(show.text)] = int(show['href'][6:]) 148 logger.debug('Found %d show ids', len(show_ids)) 149 150 return show_ids 151 152 @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME) 153 def _search_show_id(self, series, year=None): 154 """Search the show id from the `series` and `year`. 155 156 :param str series: series of the episode. 157 :param year: year of the series, if any. 158 :type year: int 159 :return: the show id, if found. 
160 :rtype: int 161 """ 162 # addic7ed doesn't support search with quotes 163 series = series.replace("'", ' ') 164 165 # build the params 166 series_year = '%s %d' % (series, year) if year is not None else series 167 params = {'search': series_year, 'Submit': 'Search'} 168 169 r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=10, cookies=self.cookies) 170 171 # make the search 172 logger.info('Searching show ids with %r', params) 173 r.raise_for_status() 174 soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) 175 176 # get the suggestion 177 suggestion = soup.select('span.titulo > a[href^="/show/"]') 178 if not suggestion: 179 logger.warning('Show id not found: no suggestion') 180 return None 181 if not sanitize(suggestion[0].i.text.replace("'", ' ')) == sanitize(series_year): 182 logger.warning('Show id not found: suggestion does not match') 183 return None 184 show_id = int(suggestion[0]['href'][6:]) 185 logger.debug('Found show id %d', show_id) 186 187 return show_id 188 189 def get_show_id(self, series, year=None, country_code=None): 190 """Get the best matching show id for `series`, `year` and `country_code`. 191 192 First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`. 193 :param str series: series of the episode. 194 :param year: year of the series, if any. 195 :type year: int 196 :param country_code: country code of the series, if any. 197 :type country_code: str 198 :return: the show id, if found. 199 :rtype: int 200 """ 201 series_sanitized = sanitize(series).lower() 202 show_ids = self._get_show_ids() 203 show_id = None 204 205 # attempt with country 206 if country_code: 207 logger.debug('Getting show id with country') 208 show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower())) 209 210 # attempt with year 211 if not show_id and year: 212 logger.debug('Getting show id with year') 213 show_id = show_ids.get('%s %d' % (series_sanitized, year)) 214 215 # attempt clean 216 if not show_id: 217 logger.debug('Getting show id') 218 show_id = show_ids.get(series_sanitized) 219 220 # search as last resort 221 if not show_id: 222 logger.warning('Series %s not found in show ids', series) 223 show_id = self._search_show_id(series) 224 225 return show_id 226 227 def query(self, show_id, series, season, year=None, country=None): 228 """Query provider to get all subitles for a specific show + season.""" 229 # get the page of the season of the show 230 logger.info('Getting the page of show id %d, season %d', show_id, season) 231 r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=60, cookies=self.cookies) 232 r.raise_for_status() 233 234 if not r.content: 235 # Provider returns a status of 304 Not Modified with an empty content 236 # raise_for_status won't raise exception for that status code 237 logger.debug('No data returned from provider') 238 return [] 239 240 soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) 241 242 # loop over subtitle rows 243 match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10]) 244 series = match.group('series') 245 year = int(match.group('year')) if match.group('year') else None 246 subtitles = [] 247 for row in soup.select('tr.epeven'): 248 cells = row('td') 249 250 # ignore incomplete subtitles 251 status = cells[5].text 252 if status != 'Completed': 253 logger.debug('Ignoring subtitle with status %s', status) 254 continue 255 256 # read the item 257 language = 
Language.fromaddic7ed(cells[3].text) 258 hearing_impaired = bool(cells[6].text) 259 page_link = self.server_url + cells[2].a['href'][1:] 260 season = int(cells[0].text) 261 episode = int(cells[1].text) 262 title = cells[2].text 263 version = cells[4].text 264 download_link = cells[9].a['href'][1:] 265 266 subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year, 267 version, download_link) 268 logger.debug('Found subtitle %r', subtitle) 269 subtitles.append(subtitle) 270 271 return subtitles 272 273 def list_subtitles(self, video, languages): 274 """List Subitles.""" 275 # lookup show_id 276 titles = [video.series] + video.alternative_series 277 show_id = None 278 for title in titles: 279 show_id = self.get_show_id(title, video.year) 280 if show_id is not None: 281 break 282 283 # query for subtitles with the show_id 284 if show_id is not None: 285 subtitles = [s for s in self.query(show_id, title, video.season, video.year) 286 if s.language in languages and s.episode == video.episode] 287 if subtitles: 288 return subtitles 289 else: 290 logger.error('No show id found for %r (%r)', video.series, {'year': video.year}) 291 292 return [] 293 294 def download_subtitle(self, subtitle): 295 """Download subtitles.""" 296 # download the subtitle 297 logger.info('Downloading subtitle %r', subtitle) 298 r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link}, 299 timeout=20) 300 r.raise_for_status() 301 302 if not r.content: 303 # Provider returns a status of 304 Not Modified with an empty content 304 # raise_for_status won't raise exception for that status code 305 logger.debug('Unable to download subtitle. No data returned from provider') 306 return 307 308 # detect download limit exceeded 309 if r.headers['Content-Type'] == 'text/html': 310 raise DownloadLimitExceeded 311 312 subtitle.content = fix_line_ending(r.content) 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/medusa/subtitle_providers/addic7ed.py b/medusa/subtitle_providers/addic7ed.py --- a/medusa/subtitle_providers/addic7ed.py +++ b/medusa/subtitle_providers/addic7ed.py @@ -166,7 +166,7 @@ series_year = '%s %d' % (series, year) if year is not None else series params = {'search': series_year, 'Submit': 'Search'} - r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=10, cookies=self.cookies) + r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=30, cookies=self.cookies) # make the search logger.info('Searching show ids with %r', params) @@ -219,7 +219,7 @@ # search as last resort if not show_id: - logger.warning('Series %s not found in show ids', series) + logger.info('Series %s not found in show ids', series) show_id = self._search_show_id(series) return show_id
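The patch above only touches the provider's miss-handling path: a miss in the cached show-id table is now logged at info rather than warning level, and the fallback search request gets a longer timeout. A stripped-down sketch of that lookup-then-search shape (simplified signature and toy data, not the actual Medusa code):

```python
import logging

logger = logging.getLogger(__name__)


def get_show_id(series, cached_ids, search_fn):
    """Look the series up in a cached id table, falling back to a live search."""
    show_id = cached_ids.get(series.lower())
    if show_id is None:
        # Missing entries are expected for new shows, so this is informational,
        # not a warning -- the log-level change made by the diff above.
        logger.info("Series %s not found in show ids", series)
        show_id = search_fn(series)  # network search; benefits from the longer timeout
    return show_id


# Toy usage: the cache only knows the older show, so the search fallback is used.
ids = {"how i met your mother": 1234}
print(get_show_id("How I Met Your Father", ids, search_fn=lambda s: 5678))  # 5678
```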
{"golden_diff": "diff --git a/medusa/subtitle_providers/addic7ed.py b/medusa/subtitle_providers/addic7ed.py\n--- a/medusa/subtitle_providers/addic7ed.py\n+++ b/medusa/subtitle_providers/addic7ed.py\n@@ -166,7 +166,7 @@\n series_year = '%s %d' % (series, year) if year is not None else series\n params = {'search': series_year, 'Submit': 'Search'}\n \n- r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=10, cookies=self.cookies)\n+ r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=30, cookies=self.cookies)\n \n # make the search\n logger.info('Searching show ids with %r', params)\n@@ -219,7 +219,7 @@\n \n # search as last resort\n if not show_id:\n- logger.warning('Series %s not found in show ids', series)\n+ logger.info('Series %s not found in show ids', series)\n show_id = self._search_show_id(series)\n \n return show_id\n", "issue": "Series not found in show ids (Medusa does not download subtiltes)\nMedusa gives this warning for a few of my shows, for example How I Met Your Father:\r\n2022-02-13 23:36:54 WARNING Thread_0 :: [918cfe7] Series How I Met Your Father not found in show ids\r\n\r\nIt doesn't download subtitles for this show. I couldn't find anything related to this issue. Hope you can help.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Custom subliminal addic7ed.com subtitle provider module.\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom babelfish import Language\n\nfrom guessit import guessit\n\nfrom requests import Session\n\nfrom subliminal.cache import SHOW_EXPIRATION_TIME, region\nfrom subliminal.exceptions import ConfigurationError, DownloadLimitExceeded\nfrom subliminal.matches import guess_matches\nfrom subliminal.providers import ParserBeautifulSoup, Provider\nfrom subliminal.subtitle import Subtitle, fix_line_ending\nfrom subliminal.utils import sanitize\nfrom subliminal.video import Episode\n\nlogger = logging.getLogger(__name__)\n\n# language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')\n\n# Series cell matching regex\nshow_cells_re = re.compile(b'<td class=\"vr\">.*?</td>', re.DOTALL)\n\n#: Series header parsing regex\nseries_year_re = re.compile(r'^(?P<series>[ \\w\\'.:(),*&!?-]+?)(?: \\((?P<year>\\d{4})\\))?$')\n\n\nclass Addic7edSubtitle(Subtitle):\n \"\"\"Addic7ed Subtitle.\"\"\"\n\n provider_name = 'addic7ed'\n\n def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,\n download_link):\n super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)\n self.series = series\n self.season = season\n self.episode = episode\n self.title = title\n self.year = year\n self.version = version\n self.download_link = download_link\n\n @property\n def id(self):\n \"\"\"Get id.\"\"\"\n return self.download_link\n\n @property\n def info(self):\n \"\"\"Get info.\"\"\"\n return '{series}{yopen}{year}{yclose} s{season:02d}e{episode:02d}{topen}{title}{tclose}{version}'.format(\n series=self.series, season=self.season, episode=self.episode, title=self.title, year=self.year or '',\n version=self.version, yopen=' (' if self.year else '', yclose=')' if self.year else '',\n topen=' - ' if self.title else '', tclose=' - ' if self.version else ''\n )\n\n def get_matches(self, video):\n \"\"\"Get matches.\"\"\"\n # series name\n matches = guess_matches(video, {\n 'title': self.series,\n 'season': self.season,\n 'episode': self.episode,\n 'episode_title': self.title,\n 
'year': self.year,\n 'release_group': self.version,\n })\n\n # resolution\n if video.resolution and self.version and video.resolution in self.version.lower():\n matches.add('resolution')\n # other properties\n if self.version:\n matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True)\n\n return matches\n\n\nclass Addic7edProvider(Provider):\n \"\"\"Addic7ed Provider.\"\"\"\n\n languages = {Language('por', 'BR')} | {Language(l) for l in [\n 'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',\n 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',\n 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'\n ]}\n video_types = (Episode,)\n server_url = 'http://www.addic7ed.com/'\n subtitle_class = Addic7edSubtitle\n\n def __init__(self, username=None, password=None):\n if any((username, password)) and not all((username, password)):\n raise ConfigurationError('Username and password must be specified')\n\n self.username = username\n self.password = hashlib.md5(password.encode('utf-8')).hexdigest()\n self.logged_in = False\n self.cookies = {'wikisubtitlesuser': self.username, 'wikisubtitlespass': self.password}\n\n def initialize(self):\n \"\"\"Initialize Addic7edProvider provider.\"\"\"\n self.session = Session()\n self.session.headers['User-Agent'] = self.user_agent\n\n # login\n if self.username and self.password:\n logger.debug('Logged in')\n self.logged_in = True\n\n def terminate(self):\n \"\"\"Terminate.\"\"\"\n # logout\n logger.debug('Logged out')\n self.logged_in = False\n\n @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)\n def _get_show_ids(self):\n \"\"\"Get the ``dict`` of show ids per series by querying the `shows.php` page.\n\n :return: show id per series, lower case and without quotes.\n :rtype: dict\n \"\"\"\n # get the show page\n logger.info('Getting show ids')\n r = self.session.get(self.server_url + 'shows.php', timeout=20, cookies=self.cookies)\n r.raise_for_status()\n\n # LXML parser seems to fail when parsing Addic7ed.com HTML markup.\n # Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)\n # Assuming the site's markup is bad, and stripping it down to only contain what's needed.\n show_cells = re.findall(show_cells_re, r.content)\n if show_cells:\n soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])\n else:\n # If RegEx fails, fall back to original r.content and use 'html.parser'\n soup = ParserBeautifulSoup(r.content, ['html.parser'])\n\n # populate the show ids\n show_ids = {}\n for show in soup.select('td.vr > h3 > a[href^=\"/show/\"]'):\n show_ids[sanitize(show.text)] = int(show['href'][6:])\n logger.debug('Found %d show ids', len(show_ids))\n\n return show_ids\n\n @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)\n def _search_show_id(self, series, year=None):\n \"\"\"Search the show id from the `series` and `year`.\n\n :param str series: series of the episode.\n :param year: year of the series, if any.\n :type year: int\n :return: the show id, if found.\n :rtype: int\n \"\"\"\n # addic7ed doesn't support search with quotes\n series = series.replace(\"'\", ' ')\n\n # build the params\n series_year = '%s %d' % (series, year) if year is not None else series\n params = {'search': series_year, 'Submit': 'Search'}\n\n r = self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=10, cookies=self.cookies)\n\n # 
make the search\n logger.info('Searching show ids with %r', params)\n r.raise_for_status()\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n\n # get the suggestion\n suggestion = soup.select('span.titulo > a[href^=\"/show/\"]')\n if not suggestion:\n logger.warning('Show id not found: no suggestion')\n return None\n if not sanitize(suggestion[0].i.text.replace(\"'\", ' ')) == sanitize(series_year):\n logger.warning('Show id not found: suggestion does not match')\n return None\n show_id = int(suggestion[0]['href'][6:])\n logger.debug('Found show id %d', show_id)\n\n return show_id\n\n def get_show_id(self, series, year=None, country_code=None):\n \"\"\"Get the best matching show id for `series`, `year` and `country_code`.\n\n First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.\n :param str series: series of the episode.\n :param year: year of the series, if any.\n :type year: int\n :param country_code: country code of the series, if any.\n :type country_code: str\n :return: the show id, if found.\n :rtype: int\n \"\"\"\n series_sanitized = sanitize(series).lower()\n show_ids = self._get_show_ids()\n show_id = None\n\n # attempt with country\n if country_code:\n logger.debug('Getting show id with country')\n show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))\n\n # attempt with year\n if not show_id and year:\n logger.debug('Getting show id with year')\n show_id = show_ids.get('%s %d' % (series_sanitized, year))\n\n # attempt clean\n if not show_id:\n logger.debug('Getting show id')\n show_id = show_ids.get(series_sanitized)\n\n # search as last resort\n if not show_id:\n logger.warning('Series %s not found in show ids', series)\n show_id = self._search_show_id(series)\n\n return show_id\n\n def query(self, show_id, series, season, year=None, country=None):\n \"\"\"Query provider to get all subitles for a specific show + season.\"\"\"\n # get the page of the season of the show\n logger.info('Getting the page of show id %d, season %d', show_id, season)\n r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=60, cookies=self.cookies)\n r.raise_for_status()\n\n if not r.content:\n # Provider returns a status of 304 Not Modified with an empty content\n # raise_for_status won't raise exception for that status code\n logger.debug('No data returned from provider')\n return []\n\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n\n # loop over subtitle rows\n match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])\n series = match.group('series')\n year = int(match.group('year')) if match.group('year') else None\n subtitles = []\n for row in soup.select('tr.epeven'):\n cells = row('td')\n\n # ignore incomplete subtitles\n status = cells[5].text\n if status != 'Completed':\n logger.debug('Ignoring subtitle with status %s', status)\n continue\n\n # read the item\n language = Language.fromaddic7ed(cells[3].text)\n hearing_impaired = bool(cells[6].text)\n page_link = self.server_url + cells[2].a['href'][1:]\n season = int(cells[0].text)\n episode = int(cells[1].text)\n title = cells[2].text\n version = cells[4].text\n download_link = cells[9].a['href'][1:]\n\n subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,\n version, download_link)\n logger.debug('Found subtitle %r', subtitle)\n subtitles.append(subtitle)\n\n return subtitles\n\n def list_subtitles(self, video, 
languages):\n \"\"\"List Subitles.\"\"\"\n # lookup show_id\n titles = [video.series] + video.alternative_series\n show_id = None\n for title in titles:\n show_id = self.get_show_id(title, video.year)\n if show_id is not None:\n break\n\n # query for subtitles with the show_id\n if show_id is not None:\n subtitles = [s for s in self.query(show_id, title, video.season, video.year)\n if s.language in languages and s.episode == video.episode]\n if subtitles:\n return subtitles\n else:\n logger.error('No show id found for %r (%r)', video.series, {'year': video.year})\n\n return []\n\n def download_subtitle(self, subtitle):\n \"\"\"Download subtitles.\"\"\"\n # download the subtitle\n logger.info('Downloading subtitle %r', subtitle)\n r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},\n timeout=20)\n r.raise_for_status()\n\n if not r.content:\n # Provider returns a status of 304 Not Modified with an empty content\n # raise_for_status won't raise exception for that status code\n logger.debug('Unable to download subtitle. No data returned from provider')\n return\n\n # detect download limit exceeded\n if r.headers['Content-Type'] == 'text/html':\n raise DownloadLimitExceeded\n\n subtitle.content = fix_line_ending(r.content)\n", "path": "medusa/subtitle_providers/addic7ed.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Custom subliminal addic7ed.com subtitle provider module.\"\"\"\n\nimport hashlib\nimport logging\nimport re\n\nfrom babelfish import Language\n\nfrom guessit import guessit\n\nfrom requests import Session\n\nfrom subliminal.cache import SHOW_EXPIRATION_TIME, region\nfrom subliminal.exceptions import ConfigurationError, DownloadLimitExceeded\nfrom subliminal.matches import guess_matches\nfrom subliminal.providers import ParserBeautifulSoup, Provider\nfrom subliminal.subtitle import Subtitle, fix_line_ending\nfrom subliminal.utils import sanitize\nfrom subliminal.video import Episode\n\nlogger = logging.getLogger(__name__)\n\n# language_converters.register('addic7ed = subliminal.converters.addic7ed:Addic7edConverter')\n\n# Series cell matching regex\nshow_cells_re = re.compile(b'<td class=\"vr\">.*?</td>', re.DOTALL)\n\n#: Series header parsing regex\nseries_year_re = re.compile(r'^(?P<series>[ \\w\\'.:(),*&!?-]+?)(?: \\((?P<year>\\d{4})\\))?$')\n\n\nclass Addic7edSubtitle(Subtitle):\n \"\"\"Addic7ed Subtitle.\"\"\"\n\n provider_name = 'addic7ed'\n\n def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,\n download_link):\n super(Addic7edSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link)\n self.series = series\n self.season = season\n self.episode = episode\n self.title = title\n self.year = year\n self.version = version\n self.download_link = download_link\n\n @property\n def id(self):\n \"\"\"Get id.\"\"\"\n return self.download_link\n\n @property\n def info(self):\n \"\"\"Get info.\"\"\"\n return '{series}{yopen}{year}{yclose} s{season:02d}e{episode:02d}{topen}{title}{tclose}{version}'.format(\n series=self.series, season=self.season, episode=self.episode, title=self.title, year=self.year or '',\n version=self.version, yopen=' (' if self.year else '', yclose=')' if self.year else '',\n topen=' - ' if self.title else '', tclose=' - ' if self.version else ''\n )\n\n def get_matches(self, video):\n \"\"\"Get matches.\"\"\"\n # series name\n matches = guess_matches(video, {\n 'title': self.series,\n 'season': self.season,\n 
'episode': self.episode,\n 'episode_title': self.title,\n 'year': self.year,\n 'release_group': self.version,\n })\n\n # resolution\n if video.resolution and self.version and video.resolution in self.version.lower():\n matches.add('resolution')\n # other properties\n if self.version:\n matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True)\n\n return matches\n\n\nclass Addic7edProvider(Provider):\n \"\"\"Addic7ed Provider.\"\"\"\n\n languages = {Language('por', 'BR')} | {Language(l) for l in [\n 'ara', 'aze', 'ben', 'bos', 'bul', 'cat', 'ces', 'dan', 'deu', 'ell', 'eng', 'eus', 'fas', 'fin', 'fra', 'glg',\n 'heb', 'hrv', 'hun', 'hye', 'ind', 'ita', 'jpn', 'kor', 'mkd', 'msa', 'nld', 'nor', 'pol', 'por', 'ron', 'rus',\n 'slk', 'slv', 'spa', 'sqi', 'srp', 'swe', 'tha', 'tur', 'ukr', 'vie', 'zho'\n ]}\n video_types = (Episode,)\n server_url = 'http://www.addic7ed.com/'\n subtitle_class = Addic7edSubtitle\n\n def __init__(self, username=None, password=None):\n if any((username, password)) and not all((username, password)):\n raise ConfigurationError('Username and password must be specified')\n\n self.username = username\n self.password = hashlib.md5(password.encode('utf-8')).hexdigest()\n self.logged_in = False\n self.cookies = {'wikisubtitlesuser': self.username, 'wikisubtitlespass': self.password}\n\n def initialize(self):\n \"\"\"Initialize Addic7edProvider provider.\"\"\"\n self.session = Session()\n self.session.headers['User-Agent'] = self.user_agent\n\n # login\n if self.username and self.password:\n logger.debug('Logged in')\n self.logged_in = True\n\n def terminate(self):\n \"\"\"Terminate.\"\"\"\n # logout\n logger.debug('Logged out')\n self.logged_in = False\n\n @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)\n def _get_show_ids(self):\n \"\"\"Get the ``dict`` of show ids per series by querying the `shows.php` page.\n\n :return: show id per series, lower case and without quotes.\n :rtype: dict\n \"\"\"\n # get the show page\n logger.info('Getting show ids')\n r = self.session.get(self.server_url + 'shows.php', timeout=20, cookies=self.cookies)\n r.raise_for_status()\n\n # LXML parser seems to fail when parsing Addic7ed.com HTML markup.\n # Last known version to work properly is 3.6.4 (next version, 3.7.0, fails)\n # Assuming the site's markup is bad, and stripping it down to only contain what's needed.\n show_cells = re.findall(show_cells_re, r.content)\n if show_cells:\n soup = ParserBeautifulSoup(b''.join(show_cells), ['lxml', 'html.parser'])\n else:\n # If RegEx fails, fall back to original r.content and use 'html.parser'\n soup = ParserBeautifulSoup(r.content, ['html.parser'])\n\n # populate the show ids\n show_ids = {}\n for show in soup.select('td.vr > h3 > a[href^=\"/show/\"]'):\n show_ids[sanitize(show.text)] = int(show['href'][6:])\n logger.debug('Found %d show ids', len(show_ids))\n\n return show_ids\n\n @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)\n def _search_show_id(self, series, year=None):\n \"\"\"Search the show id from the `series` and `year`.\n\n :param str series: series of the episode.\n :param year: year of the series, if any.\n :type year: int\n :return: the show id, if found.\n :rtype: int\n \"\"\"\n # addic7ed doesn't support search with quotes\n series = series.replace(\"'\", ' ')\n\n # build the params\n series_year = '%s %d' % (series, year) if year is not None else series\n params = {'search': series_year, 'Submit': 'Search'}\n\n r = 
self.session.get('http://www.addic7ed.com/srch.php', params=params, timeout=30, cookies=self.cookies)\n\n # make the search\n logger.info('Searching show ids with %r', params)\n r.raise_for_status()\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n\n # get the suggestion\n suggestion = soup.select('span.titulo > a[href^=\"/show/\"]')\n if not suggestion:\n logger.warning('Show id not found: no suggestion')\n return None\n if not sanitize(suggestion[0].i.text.replace(\"'\", ' ')) == sanitize(series_year):\n logger.warning('Show id not found: suggestion does not match')\n return None\n show_id = int(suggestion[0]['href'][6:])\n logger.debug('Found show id %d', show_id)\n\n return show_id\n\n def get_show_id(self, series, year=None, country_code=None):\n \"\"\"Get the best matching show id for `series`, `year` and `country_code`.\n\n First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.\n :param str series: series of the episode.\n :param year: year of the series, if any.\n :type year: int\n :param country_code: country code of the series, if any.\n :type country_code: str\n :return: the show id, if found.\n :rtype: int\n \"\"\"\n series_sanitized = sanitize(series).lower()\n show_ids = self._get_show_ids()\n show_id = None\n\n # attempt with country\n if country_code:\n logger.debug('Getting show id with country')\n show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))\n\n # attempt with year\n if not show_id and year:\n logger.debug('Getting show id with year')\n show_id = show_ids.get('%s %d' % (series_sanitized, year))\n\n # attempt clean\n if not show_id:\n logger.debug('Getting show id')\n show_id = show_ids.get(series_sanitized)\n\n # search as last resort\n if not show_id:\n logger.info('Series %s not found in show ids', series)\n show_id = self._search_show_id(series)\n\n return show_id\n\n def query(self, show_id, series, season, year=None, country=None):\n \"\"\"Query provider to get all subitles for a specific show + season.\"\"\"\n # get the page of the season of the show\n logger.info('Getting the page of show id %d, season %d', show_id, season)\n r = self.session.get(self.server_url + 'show/%d' % show_id, params={'season': season}, timeout=60, cookies=self.cookies)\n r.raise_for_status()\n\n if not r.content:\n # Provider returns a status of 304 Not Modified with an empty content\n # raise_for_status won't raise exception for that status code\n logger.debug('No data returned from provider')\n return []\n\n soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])\n\n # loop over subtitle rows\n match = series_year_re.match(soup.select('#header font')[0].text.strip()[:-10])\n series = match.group('series')\n year = int(match.group('year')) if match.group('year') else None\n subtitles = []\n for row in soup.select('tr.epeven'):\n cells = row('td')\n\n # ignore incomplete subtitles\n status = cells[5].text\n if status != 'Completed':\n logger.debug('Ignoring subtitle with status %s', status)\n continue\n\n # read the item\n language = Language.fromaddic7ed(cells[3].text)\n hearing_impaired = bool(cells[6].text)\n page_link = self.server_url + cells[2].a['href'][1:]\n season = int(cells[0].text)\n episode = int(cells[1].text)\n title = cells[2].text\n version = cells[4].text\n download_link = cells[9].a['href'][1:]\n\n subtitle = self.subtitle_class(language, hearing_impaired, page_link, series, season, episode, title, year,\n version, download_link)\n logger.debug('Found subtitle %r', 
subtitle)\n subtitles.append(subtitle)\n\n return subtitles\n\n def list_subtitles(self, video, languages):\n \"\"\"List Subitles.\"\"\"\n # lookup show_id\n titles = [video.series] + video.alternative_series\n show_id = None\n for title in titles:\n show_id = self.get_show_id(title, video.year)\n if show_id is not None:\n break\n\n # query for subtitles with the show_id\n if show_id is not None:\n subtitles = [s for s in self.query(show_id, title, video.season, video.year)\n if s.language in languages and s.episode == video.episode]\n if subtitles:\n return subtitles\n else:\n logger.error('No show id found for %r (%r)', video.series, {'year': video.year})\n\n return []\n\n def download_subtitle(self, subtitle):\n \"\"\"Download subtitles.\"\"\"\n # download the subtitle\n logger.info('Downloading subtitle %r', subtitle)\n r = self.session.get(self.server_url + subtitle.download_link, headers={'Referer': subtitle.page_link},\n timeout=20)\n r.raise_for_status()\n\n if not r.content:\n # Provider returns a status of 304 Not Modified with an empty content\n # raise_for_status won't raise exception for that status code\n logger.debug('Unable to download subtitle. No data returned from provider')\n return\n\n # detect download limit exceeded\n if r.headers['Content-Type'] == 'text/html':\n raise DownloadLimitExceeded\n\n subtitle.content = fix_line_ending(r.content)\n", "path": "medusa/subtitle_providers/addic7ed.py"}]}
4,079
274
gh_patches_debug_423
rasdani/github-patches
git_diff
napalm-automation__napalm-1681
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IOS-XR get_lldp_neighbor_details does not support 'N/A' capabilities ### Description of Issue/Question The constants LLDP_CAPAB_TRANFORM_TABLE does not include 'N/A' of system capabilities. No checks for this value either. ``` <SystemCapabilities> N/A </SystemCapabilities> ``` *Note*: Please check https://guides.github.com/features/mastering-markdown/ to see how to properly format your request. ### Did you follow the steps from https://github.com/napalm-automation/napalm#faq (Place an ``x`` between the square brackets where applicable) - [x] Yes - [] No ### Setup ### napalm version (Paste verbatim output from `pip freeze | grep napalm` between quotes below) ``` root@salt-master1:/# python3 -m pip freeze | grep napalm napalm==3.2.0 ``` ### Network operating system version (Paste verbatim output from `show version` - or equivalent - between quotes below) ``` RP/0/RP0/CPU0:hbr1.12345.999#show version Wed Sep 23 18:41:02.500 CEST Cisco IOS XR Software, Version 7.1.2 LNT Copyright (c) 2013-2020 by Cisco Systems, Inc. Build Information: Built By : ahoang Built On : Sat Aug 29 20:04:27 UTC 2020 Build Host : iox-lnx-069 Workspace : /auto/srcarchive13/prod/7.1.2/ncs540l/ws Version : 7.1.2 Label : 7.1.2 cisco NCS540L (C3708 @ 1.70GHz) System uptime is 1 week, 1 day, 10 hours, 2 minutes ``` ### Steps to Reproduce the Issue ### Error Traceback (Paste the complete traceback of the exception between quotes below) ``` ================= Traceback ================= Traceback (most recent call last): File "/usr/local/bin/napalm", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 308, in main run_tests(args) File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 291, in run_tests call_getter(device, args.method, **method_kwargs) File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 27, in wrapper r = func(*args, **kwargs) File "/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py", line 255, in call_getter r = func(**kwargs) File "/usr/local/lib/python3.8/dist-packages/napalm/iosxr/iosxr.py", line 852, in get_lldp_neighbors_detail "remote_system_capab": napalm.base.helpers.transform_lldp_capab( File "/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py", line 463, in transform_lldp_capab [constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities] File "/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py", line 463, in <listcomp> [constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities] KeyError: 'n/a' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napalm/base/constants.py` Content: ``` 1 """Constants to be used across NAPALM drivers.""" 2 3 CONFIG_LOCK = True # must be changed soon! 
4 TIMEOUT = 60 # seconds 5 6 INTERFACE_NULL_SPEED = -1 7 8 ACTION_TYPE_METHODS = ("ping", "traceroute") 9 10 BGP_NEIGHBOR_NULL_COUNTER = -1 11 12 SNMP_AUTHORIZATION_MODE_MAP = {"read-only": "ro", "read-write": "rw"} 13 14 ROUTE_COMMON_PROTOCOL_FIELDS = [ 15 "destination", 16 "prefix_length", 17 "protocol", 18 "current_active", 19 "last_active", 20 "age", 21 "next_hop", 22 "outgoing_interface", 23 "selected_next_hop", 24 "preference", 25 "inactive_reason", 26 "routing_table", 27 ] # identifies the list of fileds common for all protocols 28 29 ROUTE_PROTOCOL_SPECIFIC_FIELDS = { 30 "bgp": [ 31 "local_as", 32 "remote_as", 33 "as_path", 34 "communities", 35 "local_preference", 36 "preference2", 37 "remote_address", 38 "metric", 39 "metric2", 40 ], 41 "isis": ["level", "metric", "local_as"], 42 "static": [], # nothing specific to static routes 43 } 44 45 TRACEROUTE_TTL = 255 46 TRACEROUTE_SOURCE = "" 47 TRACEROUTE_TIMEOUT = 2 48 TRACEROUTE_NULL_HOST_NAME = "*" 49 TRACEROUTE_NULL_IP_ADDRESS = "*" 50 TRACEROUTE_VRF = "" 51 52 OPTICS_NULL_LEVEL = "-Inf" 53 54 PING_SOURCE = "" 55 PING_TTL = 255 56 PING_TIMEOUT = 2 57 PING_SIZE = 100 58 PING_COUNT = 5 59 PING_VRF = "" 60 PING_SOURCE_INTERFACE = "" 61 62 NETMIKO_MAP = { 63 "ios": "cisco_ios", 64 "nxos": "cisco_nxos", 65 "nxos_ssh": "cisco_nxos", 66 "iosxr": "cisco_iosxr", 67 "eos": "arista_eos", 68 "junos": "juniper_junos", 69 } 70 LLDP_CAPAB_TRANFORM_TABLE = { 71 "o": "other", 72 "p": "repeater", 73 "b": "bridge", 74 "w": "wlan-access-point", 75 "r": "router", 76 "t": "telephone", 77 "c": "docsis-cable-device", 78 "s": "station", 79 } 80 81 CISCO_SANITIZE_FILTERS = { 82 r"^(snmp-server community).*$": r"\1 <removed>", 83 r"^(snmp-server host \S+( vrf \S+)?( version (1|2c|3))?)\s+\S+((\s+\S*)*)\s*$": r"\1 <removed> \5", # noqa 84 r"^(snmp-server user \S+( \S+)? auth md5) \S+ (priv) \S+ (localizedkey( engineID \S+)?)\s*$": r"\1 <removed> \3 <removed> \4\5", # noqa 85 r"^(username .+ (password|secret) \d) .+$": r"\1 <removed>", 86 r"^(enable (password|secret)( level \d+)? \d) .+$": r"\1 <removed>", 87 r"^(\s+(?:password|secret)) (?:\d{1,2} )?\S+$": r"\1 <removed>", 88 r"^(.*wpa-psk ascii \d) (\S+)$": r"\1 <removed>", 89 r"^(.*key 7) (\d.+)$": r"\1 <removed>", 90 r"^(tacacs-server (.+ )?key) .+$": r"\1 <removed>", 91 r"^(crypto isakmp key) (\S+) (.*)$": r"\1 <removed> \3", 92 r"^(\s+ip ospf message-digest-key \d+ md5) .+$": r"\1 <removed>", 93 r"^(\s+ip ospf authentication-key) .+$": r"\1 <removed>", 94 r"^(\s+neighbor \S+ password) .+$": r"\1 <removed>", 95 r"^(\s+vrrp \d+ authentication text) .+$": r"\1 <removed>", 96 r"^(\s+standby \d+ authentication) .{1,8}$": r"\1 <removed>", 97 r"^(\s+standby \d+ authentication md5 key-string) .+?( timeout \d+)?$": r"\1 <removed> \2", 98 r"^(\s+key-string) .+$": r"\1 <removed>", 99 r"^((tacacs|radius) server [^\n]+\n(\s+[^\n]+\n)*\s+key) [^\n]+$": r"\1 <removed>", 100 r"^(\s+ppp (chap|pap) password \d) .+$": r"\1 <removed>", 101 } 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napalm/base/constants.py b/napalm/base/constants.py --- a/napalm/base/constants.py +++ b/napalm/base/constants.py @@ -69,6 +69,7 @@ } LLDP_CAPAB_TRANFORM_TABLE = { "o": "other", + "n/a": "other", "p": "repeater", "b": "bridge", "w": "wlan-access-point",
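With the extra `"n/a"` key in place, an `N/A` capability maps to `other` instead of raising the `KeyError: 'n/a'` shown in the traceback. A self-contained check of that mapping (the table is copied from the patched constants; the strip/lower normalisation here is an assumption standing in for whatever the napalm helper does before the lookup):

```python
LLDP_CAPAB_TRANFORM_TABLE = {
    "o": "other",
    "n/a": "other",  # key added by the patch above
    "p": "repeater",
    "b": "bridge",
    "w": "wlan-access-point",
    "r": "router",
    "t": "telephone",
    "c": "docsis-cable-device",
    "s": "station",
}


def transform_capabilities(capabilities):
    # Same lookup as the comprehension in the traceback, with explicit normalisation.
    return [LLDP_CAPAB_TRANFORM_TABLE[c.strip().lower()] for c in capabilities]


print(transform_capabilities([" N/A "]))   # ['other'] -- previously KeyError: 'n/a'
print(transform_capabilities(["r", "b"]))  # ['router', 'bridge']
```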
{"golden_diff": "diff --git a/napalm/base/constants.py b/napalm/base/constants.py\n--- a/napalm/base/constants.py\n+++ b/napalm/base/constants.py\n@@ -69,6 +69,7 @@\n }\n LLDP_CAPAB_TRANFORM_TABLE = {\n \"o\": \"other\",\n+ \"n/a\": \"other\",\n \"p\": \"repeater\",\n \"b\": \"bridge\",\n \"w\": \"wlan-access-point\",\n", "issue": "IOS-XR get_lldp_neighbor_details does not support 'N/A' capabilities\n### Description of Issue/Question\r\n\r\nThe constants LLDP_CAPAB_TRANFORM_TABLE does not include 'N/A' of system capabilities. No checks for this value either.\r\n```\r\n<SystemCapabilities>\r\n N/A\r\n</SystemCapabilities>\r\n```\r\n\r\n\r\n*Note*: Please check https://guides.github.com/features/mastering-markdown/\r\nto see how to properly format your request.\r\n\r\n### Did you follow the steps from https://github.com/napalm-automation/napalm#faq\r\n(Place an ``x`` between the square brackets where applicable)\r\n\r\n- [x] Yes\r\n- [] No\r\n\r\n\r\n### Setup\r\n\r\n### napalm version\r\n(Paste verbatim output from `pip freeze | grep napalm` between quotes below)\r\n\r\n```\r\nroot@salt-master1:/# python3 -m pip freeze | grep napalm\r\nnapalm==3.2.0\r\n```\r\n\r\n### Network operating system version\r\n(Paste verbatim output from `show version` - or equivalent - between quotes below)\r\n\r\n```\r\nRP/0/RP0/CPU0:hbr1.12345.999#show version\r\nWed Sep 23 18:41:02.500 CEST\r\nCisco IOS XR Software, Version 7.1.2 LNT\r\nCopyright (c) 2013-2020 by Cisco Systems, Inc.\r\n\r\nBuild Information:\r\n Built By : ahoang\r\n Built On : Sat Aug 29 20:04:27 UTC 2020\r\n Build Host : iox-lnx-069\r\n Workspace : /auto/srcarchive13/prod/7.1.2/ncs540l/ws\r\n Version : 7.1.2\r\n Label : 7.1.2\r\n\r\ncisco NCS540L (C3708 @ 1.70GHz)\r\nSystem uptime is 1 week, 1 day, 10 hours, 2 minutes\r\n\r\n```\r\n\r\n### Steps to Reproduce the Issue\r\n\r\n### Error Traceback\r\n(Paste the complete traceback of the exception between quotes below)\r\n\r\n```\r\n================= Traceback =================\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/napalm\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py\", line 308, in main\r\n run_tests(args)\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py\", line 291, in run_tests\r\n call_getter(device, args.method, **method_kwargs)\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py\", line 27, in wrapper\r\n r = func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/clitools/cl_napalm.py\", line 255, in call_getter\r\n r = func(**kwargs)\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/iosxr/iosxr.py\", line 852, in get_lldp_neighbors_detail\r\n \"remote_system_capab\": napalm.base.helpers.transform_lldp_capab(\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py\", line 463, in transform_lldp_capab\r\n [constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities]\r\n File \"/usr/local/lib/python3.8/dist-packages/napalm/base/helpers.py\", line 463, in <listcomp>\r\n [constants.LLDP_CAPAB_TRANFORM_TABLE[c.strip()] for c in capabilities]\r\nKeyError: 'n/a'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Constants to be used across NAPALM drivers.\"\"\"\n\nCONFIG_LOCK = True # must be changed soon!\nTIMEOUT = 60 # seconds\n\nINTERFACE_NULL_SPEED = -1\n\nACTION_TYPE_METHODS = (\"ping\", \"traceroute\")\n\nBGP_NEIGHBOR_NULL_COUNTER = 
-1\n\nSNMP_AUTHORIZATION_MODE_MAP = {\"read-only\": \"ro\", \"read-write\": \"rw\"}\n\nROUTE_COMMON_PROTOCOL_FIELDS = [\n \"destination\",\n \"prefix_length\",\n \"protocol\",\n \"current_active\",\n \"last_active\",\n \"age\",\n \"next_hop\",\n \"outgoing_interface\",\n \"selected_next_hop\",\n \"preference\",\n \"inactive_reason\",\n \"routing_table\",\n] # identifies the list of fileds common for all protocols\n\nROUTE_PROTOCOL_SPECIFIC_FIELDS = {\n \"bgp\": [\n \"local_as\",\n \"remote_as\",\n \"as_path\",\n \"communities\",\n \"local_preference\",\n \"preference2\",\n \"remote_address\",\n \"metric\",\n \"metric2\",\n ],\n \"isis\": [\"level\", \"metric\", \"local_as\"],\n \"static\": [], # nothing specific to static routes\n}\n\nTRACEROUTE_TTL = 255\nTRACEROUTE_SOURCE = \"\"\nTRACEROUTE_TIMEOUT = 2\nTRACEROUTE_NULL_HOST_NAME = \"*\"\nTRACEROUTE_NULL_IP_ADDRESS = \"*\"\nTRACEROUTE_VRF = \"\"\n\nOPTICS_NULL_LEVEL = \"-Inf\"\n\nPING_SOURCE = \"\"\nPING_TTL = 255\nPING_TIMEOUT = 2\nPING_SIZE = 100\nPING_COUNT = 5\nPING_VRF = \"\"\nPING_SOURCE_INTERFACE = \"\"\n\nNETMIKO_MAP = {\n \"ios\": \"cisco_ios\",\n \"nxos\": \"cisco_nxos\",\n \"nxos_ssh\": \"cisco_nxos\",\n \"iosxr\": \"cisco_iosxr\",\n \"eos\": \"arista_eos\",\n \"junos\": \"juniper_junos\",\n}\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"o\": \"other\",\n \"p\": \"repeater\",\n \"b\": \"bridge\",\n \"w\": \"wlan-access-point\",\n \"r\": \"router\",\n \"t\": \"telephone\",\n \"c\": \"docsis-cable-device\",\n \"s\": \"station\",\n}\n\nCISCO_SANITIZE_FILTERS = {\n r\"^(snmp-server community).*$\": r\"\\1 <removed>\",\n r\"^(snmp-server host \\S+( vrf \\S+)?( version (1|2c|3))?)\\s+\\S+((\\s+\\S*)*)\\s*$\": r\"\\1 <removed> \\5\", # noqa\n r\"^(snmp-server user \\S+( \\S+)? auth md5) \\S+ (priv) \\S+ (localizedkey( engineID \\S+)?)\\s*$\": r\"\\1 <removed> \\3 <removed> \\4\\5\", # noqa\n r\"^(username .+ (password|secret) \\d) .+$\": r\"\\1 <removed>\",\n r\"^(enable (password|secret)( level \\d+)? 
\\d) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+(?:password|secret)) (?:\\d{1,2} )?\\S+$\": r\"\\1 <removed>\",\n r\"^(.*wpa-psk ascii \\d) (\\S+)$\": r\"\\1 <removed>\",\n r\"^(.*key 7) (\\d.+)$\": r\"\\1 <removed>\",\n r\"^(tacacs-server (.+ )?key) .+$\": r\"\\1 <removed>\",\n r\"^(crypto isakmp key) (\\S+) (.*)$\": r\"\\1 <removed> \\3\",\n r\"^(\\s+ip ospf message-digest-key \\d+ md5) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+ip ospf authentication-key) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+neighbor \\S+ password) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+vrrp \\d+ authentication text) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication) .{1,8}$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication md5 key-string) .+?( timeout \\d+)?$\": r\"\\1 <removed> \\2\",\n r\"^(\\s+key-string) .+$\": r\"\\1 <removed>\",\n r\"^((tacacs|radius) server [^\\n]+\\n(\\s+[^\\n]+\\n)*\\s+key) [^\\n]+$\": r\"\\1 <removed>\",\n r\"^(\\s+ppp (chap|pap) password \\d) .+$\": r\"\\1 <removed>\",\n}\n", "path": "napalm/base/constants.py"}], "after_files": [{"content": "\"\"\"Constants to be used across NAPALM drivers.\"\"\"\n\nCONFIG_LOCK = True # must be changed soon!\nTIMEOUT = 60 # seconds\n\nINTERFACE_NULL_SPEED = -1\n\nACTION_TYPE_METHODS = (\"ping\", \"traceroute\")\n\nBGP_NEIGHBOR_NULL_COUNTER = -1\n\nSNMP_AUTHORIZATION_MODE_MAP = {\"read-only\": \"ro\", \"read-write\": \"rw\"}\n\nROUTE_COMMON_PROTOCOL_FIELDS = [\n \"destination\",\n \"prefix_length\",\n \"protocol\",\n \"current_active\",\n \"last_active\",\n \"age\",\n \"next_hop\",\n \"outgoing_interface\",\n \"selected_next_hop\",\n \"preference\",\n \"inactive_reason\",\n \"routing_table\",\n] # identifies the list of fileds common for all protocols\n\nROUTE_PROTOCOL_SPECIFIC_FIELDS = {\n \"bgp\": [\n \"local_as\",\n \"remote_as\",\n \"as_path\",\n \"communities\",\n \"local_preference\",\n \"preference2\",\n \"remote_address\",\n \"metric\",\n \"metric2\",\n ],\n \"isis\": [\"level\", \"metric\", \"local_as\"],\n \"static\": [], # nothing specific to static routes\n}\n\nTRACEROUTE_TTL = 255\nTRACEROUTE_SOURCE = \"\"\nTRACEROUTE_TIMEOUT = 2\nTRACEROUTE_NULL_HOST_NAME = \"*\"\nTRACEROUTE_NULL_IP_ADDRESS = \"*\"\nTRACEROUTE_VRF = \"\"\n\nOPTICS_NULL_LEVEL = \"-Inf\"\n\nPING_SOURCE = \"\"\nPING_TTL = 255\nPING_TIMEOUT = 2\nPING_SIZE = 100\nPING_COUNT = 5\nPING_VRF = \"\"\nPING_SOURCE_INTERFACE = \"\"\n\nNETMIKO_MAP = {\n \"ios\": \"cisco_ios\",\n \"nxos\": \"cisco_nxos\",\n \"nxos_ssh\": \"cisco_nxos\",\n \"iosxr\": \"cisco_iosxr\",\n \"eos\": \"arista_eos\",\n \"junos\": \"juniper_junos\",\n}\nLLDP_CAPAB_TRANFORM_TABLE = {\n \"o\": \"other\",\n \"n/a\": \"other\",\n \"p\": \"repeater\",\n \"b\": \"bridge\",\n \"w\": \"wlan-access-point\",\n \"r\": \"router\",\n \"t\": \"telephone\",\n \"c\": \"docsis-cable-device\",\n \"s\": \"station\",\n}\n\nCISCO_SANITIZE_FILTERS = {\n r\"^(snmp-server community).*$\": r\"\\1 <removed>\",\n r\"^(snmp-server host \\S+( vrf \\S+)?( version (1|2c|3))?)\\s+\\S+((\\s+\\S*)*)\\s*$\": r\"\\1 <removed> \\5\", # noqa\n r\"^(snmp-server user \\S+( \\S+)? auth md5) \\S+ (priv) \\S+ (localizedkey( engineID \\S+)?)\\s*$\": r\"\\1 <removed> \\3 <removed> \\4\\5\", # noqa\n r\"^(username .+ (password|secret) \\d) .+$\": r\"\\1 <removed>\",\n r\"^(enable (password|secret)( level \\d+)? 
\\d) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+(?:password|secret)) (?:\\d{1,2} )?\\S+$\": r\"\\1 <removed>\",\n r\"^(.*wpa-psk ascii \\d) (\\S+)$\": r\"\\1 <removed>\",\n r\"^(.*key 7) (\\d.+)$\": r\"\\1 <removed>\",\n r\"^(tacacs-server (.+ )?key) .+$\": r\"\\1 <removed>\",\n r\"^(crypto isakmp key) (\\S+) (.*)$\": r\"\\1 <removed> \\3\",\n r\"^(\\s+ip ospf message-digest-key \\d+ md5) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+ip ospf authentication-key) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+neighbor \\S+ password) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+vrrp \\d+ authentication text) .+$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication) .{1,8}$\": r\"\\1 <removed>\",\n r\"^(\\s+standby \\d+ authentication md5 key-string) .+?( timeout \\d+)?$\": r\"\\1 <removed> \\2\",\n r\"^(\\s+key-string) .+$\": r\"\\1 <removed>\",\n r\"^((tacacs|radius) server [^\\n]+\\n(\\s+[^\\n]+\\n)*\\s+key) [^\\n]+$\": r\"\\1 <removed>\",\n r\"^(\\s+ppp (chap|pap) password \\d) .+$\": r\"\\1 <removed>\",\n}\n", "path": "napalm/base/constants.py"}]}
2,390
99
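The same constants module ships the `CISCO_SANITIZE_FILTERS` regex table; applying one of those entries line by line is a plain multiline `re.sub`. A quick illustration (the filter string is taken verbatim from the file above; the sample config lines are invented and the `re.MULTILINE` flag is an assumption about how the filters are meant to be applied):

```python
import re

pattern = r"^(snmp-server community).*$"   # one key from CISCO_SANITIZE_FILTERS
replacement = r"\1 <removed>"

config = "\n".join([
    "hostname lab-sw1",
    "snmp-server community S3cr3tRO ro",
    "snmp-server location closet-b",
])

print(re.sub(pattern, replacement, config, flags=re.MULTILINE))
# The community-string line becomes: snmp-server community <removed>
```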
gh_patches_debug_5120
rasdani/github-patches
git_diff
translate__pootle-6751
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pootle Install I am a Pootle novice, I successfully installed it on the mac, but when i want to deploy to the production environment, the following installation error occurs. # Steps to reproduce: 1. I create a virtualenv env 2. Install Pootle requirements 3. Install Pootle, using `pip install --process-dependency-links Pootle` # Results (Expected/Actual): ```Python Collecting Pootle Using cached Pootle-2.8.1.tar.bz2 Complete output from command python setup.py egg_info: Pootle requires a non-PyPI dependency, when using pip ensure you use the --process-dependency-links option. Traceback (most recent call last): File "<string>", line 1, in <module> File "/tmp/pip-build-_2TvXs/Pootle/setup.py", line 453, in <module> long_description=parse_long_description(README_FILE, tag=True), File "/tmp/pip-build-_2TvXs/Pootle/setup.py", line 404, in parse_long_description replace_urls() File "/tmp/pip-build-_2TvXs/Pootle/setup.py", line 332, in replace_urls branch = version.get_git_branch() File "/tmp/pip-build-_2TvXs/Pootle/pootle/core/utils/version.py", line 231, in get_git_branch 'HEAD']).strip() AttributeError: 'NoneType' object has no attribute 'strip' ``` # Environment (i.e. 'pootle --version', DB, OS, Browser): python version: 2.7.12 system: ubuntu 16.04 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/core/utils/version.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 # Some functions are taken from or modelled on the version management in 10 # Django. Those are: 11 # Copyright (c) Django Software Foundation and individual contributors. All 12 # rights reserved. 13 14 from __future__ import print_function 15 16 import datetime 17 import os 18 import subprocess 19 20 try: 21 from django.utils.lru_cache import lru_cache 22 except ImportError: 23 # Required for Python 2.7 support and when backported Django version is 24 # unavailable 25 def lru_cache(): 26 def fake(func): 27 return func 28 return fake 29 30 from pootle.constants import VERSION 31 32 33 CANDIDATE_MARKERS = ('alpha', 'beta', 'rc', 'final') 34 35 36 def get_version(version=None): 37 """Returns a PEP 440-compliant version number from VERSION. 
38 39 The following examples show a progression from development through 40 pre-release to release and the resultant versions generated: 41 42 >>> get_version((2, 7, 1, 'alpha', 0)) 43 '2.7.1.dev20150530132219' 44 >>> get_version((2, 7, 1, 'alpha', 1)) 45 '2.7.1a1' 46 >>> get_version((2, 7, 1, 'beta', 1)) 47 '2.7.1b1' 48 >>> get_version((2, 7, 1, 'rc', 2)) 49 '2.7.1rc2' 50 >>> get_version((2, 7, 1, 'final', 0)) 51 '2.7.1' 52 """ 53 version = get_complete_version(version) 54 55 # Now build the two parts of the version number: 56 # main = X.Y[.Z] 57 # sub = .devN - for pre-alpha releases 58 # | {a|b|rc}N - for alpha, beta and rc releases 59 60 main = get_main_version(version) 61 candidate_pos = _get_candidate_pos(version) 62 candidate = version[candidate_pos] 63 candidate_extra = version[candidate_pos+1] 64 65 sub = '' 66 if _is_development_candidate(version): 67 git_changeset = get_git_changeset() 68 if git_changeset: 69 sub = '.dev%s' % git_changeset 70 else: 71 sub = '.dev0' 72 73 elif candidate != 'final': 74 mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} 75 sub = mapping[candidate] + str(candidate_extra) 76 77 return str(main + sub) 78 79 80 def _is_development_candidate(version): 81 """Is this a pre-alpha release 82 83 >>> _is_development_candidate((2, 1, 0, 'alpha', 0)) 84 True 85 >>> _is_development_candidate((2, 1, 0, 'beta', 1)) 86 False 87 """ 88 candidate_pos = _get_candidate_pos(version) 89 candidate = version[candidate_pos] 90 candidate_extra = version[candidate_pos+1] 91 return candidate == 'alpha' and candidate_extra == 0 92 93 94 def _get_candidate_pos(version): 95 """Returns the position of the candidate marker. 96 97 >>> _get_candidate_pos((1, 2, 0, 'alpha', 0)) 98 3 99 """ 100 return [i for i, part in enumerate(version) 101 if part in CANDIDATE_MARKERS][0] 102 103 104 def _get_candidate(version): 105 """Returns the candidate. One of alpha, beta, rc or final. 106 107 >>> _get_candidate((0, 1, 2, 'rc', 1)) 108 'rc' 109 """ 110 return version[_get_candidate_pos(version)] 111 112 113 def _get_version_string(parts): 114 """Returns an X.Y.Z version from the list of version parts. 115 116 >>> _get_version_string((1, 1, 0)) 117 '1.1.0' 118 >>> _get_version_string((1, 1, 0, 1)) 119 '1.1.0.1' 120 """ 121 return '.'.join(str(x) for x in parts) 122 123 124 def get_main_version(version=None): 125 """Returns main version (X.Y[.Z]) from VERSION. 126 127 >>> get_main_version((1, 2, 3, 'alpha', 1)) 128 '1.2.3' 129 """ 130 version = get_complete_version(version) 131 candidate_pos = _get_candidate_pos(version) 132 return _get_version_string(version[:candidate_pos]) 133 134 135 def get_major_minor_version(version=None): 136 """Returns X.Y from VERSION. 137 138 >>> get_major_minor_version((1, 2, 3, 'final', 0)) 139 '1.2' 140 """ 141 version = get_complete_version(version) 142 return _get_version_string(version[:2]) 143 144 145 def get_complete_version(version=None): 146 """Returns a tuple of the Pootle version. Or the supplied ``version`` 147 148 >>> get_complete_version((1, 2, 3, 'alpha', 0)) 149 (1, 2, 3, 'alpha', 0) 150 """ 151 if version is not None: 152 return version 153 154 return VERSION 155 156 157 def get_docs_version(version=None, positions=2): 158 """Return the version used in documentation. 
159 160 >>> get_docs_version((1, 2, 1, 'alpha', 0)) 161 'dev' 162 >>> get_docs_version((1, 2, 1, 'rc', 2)) 163 '1.2' 164 """ 165 version = get_complete_version(version) 166 candidate_pos = _get_candidate_pos(version) 167 if positions > candidate_pos: 168 positions = candidate_pos 169 if _is_development_candidate(version): 170 return 'dev' 171 return _get_version_string(version[:positions]) 172 173 174 def get_rtd_version(version=None): 175 """Return the docs version string reported in the RTD site.""" 176 version_str = get_docs_version(version=version, positions=2) 177 return ( 178 'latest' 179 if version_str == 'dev' 180 else 'stable-%s.x' % (version_str, ) 181 ) 182 183 184 def _shell_command(command): 185 """Return the first result of a shell ``command``""" 186 repo_dir = os.path.dirname(os.path.abspath(__file__)) 187 188 try: 189 command_subprocess = subprocess.Popen( 190 command, 191 stdout=subprocess.PIPE, 192 stderr=subprocess.PIPE, 193 cwd=repo_dir, 194 universal_newlines=True 195 ) 196 except OSError: 197 return None 198 199 return command_subprocess.communicate()[0] 200 201 202 @lru_cache() 203 def get_git_changeset(): 204 """Returns a numeric identifier of the latest git changeset. 205 206 The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. 207 This value isn't guaranteed to be unique, but collisions are very unlikely, 208 so it's sufficient for generating the development version numbers. 209 210 >>> get_git_changeset() 211 '20150530132219' 212 """ 213 timestamp = _shell_command( 214 ['/usr/bin/git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'] 215 ) 216 try: 217 timestamp = datetime.datetime.utcfromtimestamp(int(timestamp)) 218 except ValueError: 219 return None 220 return timestamp.strftime('%Y%m%d%H%M%S') 221 222 223 @lru_cache() 224 def get_git_branch(): 225 """Returns the current git branch. 226 227 >>> get_git_branch() 228 'feature/proper_version' 229 """ 230 branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q', 231 'HEAD']).strip() 232 if not branch: 233 return None 234 return "/".join(branch.split("/")[2:]) 235 236 237 @lru_cache() 238 def get_git_hash(): 239 """Returns the current git commit hash or None. 240 241 >>> get_git_hash() 242 'ad768e8' 243 """ 244 git_hash = _shell_command( 245 ['/usr/bin/git', 'rev-parse', '--verify', '--short', 'HEAD'] 246 ) 247 if git_hash: 248 return git_hash.strip() 249 return None 250 251 252 if __name__ == "__main__": 253 from sys import argv 254 if len(argv) == 2: 255 if argv[1] == "main": 256 print(get_main_version()) 257 elif argv[1] == "major_minor": 258 print(get_major_minor_version()) 259 elif argv[1] == "docs": 260 print(get_docs_version()) 261 else: 262 print(get_version()) 263 264 265 def is_prerelease(version=None): 266 """Is this a final release or not""" 267 268 return _get_candidate(get_complete_version(version)) != 'final' 269 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/core/utils/version.py b/pootle/core/utils/version.py --- a/pootle/core/utils/version.py +++ b/pootle/core/utils/version.py @@ -228,10 +228,10 @@ 'feature/proper_version' """ branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q', - 'HEAD']).strip() + 'HEAD']) if not branch: return None - return "/".join(branch.split("/")[2:]) + return "/".join(branch.strip().split("/")[2:]) @lru_cache()
{"golden_diff": "diff --git a/pootle/core/utils/version.py b/pootle/core/utils/version.py\n--- a/pootle/core/utils/version.py\n+++ b/pootle/core/utils/version.py\n@@ -228,10 +228,10 @@\n 'feature/proper_version'\n \"\"\"\n branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q',\n- 'HEAD']).strip()\n+ 'HEAD'])\n if not branch:\n return None\n- return \"/\".join(branch.split(\"/\")[2:])\n+ return \"/\".join(branch.strip().split(\"/\")[2:])\n \n \n @lru_cache()\n", "issue": "Pootle Install \nI am a Pootle novice, I successfully installed it on the mac, but when i want to deploy to the production environment, the following installation error occurs.\r\n\r\n# Steps to reproduce:\r\n\r\n1. I create a virtualenv env\r\n2. Install Pootle requirements\r\n3. Install Pootle, using `pip install --process-dependency-links Pootle`\r\n\r\n# Results (Expected/Actual):\r\n\r\n```Python\r\nCollecting Pootle\r\n Using cached Pootle-2.8.1.tar.bz2\r\n Complete output from command python setup.py egg_info:\r\n Pootle requires a non-PyPI dependency, when using pip ensure you use the --process-dependency-links option.\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-build-_2TvXs/Pootle/setup.py\", line 453, in <module>\r\n long_description=parse_long_description(README_FILE, tag=True),\r\n File \"/tmp/pip-build-_2TvXs/Pootle/setup.py\", line 404, in parse_long_description\r\n replace_urls()\r\n File \"/tmp/pip-build-_2TvXs/Pootle/setup.py\", line 332, in replace_urls\r\n branch = version.get_git_branch()\r\n File \"/tmp/pip-build-_2TvXs/Pootle/pootle/core/utils/version.py\", line 231, in get_git_branch\r\n 'HEAD']).strip()\r\n AttributeError: 'NoneType' object has no attribute 'strip'\r\n```\r\n\r\n\r\n# Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\npython version: 2.7.12\r\nsystem: ubuntu 16.04\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n# Some functions are taken from or modelled on the version management in\n# Django. Those are:\n# Copyright (c) Django Software Foundation and individual contributors. 
All\n# rights reserved.\n\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport subprocess\n\ntry:\n from django.utils.lru_cache import lru_cache\nexcept ImportError:\n # Required for Python 2.7 support and when backported Django version is\n # unavailable\n def lru_cache():\n def fake(func):\n return func\n return fake\n\nfrom pootle.constants import VERSION\n\n\nCANDIDATE_MARKERS = ('alpha', 'beta', 'rc', 'final')\n\n\ndef get_version(version=None):\n \"\"\"Returns a PEP 440-compliant version number from VERSION.\n\n The following examples show a progression from development through\n pre-release to release and the resultant versions generated:\n\n >>> get_version((2, 7, 1, 'alpha', 0))\n '2.7.1.dev20150530132219'\n >>> get_version((2, 7, 1, 'alpha', 1))\n '2.7.1a1'\n >>> get_version((2, 7, 1, 'beta', 1))\n '2.7.1b1'\n >>> get_version((2, 7, 1, 'rc', 2))\n '2.7.1rc2'\n >>> get_version((2, 7, 1, 'final', 0))\n '2.7.1'\n \"\"\"\n version = get_complete_version(version)\n\n # Now build the two parts of the version number:\n # main = X.Y[.Z]\n # sub = .devN - for pre-alpha releases\n # | {a|b|rc}N - for alpha, beta and rc releases\n\n main = get_main_version(version)\n candidate_pos = _get_candidate_pos(version)\n candidate = version[candidate_pos]\n candidate_extra = version[candidate_pos+1]\n\n sub = ''\n if _is_development_candidate(version):\n git_changeset = get_git_changeset()\n if git_changeset:\n sub = '.dev%s' % git_changeset\n else:\n sub = '.dev0'\n\n elif candidate != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}\n sub = mapping[candidate] + str(candidate_extra)\n\n return str(main + sub)\n\n\ndef _is_development_candidate(version):\n \"\"\"Is this a pre-alpha release\n\n >>> _is_development_candidate((2, 1, 0, 'alpha', 0))\n True\n >>> _is_development_candidate((2, 1, 0, 'beta', 1))\n False\n \"\"\"\n candidate_pos = _get_candidate_pos(version)\n candidate = version[candidate_pos]\n candidate_extra = version[candidate_pos+1]\n return candidate == 'alpha' and candidate_extra == 0\n\n\ndef _get_candidate_pos(version):\n \"\"\"Returns the position of the candidate marker.\n\n >>> _get_candidate_pos((1, 2, 0, 'alpha', 0))\n 3\n \"\"\"\n return [i for i, part in enumerate(version)\n if part in CANDIDATE_MARKERS][0]\n\n\ndef _get_candidate(version):\n \"\"\"Returns the candidate. One of alpha, beta, rc or final.\n\n >>> _get_candidate((0, 1, 2, 'rc', 1))\n 'rc'\n \"\"\"\n return version[_get_candidate_pos(version)]\n\n\ndef _get_version_string(parts):\n \"\"\"Returns an X.Y.Z version from the list of version parts.\n\n >>> _get_version_string((1, 1, 0))\n '1.1.0'\n >>> _get_version_string((1, 1, 0, 1))\n '1.1.0.1'\n \"\"\"\n return '.'.join(str(x) for x in parts)\n\n\ndef get_main_version(version=None):\n \"\"\"Returns main version (X.Y[.Z]) from VERSION.\n\n >>> get_main_version((1, 2, 3, 'alpha', 1))\n '1.2.3'\n \"\"\"\n version = get_complete_version(version)\n candidate_pos = _get_candidate_pos(version)\n return _get_version_string(version[:candidate_pos])\n\n\ndef get_major_minor_version(version=None):\n \"\"\"Returns X.Y from VERSION.\n\n >>> get_major_minor_version((1, 2, 3, 'final', 0))\n '1.2'\n \"\"\"\n version = get_complete_version(version)\n return _get_version_string(version[:2])\n\n\ndef get_complete_version(version=None):\n \"\"\"Returns a tuple of the Pootle version. 
Or the supplied ``version``\n\n >>> get_complete_version((1, 2, 3, 'alpha', 0))\n (1, 2, 3, 'alpha', 0)\n \"\"\"\n if version is not None:\n return version\n\n return VERSION\n\n\ndef get_docs_version(version=None, positions=2):\n \"\"\"Return the version used in documentation.\n\n >>> get_docs_version((1, 2, 1, 'alpha', 0))\n 'dev'\n >>> get_docs_version((1, 2, 1, 'rc', 2))\n '1.2'\n \"\"\"\n version = get_complete_version(version)\n candidate_pos = _get_candidate_pos(version)\n if positions > candidate_pos:\n positions = candidate_pos\n if _is_development_candidate(version):\n return 'dev'\n return _get_version_string(version[:positions])\n\n\ndef get_rtd_version(version=None):\n \"\"\"Return the docs version string reported in the RTD site.\"\"\"\n version_str = get_docs_version(version=version, positions=2)\n return (\n 'latest'\n if version_str == 'dev'\n else 'stable-%s.x' % (version_str, )\n )\n\n\ndef _shell_command(command):\n \"\"\"Return the first result of a shell ``command``\"\"\"\n repo_dir = os.path.dirname(os.path.abspath(__file__))\n\n try:\n command_subprocess = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=repo_dir,\n universal_newlines=True\n )\n except OSError:\n return None\n\n return command_subprocess.communicate()[0]\n\n\n@lru_cache()\ndef get_git_changeset():\n \"\"\"Returns a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n\n >>> get_git_changeset()\n '20150530132219'\n \"\"\"\n timestamp = _shell_command(\n ['/usr/bin/git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD']\n )\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')\n\n\n@lru_cache()\ndef get_git_branch():\n \"\"\"Returns the current git branch.\n\n >>> get_git_branch()\n 'feature/proper_version'\n \"\"\"\n branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q',\n 'HEAD']).strip()\n if not branch:\n return None\n return \"/\".join(branch.split(\"/\")[2:])\n\n\n@lru_cache()\ndef get_git_hash():\n \"\"\"Returns the current git commit hash or None.\n\n >>> get_git_hash()\n 'ad768e8'\n \"\"\"\n git_hash = _shell_command(\n ['/usr/bin/git', 'rev-parse', '--verify', '--short', 'HEAD']\n )\n if git_hash:\n return git_hash.strip()\n return None\n\n\nif __name__ == \"__main__\":\n from sys import argv\n if len(argv) == 2:\n if argv[1] == \"main\":\n print(get_main_version())\n elif argv[1] == \"major_minor\":\n print(get_major_minor_version())\n elif argv[1] == \"docs\":\n print(get_docs_version())\n else:\n print(get_version())\n\n\ndef is_prerelease(version=None):\n \"\"\"Is this a final release or not\"\"\"\n\n return _get_candidate(get_complete_version(version)) != 'final'\n", "path": "pootle/core/utils/version.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n# Some functions are taken from or modelled on the version management in\n# Django. Those are:\n# Copyright (c) Django Software Foundation and individual contributors. 
All\n# rights reserved.\n\nfrom __future__ import print_function\n\nimport datetime\nimport os\nimport subprocess\n\ntry:\n from django.utils.lru_cache import lru_cache\nexcept ImportError:\n # Required for Python 2.7 support and when backported Django version is\n # unavailable\n def lru_cache():\n def fake(func):\n return func\n return fake\n\nfrom pootle.constants import VERSION\n\n\nCANDIDATE_MARKERS = ('alpha', 'beta', 'rc', 'final')\n\n\ndef get_version(version=None):\n \"\"\"Returns a PEP 440-compliant version number from VERSION.\n\n The following examples show a progression from development through\n pre-release to release and the resultant versions generated:\n\n >>> get_version((2, 7, 1, 'alpha', 0))\n '2.7.1.dev20150530132219'\n >>> get_version((2, 7, 1, 'alpha', 1))\n '2.7.1a1'\n >>> get_version((2, 7, 1, 'beta', 1))\n '2.7.1b1'\n >>> get_version((2, 7, 1, 'rc', 2))\n '2.7.1rc2'\n >>> get_version((2, 7, 1, 'final', 0))\n '2.7.1'\n \"\"\"\n version = get_complete_version(version)\n\n # Now build the two parts of the version number:\n # main = X.Y[.Z]\n # sub = .devN - for pre-alpha releases\n # | {a|b|rc}N - for alpha, beta and rc releases\n\n main = get_main_version(version)\n candidate_pos = _get_candidate_pos(version)\n candidate = version[candidate_pos]\n candidate_extra = version[candidate_pos+1]\n\n sub = ''\n if _is_development_candidate(version):\n git_changeset = get_git_changeset()\n if git_changeset:\n sub = '.dev%s' % git_changeset\n else:\n sub = '.dev0'\n\n elif candidate != 'final':\n mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}\n sub = mapping[candidate] + str(candidate_extra)\n\n return str(main + sub)\n\n\ndef _is_development_candidate(version):\n \"\"\"Is this a pre-alpha release\n\n >>> _is_development_candidate((2, 1, 0, 'alpha', 0))\n True\n >>> _is_development_candidate((2, 1, 0, 'beta', 1))\n False\n \"\"\"\n candidate_pos = _get_candidate_pos(version)\n candidate = version[candidate_pos]\n candidate_extra = version[candidate_pos+1]\n return candidate == 'alpha' and candidate_extra == 0\n\n\ndef _get_candidate_pos(version):\n \"\"\"Returns the position of the candidate marker.\n\n >>> _get_candidate_pos((1, 2, 0, 'alpha', 0))\n 3\n \"\"\"\n return [i for i, part in enumerate(version)\n if part in CANDIDATE_MARKERS][0]\n\n\ndef _get_candidate(version):\n \"\"\"Returns the candidate. One of alpha, beta, rc or final.\n\n >>> _get_candidate((0, 1, 2, 'rc', 1))\n 'rc'\n \"\"\"\n return version[_get_candidate_pos(version)]\n\n\ndef _get_version_string(parts):\n \"\"\"Returns an X.Y.Z version from the list of version parts.\n\n >>> _get_version_string((1, 1, 0))\n '1.1.0'\n >>> _get_version_string((1, 1, 0, 1))\n '1.1.0.1'\n \"\"\"\n return '.'.join(str(x) for x in parts)\n\n\ndef get_main_version(version=None):\n \"\"\"Returns main version (X.Y[.Z]) from VERSION.\n\n >>> get_main_version((1, 2, 3, 'alpha', 1))\n '1.2.3'\n \"\"\"\n version = get_complete_version(version)\n candidate_pos = _get_candidate_pos(version)\n return _get_version_string(version[:candidate_pos])\n\n\ndef get_major_minor_version(version=None):\n \"\"\"Returns X.Y from VERSION.\n\n >>> get_major_minor_version((1, 2, 3, 'final', 0))\n '1.2'\n \"\"\"\n version = get_complete_version(version)\n return _get_version_string(version[:2])\n\n\ndef get_complete_version(version=None):\n \"\"\"Returns a tuple of the Pootle version. 
Or the supplied ``version``\n\n >>> get_complete_version((1, 2, 3, 'alpha', 0))\n (1, 2, 3, 'alpha', 0)\n \"\"\"\n if version is not None:\n return version\n\n return VERSION\n\n\ndef get_docs_version(version=None, positions=2):\n \"\"\"Return the version used in documentation.\n\n >>> get_docs_version((1, 2, 1, 'alpha', 0))\n 'dev'\n >>> get_docs_version((1, 2, 1, 'rc', 2))\n '1.2'\n \"\"\"\n version = get_complete_version(version)\n candidate_pos = _get_candidate_pos(version)\n if positions > candidate_pos:\n positions = candidate_pos\n if _is_development_candidate(version):\n return 'dev'\n return _get_version_string(version[:positions])\n\n\ndef get_rtd_version(version=None):\n \"\"\"Return the docs version string reported in the RTD site.\"\"\"\n version_str = get_docs_version(version=version, positions=2)\n return (\n 'latest'\n if version_str == 'dev'\n else 'stable-%s.x' % (version_str, )\n )\n\n\ndef _shell_command(command):\n \"\"\"Return the first result of a shell ``command``\"\"\"\n repo_dir = os.path.dirname(os.path.abspath(__file__))\n\n try:\n command_subprocess = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=repo_dir,\n universal_newlines=True\n )\n except OSError:\n return None\n\n return command_subprocess.communicate()[0]\n\n\n@lru_cache()\ndef get_git_changeset():\n \"\"\"Returns a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n\n >>> get_git_changeset()\n '20150530132219'\n \"\"\"\n timestamp = _shell_command(\n ['/usr/bin/git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD']\n )\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')\n\n\n@lru_cache()\ndef get_git_branch():\n \"\"\"Returns the current git branch.\n\n >>> get_git_branch()\n 'feature/proper_version'\n \"\"\"\n branch = _shell_command(['/usr/bin/git', 'symbolic-ref', '-q',\n 'HEAD'])\n if not branch:\n return None\n return \"/\".join(branch.strip().split(\"/\")[2:])\n\n\n@lru_cache()\ndef get_git_hash():\n \"\"\"Returns the current git commit hash or None.\n\n >>> get_git_hash()\n 'ad768e8'\n \"\"\"\n git_hash = _shell_command(\n ['/usr/bin/git', 'rev-parse', '--verify', '--short', 'HEAD']\n )\n if git_hash:\n return git_hash.strip()\n return None\n\n\nif __name__ == \"__main__\":\n from sys import argv\n if len(argv) == 2:\n if argv[1] == \"main\":\n print(get_main_version())\n elif argv[1] == \"major_minor\":\n print(get_major_minor_version())\n elif argv[1] == \"docs\":\n print(get_docs_version())\n else:\n print(get_version())\n\n\ndef is_prerelease(version=None):\n \"\"\"Is this a final release or not\"\"\"\n\n return _get_candidate(get_complete_version(version)) != 'final'\n", "path": "pootle/core/utils/version.py"}]}
3285
137
gh_patches_debug_45049
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1679
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Metrics instrumentation celery Relevant semconv: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/instrumentation/kafka.md --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 Instrument `celery`_ to trace Celery applications. 16 17 .. _celery: https://pypi.org/project/celery/ 18 19 Usage 20 ----- 21 22 * Start broker backend 23 24 .. code:: 25 26 docker run -p 5672:5672 rabbitmq 27 28 29 * Run instrumented task 30 31 .. code:: python 32 33 from opentelemetry.instrumentation.celery import CeleryInstrumentor 34 35 from celery import Celery 36 from celery.signals import worker_process_init 37 38 @worker_process_init.connect(weak=False) 39 def init_celery_tracing(*args, **kwargs): 40 CeleryInstrumentor().instrument() 41 42 app = Celery("tasks", broker="amqp://localhost") 43 44 @app.task 45 def add(x, y): 46 return x + y 47 48 add.delay(42, 50) 49 50 Setting up tracing 51 ------------------ 52 53 When tracing a celery worker process, tracing and instrumention both must be initialized after the celery worker 54 process is initialized. This is required for any tracing components that might use threading to work correctly 55 such as the BatchSpanProcessor. Celery provides a signal called ``worker_process_init`` that can be used to 56 accomplish this as shown in the example above. 
57 58 API 59 --- 60 """ 61 62 import logging 63 from typing import Collection, Iterable 64 65 from celery import signals # pylint: disable=no-name-in-module 66 67 from opentelemetry import trace 68 from opentelemetry.instrumentation.celery import utils 69 from opentelemetry.instrumentation.celery.package import _instruments 70 from opentelemetry.instrumentation.celery.version import __version__ 71 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 72 from opentelemetry.propagate import extract, inject 73 from opentelemetry.propagators.textmap import Getter 74 from opentelemetry.semconv.trace import SpanAttributes 75 from opentelemetry.trace.status import Status, StatusCode 76 77 logger = logging.getLogger(__name__) 78 79 # Task operations 80 _TASK_TAG_KEY = "celery.action" 81 _TASK_APPLY_ASYNC = "apply_async" 82 _TASK_RUN = "run" 83 84 _TASK_RETRY_REASON_KEY = "celery.retry.reason" 85 _TASK_REVOKED_REASON_KEY = "celery.revoked.reason" 86 _TASK_REVOKED_TERMINATED_SIGNAL_KEY = "celery.terminated.signal" 87 _TASK_NAME_KEY = "celery.task_name" 88 89 90 class CeleryGetter(Getter): 91 def get(self, carrier, key): 92 value = getattr(carrier, key, None) 93 if value is None: 94 return None 95 if isinstance(value, str) or not isinstance(value, Iterable): 96 value = (value,) 97 return value 98 99 def keys(self, carrier): 100 return [] 101 102 103 celery_getter = CeleryGetter() 104 105 106 class CeleryInstrumentor(BaseInstrumentor): 107 def instrumentation_dependencies(self) -> Collection[str]: 108 return _instruments 109 110 def _instrument(self, **kwargs): 111 tracer_provider = kwargs.get("tracer_provider") 112 113 # pylint: disable=attribute-defined-outside-init 114 self._tracer = trace.get_tracer(__name__, __version__, tracer_provider) 115 116 signals.task_prerun.connect(self._trace_prerun, weak=False) 117 signals.task_postrun.connect(self._trace_postrun, weak=False) 118 signals.before_task_publish.connect( 119 self._trace_before_publish, weak=False 120 ) 121 signals.after_task_publish.connect( 122 self._trace_after_publish, weak=False 123 ) 124 signals.task_failure.connect(self._trace_failure, weak=False) 125 signals.task_retry.connect(self._trace_retry, weak=False) 126 127 def _uninstrument(self, **kwargs): 128 signals.task_prerun.disconnect(self._trace_prerun) 129 signals.task_postrun.disconnect(self._trace_postrun) 130 signals.before_task_publish.disconnect(self._trace_before_publish) 131 signals.after_task_publish.disconnect(self._trace_after_publish) 132 signals.task_failure.disconnect(self._trace_failure) 133 signals.task_retry.disconnect(self._trace_retry) 134 135 def _trace_prerun(self, *args, **kwargs): 136 task = utils.retrieve_task(kwargs) 137 task_id = utils.retrieve_task_id(kwargs) 138 139 if task is None or task_id is None: 140 return 141 142 request = task.request 143 tracectx = extract(request, getter=celery_getter) or None 144 145 logger.debug("prerun signal start task_id=%s", task_id) 146 147 operation_name = f"{_TASK_RUN}/{task.name}" 148 span = self._tracer.start_span( 149 operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER 150 ) 151 152 activation = trace.use_span(span, end_on_exit=True) 153 activation.__enter__() # pylint: disable=E1101 154 utils.attach_span(task, task_id, (span, activation)) 155 156 @staticmethod 157 def _trace_postrun(*args, **kwargs): 158 task = utils.retrieve_task(kwargs) 159 task_id = utils.retrieve_task_id(kwargs) 160 161 if task is None or task_id is None: 162 return 163 164 logger.debug("postrun signal task_id=%s", 
task_id) 165 166 # retrieve and finish the Span 167 span, activation = utils.retrieve_span(task, task_id) 168 if span is None: 169 logger.warning("no existing span found for task_id=%s", task_id) 170 return 171 172 # request context tags 173 if span.is_recording(): 174 span.set_attribute(_TASK_TAG_KEY, _TASK_RUN) 175 utils.set_attributes_from_context(span, kwargs) 176 utils.set_attributes_from_context(span, task.request) 177 span.set_attribute(_TASK_NAME_KEY, task.name) 178 179 activation.__exit__(None, None, None) 180 utils.detach_span(task, task_id) 181 182 def _trace_before_publish(self, *args, **kwargs): 183 task = utils.retrieve_task_from_sender(kwargs) 184 task_id = utils.retrieve_task_id_from_message(kwargs) 185 186 if task_id is None: 187 return 188 189 if task is None: 190 # task is an anonymous task send using send_task or using canvas workflow 191 # Signatures() to send to a task not in the current processes dependency 192 # tree 193 task_name = kwargs.get("sender", "unknown") 194 else: 195 task_name = task.name 196 operation_name = f"{_TASK_APPLY_ASYNC}/{task_name}" 197 span = self._tracer.start_span( 198 operation_name, kind=trace.SpanKind.PRODUCER 199 ) 200 201 # apply some attributes here because most of the data is not available 202 if span.is_recording(): 203 span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC) 204 span.set_attribute(SpanAttributes.MESSAGING_MESSAGE_ID, task_id) 205 span.set_attribute(_TASK_NAME_KEY, task_name) 206 utils.set_attributes_from_context(span, kwargs) 207 208 activation = trace.use_span(span, end_on_exit=True) 209 activation.__enter__() # pylint: disable=E1101 210 211 utils.attach_span(task, task_id, (span, activation), is_publish=True) 212 213 headers = kwargs.get("headers") 214 if headers: 215 inject(headers) 216 217 @staticmethod 218 def _trace_after_publish(*args, **kwargs): 219 task = utils.retrieve_task_from_sender(kwargs) 220 task_id = utils.retrieve_task_id_from_message(kwargs) 221 222 if task is None or task_id is None: 223 return 224 225 # retrieve and finish the Span 226 _, activation = utils.retrieve_span(task, task_id, is_publish=True) 227 if activation is None: 228 logger.warning("no existing span found for task_id=%s", task_id) 229 return 230 231 activation.__exit__(None, None, None) # pylint: disable=E1101 232 utils.detach_span(task, task_id, is_publish=True) 233 234 @staticmethod 235 def _trace_failure(*args, **kwargs): 236 task = utils.retrieve_task_from_sender(kwargs) 237 task_id = utils.retrieve_task_id(kwargs) 238 239 if task is None or task_id is None: 240 return 241 242 # retrieve and pass exception info to activation 243 span, _ = utils.retrieve_span(task, task_id) 244 if span is None or not span.is_recording(): 245 return 246 247 status_kwargs = {"status_code": StatusCode.ERROR} 248 249 ex = kwargs.get("einfo") 250 251 if ( 252 hasattr(task, "throws") 253 and ex is not None 254 and isinstance(ex.exception, task.throws) 255 ): 256 return 257 258 if ex is not None: 259 status_kwargs["description"] = str(ex) 260 span.record_exception(ex) 261 span.set_status(Status(**status_kwargs)) 262 263 @staticmethod 264 def _trace_retry(*args, **kwargs): 265 task = utils.retrieve_task_from_sender(kwargs) 266 task_id = utils.retrieve_task_id_from_request(kwargs) 267 reason = utils.retrieve_reason(kwargs) 268 269 if task is None or task_id is None or reason is None: 270 return 271 272 span, _ = utils.retrieve_span(task, task_id) 273 if span is None or not span.is_recording(): 274 return 275 276 # Add retry reason metadata to span 277 # 
Use `str(reason)` instead of `reason.message` in case we get 278 # something that isn't an `Exception` 279 span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason)) 280 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py --- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py @@ -60,6 +60,7 @@ """ import logging +from timeit import default_timer from typing import Collection, Iterable from celery import signals # pylint: disable=no-name-in-module @@ -69,6 +70,7 @@ from opentelemetry.instrumentation.celery.package import _instruments from opentelemetry.instrumentation.celery.version import __version__ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor +from opentelemetry.metrics import get_meter from opentelemetry.propagate import extract, inject from opentelemetry.propagators.textmap import Getter from opentelemetry.semconv.trace import SpanAttributes @@ -104,6 +106,11 @@ class CeleryInstrumentor(BaseInstrumentor): + def __init__(self): + super().__init__() + self.metrics = None + self.task_id_to_start_time = {} + def instrumentation_dependencies(self) -> Collection[str]: return _instruments @@ -113,6 +120,11 @@ # pylint: disable=attribute-defined-outside-init self._tracer = trace.get_tracer(__name__, __version__, tracer_provider) + meter_provider = kwargs.get("meter_provider") + meter = get_meter(__name__, __version__, meter_provider) + + self.create_celery_metrics(meter) + signals.task_prerun.connect(self._trace_prerun, weak=False) signals.task_postrun.connect(self._trace_postrun, weak=False) signals.before_task_publish.connect( @@ -139,6 +151,7 @@ if task is None or task_id is None: return + self.update_task_duration_time(task_id) request = task.request tracectx = extract(request, getter=celery_getter) or None @@ -153,8 +166,7 @@ activation.__enter__() # pylint: disable=E1101 utils.attach_span(task, task_id, (span, activation)) - @staticmethod - def _trace_postrun(*args, **kwargs): + def _trace_postrun(self, *args, **kwargs): task = utils.retrieve_task(kwargs) task_id = utils.retrieve_task_id(kwargs) @@ -178,6 +190,9 @@ activation.__exit__(None, None, None) utils.detach_span(task, task_id) + self.update_task_duration_time(task_id) + labels = {"task": task.name, "worker": task.request.hostname} + self._record_histograms(task_id, labels) def _trace_before_publish(self, *args, **kwargs): task = utils.retrieve_task_from_sender(kwargs) @@ -277,3 +292,30 @@ # Use `str(reason)` instead of `reason.message` in case we get # something that isn't an `Exception` span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason)) + + def update_task_duration_time(self, task_id): + cur_time = default_timer() + task_duration_time_until_now = ( + cur_time - self.task_id_to_start_time[task_id] + if task_id in self.task_id_to_start_time + else cur_time + ) + self.task_id_to_start_time[task_id] = task_duration_time_until_now + + def _record_histograms(self, task_id, metric_attributes): + if task_id is None: + return + + self.metrics["flower.task.runtime.seconds"].record( + self.task_id_to_start_time.get(task_id), + attributes=metric_attributes, + ) + + def create_celery_metrics(self, meter) -> None: + self.metrics = { + "flower.task.runtime.seconds": meter.create_histogram( + name="flower.task.runtime.seconds", + unit="seconds", + description="The time it took to run the task.", + ) + }
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py\n@@ -60,6 +60,7 @@\n \"\"\"\n \n import logging\n+from timeit import default_timer\n from typing import Collection, Iterable\n \n from celery import signals # pylint: disable=no-name-in-module\n@@ -69,6 +70,7 @@\n from opentelemetry.instrumentation.celery.package import _instruments\n from opentelemetry.instrumentation.celery.version import __version__\n from opentelemetry.instrumentation.instrumentor import BaseInstrumentor\n+from opentelemetry.metrics import get_meter\n from opentelemetry.propagate import extract, inject\n from opentelemetry.propagators.textmap import Getter\n from opentelemetry.semconv.trace import SpanAttributes\n@@ -104,6 +106,11 @@\n \n \n class CeleryInstrumentor(BaseInstrumentor):\n+ def __init__(self):\n+ super().__init__()\n+ self.metrics = None\n+ self.task_id_to_start_time = {}\n+\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n \n@@ -113,6 +120,11 @@\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n \n+ meter_provider = kwargs.get(\"meter_provider\")\n+ meter = get_meter(__name__, __version__, meter_provider)\n+\n+ self.create_celery_metrics(meter)\n+\n signals.task_prerun.connect(self._trace_prerun, weak=False)\n signals.task_postrun.connect(self._trace_postrun, weak=False)\n signals.before_task_publish.connect(\n@@ -139,6 +151,7 @@\n if task is None or task_id is None:\n return\n \n+ self.update_task_duration_time(task_id)\n request = task.request\n tracectx = extract(request, getter=celery_getter) or None\n \n@@ -153,8 +166,7 @@\n activation.__enter__() # pylint: disable=E1101\n utils.attach_span(task, task_id, (span, activation))\n \n- @staticmethod\n- def _trace_postrun(*args, **kwargs):\n+ def _trace_postrun(self, *args, **kwargs):\n task = utils.retrieve_task(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n \n@@ -178,6 +190,9 @@\n \n activation.__exit__(None, None, None)\n utils.detach_span(task, task_id)\n+ self.update_task_duration_time(task_id)\n+ labels = {\"task\": task.name, \"worker\": task.request.hostname}\n+ self._record_histograms(task_id, labels)\n \n def _trace_before_publish(self, *args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n@@ -277,3 +292,30 @@\n # Use `str(reason)` instead of `reason.message` in case we get\n # something that isn't an `Exception`\n span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))\n+\n+ def update_task_duration_time(self, task_id):\n+ cur_time = default_timer()\n+ task_duration_time_until_now = (\n+ cur_time - self.task_id_to_start_time[task_id]\n+ if task_id in self.task_id_to_start_time\n+ else cur_time\n+ )\n+ self.task_id_to_start_time[task_id] = task_duration_time_until_now\n+\n+ def _record_histograms(self, task_id, metric_attributes):\n+ if task_id is None:\n+ return\n+\n+ self.metrics[\"flower.task.runtime.seconds\"].record(\n+ self.task_id_to_start_time.get(task_id),\n+ attributes=metric_attributes,\n+ )\n+\n+ def create_celery_metrics(self, meter) -> None:\n+ self.metrics = {\n+ 
\"flower.task.runtime.seconds\": meter.create_histogram(\n+ name=\"flower.task.runtime.seconds\",\n+ unit=\"seconds\",\n+ description=\"The time it took to run the task.\",\n+ )\n+ }\n", "issue": "Metrics instrumentation celery\nRelevant semconv: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/instrumentation/kafka.md\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `celery`_ to trace Celery applications.\n\n.. _celery: https://pypi.org/project/celery/\n\nUsage\n-----\n\n* Start broker backend\n\n.. code::\n\n docker run -p 5672:5672 rabbitmq\n\n\n* Run instrumented task\n\n.. code:: python\n\n from opentelemetry.instrumentation.celery import CeleryInstrumentor\n\n from celery import Celery\n from celery.signals import worker_process_init\n\n @worker_process_init.connect(weak=False)\n def init_celery_tracing(*args, **kwargs):\n CeleryInstrumentor().instrument()\n\n app = Celery(\"tasks\", broker=\"amqp://localhost\")\n\n @app.task\n def add(x, y):\n return x + y\n\n add.delay(42, 50)\n\nSetting up tracing\n------------------\n\nWhen tracing a celery worker process, tracing and instrumention both must be initialized after the celery worker\nprocess is initialized. This is required for any tracing components that might use threading to work correctly\nsuch as the BatchSpanProcessor. 
Celery provides a signal called ``worker_process_init`` that can be used to\naccomplish this as shown in the example above.\n\nAPI\n---\n\"\"\"\n\nimport logging\nfrom typing import Collection, Iterable\n\nfrom celery import signals # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.celery import utils\nfrom opentelemetry.instrumentation.celery.package import _instruments\nfrom opentelemetry.instrumentation.celery.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\n# Task operations\n_TASK_TAG_KEY = \"celery.action\"\n_TASK_APPLY_ASYNC = \"apply_async\"\n_TASK_RUN = \"run\"\n\n_TASK_RETRY_REASON_KEY = \"celery.retry.reason\"\n_TASK_REVOKED_REASON_KEY = \"celery.revoked.reason\"\n_TASK_REVOKED_TERMINATED_SIGNAL_KEY = \"celery.terminated.signal\"\n_TASK_NAME_KEY = \"celery.task_name\"\n\n\nclass CeleryGetter(Getter):\n def get(self, carrier, key):\n value = getattr(carrier, key, None)\n if value is None:\n return None\n if isinstance(value, str) or not isinstance(value, Iterable):\n value = (value,)\n return value\n\n def keys(self, carrier):\n return []\n\n\ncelery_getter = CeleryGetter()\n\n\nclass CeleryInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n signals.task_prerun.connect(self._trace_prerun, weak=False)\n signals.task_postrun.connect(self._trace_postrun, weak=False)\n signals.before_task_publish.connect(\n self._trace_before_publish, weak=False\n )\n signals.after_task_publish.connect(\n self._trace_after_publish, weak=False\n )\n signals.task_failure.connect(self._trace_failure, weak=False)\n signals.task_retry.connect(self._trace_retry, weak=False)\n\n def _uninstrument(self, **kwargs):\n signals.task_prerun.disconnect(self._trace_prerun)\n signals.task_postrun.disconnect(self._trace_postrun)\n signals.before_task_publish.disconnect(self._trace_before_publish)\n signals.after_task_publish.disconnect(self._trace_after_publish)\n signals.task_failure.disconnect(self._trace_failure)\n signals.task_retry.disconnect(self._trace_retry)\n\n def _trace_prerun(self, *args, **kwargs):\n task = utils.retrieve_task(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n request = task.request\n tracectx = extract(request, getter=celery_getter) or None\n\n logger.debug(\"prerun signal start task_id=%s\", task_id)\n\n operation_name = f\"{_TASK_RUN}/{task.name}\"\n span = self._tracer.start_span(\n operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n utils.attach_span(task, task_id, (span, activation))\n\n @staticmethod\n def _trace_postrun(*args, **kwargs):\n task = utils.retrieve_task(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n logger.debug(\"postrun signal task_id=%s\", task_id)\n\n # retrieve and finish the Span\n span, 
activation = utils.retrieve_span(task, task_id)\n if span is None:\n logger.warning(\"no existing span found for task_id=%s\", task_id)\n return\n\n # request context tags\n if span.is_recording():\n span.set_attribute(_TASK_TAG_KEY, _TASK_RUN)\n utils.set_attributes_from_context(span, kwargs)\n utils.set_attributes_from_context(span, task.request)\n span.set_attribute(_TASK_NAME_KEY, task.name)\n\n activation.__exit__(None, None, None)\n utils.detach_span(task, task_id)\n\n def _trace_before_publish(self, *args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_message(kwargs)\n\n if task_id is None:\n return\n\n if task is None:\n # task is an anonymous task send using send_task or using canvas workflow\n # Signatures() to send to a task not in the current processes dependency\n # tree\n task_name = kwargs.get(\"sender\", \"unknown\")\n else:\n task_name = task.name\n operation_name = f\"{_TASK_APPLY_ASYNC}/{task_name}\"\n span = self._tracer.start_span(\n operation_name, kind=trace.SpanKind.PRODUCER\n )\n\n # apply some attributes here because most of the data is not available\n if span.is_recording():\n span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC)\n span.set_attribute(SpanAttributes.MESSAGING_MESSAGE_ID, task_id)\n span.set_attribute(_TASK_NAME_KEY, task_name)\n utils.set_attributes_from_context(span, kwargs)\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(task, task_id, (span, activation), is_publish=True)\n\n headers = kwargs.get(\"headers\")\n if headers:\n inject(headers)\n\n @staticmethod\n def _trace_after_publish(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_message(kwargs)\n\n if task is None or task_id is None:\n return\n\n # retrieve and finish the Span\n _, activation = utils.retrieve_span(task, task_id, is_publish=True)\n if activation is None:\n logger.warning(\"no existing span found for task_id=%s\", task_id)\n return\n\n activation.__exit__(None, None, None) # pylint: disable=E1101\n utils.detach_span(task, task_id, is_publish=True)\n\n @staticmethod\n def _trace_failure(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n # retrieve and pass exception info to activation\n span, _ = utils.retrieve_span(task, task_id)\n if span is None or not span.is_recording():\n return\n\n status_kwargs = {\"status_code\": StatusCode.ERROR}\n\n ex = kwargs.get(\"einfo\")\n\n if (\n hasattr(task, \"throws\")\n and ex is not None\n and isinstance(ex.exception, task.throws)\n ):\n return\n\n if ex is not None:\n status_kwargs[\"description\"] = str(ex)\n span.record_exception(ex)\n span.set_status(Status(**status_kwargs))\n\n @staticmethod\n def _trace_retry(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_request(kwargs)\n reason = utils.retrieve_reason(kwargs)\n\n if task is None or task_id is None or reason is None:\n return\n\n span, _ = utils.retrieve_span(task, task_id)\n if span is None or not span.is_recording():\n return\n\n # Add retry reason metadata to span\n # Use `str(reason)` instead of `reason.message` in case we get\n # something that isn't an `Exception`\n span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))\n", "path": 
"instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `celery`_ to trace Celery applications.\n\n.. _celery: https://pypi.org/project/celery/\n\nUsage\n-----\n\n* Start broker backend\n\n.. code::\n\n docker run -p 5672:5672 rabbitmq\n\n\n* Run instrumented task\n\n.. code:: python\n\n from opentelemetry.instrumentation.celery import CeleryInstrumentor\n\n from celery import Celery\n from celery.signals import worker_process_init\n\n @worker_process_init.connect(weak=False)\n def init_celery_tracing(*args, **kwargs):\n CeleryInstrumentor().instrument()\n\n app = Celery(\"tasks\", broker=\"amqp://localhost\")\n\n @app.task\n def add(x, y):\n return x + y\n\n add.delay(42, 50)\n\nSetting up tracing\n------------------\n\nWhen tracing a celery worker process, tracing and instrumention both must be initialized after the celery worker\nprocess is initialized. This is required for any tracing components that might use threading to work correctly\nsuch as the BatchSpanProcessor. Celery provides a signal called ``worker_process_init`` that can be used to\naccomplish this as shown in the example above.\n\nAPI\n---\n\"\"\"\n\nimport logging\nfrom timeit import default_timer\nfrom typing import Collection, Iterable\n\nfrom celery import signals # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.celery import utils\nfrom opentelemetry.instrumentation.celery.package import _instruments\nfrom opentelemetry.instrumentation.celery.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.metrics import get_meter\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\n# Task operations\n_TASK_TAG_KEY = \"celery.action\"\n_TASK_APPLY_ASYNC = \"apply_async\"\n_TASK_RUN = \"run\"\n\n_TASK_RETRY_REASON_KEY = \"celery.retry.reason\"\n_TASK_REVOKED_REASON_KEY = \"celery.revoked.reason\"\n_TASK_REVOKED_TERMINATED_SIGNAL_KEY = \"celery.terminated.signal\"\n_TASK_NAME_KEY = \"celery.task_name\"\n\n\nclass CeleryGetter(Getter):\n def get(self, carrier, key):\n value = getattr(carrier, key, None)\n if value is None:\n return None\n if isinstance(value, str) or not isinstance(value, Iterable):\n value = (value,)\n return value\n\n def keys(self, carrier):\n return []\n\n\ncelery_getter = CeleryGetter()\n\n\nclass CeleryInstrumentor(BaseInstrumentor):\n def __init__(self):\n super().__init__()\n self.metrics = None\n self.task_id_to_start_time = {}\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = 
kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n\n meter_provider = kwargs.get(\"meter_provider\")\n meter = get_meter(__name__, __version__, meter_provider)\n\n self.create_celery_metrics(meter)\n\n signals.task_prerun.connect(self._trace_prerun, weak=False)\n signals.task_postrun.connect(self._trace_postrun, weak=False)\n signals.before_task_publish.connect(\n self._trace_before_publish, weak=False\n )\n signals.after_task_publish.connect(\n self._trace_after_publish, weak=False\n )\n signals.task_failure.connect(self._trace_failure, weak=False)\n signals.task_retry.connect(self._trace_retry, weak=False)\n\n def _uninstrument(self, **kwargs):\n signals.task_prerun.disconnect(self._trace_prerun)\n signals.task_postrun.disconnect(self._trace_postrun)\n signals.before_task_publish.disconnect(self._trace_before_publish)\n signals.after_task_publish.disconnect(self._trace_after_publish)\n signals.task_failure.disconnect(self._trace_failure)\n signals.task_retry.disconnect(self._trace_retry)\n\n def _trace_prerun(self, *args, **kwargs):\n task = utils.retrieve_task(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n self.update_task_duration_time(task_id)\n request = task.request\n tracectx = extract(request, getter=celery_getter) or None\n\n logger.debug(\"prerun signal start task_id=%s\", task_id)\n\n operation_name = f\"{_TASK_RUN}/{task.name}\"\n span = self._tracer.start_span(\n operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n utils.attach_span(task, task_id, (span, activation))\n\n def _trace_postrun(self, *args, **kwargs):\n task = utils.retrieve_task(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n logger.debug(\"postrun signal task_id=%s\", task_id)\n\n # retrieve and finish the Span\n span, activation = utils.retrieve_span(task, task_id)\n if span is None:\n logger.warning(\"no existing span found for task_id=%s\", task_id)\n return\n\n # request context tags\n if span.is_recording():\n span.set_attribute(_TASK_TAG_KEY, _TASK_RUN)\n utils.set_attributes_from_context(span, kwargs)\n utils.set_attributes_from_context(span, task.request)\n span.set_attribute(_TASK_NAME_KEY, task.name)\n\n activation.__exit__(None, None, None)\n utils.detach_span(task, task_id)\n self.update_task_duration_time(task_id)\n labels = {\"task\": task.name, \"worker\": task.request.hostname}\n self._record_histograms(task_id, labels)\n\n def _trace_before_publish(self, *args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_message(kwargs)\n\n if task_id is None:\n return\n\n if task is None:\n # task is an anonymous task send using send_task or using canvas workflow\n # Signatures() to send to a task not in the current processes dependency\n # tree\n task_name = kwargs.get(\"sender\", \"unknown\")\n else:\n task_name = task.name\n operation_name = f\"{_TASK_APPLY_ASYNC}/{task_name}\"\n span = self._tracer.start_span(\n operation_name, kind=trace.SpanKind.PRODUCER\n )\n\n # apply some attributes here because most of the data is not available\n if span.is_recording():\n span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC)\n span.set_attribute(SpanAttributes.MESSAGING_MESSAGE_ID, task_id)\n span.set_attribute(_TASK_NAME_KEY, 
task_name)\n utils.set_attributes_from_context(span, kwargs)\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(task, task_id, (span, activation), is_publish=True)\n\n headers = kwargs.get(\"headers\")\n if headers:\n inject(headers)\n\n @staticmethod\n def _trace_after_publish(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_message(kwargs)\n\n if task is None or task_id is None:\n return\n\n # retrieve and finish the Span\n _, activation = utils.retrieve_span(task, task_id, is_publish=True)\n if activation is None:\n logger.warning(\"no existing span found for task_id=%s\", task_id)\n return\n\n activation.__exit__(None, None, None) # pylint: disable=E1101\n utils.detach_span(task, task_id, is_publish=True)\n\n @staticmethod\n def _trace_failure(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id(kwargs)\n\n if task is None or task_id is None:\n return\n\n # retrieve and pass exception info to activation\n span, _ = utils.retrieve_span(task, task_id)\n if span is None or not span.is_recording():\n return\n\n status_kwargs = {\"status_code\": StatusCode.ERROR}\n\n ex = kwargs.get(\"einfo\")\n\n if (\n hasattr(task, \"throws\")\n and ex is not None\n and isinstance(ex.exception, task.throws)\n ):\n return\n\n if ex is not None:\n status_kwargs[\"description\"] = str(ex)\n span.record_exception(ex)\n span.set_status(Status(**status_kwargs))\n\n @staticmethod\n def _trace_retry(*args, **kwargs):\n task = utils.retrieve_task_from_sender(kwargs)\n task_id = utils.retrieve_task_id_from_request(kwargs)\n reason = utils.retrieve_reason(kwargs)\n\n if task is None or task_id is None or reason is None:\n return\n\n span, _ = utils.retrieve_span(task, task_id)\n if span is None or not span.is_recording():\n return\n\n # Add retry reason metadata to span\n # Use `str(reason)` instead of `reason.message` in case we get\n # something that isn't an `Exception`\n span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))\n\n def update_task_duration_time(self, task_id):\n cur_time = default_timer()\n task_duration_time_until_now = (\n cur_time - self.task_id_to_start_time[task_id]\n if task_id in self.task_id_to_start_time\n else cur_time\n )\n self.task_id_to_start_time[task_id] = task_duration_time_until_now\n\n def _record_histograms(self, task_id, metric_attributes):\n if task_id is None:\n return\n\n self.metrics[\"flower.task.runtime.seconds\"].record(\n self.task_id_to_start_time.get(task_id),\n attributes=metric_attributes,\n )\n\n def create_celery_metrics(self, meter) -> None:\n self.metrics = {\n \"flower.task.runtime.seconds\": meter.create_histogram(\n name=\"flower.task.runtime.seconds\",\n unit=\"seconds\",\n description=\"The time it took to run the task.\",\n )\n }\n", "path": "instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py"}]}
3,159
991
gh_patches_debug_7313
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-1195
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Calling the pause() function does not pause the playback This [pause() function](https://github.com/MycroftAI/mycroft-core/blob/a9d627c979b98053719200c6084f3b31a9a97c62/mycroft/skills/audioservice.py#L83) does not have the desired effect of pausing the playback. `self.emitter.emit(Message('mycroft.sudio.service.stop'))` This stops the playback, but the Audioservice does not expose a stop function. I could make the commit to add the stop() function if agreed upon. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mycroft/skills/audioservice.py` Content: ``` 1 # Copyright 2017 Mycroft AI Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 import time 16 17 from os.path import abspath 18 19 from mycroft.messagebus.message import Message 20 21 22 def ensure_uri(s): 23 """ 24 Interprete paths as file:// uri's 25 26 Args: 27 s: string to be checked 28 29 Returns: 30 if s is uri, s is returned otherwise file:// is prepended 31 """ 32 if '://' not in s: 33 return 'file://' + abspath(s) 34 else: 35 return s 36 37 38 class AudioService(): 39 """ 40 AudioService object for interacting with the audio subsystem 41 42 Args: 43 emitter: eventemitter or websocket object 44 """ 45 46 def __init__(self, emitter): 47 self.emitter = emitter 48 self.emitter.on('mycroft.audio.service.track_info_reply', 49 self._track_info) 50 self.info = None 51 52 def _track_info(self, message=None): 53 """ 54 Handler for catching returning track info 55 """ 56 self.info = message.data 57 58 def play(self, tracks=[], utterance=''): 59 """ Start playback. 60 61 Args: 62 tracks: track uri or list of track uri's 63 utterance: forward utterance for further processing by the 64 audio service. 65 """ 66 if isinstance(tracks, basestring): 67 tracks = [tracks] 68 elif not isinstance(tracks, list): 69 raise ValueError 70 tracks = [ensure_uri(t) for t in tracks] 71 self.emitter.emit(Message('mycroft.audio.service.play', 72 data={'tracks': tracks, 73 'utterance': utterance})) 74 75 def next(self): 76 """ Change to next track. """ 77 self.emitter.emit(Message('mycroft.audio.service.next')) 78 79 def prev(self): 80 """ Change to previous track. """ 81 self.emitter.emit(Message('mycroft.audio.service.prev')) 82 83 def pause(self): 84 """ Pause playback. """ 85 self.emitter.emit(Message('mycroft.audio.service.pause')) 86 87 def resume(self): 88 """ Resume paused playback. """ 89 self.emitter.emit(Message('mycroft.audio.service.resume')) 90 91 def track_info(self): 92 """ Request information of current playing track. 93 94 Returns: 95 Dict with track info. 
96 """ 97 self.info = None 98 self.emitter.emit(Message('mycroft.audio.service.track_info')) 99 wait = 5.0 100 while self.info is None and wait >= 0: 101 time.sleep(0.1) 102 wait -= 0.1 103 104 return self.info or {} 105 106 @property 107 def is_playing(self): 108 return self.track_info() != {} 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mycroft/skills/audioservice.py b/mycroft/skills/audioservice.py --- a/mycroft/skills/audioservice.py +++ b/mycroft/skills/audioservice.py @@ -72,6 +72,10 @@ data={'tracks': tracks, 'utterance': utterance})) + def stop(self): + """ Stop the track. """ + self.emitter.emit(Message('mycroft.audio.service.stop')) + def next(self): """ Change to next track. """ self.emitter.emit(Message('mycroft.audio.service.next'))
{"golden_diff": "diff --git a/mycroft/skills/audioservice.py b/mycroft/skills/audioservice.py\n--- a/mycroft/skills/audioservice.py\n+++ b/mycroft/skills/audioservice.py\n@@ -72,6 +72,10 @@\n data={'tracks': tracks,\n 'utterance': utterance}))\n \n+ def stop(self):\n+ \"\"\" Stop the track. \"\"\"\n+ self.emitter.emit(Message('mycroft.audio.service.stop'))\n+\n def next(self):\n \"\"\" Change to next track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.next'))\n", "issue": "Calling the pause() function does not pause the playback\nThis [pause() function](https://github.com/MycroftAI/mycroft-core/blob/a9d627c979b98053719200c6084f3b31a9a97c62/mycroft/skills/audioservice.py#L83) does not have the desired effect of pausing the playback. \r\n`self.emitter.emit(Message('mycroft.sudio.service.stop'))`\r\nThis stops the playback, but the Audioservice does not expose a stop function. I could make the commit to add the stop() function if agreed upon.\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport time\n\nfrom os.path import abspath\n\nfrom mycroft.messagebus.message import Message\n\n\ndef ensure_uri(s):\n \"\"\"\n Interprete paths as file:// uri's\n\n Args:\n s: string to be checked\n\n Returns:\n if s is uri, s is returned otherwise file:// is prepended\n \"\"\"\n if '://' not in s:\n return 'file://' + abspath(s)\n else:\n return s\n\n\nclass AudioService():\n \"\"\"\n AudioService object for interacting with the audio subsystem\n\n Args:\n emitter: eventemitter or websocket object\n \"\"\"\n\n def __init__(self, emitter):\n self.emitter = emitter\n self.emitter.on('mycroft.audio.service.track_info_reply',\n self._track_info)\n self.info = None\n\n def _track_info(self, message=None):\n \"\"\"\n Handler for catching returning track info\n \"\"\"\n self.info = message.data\n\n def play(self, tracks=[], utterance=''):\n \"\"\" Start playback.\n\n Args:\n tracks: track uri or list of track uri's\n utterance: forward utterance for further processing by the\n audio service.\n \"\"\"\n if isinstance(tracks, basestring):\n tracks = [tracks]\n elif not isinstance(tracks, list):\n raise ValueError\n tracks = [ensure_uri(t) for t in tracks]\n self.emitter.emit(Message('mycroft.audio.service.play',\n data={'tracks': tracks,\n 'utterance': utterance}))\n\n def next(self):\n \"\"\" Change to next track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.next'))\n\n def prev(self):\n \"\"\" Change to previous track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.prev'))\n\n def pause(self):\n \"\"\" Pause playback. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.pause'))\n\n def resume(self):\n \"\"\" Resume paused playback. 
\"\"\"\n self.emitter.emit(Message('mycroft.audio.service.resume'))\n\n def track_info(self):\n \"\"\" Request information of current playing track.\n\n Returns:\n Dict with track info.\n \"\"\"\n self.info = None\n self.emitter.emit(Message('mycroft.audio.service.track_info'))\n wait = 5.0\n while self.info is None and wait >= 0:\n time.sleep(0.1)\n wait -= 0.1\n\n return self.info or {}\n\n @property\n def is_playing(self):\n return self.track_info() != {}\n", "path": "mycroft/skills/audioservice.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport time\n\nfrom os.path import abspath\n\nfrom mycroft.messagebus.message import Message\n\n\ndef ensure_uri(s):\n \"\"\"\n Interprete paths as file:// uri's\n\n Args:\n s: string to be checked\n\n Returns:\n if s is uri, s is returned otherwise file:// is prepended\n \"\"\"\n if '://' not in s:\n return 'file://' + abspath(s)\n else:\n return s\n\n\nclass AudioService():\n \"\"\"\n AudioService object for interacting with the audio subsystem\n\n Args:\n emitter: eventemitter or websocket object\n \"\"\"\n\n def __init__(self, emitter):\n self.emitter = emitter\n self.emitter.on('mycroft.audio.service.track_info_reply',\n self._track_info)\n self.info = None\n\n def _track_info(self, message=None):\n \"\"\"\n Handler for catching returning track info\n \"\"\"\n self.info = message.data\n\n def play(self, tracks=[], utterance=''):\n \"\"\" Start playback.\n\n Args:\n tracks: track uri or list of track uri's\n utterance: forward utterance for further processing by the\n audio service.\n \"\"\"\n if isinstance(tracks, basestring):\n tracks = [tracks]\n elif not isinstance(tracks, list):\n raise ValueError\n tracks = [ensure_uri(t) for t in tracks]\n self.emitter.emit(Message('mycroft.audio.service.play',\n data={'tracks': tracks,\n 'utterance': utterance}))\n\n def stop(self):\n \"\"\" Stop the track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.stop'))\n\n def next(self):\n \"\"\" Change to next track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.next'))\n\n def prev(self):\n \"\"\" Change to previous track. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.prev'))\n\n def pause(self):\n \"\"\" Pause playback. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.pause'))\n\n def resume(self):\n \"\"\" Resume paused playback. \"\"\"\n self.emitter.emit(Message('mycroft.audio.service.resume'))\n\n def track_info(self):\n \"\"\" Request information of current playing track.\n\n Returns:\n Dict with track info.\n \"\"\"\n self.info = None\n self.emitter.emit(Message('mycroft.audio.service.track_info'))\n wait = 5.0\n while self.info is None and wait >= 0:\n time.sleep(0.1)\n wait -= 0.1\n\n return self.info or {}\n\n @property\n def is_playing(self):\n return self.track_info() != {}\n", "path": "mycroft/skills/audioservice.py"}]}
1,302
132
gh_patches_debug_62283
rasdani/github-patches
git_diff
keras-team__autokeras-1367
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception in multi label classification with StructuredDataClassifier ### Bug Description Multi label classification throws the following error when using `StructuredDataClassifier`: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-23-e7c45e9f8b2f> in <module> ----> 1 clf.predict(X) /opt/conda/lib/python3.7/site-packages/autokeras/tasks/structured_data.py in predict(self, x, batch_size, **kwargs) 156 x = self.read_for_predict(x) 157 --> 158 return super().predict(x=x, batch_size=batch_size, **kwargs) 159 160 def evaluate(self, x, y=None, batch_size=32, **kwargs): /opt/conda/lib/python3.7/site-packages/autokeras/auto_model.py in predict(self, x, **kwargs) 421 self._check_data_format((x, None), predict=True) 422 dataset = self._adapt(x, self.inputs) --> 423 pipeline = self.tuner.get_best_pipeline() 424 model = self.tuner.get_best_model() 425 dataset = pipeline.transform_x(dataset) /opt/conda/lib/python3.7/site-packages/autokeras/engine/tuner.py in get_best_pipeline(self) 65 66 def get_best_pipeline(self): ---> 67 return pipeline_module.load_pipeline(self.best_pipeline_path) 68 69 def _pipeline_path(self, trial_id): /opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in load_pipeline(filepath, custom_objects) 73 custom_objects = {} 74 with tf.keras.utils.custom_object_scope(custom_objects): ---> 75 return Pipeline.from_config(utils.load_json(filepath)) 76 77 /opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in from_config(cls, config) 180 for preprocessor in preprocessors 181 ] --> 182 for preprocessors in config["outputs"] 183 ], 184 ) /opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in <listcomp>(.0) 180 for preprocessor in preprocessors 181 ] --> 182 for preprocessors in config["outputs"] 183 ], 184 ) /opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in <listcomp>(.0) 178 [ 179 preprocessors_module.deserialize(preprocessor) --> 180 for preprocessor in preprocessors 181 ] 182 for preprocessors in config["outputs"] /opt/conda/lib/python3.7/site-packages/autokeras/preprocessors/__init__.py in deserialize(config, custom_objects) 33 module_objects=globals(), 34 custom_objects=custom_objects, ---> 35 printable_module_name="preprocessors", 36 ) /opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name) 358 list(custom_objects.items()))) 359 with CustomObjectScope(custom_objects): --> 360 return cls.from_config(cls_config) 361 else: 362 # Then `cls` may be a function returning a class. /opt/conda/lib/python3.7/site-packages/autokeras/engine/serializable.py in from_config(cls, config) 32 config: Dict. The config of the object. 
33 """ ---> 34 return cls(**config) /opt/conda/lib/python3.7/site-packages/autokeras/preprocessors/encoders.py in __init__(self, **kwargs) 125 126 def __init__(self, **kwargs): --> 127 super().__init__(labels=[], **kwargs) 128 129 def transform(self, dataset): TypeError: __init__() got multiple values for keyword argument 'labels' ``` ### Bug Reproduction Code for reproducing the bug: ``` from sklearn.datasets import make_multilabel_classification import autokeras as ak X, y = make_multilabel_classification(n_samples=1000, n_features=20, n_classes=2, n_labels=5) clf = ak.StructuredDataClassifier( overwrite=True, max_trials=3, multi_label=True, objective='val_loss', loss='binary_crossentropy' ) clf.fit(X, y, epochs=3) clf.predict(X) ``` ### Expected Behavior This should not throw an error, but return the predictions of the multi label classification ### Setup Details Include the details about the versions of: - OS type and version: Ubuntu 18.04.4 - Python: 3.7.6 - autokeras: 1.0.9 - keras-tuner: 1.0.2rc2 - scikit-learn: 0.23.2 - numpy: 1.18.5 - pandas: 1.1.2 - tensorflow: 2.3.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `autokeras/preprocessors/encoders.py` Content: ``` 1 # Copyright 2020 The AutoKeras Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import numpy as np 16 import tensorflow as tf 17 18 from autokeras.engine import preprocessor 19 20 21 class Encoder(preprocessor.TargetPreprocessor): 22 """Transform labels to encodings. 23 24 # Arguments 25 labels: A list of labels of any type. The labels to be encoded. 26 """ 27 28 def __init__(self, labels, **kwargs): 29 super().__init__(**kwargs) 30 self.labels = [ 31 label.decode("utf-8") if isinstance(label, bytes) else str(label) 32 for label in labels 33 ] 34 35 def get_config(self): 36 return {"labels": self.labels} 37 38 def fit(self, dataset): 39 return 40 41 def transform(self, dataset): 42 """Transform labels to integer encodings. 43 44 # Arguments 45 dataset: tf.data.Dataset. The dataset to be transformed. 46 47 # Returns 48 tf.data.Dataset. The transformed dataset. 49 """ 50 keys_tensor = tf.constant(self.labels) 51 vals_tensor = tf.constant(list(range(len(self.labels)))) 52 table = tf.lookup.StaticHashTable( 53 tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1 54 ) 55 56 return dataset.map(lambda x: table.lookup(tf.reshape(x, [-1]))) 57 58 59 class OneHotEncoder(Encoder): 60 def transform(self, dataset): 61 """Transform labels to one-hot encodings. 62 63 # Arguments 64 dataset: tf.data.Dataset. The dataset to be transformed. 65 66 # Returns 67 tf.data.Dataset. The transformed dataset. 68 """ 69 dataset = super().transform(dataset) 70 eye = tf.eye(len(self.labels)) 71 dataset = dataset.map(lambda x: tf.nn.embedding_lookup(eye, x)) 72 return dataset 73 74 def postprocess(self, data): 75 """Transform probabilities back to labels. 76 77 # Arguments 78 data: numpy.ndarray. 
The output probabilities of the classification head. 79 80 # Returns 81 numpy.ndarray. The original labels. 82 """ 83 return np.array( 84 list( 85 map( 86 lambda x: self.labels[x], 87 np.argmax(np.array(data), axis=1), 88 ) 89 ) 90 ).reshape(-1, 1) 91 92 93 class LabelEncoder(Encoder): 94 """Transform the labels to integer encodings.""" 95 96 def transform(self, dataset): 97 """Transform labels to integer encodings. 98 99 # Arguments 100 dataset: tf.data.Dataset. The dataset to be transformed. 101 102 # Returns 103 tf.data.Dataset. The transformed dataset. 104 """ 105 dataset = super().transform(dataset) 106 dataset = dataset.map(lambda x: tf.expand_dims(x, axis=-1)) 107 return dataset 108 109 def postprocess(self, data): 110 """Transform probabilities back to labels. 111 112 # Arguments 113 data: numpy.ndarray. The output probabilities of the classification head. 114 115 # Returns 116 numpy.ndarray. The original labels. 117 """ 118 return np.array( 119 list(map(lambda x: self.labels[int(round(x[0]))], np.array(data))) 120 ).reshape(-1, 1) 121 122 123 class MultiLabelEncoder(Encoder): 124 """Encoder for multi-label data.""" 125 126 def __init__(self, **kwargs): 127 super().__init__(labels=[], **kwargs) 128 129 def transform(self, dataset): 130 return dataset 131 132 def postprocess(self, data): 133 """Transform probabilities to zeros and ones. 134 135 # Arguments 136 data: numpy.ndarray. The output probabilities of the classification head. 137 138 # Returns 139 numpy.ndarray. The zeros and ones predictions. 140 """ 141 data[data < 0.5] = 0 142 data[data > 0.5] = 1 143 return data 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/autokeras/preprocessors/encoders.py b/autokeras/preprocessors/encoders.py --- a/autokeras/preprocessors/encoders.py +++ b/autokeras/preprocessors/encoders.py @@ -124,6 +124,7 @@ """Encoder for multi-label data.""" def __init__(self, **kwargs): + kwargs.pop("labels", None) super().__init__(labels=[], **kwargs) def transform(self, dataset):
{"golden_diff": "diff --git a/autokeras/preprocessors/encoders.py b/autokeras/preprocessors/encoders.py\n--- a/autokeras/preprocessors/encoders.py\n+++ b/autokeras/preprocessors/encoders.py\n@@ -124,6 +124,7 @@\n \"\"\"Encoder for multi-label data.\"\"\"\n \n def __init__(self, **kwargs):\n+ kwargs.pop(\"labels\", None)\n super().__init__(labels=[], **kwargs)\n \n def transform(self, dataset):\n", "issue": "Exception in multi label classification with StructuredDataClassifier\n### Bug Description\r\nMulti label classification throws the following error when using `StructuredDataClassifier`:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-23-e7c45e9f8b2f> in <module>\r\n----> 1 clf.predict(X)\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/tasks/structured_data.py in predict(self, x, batch_size, **kwargs)\r\n 156 x = self.read_for_predict(x)\r\n 157 \r\n--> 158 return super().predict(x=x, batch_size=batch_size, **kwargs)\r\n 159 \r\n 160 def evaluate(self, x, y=None, batch_size=32, **kwargs):\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/auto_model.py in predict(self, x, **kwargs)\r\n 421 self._check_data_format((x, None), predict=True)\r\n 422 dataset = self._adapt(x, self.inputs)\r\n--> 423 pipeline = self.tuner.get_best_pipeline()\r\n 424 model = self.tuner.get_best_model()\r\n 425 dataset = pipeline.transform_x(dataset)\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/engine/tuner.py in get_best_pipeline(self)\r\n 65 \r\n 66 def get_best_pipeline(self):\r\n---> 67 return pipeline_module.load_pipeline(self.best_pipeline_path)\r\n 68 \r\n 69 def _pipeline_path(self, trial_id):\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in load_pipeline(filepath, custom_objects)\r\n 73 custom_objects = {}\r\n 74 with tf.keras.utils.custom_object_scope(custom_objects):\r\n---> 75 return Pipeline.from_config(utils.load_json(filepath))\r\n 76 \r\n 77 \r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in from_config(cls, config)\r\n 180 for preprocessor in preprocessors\r\n 181 ]\r\n--> 182 for preprocessors in config[\"outputs\"]\r\n 183 ],\r\n 184 )\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in <listcomp>(.0)\r\n 180 for preprocessor in preprocessors\r\n 181 ]\r\n--> 182 for preprocessors in config[\"outputs\"]\r\n 183 ],\r\n 184 )\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/pipeline.py in <listcomp>(.0)\r\n 178 [\r\n 179 preprocessors_module.deserialize(preprocessor)\r\n--> 180 for preprocessor in preprocessors\r\n 181 ]\r\n 182 for preprocessors in config[\"outputs\"]\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/preprocessors/__init__.py in deserialize(config, custom_objects)\r\n 33 module_objects=globals(),\r\n 34 custom_objects=custom_objects,\r\n---> 35 printable_module_name=\"preprocessors\",\r\n 36 )\r\n\r\n/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)\r\n 358 list(custom_objects.items())))\r\n 359 with CustomObjectScope(custom_objects):\r\n--> 360 return cls.from_config(cls_config)\r\n 361 else:\r\n 362 # Then `cls` may be a function returning a class.\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/engine/serializable.py in from_config(cls, config)\r\n 32 config: Dict. 
The config of the object.\r\n 33 \"\"\"\r\n---> 34 return cls(**config)\r\n\r\n/opt/conda/lib/python3.7/site-packages/autokeras/preprocessors/encoders.py in __init__(self, **kwargs)\r\n 125 \r\n 126 def __init__(self, **kwargs):\r\n--> 127 super().__init__(labels=[], **kwargs)\r\n 128 \r\n 129 def transform(self, dataset):\r\n\r\nTypeError: __init__() got multiple values for keyword argument 'labels'\r\n```\r\n\r\n### Bug Reproduction\r\nCode for reproducing the bug:\r\n\r\n```\r\nfrom sklearn.datasets import make_multilabel_classification\r\nimport autokeras as ak\r\n\r\nX, y = make_multilabel_classification(n_samples=1000, n_features=20, n_classes=2, n_labels=5)\r\nclf = ak.StructuredDataClassifier(\r\n overwrite=True,\r\n max_trials=3,\r\n multi_label=True,\r\n objective='val_loss',\r\n loss='binary_crossentropy'\r\n)\r\nclf.fit(X, y, epochs=3)\r\nclf.predict(X)\r\n```\r\n\r\n### Expected Behavior\r\nThis should not throw an error, but return the predictions of the multi label classification\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Ubuntu 18.04.4\r\n - Python: 3.7.6\r\n - autokeras: 1.0.9\r\n - keras-tuner: 1.0.2rc2\r\n - scikit-learn: 0.23.2\r\n - numpy: 1.18.5\r\n - pandas: 1.1.2\r\n - tensorflow: 2.3.0\r\n\n", "before_files": [{"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom autokeras.engine import preprocessor\n\n\nclass Encoder(preprocessor.TargetPreprocessor):\n \"\"\"Transform labels to encodings.\n\n # Arguments\n labels: A list of labels of any type. The labels to be encoded.\n \"\"\"\n\n def __init__(self, labels, **kwargs):\n super().__init__(**kwargs)\n self.labels = [\n label.decode(\"utf-8\") if isinstance(label, bytes) else str(label)\n for label in labels\n ]\n\n def get_config(self):\n return {\"labels\": self.labels}\n\n def fit(self, dataset):\n return\n\n def transform(self, dataset):\n \"\"\"Transform labels to integer encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. The transformed dataset.\n \"\"\"\n keys_tensor = tf.constant(self.labels)\n vals_tensor = tf.constant(list(range(len(self.labels))))\n table = tf.lookup.StaticHashTable(\n tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1\n )\n\n return dataset.map(lambda x: table.lookup(tf.reshape(x, [-1])))\n\n\nclass OneHotEncoder(Encoder):\n def transform(self, dataset):\n \"\"\"Transform labels to one-hot encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. The transformed dataset.\n \"\"\"\n dataset = super().transform(dataset)\n eye = tf.eye(len(self.labels))\n dataset = dataset.map(lambda x: tf.nn.embedding_lookup(eye, x))\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities back to labels.\n\n # Arguments\n data: numpy.ndarray. 
The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The original labels.\n \"\"\"\n return np.array(\n list(\n map(\n lambda x: self.labels[x],\n np.argmax(np.array(data), axis=1),\n )\n )\n ).reshape(-1, 1)\n\n\nclass LabelEncoder(Encoder):\n \"\"\"Transform the labels to integer encodings.\"\"\"\n\n def transform(self, dataset):\n \"\"\"Transform labels to integer encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. The transformed dataset.\n \"\"\"\n dataset = super().transform(dataset)\n dataset = dataset.map(lambda x: tf.expand_dims(x, axis=-1))\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities back to labels.\n\n # Arguments\n data: numpy.ndarray. The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The original labels.\n \"\"\"\n return np.array(\n list(map(lambda x: self.labels[int(round(x[0]))], np.array(data)))\n ).reshape(-1, 1)\n\n\nclass MultiLabelEncoder(Encoder):\n \"\"\"Encoder for multi-label data.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(labels=[], **kwargs)\n\n def transform(self, dataset):\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities to zeros and ones.\n\n # Arguments\n data: numpy.ndarray. The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The zeros and ones predictions.\n \"\"\"\n data[data < 0.5] = 0\n data[data > 0.5] = 1\n return data\n", "path": "autokeras/preprocessors/encoders.py"}], "after_files": [{"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom autokeras.engine import preprocessor\n\n\nclass Encoder(preprocessor.TargetPreprocessor):\n \"\"\"Transform labels to encodings.\n\n # Arguments\n labels: A list of labels of any type. The labels to be encoded.\n \"\"\"\n\n def __init__(self, labels, **kwargs):\n super().__init__(**kwargs)\n self.labels = [\n label.decode(\"utf-8\") if isinstance(label, bytes) else str(label)\n for label in labels\n ]\n\n def get_config(self):\n return {\"labels\": self.labels}\n\n def fit(self, dataset):\n return\n\n def transform(self, dataset):\n \"\"\"Transform labels to integer encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. The transformed dataset.\n \"\"\"\n keys_tensor = tf.constant(self.labels)\n vals_tensor = tf.constant(list(range(len(self.labels))))\n table = tf.lookup.StaticHashTable(\n tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), -1\n )\n\n return dataset.map(lambda x: table.lookup(tf.reshape(x, [-1])))\n\n\nclass OneHotEncoder(Encoder):\n def transform(self, dataset):\n \"\"\"Transform labels to one-hot encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. 
The transformed dataset.\n \"\"\"\n dataset = super().transform(dataset)\n eye = tf.eye(len(self.labels))\n dataset = dataset.map(lambda x: tf.nn.embedding_lookup(eye, x))\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities back to labels.\n\n # Arguments\n data: numpy.ndarray. The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The original labels.\n \"\"\"\n return np.array(\n list(\n map(\n lambda x: self.labels[x],\n np.argmax(np.array(data), axis=1),\n )\n )\n ).reshape(-1, 1)\n\n\nclass LabelEncoder(Encoder):\n \"\"\"Transform the labels to integer encodings.\"\"\"\n\n def transform(self, dataset):\n \"\"\"Transform labels to integer encodings.\n\n # Arguments\n dataset: tf.data.Dataset. The dataset to be transformed.\n\n # Returns\n tf.data.Dataset. The transformed dataset.\n \"\"\"\n dataset = super().transform(dataset)\n dataset = dataset.map(lambda x: tf.expand_dims(x, axis=-1))\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities back to labels.\n\n # Arguments\n data: numpy.ndarray. The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The original labels.\n \"\"\"\n return np.array(\n list(map(lambda x: self.labels[int(round(x[0]))], np.array(data)))\n ).reshape(-1, 1)\n\n\nclass MultiLabelEncoder(Encoder):\n \"\"\"Encoder for multi-label data.\"\"\"\n\n def __init__(self, **kwargs):\n kwargs.pop(\"labels\", None)\n super().__init__(labels=[], **kwargs)\n\n def transform(self, dataset):\n return dataset\n\n def postprocess(self, data):\n \"\"\"Transform probabilities to zeros and ones.\n\n # Arguments\n data: numpy.ndarray. The output probabilities of the classification head.\n\n # Returns\n numpy.ndarray. The zeros and ones predictions.\n \"\"\"\n data[data < 0.5] = 0\n data[data > 0.5] = 1\n return data\n", "path": "autokeras/preprocessors/encoders.py"}]}
2,715
112
gh_patches_debug_14746
rasdani/github-patches
git_diff
GoogleCloudPlatform__PerfKitBenchmarker-563
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Project id not respected when deleting disk in Cloudstack I forgot to pass project_id as an agrugment when deleting volume. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `perfkitbenchmarker/cloudstack/cloudstack_disk.py` Content: ``` 1 # Copyright 2014 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Module containing classes related to CloudStack disks. 15 16 Disks can be created, deleted, attached to VMs, and detached from VMs. 17 """ 18 19 from perfkitbenchmarker import disk 20 from perfkitbenchmarker import vm_util 21 from perfkitbenchmarker import flags 22 from perfkitbenchmarker.cloudstack import util 23 24 import string 25 import logging 26 27 FLAGS = flags.FLAGS 28 29 30 class CloudStackDisk(disk.BaseDisk): 31 """Object representing a Cloudstack Disk.""" 32 33 34 def __init__(self, disk_spec, name, zone_id, project_id=None): 35 super(CloudStackDisk, self).__init__(disk_spec) 36 37 self.cs = util.CsClient( 38 FLAGS.CS_API_URL, 39 FLAGS.CS_API_KEY, 40 FLAGS.CS_API_SECRET 41 ) 42 43 self.attached_vm_name = None 44 self.attached_vm_id = None 45 self.name = name 46 47 self.zone_id = zone_id 48 self.project_id = project_id 49 50 self.disk_offering_id = self._GetBestOfferingId(self.disk_size) 51 assert self.disk_offering_id, "Unable get disk offering of given size" 52 53 if self.disk_spec.disk_type: 54 logging.warn("Cloudstack does not support disk types") 55 56 57 @vm_util.Retry(max_retries=3) 58 def _Create(self): 59 """Creates the disk.""" 60 61 62 volume = self.cs.create_volume(self.name, 63 self.disk_offering_id, 64 self.zone_id, 65 self.project_id) 66 67 assert volume, "Unable to create volume" 68 69 self.volume_id = volume['id'] 70 self.disk_type = volume['type'] 71 self.actual_disk_size = int(volume['size']) / (2 ** 30) # In GB 72 73 74 def _Delete(self): 75 """Deletes the disk.""" 76 vol = self.cs.get_volume(self.name) 77 if vol: 78 self.cs.delete_volume(self.volume_id) 79 80 81 def _Exists(self): 82 """Returns true if the disk exists.""" 83 vol = self.cs.get_volume(self.name, self.project_id) 84 if vol: 85 return True 86 return False 87 88 89 @vm_util.Retry(max_retries=3) 90 def Attach(self, vm): 91 """Attaches the disk to a VM. 92 93 Args: 94 vm: The CloudStackVirtualMachine instance to which 95 the disk will be attached. 
96 97 """ 98 99 res = self.cs.attach_volume(self.volume_id, vm.id) 100 assert res, "Unable to attach volume" 101 102 self.device_id = res['deviceid'] 103 104 self.device_path = "/dev/xvd" + \ 105 str(string.ascii_lowercase[self.device_id]) 106 107 108 109 def Detach(self): 110 """Detaches the disk from a VM.""" 111 112 self.cs.detach_volume(self.volume_id) 113 114 115 def _GetBestOfferingId(self, disk_size): 116 """ Given a disk_size (in GB), try to find a disk 117 offering that is atleast as big as the requested 118 one. 119 """ 120 121 disk_offerings = self.cs.list_disk_offerings() 122 sorted_do = sorted(disk_offerings, key=lambda x: x['disksize']) 123 124 for do in sorted_do: 125 if int(do['disksize']) >= disk_size: 126 return do['id'] 127 128 return None 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/perfkitbenchmarker/cloudstack/cloudstack_disk.py b/perfkitbenchmarker/cloudstack/cloudstack_disk.py --- a/perfkitbenchmarker/cloudstack/cloudstack_disk.py +++ b/perfkitbenchmarker/cloudstack/cloudstack_disk.py @@ -50,7 +50,7 @@ self.disk_offering_id = self._GetBestOfferingId(self.disk_size) assert self.disk_offering_id, "Unable get disk offering of given size" - if self.disk_spec.disk_type: + if disk_spec.disk_type: logging.warn("Cloudstack does not support disk types") @@ -73,7 +73,7 @@ def _Delete(self): """Deletes the disk.""" - vol = self.cs.get_volume(self.name) + vol = self.cs.get_volume(self.name, self.project_id) if vol: self.cs.delete_volume(self.volume_id)
{"golden_diff": "diff --git a/perfkitbenchmarker/cloudstack/cloudstack_disk.py b/perfkitbenchmarker/cloudstack/cloudstack_disk.py\n--- a/perfkitbenchmarker/cloudstack/cloudstack_disk.py\n+++ b/perfkitbenchmarker/cloudstack/cloudstack_disk.py\n@@ -50,7 +50,7 @@\n self.disk_offering_id = self._GetBestOfferingId(self.disk_size)\n assert self.disk_offering_id, \"Unable get disk offering of given size\"\n \n- if self.disk_spec.disk_type:\n+ if disk_spec.disk_type:\n logging.warn(\"Cloudstack does not support disk types\")\n \n \n@@ -73,7 +73,7 @@\n \n def _Delete(self):\n \"\"\"Deletes the disk.\"\"\"\n- vol = self.cs.get_volume(self.name)\n+ vol = self.cs.get_volume(self.name, self.project_id)\n if vol:\n self.cs.delete_volume(self.volume_id)\n", "issue": "Project id not respected when deleting disk in Cloudstack\nI forgot to pass project_id as an agrugment when deleting volume. \n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module containing classes related to CloudStack disks.\n\nDisks can be created, deleted, attached to VMs, and detached from VMs.\n\"\"\"\n\nfrom perfkitbenchmarker import disk\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker.cloudstack import util\n\nimport string\nimport logging\n\nFLAGS = flags.FLAGS\n\n\nclass CloudStackDisk(disk.BaseDisk):\n \"\"\"Object representing a Cloudstack Disk.\"\"\"\n\n\n def __init__(self, disk_spec, name, zone_id, project_id=None):\n super(CloudStackDisk, self).__init__(disk_spec)\n\n self.cs = util.CsClient(\n FLAGS.CS_API_URL,\n FLAGS.CS_API_KEY,\n FLAGS.CS_API_SECRET\n )\n\n self.attached_vm_name = None\n self.attached_vm_id = None\n self.name = name\n\n self.zone_id = zone_id\n self.project_id = project_id\n\n self.disk_offering_id = self._GetBestOfferingId(self.disk_size)\n assert self.disk_offering_id, \"Unable get disk offering of given size\"\n\n if self.disk_spec.disk_type:\n logging.warn(\"Cloudstack does not support disk types\")\n\n\n @vm_util.Retry(max_retries=3)\n def _Create(self):\n \"\"\"Creates the disk.\"\"\"\n\n\n volume = self.cs.create_volume(self.name,\n self.disk_offering_id,\n self.zone_id,\n self.project_id)\n\n assert volume, \"Unable to create volume\"\n\n self.volume_id = volume['id']\n self.disk_type = volume['type']\n self.actual_disk_size = int(volume['size']) / (2 ** 30) # In GB\n\n\n def _Delete(self):\n \"\"\"Deletes the disk.\"\"\"\n vol = self.cs.get_volume(self.name)\n if vol:\n self.cs.delete_volume(self.volume_id)\n\n\n def _Exists(self):\n \"\"\"Returns true if the disk exists.\"\"\"\n vol = self.cs.get_volume(self.name, self.project_id)\n if vol:\n return True\n return False\n\n\n @vm_util.Retry(max_retries=3)\n def Attach(self, vm):\n \"\"\"Attaches the disk to a VM.\n\n Args:\n vm: The CloudStackVirtualMachine instance to which\n the disk will be attached.\n\n \"\"\"\n\n res = self.cs.attach_volume(self.volume_id, vm.id)\n assert res, \"Unable to 
attach volume\"\n\n self.device_id = res['deviceid']\n\n self.device_path = \"/dev/xvd\" + \\\n str(string.ascii_lowercase[self.device_id])\n\n\n\n def Detach(self):\n \"\"\"Detaches the disk from a VM.\"\"\"\n\n self.cs.detach_volume(self.volume_id)\n\n\n def _GetBestOfferingId(self, disk_size):\n \"\"\" Given a disk_size (in GB), try to find a disk\n offering that is atleast as big as the requested\n one.\n \"\"\"\n\n disk_offerings = self.cs.list_disk_offerings()\n sorted_do = sorted(disk_offerings, key=lambda x: x['disksize'])\n\n for do in sorted_do:\n if int(do['disksize']) >= disk_size:\n return do['id']\n\n return None\n", "path": "perfkitbenchmarker/cloudstack/cloudstack_disk.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module containing classes related to CloudStack disks.\n\nDisks can be created, deleted, attached to VMs, and detached from VMs.\n\"\"\"\n\nfrom perfkitbenchmarker import disk\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker.cloudstack import util\n\nimport string\nimport logging\n\nFLAGS = flags.FLAGS\n\n\nclass CloudStackDisk(disk.BaseDisk):\n \"\"\"Object representing a Cloudstack Disk.\"\"\"\n\n\n def __init__(self, disk_spec, name, zone_id, project_id=None):\n super(CloudStackDisk, self).__init__(disk_spec)\n\n self.cs = util.CsClient(\n FLAGS.CS_API_URL,\n FLAGS.CS_API_KEY,\n FLAGS.CS_API_SECRET\n )\n\n self.attached_vm_name = None\n self.attached_vm_id = None\n self.name = name\n\n self.zone_id = zone_id\n self.project_id = project_id\n\n self.disk_offering_id = self._GetBestOfferingId(self.disk_size)\n assert self.disk_offering_id, \"Unable get disk offering of given size\"\n\n if disk_spec.disk_type:\n logging.warn(\"Cloudstack does not support disk types\")\n\n\n @vm_util.Retry(max_retries=3)\n def _Create(self):\n \"\"\"Creates the disk.\"\"\"\n\n\n volume = self.cs.create_volume(self.name,\n self.disk_offering_id,\n self.zone_id,\n self.project_id)\n\n assert volume, \"Unable to create volume\"\n\n self.volume_id = volume['id']\n self.disk_type = volume['type']\n self.actual_disk_size = int(volume['size']) / (2 ** 30) # In GB\n\n\n def _Delete(self):\n \"\"\"Deletes the disk.\"\"\"\n vol = self.cs.get_volume(self.name, self.project_id)\n if vol:\n self.cs.delete_volume(self.volume_id)\n\n\n def _Exists(self):\n \"\"\"Returns true if the disk exists.\"\"\"\n vol = self.cs.get_volume(self.name, self.project_id)\n if vol:\n return True\n return False\n\n\n @vm_util.Retry(max_retries=3)\n def Attach(self, vm):\n \"\"\"Attaches the disk to a VM.\n\n Args:\n vm: The CloudStackVirtualMachine instance to which\n the disk will be attached.\n\n \"\"\"\n\n res = self.cs.attach_volume(self.volume_id, vm.id)\n assert res, \"Unable to attach volume\"\n\n self.device_id = res['deviceid']\n\n self.device_path = \"/dev/xvd\" + \\\n str(string.ascii_lowercase[self.device_id])\n\n\n\n def Detach(self):\n \"\"\"Detaches the 
disk from a VM.\"\"\"\n\n self.cs.detach_volume(self.volume_id)\n\n\n def _GetBestOfferingId(self, disk_size):\n \"\"\" Given a disk_size (in GB), try to find a disk\n offering that is atleast as big as the requested\n one.\n \"\"\"\n\n disk_offerings = self.cs.list_disk_offerings()\n sorted_do = sorted(disk_offerings, key=lambda x: x['disksize'])\n\n for do in sorted_do:\n if int(do['disksize']) >= disk_size:\n return do['id']\n\n return None\n", "path": "perfkitbenchmarker/cloudstack/cloudstack_disk.py"}]}
1,415
198
gh_patches_debug_25894
rasdani/github-patches
git_diff
sopel-irc__sopel-2545
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Broken plugin symlink -> `PluginError` in CLI ### Description The `sopel-plugins` command should gracefully handle things that look like plugins but can't be loaded. It does this in some cases (like Python file plugins with syntax errors), but not all. There's room for improvement. For example, I recently symlinked a file in `~/.sopel/plugins` so it would point to my local repository for that plugin (not yet converted to an entry-point or package) for testing. I did the `ln -s` command wrong, so the link was broken: `pluginfile.py -> ./pluginfile.py`, which is a circular reference. ### Logs ``` Traceback (most recent call last): File "/home/dgw/.local/bin/sopel-plugins", line 33, in <module> sys.exit(load_entry_point('sopel', 'console_scripts', 'sopel-plugins')()) File "/home/dgw/github/sopel/sopel/cli/plugins.py", line 556, in main return handle_list(options) File "/home/dgw/github/sopel/sopel/cli/plugins.py", line 174, in handle_list for name, info in plugins.get_usable_plugins(settings).items() File "/home/dgw/github/sopel/sopel/plugins/__init__.py", line 225, in get_usable_plugins plugins_info = collections.OrderedDict( File "/home/dgw/github/sopel/sopel/plugins/__init__.py", line 225, in <genexpr> plugins_info = collections.OrderedDict( File "/home/dgw/github/sopel/sopel/plugins/__init__.py", line 185, in enumerate_plugins for plugin in all_plugins: File "/home/dgw/github/sopel/sopel/plugins/__init__.py", line 124, in find_directory_plugins yield handlers.PyFilePlugin(abspath) File "/home/dgw/github/sopel/sopel/plugins/handlers.py", line 440, in __init__ raise exceptions.PluginError('Invalid Sopel plugin: %s' % filename) sopel.plugins.exceptions.PluginError: Invalid Sopel plugin: /home/dgw/.sopel/plugins/bombbot.py ``` ### Environment - Sopel `.version`: 8b463a0e49cac7a27dc528a180243a6a5183d1f9 - Sopel installed via: `pip -e` - Python version: 3.9.5 - Operating system: Ubuntu 20.04 - IRCd `/version`: n/a - Relevant plugins: n/a --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/plugins/__init__.py` Content: ``` 1 """Sopel's plugins interface. 2 3 .. versionadded:: 7.0 4 5 Sopel uses what are called Plugin Handlers as an interface between the bot and 6 its plugins (formerly called "modules"). This interface is defined by the 7 :class:`~.handlers.AbstractPluginHandler` abstract class. 8 9 Plugins that can be used by Sopel are provided by :func:`~.get_usable_plugins` 10 in an :class:`ordered dict<collections.OrderedDict>`. This dict contains one 11 and only one plugin per unique name, using a specific order: 12 13 * extra directories defined in the settings 14 * homedir's ``plugins`` directory 15 * ``sopel.plugins`` entry point group 16 * ``sopel_modules``'s subpackages 17 * ``sopel.builtins``'s core plugins 18 19 (The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and 20 cannot be overridden.) 21 22 To find all plugins (no matter their sources), the :func:`~.enumerate_plugins` 23 function can be used. For a more fine-grained search, ``find_*`` functions 24 exist for each type of plugin. 25 """ 26 # Copyright 2019, Florian Strzelecki <[email protected]> 27 # 28 # Licensed under the Eiffel Forum License 2. 
29 from __future__ import annotations 30 31 import collections 32 import importlib 33 import itertools 34 import os 35 36 # TODO: use stdlib importlib.metadata when possible, after dropping py3.9. 37 # Stdlib does not support `entry_points(group='filter')` until py3.10, but 38 # fallback logic is more trouble than it's worth when e.g. clean Ubuntu 39 # py3.10 envs include old versions of this backport. 40 import importlib_metadata 41 42 from . import exceptions, handlers, rules # noqa 43 44 45 def _list_plugin_filenames(directory): 46 # list plugin filenames from a directory 47 # yield 2-value tuples: (name, absolute path) 48 base = os.path.abspath(directory) 49 for filename in os.listdir(base): 50 abspath = os.path.join(base, filename) 51 52 if os.path.isdir(abspath): 53 if os.path.isfile(os.path.join(abspath, '__init__.py')): 54 yield os.path.basename(filename), abspath 55 else: 56 name, ext = os.path.splitext(filename) 57 if ext == '.py' and name != '__init__': 58 yield name, abspath 59 60 61 def find_internal_plugins(): 62 """List internal plugins. 63 64 :return: yield instances of :class:`~.handlers.PyModulePlugin` 65 configured for ``sopel.builtins.*`` 66 67 Internal plugins can be found under ``sopel.builtins``. This list does not 68 include the ``coretasks`` plugin. 69 """ 70 builtins = importlib.util.find_spec('sopel.builtins') 71 if builtins is None or builtins.submodule_search_locations is None: 72 raise RuntimeError('Cannot resolve internal plugins') 73 plugin_list = itertools.chain.from_iterable( 74 _list_plugin_filenames(path) 75 for path in builtins.submodule_search_locations 76 ) 77 78 for name, _ in set(plugin_list): 79 yield handlers.PyModulePlugin(name, 'sopel.builtins') 80 81 82 def find_sopel_modules_plugins(): 83 """List plugins from ``sopel_modules.*``. 84 85 :return: yield instances of :class:`~.handlers.PyModulePlugin` 86 configured for ``sopel_modules.*`` 87 88 Before entry point plugins, the only way to package a plugin was to follow 89 :pep:`382` by using the ``sopel_modules`` namespace. This function is 90 responsible to load such plugins. 91 """ 92 try: 93 import sopel_modules # type: ignore[import] 94 except ImportError: 95 return 96 97 for plugin_dir in set(sopel_modules.__path__): 98 for name, _ in _list_plugin_filenames(plugin_dir): 99 yield handlers.PyModulePlugin(name, 'sopel_modules') 100 101 102 def find_entry_point_plugins(group='sopel.plugins'): 103 """List plugins from an entry point group. 104 105 :param str group: entry point group to search in (defaults to 106 ``sopel.plugins``) 107 :return: yield instances of :class:`~.handlers.EntryPointPlugin` 108 created from each entry point in the ``group`` 109 110 This function finds plugins declared under an entry point group; by 111 default it looks in the ``sopel.plugins`` group. 112 """ 113 for entry_point in importlib_metadata.entry_points(group=group): 114 yield handlers.EntryPointPlugin(entry_point) 115 116 117 def find_directory_plugins(directory): 118 """List plugins from a ``directory``. 119 120 :param str directory: directory path to search 121 :return: yield instances of :class:`~.handlers.PyFilePlugin` 122 found in ``directory`` 123 124 This function looks for single file and folder plugins in a directory. 125 """ 126 for _, abspath in _list_plugin_filenames(directory): 127 yield handlers.PyFilePlugin(abspath) 128 129 130 def enumerate_plugins(settings): 131 """Yield Sopel's plugins. 
132 133 :param settings: Sopel's configuration 134 :type settings: :class:`sopel.config.Config` 135 :return: yield 2-value tuple: an instance of 136 :class:`~.handlers.AbstractPluginHandler`, and if the plugin is 137 active or not 138 139 This function uses the find functions to find all of Sopel's available 140 plugins. It uses the bot's ``settings`` to determine if the plugin is 141 enabled or disabled. 142 143 .. seealso:: 144 145 The find functions used are: 146 147 * :func:`find_internal_plugins` for internal plugins 148 * :func:`find_sopel_modules_plugins` for ``sopel_modules.*`` plugins 149 * :func:`find_entry_point_plugins` for plugins exposed via packages' 150 entry points 151 * :func:`find_directory_plugins` for plugins in ``$homedir/plugins``, 152 and in extra directories as defined by ``settings.core.extra`` 153 154 .. versionchanged:: 8.0 155 156 Looks in ``$homedir/plugins`` instead of the ``$homedir/modules`` 157 directory, reflecting Sopel's shift away from calling them "modules". 158 159 """ 160 from_internals = find_internal_plugins() 161 from_sopel_modules = find_sopel_modules_plugins() 162 from_entry_points = find_entry_point_plugins() 163 # load from directories 164 source_dirs = [ 165 os.path.join(settings.homedir, 'plugins'), 166 ] 167 if settings.core.extra: 168 source_dirs = source_dirs + settings.core.extra 169 170 from_directories = [ 171 find_directory_plugins(source_dir) 172 for source_dir in source_dirs 173 if os.path.isdir(source_dir) 174 ] 175 176 # Retrieve all plugins 177 all_plugins = itertools.chain( 178 from_internals, 179 from_sopel_modules, 180 from_entry_points, 181 *from_directories) 182 183 # Get plugin settings 184 enabled = settings.core.enable 185 disabled = settings.core.exclude 186 187 # Yield all found plugins with their enabled status (True/False) 188 for plugin in all_plugins: 189 name = plugin.name 190 is_enabled = name not in disabled and (not enabled or name in enabled) 191 yield plugin, is_enabled 192 193 # And always yield coretasks 194 yield handlers.PyModulePlugin('coretasks', 'sopel'), True 195 196 197 def get_usable_plugins(settings): 198 """Get usable plugins, unique per name. 199 200 :param settings: Sopel's configuration 201 :type settings: :class:`sopel.config.Config` 202 :return: an ordered dict of usable plugins 203 :rtype: collections.OrderedDict 204 205 This function provides the plugins Sopel can use to load, enable, 206 or disable, as an :class:`ordered dict<collections.OrderedDict>`. This dict 207 contains one and only one plugin per unique name, using a specific order: 208 209 * extra directories defined in the settings 210 * homedir's ``plugins`` directory 211 * ``sopel.plugins`` entry point group 212 * ``sopel_modules``'s subpackages 213 * ``sopel.builtins``'s core plugins 214 215 (The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and 216 cannot be overridden.) 217 218 .. seealso:: 219 220 The :func:`~.enumerate_plugins` function is used to generate a list 221 of all possible plugins, and its return value is used to populate 222 the :class:`ordered dict<collections.OrderedDict>`. 
223 224 """ 225 # Use an OrderedDict to get one and only one plugin per name 226 # based on what plugins.enumerate_plugins does, external plugins are 227 # allowed to override internal plugins 228 plugins_info = collections.OrderedDict( 229 (plugin.name, (plugin, is_enabled)) 230 for plugin, is_enabled in enumerate_plugins(settings)) 231 # reset coretasks's position at the end of the loading queue 232 plugins_info.move_to_end('coretasks') 233 234 return plugins_info 235 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/plugins/__init__.py b/sopel/plugins/__init__.py --- a/sopel/plugins/__init__.py +++ b/sopel/plugins/__init__.py @@ -31,7 +31,9 @@ import collections import importlib import itertools +import logging import os +from typing import TYPE_CHECKING, Union # TODO: use stdlib importlib.metadata when possible, after dropping py3.9. # Stdlib does not support `entry_points(group='filter')` until py3.10, but @@ -41,13 +43,22 @@ from . import exceptions, handlers, rules # noqa +if TYPE_CHECKING: + from collections.abc import Iterable -def _list_plugin_filenames(directory): + +LOGGER = logging.getLogger(__name__) + + +def _list_plugin_filenames(directory: Union[str, os.PathLike]) -> Iterable[tuple[str, str]]: # list plugin filenames from a directory # yield 2-value tuples: (name, absolute path) base = os.path.abspath(directory) for filename in os.listdir(base): - abspath = os.path.join(base, filename) + abspath = os.path.realpath(os.path.join(base, filename)) + if not os.path.exists(abspath): + LOGGER.warning("Plugin path does not exist, skipping: %r", abspath) + continue if os.path.isdir(abspath): if os.path.isfile(os.path.join(abspath, '__init__.py')):
{"golden_diff": "diff --git a/sopel/plugins/__init__.py b/sopel/plugins/__init__.py\n--- a/sopel/plugins/__init__.py\n+++ b/sopel/plugins/__init__.py\n@@ -31,7 +31,9 @@\n import collections\n import importlib\n import itertools\n+import logging\n import os\n+from typing import TYPE_CHECKING, Union\n \n # TODO: use stdlib importlib.metadata when possible, after dropping py3.9.\n # Stdlib does not support `entry_points(group='filter')` until py3.10, but\n@@ -41,13 +43,22 @@\n \n from . import exceptions, handlers, rules # noqa\n \n+if TYPE_CHECKING:\n+ from collections.abc import Iterable\n \n-def _list_plugin_filenames(directory):\n+\n+LOGGER = logging.getLogger(__name__)\n+\n+\n+def _list_plugin_filenames(directory: Union[str, os.PathLike]) -> Iterable[tuple[str, str]]:\n # list plugin filenames from a directory\n # yield 2-value tuples: (name, absolute path)\n base = os.path.abspath(directory)\n for filename in os.listdir(base):\n- abspath = os.path.join(base, filename)\n+ abspath = os.path.realpath(os.path.join(base, filename))\n+ if not os.path.exists(abspath):\n+ LOGGER.warning(\"Plugin path does not exist, skipping: %r\", abspath)\n+ continue\n \n if os.path.isdir(abspath):\n if os.path.isfile(os.path.join(abspath, '__init__.py')):\n", "issue": "Broken plugin symlink -> `PluginError` in CLI\n### Description\r\nThe `sopel-plugins` command should gracefully handle things that look like plugins but can't be loaded. It does this in some cases (like Python file plugins with syntax errors), but not all. There's room for improvement.\r\n\r\nFor example, I recently symlinked a file in `~/.sopel/plugins` so it would point to my local repository for that plugin (not yet converted to an entry-point or package) for testing. I did the `ln -s` command wrong, so the link was broken: `pluginfile.py -> ./pluginfile.py`, which is a circular reference.\r\n\r\n### Logs\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/dgw/.local/bin/sopel-plugins\", line 33, in <module>\r\n sys.exit(load_entry_point('sopel', 'console_scripts', 'sopel-plugins')())\r\n File \"/home/dgw/github/sopel/sopel/cli/plugins.py\", line 556, in main\r\n return handle_list(options)\r\n File \"/home/dgw/github/sopel/sopel/cli/plugins.py\", line 174, in handle_list\r\n for name, info in plugins.get_usable_plugins(settings).items()\r\n File \"/home/dgw/github/sopel/sopel/plugins/__init__.py\", line 225, in get_usable_plugins\r\n plugins_info = collections.OrderedDict(\r\n File \"/home/dgw/github/sopel/sopel/plugins/__init__.py\", line 225, in <genexpr>\r\n plugins_info = collections.OrderedDict(\r\n File \"/home/dgw/github/sopel/sopel/plugins/__init__.py\", line 185, in enumerate_plugins\r\n for plugin in all_plugins:\r\n File \"/home/dgw/github/sopel/sopel/plugins/__init__.py\", line 124, in find_directory_plugins\r\n yield handlers.PyFilePlugin(abspath)\r\n File \"/home/dgw/github/sopel/sopel/plugins/handlers.py\", line 440, in __init__\r\n raise exceptions.PluginError('Invalid Sopel plugin: %s' % filename)\r\nsopel.plugins.exceptions.PluginError: Invalid Sopel plugin: /home/dgw/.sopel/plugins/bombbot.py\r\n```\r\n\r\n### Environment\r\n- Sopel `.version`: 8b463a0e49cac7a27dc528a180243a6a5183d1f9 \r\n- Sopel installed via: `pip -e`\r\n- Python version: 3.9.5\r\n- Operating system: Ubuntu 20.04\r\n- IRCd `/version`: n/a\r\n- Relevant plugins: n/a\n", "before_files": [{"content": "\"\"\"Sopel's plugins interface.\n\n.. 
versionadded:: 7.0\n\nSopel uses what are called Plugin Handlers as an interface between the bot and\nits plugins (formerly called \"modules\"). This interface is defined by the\n:class:`~.handlers.AbstractPluginHandler` abstract class.\n\nPlugins that can be used by Sopel are provided by :func:`~.get_usable_plugins`\nin an :class:`ordered dict<collections.OrderedDict>`. This dict contains one\nand only one plugin per unique name, using a specific order:\n\n* extra directories defined in the settings\n* homedir's ``plugins`` directory\n* ``sopel.plugins`` entry point group\n* ``sopel_modules``'s subpackages\n* ``sopel.builtins``'s core plugins\n\n(The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and\ncannot be overridden.)\n\nTo find all plugins (no matter their sources), the :func:`~.enumerate_plugins`\nfunction can be used. For a more fine-grained search, ``find_*`` functions\nexist for each type of plugin.\n\"\"\"\n# Copyright 2019, Florian Strzelecki <[email protected]>\n#\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import annotations\n\nimport collections\nimport importlib\nimport itertools\nimport os\n\n# TODO: use stdlib importlib.metadata when possible, after dropping py3.9.\n# Stdlib does not support `entry_points(group='filter')` until py3.10, but\n# fallback logic is more trouble than it's worth when e.g. clean Ubuntu\n# py3.10 envs include old versions of this backport.\nimport importlib_metadata\n\nfrom . import exceptions, handlers, rules # noqa\n\n\ndef _list_plugin_filenames(directory):\n # list plugin filenames from a directory\n # yield 2-value tuples: (name, absolute path)\n base = os.path.abspath(directory)\n for filename in os.listdir(base):\n abspath = os.path.join(base, filename)\n\n if os.path.isdir(abspath):\n if os.path.isfile(os.path.join(abspath, '__init__.py')):\n yield os.path.basename(filename), abspath\n else:\n name, ext = os.path.splitext(filename)\n if ext == '.py' and name != '__init__':\n yield name, abspath\n\n\ndef find_internal_plugins():\n \"\"\"List internal plugins.\n\n :return: yield instances of :class:`~.handlers.PyModulePlugin`\n configured for ``sopel.builtins.*``\n\n Internal plugins can be found under ``sopel.builtins``. This list does not\n include the ``coretasks`` plugin.\n \"\"\"\n builtins = importlib.util.find_spec('sopel.builtins')\n if builtins is None or builtins.submodule_search_locations is None:\n raise RuntimeError('Cannot resolve internal plugins')\n plugin_list = itertools.chain.from_iterable(\n _list_plugin_filenames(path)\n for path in builtins.submodule_search_locations\n )\n\n for name, _ in set(plugin_list):\n yield handlers.PyModulePlugin(name, 'sopel.builtins')\n\n\ndef find_sopel_modules_plugins():\n \"\"\"List plugins from ``sopel_modules.*``.\n\n :return: yield instances of :class:`~.handlers.PyModulePlugin`\n configured for ``sopel_modules.*``\n\n Before entry point plugins, the only way to package a plugin was to follow\n :pep:`382` by using the ``sopel_modules`` namespace. 
This function is\n responsible to load such plugins.\n \"\"\"\n try:\n import sopel_modules # type: ignore[import]\n except ImportError:\n return\n\n for plugin_dir in set(sopel_modules.__path__):\n for name, _ in _list_plugin_filenames(plugin_dir):\n yield handlers.PyModulePlugin(name, 'sopel_modules')\n\n\ndef find_entry_point_plugins(group='sopel.plugins'):\n \"\"\"List plugins from an entry point group.\n\n :param str group: entry point group to search in (defaults to\n ``sopel.plugins``)\n :return: yield instances of :class:`~.handlers.EntryPointPlugin`\n created from each entry point in the ``group``\n\n This function finds plugins declared under an entry point group; by\n default it looks in the ``sopel.plugins`` group.\n \"\"\"\n for entry_point in importlib_metadata.entry_points(group=group):\n yield handlers.EntryPointPlugin(entry_point)\n\n\ndef find_directory_plugins(directory):\n \"\"\"List plugins from a ``directory``.\n\n :param str directory: directory path to search\n :return: yield instances of :class:`~.handlers.PyFilePlugin`\n found in ``directory``\n\n This function looks for single file and folder plugins in a directory.\n \"\"\"\n for _, abspath in _list_plugin_filenames(directory):\n yield handlers.PyFilePlugin(abspath)\n\n\ndef enumerate_plugins(settings):\n \"\"\"Yield Sopel's plugins.\n\n :param settings: Sopel's configuration\n :type settings: :class:`sopel.config.Config`\n :return: yield 2-value tuple: an instance of\n :class:`~.handlers.AbstractPluginHandler`, and if the plugin is\n active or not\n\n This function uses the find functions to find all of Sopel's available\n plugins. It uses the bot's ``settings`` to determine if the plugin is\n enabled or disabled.\n\n .. seealso::\n\n The find functions used are:\n\n * :func:`find_internal_plugins` for internal plugins\n * :func:`find_sopel_modules_plugins` for ``sopel_modules.*`` plugins\n * :func:`find_entry_point_plugins` for plugins exposed via packages'\n entry points\n * :func:`find_directory_plugins` for plugins in ``$homedir/plugins``,\n and in extra directories as defined by ``settings.core.extra``\n\n .. 
versionchanged:: 8.0\n\n Looks in ``$homedir/plugins`` instead of the ``$homedir/modules``\n directory, reflecting Sopel's shift away from calling them \"modules\".\n\n \"\"\"\n from_internals = find_internal_plugins()\n from_sopel_modules = find_sopel_modules_plugins()\n from_entry_points = find_entry_point_plugins()\n # load from directories\n source_dirs = [\n os.path.join(settings.homedir, 'plugins'),\n ]\n if settings.core.extra:\n source_dirs = source_dirs + settings.core.extra\n\n from_directories = [\n find_directory_plugins(source_dir)\n for source_dir in source_dirs\n if os.path.isdir(source_dir)\n ]\n\n # Retrieve all plugins\n all_plugins = itertools.chain(\n from_internals,\n from_sopel_modules,\n from_entry_points,\n *from_directories)\n\n # Get plugin settings\n enabled = settings.core.enable\n disabled = settings.core.exclude\n\n # Yield all found plugins with their enabled status (True/False)\n for plugin in all_plugins:\n name = plugin.name\n is_enabled = name not in disabled and (not enabled or name in enabled)\n yield plugin, is_enabled\n\n # And always yield coretasks\n yield handlers.PyModulePlugin('coretasks', 'sopel'), True\n\n\ndef get_usable_plugins(settings):\n \"\"\"Get usable plugins, unique per name.\n\n :param settings: Sopel's configuration\n :type settings: :class:`sopel.config.Config`\n :return: an ordered dict of usable plugins\n :rtype: collections.OrderedDict\n\n This function provides the plugins Sopel can use to load, enable,\n or disable, as an :class:`ordered dict<collections.OrderedDict>`. This dict\n contains one and only one plugin per unique name, using a specific order:\n\n * extra directories defined in the settings\n * homedir's ``plugins`` directory\n * ``sopel.plugins`` entry point group\n * ``sopel_modules``'s subpackages\n * ``sopel.builtins``'s core plugins\n\n (The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and\n cannot be overridden.)\n\n .. seealso::\n\n The :func:`~.enumerate_plugins` function is used to generate a list\n of all possible plugins, and its return value is used to populate\n the :class:`ordered dict<collections.OrderedDict>`.\n\n \"\"\"\n # Use an OrderedDict to get one and only one plugin per name\n # based on what plugins.enumerate_plugins does, external plugins are\n # allowed to override internal plugins\n plugins_info = collections.OrderedDict(\n (plugin.name, (plugin, is_enabled))\n for plugin, is_enabled in enumerate_plugins(settings))\n # reset coretasks's position at the end of the loading queue\n plugins_info.move_to_end('coretasks')\n\n return plugins_info\n", "path": "sopel/plugins/__init__.py"}], "after_files": [{"content": "\"\"\"Sopel's plugins interface.\n\n.. versionadded:: 7.0\n\nSopel uses what are called Plugin Handlers as an interface between the bot and\nits plugins (formerly called \"modules\"). This interface is defined by the\n:class:`~.handlers.AbstractPluginHandler` abstract class.\n\nPlugins that can be used by Sopel are provided by :func:`~.get_usable_plugins`\nin an :class:`ordered dict<collections.OrderedDict>`. 
This dict contains one\nand only one plugin per unique name, using a specific order:\n\n* extra directories defined in the settings\n* homedir's ``plugins`` directory\n* ``sopel.plugins`` entry point group\n* ``sopel_modules``'s subpackages\n* ``sopel.builtins``'s core plugins\n\n(The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and\ncannot be overridden.)\n\nTo find all plugins (no matter their sources), the :func:`~.enumerate_plugins`\nfunction can be used. For a more fine-grained search, ``find_*`` functions\nexist for each type of plugin.\n\"\"\"\n# Copyright 2019, Florian Strzelecki <[email protected]>\n#\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import annotations\n\nimport collections\nimport importlib\nimport itertools\nimport logging\nimport os\nfrom typing import TYPE_CHECKING, Union\n\n# TODO: use stdlib importlib.metadata when possible, after dropping py3.9.\n# Stdlib does not support `entry_points(group='filter')` until py3.10, but\n# fallback logic is more trouble than it's worth when e.g. clean Ubuntu\n# py3.10 envs include old versions of this backport.\nimport importlib_metadata\n\nfrom . import exceptions, handlers, rules # noqa\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef _list_plugin_filenames(directory: Union[str, os.PathLike]) -> Iterable[tuple[str, str]]:\n # list plugin filenames from a directory\n # yield 2-value tuples: (name, absolute path)\n base = os.path.abspath(directory)\n for filename in os.listdir(base):\n abspath = os.path.realpath(os.path.join(base, filename))\n if not os.path.exists(abspath):\n LOGGER.warning(\"Plugin path does not exist, skipping: %r\", abspath)\n continue\n\n if os.path.isdir(abspath):\n if os.path.isfile(os.path.join(abspath, '__init__.py')):\n yield os.path.basename(filename), abspath\n else:\n name, ext = os.path.splitext(filename)\n if ext == '.py' and name != '__init__':\n yield name, abspath\n\n\ndef find_internal_plugins():\n \"\"\"List internal plugins.\n\n :return: yield instances of :class:`~.handlers.PyModulePlugin`\n configured for ``sopel.builtins.*``\n\n Internal plugins can be found under ``sopel.builtins``. This list does not\n include the ``coretasks`` plugin.\n \"\"\"\n builtins = importlib.util.find_spec('sopel.builtins')\n if builtins is None or builtins.submodule_search_locations is None:\n raise RuntimeError('Cannot resolve internal plugins')\n plugin_list = itertools.chain.from_iterable(\n _list_plugin_filenames(path)\n for path in builtins.submodule_search_locations\n )\n\n for name, _ in set(plugin_list):\n yield handlers.PyModulePlugin(name, 'sopel.builtins')\n\n\ndef find_sopel_modules_plugins():\n \"\"\"List plugins from ``sopel_modules.*``.\n\n :return: yield instances of :class:`~.handlers.PyModulePlugin`\n configured for ``sopel_modules.*``\n\n Before entry point plugins, the only way to package a plugin was to follow\n :pep:`382` by using the ``sopel_modules`` namespace. 
This function is\n responsible to load such plugins.\n \"\"\"\n try:\n import sopel_modules # type: ignore[import]\n except ImportError:\n return\n\n for plugin_dir in set(sopel_modules.__path__):\n for name, _ in _list_plugin_filenames(plugin_dir):\n yield handlers.PyModulePlugin(name, 'sopel_modules')\n\n\ndef find_entry_point_plugins(group='sopel.plugins'):\n \"\"\"List plugins from an entry point group.\n\n :param str group: entry point group to search in (defaults to\n ``sopel.plugins``)\n :return: yield instances of :class:`~.handlers.EntryPointPlugin`\n created from each entry point in the ``group``\n\n This function finds plugins declared under an entry point group; by\n default it looks in the ``sopel.plugins`` group.\n \"\"\"\n for entry_point in importlib_metadata.entry_points(group=group):\n yield handlers.EntryPointPlugin(entry_point)\n\n\ndef find_directory_plugins(directory):\n \"\"\"List plugins from a ``directory``.\n\n :param str directory: directory path to search\n :return: yield instances of :class:`~.handlers.PyFilePlugin`\n found in ``directory``\n\n This function looks for single file and folder plugins in a directory.\n \"\"\"\n for _, abspath in _list_plugin_filenames(directory):\n yield handlers.PyFilePlugin(abspath)\n\n\ndef enumerate_plugins(settings):\n \"\"\"Yield Sopel's plugins.\n\n :param settings: Sopel's configuration\n :type settings: :class:`sopel.config.Config`\n :return: yield 2-value tuple: an instance of\n :class:`~.handlers.AbstractPluginHandler`, and if the plugin is\n active or not\n\n This function uses the find functions to find all of Sopel's available\n plugins. It uses the bot's ``settings`` to determine if the plugin is\n enabled or disabled.\n\n .. seealso::\n\n The find functions used are:\n\n * :func:`find_internal_plugins` for internal plugins\n * :func:`find_sopel_modules_plugins` for ``sopel_modules.*`` plugins\n * :func:`find_entry_point_plugins` for plugins exposed via packages'\n entry points\n * :func:`find_directory_plugins` for plugins in ``$homedir/plugins``,\n and in extra directories as defined by ``settings.core.extra``\n\n .. 
versionchanged:: 8.0\n\n Looks in ``$homedir/plugins`` instead of the ``$homedir/modules``\n directory, reflecting Sopel's shift away from calling them \"modules\".\n\n \"\"\"\n from_internals = find_internal_plugins()\n from_sopel_modules = find_sopel_modules_plugins()\n from_entry_points = find_entry_point_plugins()\n # load from directories\n source_dirs = [\n os.path.join(settings.homedir, 'plugins'),\n ]\n if settings.core.extra:\n source_dirs = source_dirs + settings.core.extra\n\n from_directories = [\n find_directory_plugins(source_dir)\n for source_dir in source_dirs\n if os.path.isdir(source_dir)\n ]\n\n # Retrieve all plugins\n all_plugins = itertools.chain(\n from_internals,\n from_sopel_modules,\n from_entry_points,\n *from_directories)\n\n # Get plugin settings\n enabled = settings.core.enable\n disabled = settings.core.exclude\n\n # Yield all found plugins with their enabled status (True/False)\n for plugin in all_plugins:\n name = plugin.name\n is_enabled = name not in disabled and (not enabled or name in enabled)\n yield plugin, is_enabled\n\n # And always yield coretasks\n yield handlers.PyModulePlugin('coretasks', 'sopel'), True\n\n\ndef get_usable_plugins(settings):\n \"\"\"Get usable plugins, unique per name.\n\n :param settings: Sopel's configuration\n :type settings: :class:`sopel.config.Config`\n :return: an ordered dict of usable plugins\n :rtype: collections.OrderedDict\n\n This function provides the plugins Sopel can use to load, enable,\n or disable, as an :class:`ordered dict<collections.OrderedDict>`. This dict\n contains one and only one plugin per unique name, using a specific order:\n\n * extra directories defined in the settings\n * homedir's ``plugins`` directory\n * ``sopel.plugins`` entry point group\n * ``sopel_modules``'s subpackages\n * ``sopel.builtins``'s core plugins\n\n (The ``coretasks`` plugin is *always* the one from ``sopel.coretasks`` and\n cannot be overridden.)\n\n .. seealso::\n\n The :func:`~.enumerate_plugins` function is used to generate a list\n of all possible plugins, and its return value is used to populate\n the :class:`ordered dict<collections.OrderedDict>`.\n\n \"\"\"\n # Use an OrderedDict to get one and only one plugin per name\n # based on what plugins.enumerate_plugins does, external plugins are\n # allowed to override internal plugins\n plugins_info = collections.OrderedDict(\n (plugin.name, (plugin, is_enabled))\n for plugin, is_enabled in enumerate_plugins(settings))\n # reset coretasks's position at the end of the loading queue\n plugins_info.move_to_end('coretasks')\n\n return plugins_info\n", "path": "sopel/plugins/__init__.py"}]}
3,435
331
gh_patches_debug_16233
rasdani/github-patches
git_diff
pyodide__pyodide-4223
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Emscripten.cmake missing from pyodide-build wheel ## 🐛 Bug It seems like `Emscripten.cmake` is not bundled with `pyodide-build` wheels any more (since version 0.24.0) and I wonder whether this is a bug or an intended change. (I haven't found anything in the changelog). We were relying on the presence of the toolchain file over there in https://github.com/igraph/python-igraph/ to build a wasm wheel in CI and it is not working any more. ### To Reproduce See, e.g., https://github.com/igraph/python-igraph/actions/runs/6459502630/job/17535393423 ### Expected behavior `pyodide-build` wheels should keep on bundling `Emscripten.cmake` so we can simply `pip install pyodide-build` in CI and then build `python-igraph`'s wasm wheels as before. ### Environment - Pyodide Version<!-- (e.g. 1.8.1) -->: 0.24.1 - Browser version<!-- (e.g. Chrome 95.0.4638.54) -->: not applicable --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyodide-build/pyodide_build/build_env.py` Content: ``` 1 # This file contains functions for managing the Pyodide build environment. 2 3 import functools 4 import os 5 import re 6 import subprocess 7 import sys 8 from collections.abc import Iterator 9 from contextlib import nullcontext, redirect_stdout 10 from io import StringIO 11 from pathlib import Path 12 13 if sys.version_info < (3, 11, 0): 14 import tomli as tomllib 15 else: 16 import tomllib 17 18 from packaging.tags import Tag, compatible_tags, cpython_tags 19 20 from .common import exit_with_stdio 21 from .logger import logger 22 from .recipe import load_all_recipes 23 24 RUST_BUILD_PRELUDE = """ 25 rustup toolchain install ${RUST_TOOLCHAIN} && rustup default ${RUST_TOOLCHAIN} 26 rustup target add wasm32-unknown-emscripten --toolchain ${RUST_TOOLCHAIN} 27 """ 28 29 30 BUILD_VARS: set[str] = { 31 "CARGO_BUILD_TARGET", 32 "CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_LINKER", 33 "HOME", 34 "HOSTINSTALLDIR", 35 "HOSTSITEPACKAGES", 36 "NUMPY_LIB", 37 "PATH", 38 "PKG_CONFIG_PATH", 39 "PLATFORM_TRIPLET", 40 "PIP_CONSTRAINT", 41 "PYMAJOR", 42 "PYMICRO", 43 "PYMINOR", 44 "PYO3_CROSS_INCLUDE_DIR", 45 "PYO3_CROSS_LIB_DIR", 46 "PYODIDE_EMSCRIPTEN_VERSION", 47 "PYODIDE_JOBS", 48 "PYODIDE_PACKAGE_ABI", 49 "PYODIDE_ROOT", 50 "PYTHON_ARCHIVE_SHA256", 51 "PYTHON_ARCHIVE_URL", 52 "PYTHONINCLUDE", 53 "PYTHONPATH", 54 "PYVERSION", 55 "RUSTFLAGS", 56 "RUST_TOOLCHAIN", 57 "SIDE_MODULE_CFLAGS", 58 "SIDE_MODULE_CXXFLAGS", 59 "SIDE_MODULE_LDFLAGS", 60 "STDLIB_MODULE_CFLAGS", 61 "SYSCONFIGDATA_DIR", 62 "SYSCONFIG_NAME", 63 "TARGETINSTALLDIR", 64 "WASM_LIBRARY_DIR", 65 } 66 67 68 def init_environment(*, quiet: bool = False) -> None: 69 """ 70 Initialize Pyodide build environment. 71 This function needs to be called before any other Pyodide build functions. 72 73 Parameters 74 ---------- 75 quiet 76 If True, do not print any messages 77 """ 78 79 # Already initialized 80 if "PYODIDE_ROOT" in os.environ: 81 return 82 83 try: 84 root = search_pyodide_root(Path.cwd()) 85 except FileNotFoundError: # Not in Pyodide tree 86 root = _init_xbuild_env(quiet=quiet) 87 88 os.environ["PYODIDE_ROOT"] = str(root) 89 90 91 def _init_xbuild_env(*, quiet: bool = False) -> Path: 92 """ 93 Initialize the build environment for out-of-tree builds. 
94 95 Parameters 96 ---------- 97 quiet 98 If True, do not print any messages 99 100 Returns 101 ------- 102 The path to the Pyodide root directory inside the xbuild environment 103 """ 104 from . import install_xbuildenv # avoid circular import 105 106 # TODO: Do not hardcode the path 107 # TODO: Add version numbers to the path 108 xbuildenv_path = Path(".pyodide-xbuildenv").resolve() 109 110 context = redirect_stdout(StringIO()) if quiet else nullcontext() 111 with context: 112 return install_xbuildenv.install(xbuildenv_path, download=True) 113 114 115 @functools.cache 116 def get_pyodide_root() -> Path: 117 init_environment() 118 return Path(os.environ["PYODIDE_ROOT"]) 119 120 121 def search_pyodide_root(curdir: str | Path, *, max_depth: int = 5) -> Path: 122 """ 123 Recursively search for the root of the Pyodide repository, 124 by looking for the pyproject.toml file in the parent directories 125 which contains [tool.pyodide] section. 126 """ 127 128 # We want to include "curdir" in parent_dirs, so add a garbage suffix 129 parent_dirs = (Path(curdir) / "garbage").parents[:max_depth] 130 131 for base in parent_dirs: 132 pyproject_file = base / "pyproject.toml" 133 134 if not pyproject_file.is_file(): 135 continue 136 137 try: 138 with pyproject_file.open("rb") as f: 139 configs = tomllib.load(f) 140 except tomllib.TOMLDecodeError as e: 141 raise ValueError(f"Could not parse {pyproject_file}.") from e 142 143 if "tool" in configs and "pyodide" in configs["tool"]: 144 return base 145 146 raise FileNotFoundError( 147 "Could not find Pyodide root directory. If you are not in the Pyodide directory, set `PYODIDE_ROOT=<pyodide-root-directory>`." 148 ) 149 150 151 def in_xbuildenv() -> bool: 152 pyodide_root = get_pyodide_root() 153 return pyodide_root.name == "pyodide-root" 154 155 156 @functools.cache 157 def get_build_environment_vars() -> dict[str, str]: 158 """ 159 Get common environment variables for the in-tree and out-of-tree build. 160 """ 161 env = _get_make_environment_vars().copy() 162 163 # Allow users to overwrite the build environment variables by setting 164 # host environment variables. 165 # TODO: Add modifiable configuration file instead. 166 # (https://github.com/pyodide/pyodide/pull/3737/files#r1161247201) 167 env.update({key: os.environ[key] for key in BUILD_VARS if key in os.environ}) 168 env["PYODIDE"] = "1" 169 170 tools_dir = Path(__file__).parent / "tools" 171 172 env["CMAKE_TOOLCHAIN_FILE"] = str( 173 tools_dir / "cmake/Modules/Platform/Emscripten.cmake" 174 ) 175 env["PYO3_CONFIG_FILE"] = str(tools_dir / "pyo3_config.ini") 176 177 hostsitepackages = env["HOSTSITEPACKAGES"] 178 pythonpath = [ 179 hostsitepackages, 180 ] 181 env["PYTHONPATH"] = ":".join(pythonpath) 182 183 return env 184 185 186 def _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, str]: 187 """Load environment variables from Makefile.envs 188 189 This allows us to set all build vars in one place 190 191 Parameters 192 ---------- 193 pyodide_root 194 The root directory of the Pyodide repository. If None, this will be inferred. 
195 """ 196 197 PYODIDE_ROOT = get_pyodide_root() if pyodide_root is None else pyodide_root 198 environment = {} 199 result = subprocess.run( 200 ["make", "-f", str(PYODIDE_ROOT / "Makefile.envs"), ".output_vars"], 201 capture_output=True, 202 text=True, 203 env={"PYODIDE_ROOT": str(PYODIDE_ROOT)}, 204 ) 205 206 if result.returncode != 0: 207 logger.error("ERROR: Failed to load environment variables from Makefile.envs") 208 exit_with_stdio(result) 209 210 for line in result.stdout.splitlines(): 211 equalPos = line.find("=") 212 if equalPos != -1: 213 varname = line[0:equalPos] 214 215 if varname not in BUILD_VARS: 216 continue 217 218 value = line[equalPos + 1 :] 219 value = value.strip("'").strip() 220 environment[varname] = value 221 return environment 222 223 224 def get_build_flag(name: str) -> str: 225 """ 226 Get a value of a build flag. 227 """ 228 build_vars = get_build_environment_vars() 229 if name not in build_vars: 230 raise ValueError(f"Unknown build flag: {name}") 231 232 return build_vars[name] 233 234 235 def get_pyversion_major() -> str: 236 return get_build_flag("PYMAJOR") 237 238 239 def get_pyversion_minor() -> str: 240 return get_build_flag("PYMINOR") 241 242 243 def get_pyversion_major_minor() -> str: 244 return f"{get_pyversion_major()}.{get_pyversion_minor()}" 245 246 247 def get_pyversion() -> str: 248 return f"python{get_pyversion_major_minor()}" 249 250 251 def get_hostsitepackages() -> str: 252 return get_build_flag("HOSTSITEPACKAGES") 253 254 255 @functools.cache 256 def get_unisolated_packages() -> list[str]: 257 PYODIDE_ROOT = get_pyodide_root() 258 259 unisolated_file = PYODIDE_ROOT / "unisolated.txt" 260 if unisolated_file.exists(): 261 # in xbuild env, read from file 262 unisolated_packages = unisolated_file.read_text().splitlines() 263 else: 264 unisolated_packages = [] 265 recipe_dir = PYODIDE_ROOT / "packages" 266 recipes = load_all_recipes(recipe_dir) 267 for name, config in recipes.items(): 268 if config.build.cross_build_env: 269 unisolated_packages.append(name) 270 271 return unisolated_packages 272 273 274 def platform() -> str: 275 emscripten_version = get_build_flag("PYODIDE_EMSCRIPTEN_VERSION") 276 version = emscripten_version.replace(".", "_") 277 return f"emscripten_{version}_wasm32" 278 279 280 def pyodide_tags() -> Iterator[Tag]: 281 """ 282 Returns the sequence of tag triples for the Pyodide interpreter. 283 284 The sequence is ordered in decreasing specificity. 285 """ 286 PYMAJOR = get_pyversion_major() 287 PYMINOR = get_pyversion_minor() 288 PLATFORM = platform() 289 python_version = (int(PYMAJOR), int(PYMINOR)) 290 yield from cpython_tags(platforms=[PLATFORM], python_version=python_version) 291 yield from compatible_tags(platforms=[PLATFORM], python_version=python_version) 292 # Following line can be removed once packaging 22.0 is released and we update to it. 
293 yield Tag(interpreter=f"cp{PYMAJOR}{PYMINOR}", abi="none", platform="any") 294 295 296 def replace_so_abi_tags(wheel_dir: Path) -> None: 297 """Replace native abi tag with emscripten abi tag in .so file names""" 298 import sysconfig 299 300 build_soabi = sysconfig.get_config_var("SOABI") 301 assert build_soabi 302 ext_suffix = sysconfig.get_config_var("EXT_SUFFIX") 303 assert ext_suffix 304 build_triplet = "-".join(build_soabi.split("-")[2:]) 305 host_triplet = get_build_flag("PLATFORM_TRIPLET") 306 for file in wheel_dir.glob(f"**/*{ext_suffix}"): 307 file.rename(file.with_name(file.name.replace(build_triplet, host_triplet))) 308 309 310 def emscripten_version() -> str: 311 return get_build_flag("PYODIDE_EMSCRIPTEN_VERSION") 312 313 314 def get_emscripten_version_info() -> str: 315 """Extracted for testing purposes.""" 316 return subprocess.run(["emcc", "-v"], capture_output=True, encoding="utf8").stderr 317 318 319 def check_emscripten_version() -> None: 320 needed_version = emscripten_version() 321 try: 322 version_info = get_emscripten_version_info() 323 except FileNotFoundError: 324 raise RuntimeError( 325 f"No Emscripten compiler found. Need Emscripten version {needed_version}" 326 ) from None 327 installed_version = None 328 try: 329 for x in reversed(version_info.partition("\n")[0].split(" ")): 330 if re.match(r"[0-9]+\.[0-9]+\.[0-9]+", x): 331 installed_version = x 332 break 333 except Exception: 334 raise RuntimeError("Failed to determine Emscripten version.") from None 335 if installed_version is None: 336 raise RuntimeError("Failed to determine Emscripten version.") 337 if installed_version != needed_version: 338 raise RuntimeError( 339 f"Incorrect Emscripten version {installed_version}. Need Emscripten version {needed_version}" 340 ) 341 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyodide-build/pyodide_build/build_env.py b/pyodide-build/pyodide_build/build_env.py --- a/pyodide-build/pyodide_build/build_env.py +++ b/pyodide-build/pyodide_build/build_env.py @@ -62,6 +62,8 @@ "SYSCONFIG_NAME", "TARGETINSTALLDIR", "WASM_LIBRARY_DIR", + "CMAKE_TOOLCHAIN_FILE", + "PYO3_CONFIG_FILE", } @@ -169,10 +171,13 @@ tools_dir = Path(__file__).parent / "tools" - env["CMAKE_TOOLCHAIN_FILE"] = str( - tools_dir / "cmake/Modules/Platform/Emscripten.cmake" - ) - env["PYO3_CONFIG_FILE"] = str(tools_dir / "pyo3_config.ini") + if "CMAKE_TOOLCHAIN_FILE" not in env: + env["CMAKE_TOOLCHAIN_FILE"] = str( + tools_dir / "cmake/Modules/Platform/Emscripten.cmake" + ) + + if "PYO3_CONFIG_FILE" not in env: + env["PYO3_CONFIG_FILE"] = str(tools_dir / "pyo3_config.ini") hostsitepackages = env["HOSTSITEPACKAGES"] pythonpath = [
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/build_env.py b/pyodide-build/pyodide_build/build_env.py\n--- a/pyodide-build/pyodide_build/build_env.py\n+++ b/pyodide-build/pyodide_build/build_env.py\n@@ -62,6 +62,8 @@\n \"SYSCONFIG_NAME\",\n \"TARGETINSTALLDIR\",\n \"WASM_LIBRARY_DIR\",\n+ \"CMAKE_TOOLCHAIN_FILE\",\n+ \"PYO3_CONFIG_FILE\",\n }\n \n \n@@ -169,10 +171,13 @@\n \n tools_dir = Path(__file__).parent / \"tools\"\n \n- env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n- tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n- )\n- env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n+ if \"CMAKE_TOOLCHAIN_FILE\" not in env:\n+ env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n+ tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n+ )\n+\n+ if \"PYO3_CONFIG_FILE\" not in env:\n+ env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n \n hostsitepackages = env[\"HOSTSITEPACKAGES\"]\n pythonpath = [\n", "issue": "Emscripten.cmake missing from pyodide-build wheel\n## \ud83d\udc1b Bug\r\n\r\nIt seems like `Emscripten.cmake` is not bundled with `pyodide-build` wheels any more (since version 0.24.0) and I wonder whether this is a bug or an intended change. (I haven't found anything in the changelog). We were relying on the presence of the toolchain file over there in https://github.com/igraph/python-igraph/ to build a wasm wheel in CI and it is not working any more.\r\n\r\n### To Reproduce\r\n\r\nSee, e.g., https://github.com/igraph/python-igraph/actions/runs/6459502630/job/17535393423\r\n\r\n### Expected behavior\r\n\r\n`pyodide-build` wheels should keep on bundling `Emscripten.cmake` so we can simply `pip install pyodide-build` in CI and then build `python-igraph`'s wasm wheels as before.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->: 0.24.1\r\n- Browser version<!-- (e.g. 
Chrome 95.0.4638.54) -->: not applicable\r\n\n", "before_files": [{"content": "# This file contains functions for managing the Pyodide build environment.\n\nimport functools\nimport os\nimport re\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\nfrom contextlib import nullcontext, redirect_stdout\nfrom io import StringIO\nfrom pathlib import Path\n\nif sys.version_info < (3, 11, 0):\n import tomli as tomllib\nelse:\n import tomllib\n\nfrom packaging.tags import Tag, compatible_tags, cpython_tags\n\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe import load_all_recipes\n\nRUST_BUILD_PRELUDE = \"\"\"\nrustup toolchain install ${RUST_TOOLCHAIN} && rustup default ${RUST_TOOLCHAIN}\nrustup target add wasm32-unknown-emscripten --toolchain ${RUST_TOOLCHAIN}\n\"\"\"\n\n\nBUILD_VARS: set[str] = {\n \"CARGO_BUILD_TARGET\",\n \"CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_LINKER\",\n \"HOME\",\n \"HOSTINSTALLDIR\",\n \"HOSTSITEPACKAGES\",\n \"NUMPY_LIB\",\n \"PATH\",\n \"PKG_CONFIG_PATH\",\n \"PLATFORM_TRIPLET\",\n \"PIP_CONSTRAINT\",\n \"PYMAJOR\",\n \"PYMICRO\",\n \"PYMINOR\",\n \"PYO3_CROSS_INCLUDE_DIR\",\n \"PYO3_CROSS_LIB_DIR\",\n \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"PYODIDE_JOBS\",\n \"PYODIDE_PACKAGE_ABI\",\n \"PYODIDE_ROOT\",\n \"PYTHON_ARCHIVE_SHA256\",\n \"PYTHON_ARCHIVE_URL\",\n \"PYTHONINCLUDE\",\n \"PYTHONPATH\",\n \"PYVERSION\",\n \"RUSTFLAGS\",\n \"RUST_TOOLCHAIN\",\n \"SIDE_MODULE_CFLAGS\",\n \"SIDE_MODULE_CXXFLAGS\",\n \"SIDE_MODULE_LDFLAGS\",\n \"STDLIB_MODULE_CFLAGS\",\n \"SYSCONFIGDATA_DIR\",\n \"SYSCONFIG_NAME\",\n \"TARGETINSTALLDIR\",\n \"WASM_LIBRARY_DIR\",\n}\n\n\ndef init_environment(*, quiet: bool = False) -> None:\n \"\"\"\n Initialize Pyodide build environment.\n This function needs to be called before any other Pyodide build functions.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n \"\"\"\n\n # Already initialized\n if \"PYODIDE_ROOT\" in os.environ:\n return\n\n try:\n root = search_pyodide_root(Path.cwd())\n except FileNotFoundError: # Not in Pyodide tree\n root = _init_xbuild_env(quiet=quiet)\n\n os.environ[\"PYODIDE_ROOT\"] = str(root)\n\n\ndef _init_xbuild_env(*, quiet: bool = False) -> Path:\n \"\"\"\n Initialize the build environment for out-of-tree builds.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n\n Returns\n -------\n The path to the Pyodide root directory inside the xbuild environment\n \"\"\"\n from . 
import install_xbuildenv # avoid circular import\n\n # TODO: Do not hardcode the path\n # TODO: Add version numbers to the path\n xbuildenv_path = Path(\".pyodide-xbuildenv\").resolve()\n\n context = redirect_stdout(StringIO()) if quiet else nullcontext()\n with context:\n return install_xbuildenv.install(xbuildenv_path, download=True)\n\n\[email protected]\ndef get_pyodide_root() -> Path:\n init_environment()\n return Path(os.environ[\"PYODIDE_ROOT\"])\n\n\ndef search_pyodide_root(curdir: str | Path, *, max_depth: int = 5) -> Path:\n \"\"\"\n Recursively search for the root of the Pyodide repository,\n by looking for the pyproject.toml file in the parent directories\n which contains [tool.pyodide] section.\n \"\"\"\n\n # We want to include \"curdir\" in parent_dirs, so add a garbage suffix\n parent_dirs = (Path(curdir) / \"garbage\").parents[:max_depth]\n\n for base in parent_dirs:\n pyproject_file = base / \"pyproject.toml\"\n\n if not pyproject_file.is_file():\n continue\n\n try:\n with pyproject_file.open(\"rb\") as f:\n configs = tomllib.load(f)\n except tomllib.TOMLDecodeError as e:\n raise ValueError(f\"Could not parse {pyproject_file}.\") from e\n\n if \"tool\" in configs and \"pyodide\" in configs[\"tool\"]:\n return base\n\n raise FileNotFoundError(\n \"Could not find Pyodide root directory. If you are not in the Pyodide directory, set `PYODIDE_ROOT=<pyodide-root-directory>`.\"\n )\n\n\ndef in_xbuildenv() -> bool:\n pyodide_root = get_pyodide_root()\n return pyodide_root.name == \"pyodide-root\"\n\n\[email protected]\ndef get_build_environment_vars() -> dict[str, str]:\n \"\"\"\n Get common environment variables for the in-tree and out-of-tree build.\n \"\"\"\n env = _get_make_environment_vars().copy()\n\n # Allow users to overwrite the build environment variables by setting\n # host environment variables.\n # TODO: Add modifiable configuration file instead.\n # (https://github.com/pyodide/pyodide/pull/3737/files#r1161247201)\n env.update({key: os.environ[key] for key in BUILD_VARS if key in os.environ})\n env[\"PYODIDE\"] = \"1\"\n\n tools_dir = Path(__file__).parent / \"tools\"\n\n env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n )\n env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n\n hostsitepackages = env[\"HOSTSITEPACKAGES\"]\n pythonpath = [\n hostsitepackages,\n ]\n env[\"PYTHONPATH\"] = \":\".join(pythonpath)\n\n return env\n\n\ndef _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, str]:\n \"\"\"Load environment variables from Makefile.envs\n\n This allows us to set all build vars in one place\n\n Parameters\n ----------\n pyodide_root\n The root directory of the Pyodide repository. 
If None, this will be inferred.\n \"\"\"\n\n PYODIDE_ROOT = get_pyodide_root() if pyodide_root is None else pyodide_root\n environment = {}\n result = subprocess.run(\n [\"make\", \"-f\", str(PYODIDE_ROOT / \"Makefile.envs\"), \".output_vars\"],\n capture_output=True,\n text=True,\n env={\"PYODIDE_ROOT\": str(PYODIDE_ROOT)},\n )\n\n if result.returncode != 0:\n logger.error(\"ERROR: Failed to load environment variables from Makefile.envs\")\n exit_with_stdio(result)\n\n for line in result.stdout.splitlines():\n equalPos = line.find(\"=\")\n if equalPos != -1:\n varname = line[0:equalPos]\n\n if varname not in BUILD_VARS:\n continue\n\n value = line[equalPos + 1 :]\n value = value.strip(\"'\").strip()\n environment[varname] = value\n return environment\n\n\ndef get_build_flag(name: str) -> str:\n \"\"\"\n Get a value of a build flag.\n \"\"\"\n build_vars = get_build_environment_vars()\n if name not in build_vars:\n raise ValueError(f\"Unknown build flag: {name}\")\n\n return build_vars[name]\n\n\ndef get_pyversion_major() -> str:\n return get_build_flag(\"PYMAJOR\")\n\n\ndef get_pyversion_minor() -> str:\n return get_build_flag(\"PYMINOR\")\n\n\ndef get_pyversion_major_minor() -> str:\n return f\"{get_pyversion_major()}.{get_pyversion_minor()}\"\n\n\ndef get_pyversion() -> str:\n return f\"python{get_pyversion_major_minor()}\"\n\n\ndef get_hostsitepackages() -> str:\n return get_build_flag(\"HOSTSITEPACKAGES\")\n\n\[email protected]\ndef get_unisolated_packages() -> list[str]:\n PYODIDE_ROOT = get_pyodide_root()\n\n unisolated_file = PYODIDE_ROOT / \"unisolated.txt\"\n if unisolated_file.exists():\n # in xbuild env, read from file\n unisolated_packages = unisolated_file.read_text().splitlines()\n else:\n unisolated_packages = []\n recipe_dir = PYODIDE_ROOT / \"packages\"\n recipes = load_all_recipes(recipe_dir)\n for name, config in recipes.items():\n if config.build.cross_build_env:\n unisolated_packages.append(name)\n\n return unisolated_packages\n\n\ndef platform() -> str:\n emscripten_version = get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n version = emscripten_version.replace(\".\", \"_\")\n return f\"emscripten_{version}_wasm32\"\n\n\ndef pyodide_tags() -> Iterator[Tag]:\n \"\"\"\n Returns the sequence of tag triples for the Pyodide interpreter.\n\n The sequence is ordered in decreasing specificity.\n \"\"\"\n PYMAJOR = get_pyversion_major()\n PYMINOR = get_pyversion_minor()\n PLATFORM = platform()\n python_version = (int(PYMAJOR), int(PYMINOR))\n yield from cpython_tags(platforms=[PLATFORM], python_version=python_version)\n yield from compatible_tags(platforms=[PLATFORM], python_version=python_version)\n # Following line can be removed once packaging 22.0 is released and we update to it.\n yield Tag(interpreter=f\"cp{PYMAJOR}{PYMINOR}\", abi=\"none\", platform=\"any\")\n\n\ndef replace_so_abi_tags(wheel_dir: Path) -> None:\n \"\"\"Replace native abi tag with emscripten abi tag in .so file names\"\"\"\n import sysconfig\n\n build_soabi = sysconfig.get_config_var(\"SOABI\")\n assert build_soabi\n ext_suffix = sysconfig.get_config_var(\"EXT_SUFFIX\")\n assert ext_suffix\n build_triplet = \"-\".join(build_soabi.split(\"-\")[2:])\n host_triplet = get_build_flag(\"PLATFORM_TRIPLET\")\n for file in wheel_dir.glob(f\"**/*{ext_suffix}\"):\n file.rename(file.with_name(file.name.replace(build_triplet, host_triplet)))\n\n\ndef emscripten_version() -> str:\n return get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n\n\ndef get_emscripten_version_info() -> str:\n \"\"\"Extracted for testing 
purposes.\"\"\"\n return subprocess.run([\"emcc\", \"-v\"], capture_output=True, encoding=\"utf8\").stderr\n\n\ndef check_emscripten_version() -> None:\n needed_version = emscripten_version()\n try:\n version_info = get_emscripten_version_info()\n except FileNotFoundError:\n raise RuntimeError(\n f\"No Emscripten compiler found. Need Emscripten version {needed_version}\"\n ) from None\n installed_version = None\n try:\n for x in reversed(version_info.partition(\"\\n\")[0].split(\" \")):\n if re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", x):\n installed_version = x\n break\n except Exception:\n raise RuntimeError(\"Failed to determine Emscripten version.\") from None\n if installed_version is None:\n raise RuntimeError(\"Failed to determine Emscripten version.\")\n if installed_version != needed_version:\n raise RuntimeError(\n f\"Incorrect Emscripten version {installed_version}. Need Emscripten version {needed_version}\"\n )\n", "path": "pyodide-build/pyodide_build/build_env.py"}], "after_files": [{"content": "# This file contains functions for managing the Pyodide build environment.\n\nimport functools\nimport os\nimport re\nimport subprocess\nimport sys\nfrom collections.abc import Iterator\nfrom contextlib import nullcontext, redirect_stdout\nfrom io import StringIO\nfrom pathlib import Path\n\nif sys.version_info < (3, 11, 0):\n import tomli as tomllib\nelse:\n import tomllib\n\nfrom packaging.tags import Tag, compatible_tags, cpython_tags\n\nfrom .common import exit_with_stdio\nfrom .logger import logger\nfrom .recipe import load_all_recipes\n\nRUST_BUILD_PRELUDE = \"\"\"\nrustup toolchain install ${RUST_TOOLCHAIN} && rustup default ${RUST_TOOLCHAIN}\nrustup target add wasm32-unknown-emscripten --toolchain ${RUST_TOOLCHAIN}\n\"\"\"\n\n\nBUILD_VARS: set[str] = {\n \"CARGO_BUILD_TARGET\",\n \"CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_LINKER\",\n \"HOME\",\n \"HOSTINSTALLDIR\",\n \"HOSTSITEPACKAGES\",\n \"NUMPY_LIB\",\n \"PATH\",\n \"PKG_CONFIG_PATH\",\n \"PLATFORM_TRIPLET\",\n \"PIP_CONSTRAINT\",\n \"PYMAJOR\",\n \"PYMICRO\",\n \"PYMINOR\",\n \"PYO3_CROSS_INCLUDE_DIR\",\n \"PYO3_CROSS_LIB_DIR\",\n \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"PYODIDE_JOBS\",\n \"PYODIDE_PACKAGE_ABI\",\n \"PYODIDE_ROOT\",\n \"PYTHON_ARCHIVE_SHA256\",\n \"PYTHON_ARCHIVE_URL\",\n \"PYTHONINCLUDE\",\n \"PYTHONPATH\",\n \"PYVERSION\",\n \"RUSTFLAGS\",\n \"RUST_TOOLCHAIN\",\n \"SIDE_MODULE_CFLAGS\",\n \"SIDE_MODULE_CXXFLAGS\",\n \"SIDE_MODULE_LDFLAGS\",\n \"STDLIB_MODULE_CFLAGS\",\n \"SYSCONFIGDATA_DIR\",\n \"SYSCONFIG_NAME\",\n \"TARGETINSTALLDIR\",\n \"WASM_LIBRARY_DIR\",\n \"CMAKE_TOOLCHAIN_FILE\",\n \"PYO3_CONFIG_FILE\",\n}\n\n\ndef init_environment(*, quiet: bool = False) -> None:\n \"\"\"\n Initialize Pyodide build environment.\n This function needs to be called before any other Pyodide build functions.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n \"\"\"\n\n # Already initialized\n if \"PYODIDE_ROOT\" in os.environ:\n return\n\n try:\n root = search_pyodide_root(Path.cwd())\n except FileNotFoundError: # Not in Pyodide tree\n root = _init_xbuild_env(quiet=quiet)\n\n os.environ[\"PYODIDE_ROOT\"] = str(root)\n\n\ndef _init_xbuild_env(*, quiet: bool = False) -> Path:\n \"\"\"\n Initialize the build environment for out-of-tree builds.\n\n Parameters\n ----------\n quiet\n If True, do not print any messages\n\n Returns\n -------\n The path to the Pyodide root directory inside the xbuild environment\n \"\"\"\n from . 
import install_xbuildenv # avoid circular import\n\n # TODO: Do not hardcode the path\n # TODO: Add version numbers to the path\n xbuildenv_path = Path(\".pyodide-xbuildenv\").resolve()\n\n context = redirect_stdout(StringIO()) if quiet else nullcontext()\n with context:\n return install_xbuildenv.install(xbuildenv_path, download=True)\n\n\[email protected]\ndef get_pyodide_root() -> Path:\n init_environment()\n return Path(os.environ[\"PYODIDE_ROOT\"])\n\n\ndef search_pyodide_root(curdir: str | Path, *, max_depth: int = 5) -> Path:\n \"\"\"\n Recursively search for the root of the Pyodide repository,\n by looking for the pyproject.toml file in the parent directories\n which contains [tool.pyodide] section.\n \"\"\"\n\n # We want to include \"curdir\" in parent_dirs, so add a garbage suffix\n parent_dirs = (Path(curdir) / \"garbage\").parents[:max_depth]\n\n for base in parent_dirs:\n pyproject_file = base / \"pyproject.toml\"\n\n if not pyproject_file.is_file():\n continue\n\n try:\n with pyproject_file.open(\"rb\") as f:\n configs = tomllib.load(f)\n except tomllib.TOMLDecodeError as e:\n raise ValueError(f\"Could not parse {pyproject_file}.\") from e\n\n if \"tool\" in configs and \"pyodide\" in configs[\"tool\"]:\n return base\n\n raise FileNotFoundError(\n \"Could not find Pyodide root directory. If you are not in the Pyodide directory, set `PYODIDE_ROOT=<pyodide-root-directory>`.\"\n )\n\n\ndef in_xbuildenv() -> bool:\n pyodide_root = get_pyodide_root()\n return pyodide_root.name == \"pyodide-root\"\n\n\[email protected]\ndef get_build_environment_vars() -> dict[str, str]:\n \"\"\"\n Get common environment variables for the in-tree and out-of-tree build.\n \"\"\"\n env = _get_make_environment_vars().copy()\n\n # Allow users to overwrite the build environment variables by setting\n # host environment variables.\n # TODO: Add modifiable configuration file instead.\n # (https://github.com/pyodide/pyodide/pull/3737/files#r1161247201)\n env.update({key: os.environ[key] for key in BUILD_VARS if key in os.environ})\n env[\"PYODIDE\"] = \"1\"\n\n tools_dir = Path(__file__).parent / \"tools\"\n\n if \"CMAKE_TOOLCHAIN_FILE\" not in env:\n env[\"CMAKE_TOOLCHAIN_FILE\"] = str(\n tools_dir / \"cmake/Modules/Platform/Emscripten.cmake\"\n )\n\n if \"PYO3_CONFIG_FILE\" not in env:\n env[\"PYO3_CONFIG_FILE\"] = str(tools_dir / \"pyo3_config.ini\")\n\n hostsitepackages = env[\"HOSTSITEPACKAGES\"]\n pythonpath = [\n hostsitepackages,\n ]\n env[\"PYTHONPATH\"] = \":\".join(pythonpath)\n\n return env\n\n\ndef _get_make_environment_vars(*, pyodide_root: Path | None = None) -> dict[str, str]:\n \"\"\"Load environment variables from Makefile.envs\n\n This allows us to set all build vars in one place\n\n Parameters\n ----------\n pyodide_root\n The root directory of the Pyodide repository. 
If None, this will be inferred.\n \"\"\"\n\n PYODIDE_ROOT = get_pyodide_root() if pyodide_root is None else pyodide_root\n environment = {}\n result = subprocess.run(\n [\"make\", \"-f\", str(PYODIDE_ROOT / \"Makefile.envs\"), \".output_vars\"],\n capture_output=True,\n text=True,\n env={\"PYODIDE_ROOT\": str(PYODIDE_ROOT)},\n )\n\n if result.returncode != 0:\n logger.error(\"ERROR: Failed to load environment variables from Makefile.envs\")\n exit_with_stdio(result)\n\n for line in result.stdout.splitlines():\n equalPos = line.find(\"=\")\n if equalPos != -1:\n varname = line[0:equalPos]\n\n if varname not in BUILD_VARS:\n continue\n\n value = line[equalPos + 1 :]\n value = value.strip(\"'\").strip()\n environment[varname] = value\n return environment\n\n\ndef get_build_flag(name: str) -> str:\n \"\"\"\n Get a value of a build flag.\n \"\"\"\n build_vars = get_build_environment_vars()\n if name not in build_vars:\n raise ValueError(f\"Unknown build flag: {name}\")\n\n return build_vars[name]\n\n\ndef get_pyversion_major() -> str:\n return get_build_flag(\"PYMAJOR\")\n\n\ndef get_pyversion_minor() -> str:\n return get_build_flag(\"PYMINOR\")\n\n\ndef get_pyversion_major_minor() -> str:\n return f\"{get_pyversion_major()}.{get_pyversion_minor()}\"\n\n\ndef get_pyversion() -> str:\n return f\"python{get_pyversion_major_minor()}\"\n\n\ndef get_hostsitepackages() -> str:\n return get_build_flag(\"HOSTSITEPACKAGES\")\n\n\[email protected]\ndef get_unisolated_packages() -> list[str]:\n PYODIDE_ROOT = get_pyodide_root()\n\n unisolated_file = PYODIDE_ROOT / \"unisolated.txt\"\n if unisolated_file.exists():\n # in xbuild env, read from file\n unisolated_packages = unisolated_file.read_text().splitlines()\n else:\n unisolated_packages = []\n recipe_dir = PYODIDE_ROOT / \"packages\"\n recipes = load_all_recipes(recipe_dir)\n for name, config in recipes.items():\n if config.build.cross_build_env:\n unisolated_packages.append(name)\n\n return unisolated_packages\n\n\ndef platform() -> str:\n emscripten_version = get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n version = emscripten_version.replace(\".\", \"_\")\n return f\"emscripten_{version}_wasm32\"\n\n\ndef pyodide_tags() -> Iterator[Tag]:\n \"\"\"\n Returns the sequence of tag triples for the Pyodide interpreter.\n\n The sequence is ordered in decreasing specificity.\n \"\"\"\n PYMAJOR = get_pyversion_major()\n PYMINOR = get_pyversion_minor()\n PLATFORM = platform()\n python_version = (int(PYMAJOR), int(PYMINOR))\n yield from cpython_tags(platforms=[PLATFORM], python_version=python_version)\n yield from compatible_tags(platforms=[PLATFORM], python_version=python_version)\n # Following line can be removed once packaging 22.0 is released and we update to it.\n yield Tag(interpreter=f\"cp{PYMAJOR}{PYMINOR}\", abi=\"none\", platform=\"any\")\n\n\ndef replace_so_abi_tags(wheel_dir: Path) -> None:\n \"\"\"Replace native abi tag with emscripten abi tag in .so file names\"\"\"\n import sysconfig\n\n build_soabi = sysconfig.get_config_var(\"SOABI\")\n assert build_soabi\n ext_suffix = sysconfig.get_config_var(\"EXT_SUFFIX\")\n assert ext_suffix\n build_triplet = \"-\".join(build_soabi.split(\"-\")[2:])\n host_triplet = get_build_flag(\"PLATFORM_TRIPLET\")\n for file in wheel_dir.glob(f\"**/*{ext_suffix}\"):\n file.rename(file.with_name(file.name.replace(build_triplet, host_triplet)))\n\n\ndef emscripten_version() -> str:\n return get_build_flag(\"PYODIDE_EMSCRIPTEN_VERSION\")\n\n\ndef get_emscripten_version_info() -> str:\n \"\"\"Extracted for testing 
purposes.\"\"\"\n return subprocess.run([\"emcc\", \"-v\"], capture_output=True, encoding=\"utf8\").stderr\n\n\ndef check_emscripten_version() -> None:\n needed_version = emscripten_version()\n try:\n version_info = get_emscripten_version_info()\n except FileNotFoundError:\n raise RuntimeError(\n f\"No Emscripten compiler found. Need Emscripten version {needed_version}\"\n ) from None\n installed_version = None\n try:\n for x in reversed(version_info.partition(\"\\n\")[0].split(\" \")):\n if re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", x):\n installed_version = x\n break\n except Exception:\n raise RuntimeError(\"Failed to determine Emscripten version.\") from None\n if installed_version is None:\n raise RuntimeError(\"Failed to determine Emscripten version.\")\n if installed_version != needed_version:\n raise RuntimeError(\n f\"Incorrect Emscripten version {installed_version}. Need Emscripten version {needed_version}\"\n )\n", "path": "pyodide-build/pyodide_build/build_env.py"}]}
4,031
303
gh_patches_debug_39411
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-924
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Forbid `Optional[Union[A, B]]`, use `Union[A, B, None]` instead # Rule request <!-- Thanks for coming up with a proposal! --> ## Thesis <!-- What exactly do you propose? Please, provide code examples. --> These two types are equal: `Optional[Union[A, B]] == Union[A, B, None]` For consistency reasons we need to enforce the second form over the first one. Related to #792 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wemake_python_styleguide/violations/annotations.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """ 4 These checks help to prevent incorrect usage of Python 3 typing annotations. 5 6 While they maybe of a great help in writing clear 7 and concise code, they still can be abused. 8 9 Once again, these rules are highly subjective. But, we love them. 10 11 .. currentmodule:: wemake_python_styleguide.violations.annotations 12 13 Summary 14 ------- 15 16 .. autosummary:: 17 :nosignatures: 18 19 LiteralNoneViolation 20 NestedAnnotationsViolation 21 22 Annotation checks 23 ------------------ 24 25 .. autoclass:: LiteralNoneViolation 26 .. autoclass:: NestedAnnotationsViolation 27 28 """ 29 30 from typing_extensions import final 31 32 from wemake_python_styleguide.violations.base import ASTViolation 33 34 35 @final 36 class LiteralNoneViolation(ASTViolation): 37 """ 38 Forbids to use ``Literal[None]`` typing annotation. 39 40 Reasoning: 41 Literal[None] is just the same as None. 42 There's no need to use the first version. 43 It is not type related, it is a consistency rule. 44 45 Solution: 46 Replace ``Literal[None]`` with ``None``. 47 48 Example:: 49 50 # Correct: 51 def func(empty: None): 52 '''Empty function.''' 53 54 # Wrong: 55 def func(empty: Literal[None]): 56 '''Empty function.''' 57 58 .. versionadded:: 0.13.0 59 60 """ 61 62 code = 701 63 error_template = 'Found useless `Literal[None]` typing annotation' 64 65 66 @final 67 class NestedAnnotationsViolation(ASTViolation): 68 """ 69 Forbids use of nested Literal and Union Annotation. 70 71 Reasoning: 72 There is no need to nest certain annotations of the same type. 73 They are exactly equivalent to the flattened version. 74 Use the flattened version for consistency. 75 76 Solution: 77 Flatten consecutively nested ``typing.Literal`` and ``typing.Union``. 78 79 Example:: 80 # Correct: 81 Literal[1, 2, 3, "foo", 5, None] 82 Union[int, str, float] 83 84 # Wrong: 85 Literal[Literal[Literal[1, 2, 3], "foo"], 5, None] 86 Union[Union[int, str], float] 87 88 .. 
versionadded:: 0.13.0 89 90 """ 91 92 error_template = 'Found redundant nested typing annotation' 93 code = 702 94 ``` Path: `wemake_python_styleguide/visitors/ast/annotations.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import ast 4 from typing import ClassVar, FrozenSet, Optional 5 6 from typing_extensions import final 7 8 from wemake_python_styleguide.logic import source 9 from wemake_python_styleguide.types import AnyFunctionDef 10 from wemake_python_styleguide.violations.annotations import ( 11 LiteralNoneViolation, 12 NestedAnnotationsViolation, 13 ) 14 from wemake_python_styleguide.violations.consistency import ( 15 MultilineFunctionAnnotationViolation, 16 ) 17 from wemake_python_styleguide.visitors.base import BaseNodeVisitor 18 from wemake_python_styleguide.visitors.decorators import alias 19 20 21 @alias('visit_any_function', ( 22 'visit_FunctionDef', 23 'visit_AsyncFunctionDef', 24 )) 25 class _GenericAnnotationVisitor(BaseNodeVisitor): 26 """Base class for all annotations visitors.""" 27 28 _possible_prefixes: ClassVar[FrozenSet[str]] = frozenset(( 29 'typing.', 30 'typing_extensions.', 31 )) 32 33 @final 34 def visit_AnnAssign(self, node: ast.AnnAssign) -> None: 35 """ 36 Checks for assigned value annotation. 37 38 Raises: 39 LiteralNoneAnnotation 40 41 """ 42 self._check_annotation(node.annotation) 43 self.generic_visit(node) 44 45 @final 46 def visit_any_function(self, node: AnyFunctionDef) -> None: 47 """ 48 Checks return type annotations. 49 50 Raises: 51 LiteralNoneAnnotation 52 53 """ 54 self._check_annotation(node.returns) 55 self.generic_visit(node) 56 57 @final 58 def visit_arg(self, node: ast.arg) -> None: 59 """ 60 Checks arguments annotations. 61 62 Raises: 63 LiteralNoneAnnotation 64 65 """ 66 self._check_annotation(node.annotation) 67 self.generic_visit(node) 68 69 @final 70 def _get_annotation(self, node: ast.AST) -> str: 71 """Smartly turns annotation node to string.""" 72 full_annotation = source.node_to_string(node) 73 for prefix in self._possible_prefixes: 74 full_annotation = full_annotation.replace(prefix, '') 75 return full_annotation 76 77 def _check_annotation(self, annotation: Optional[ast.expr]) -> None: 78 """The only method that need to me implemented in child visitors.""" 79 80 81 @final 82 class SemanticAnnotationVisitor(_GenericAnnotationVisitor): 83 """Ensures that nested annotations are used correctly.""" 84 85 _flat_types: ClassVar[FrozenSet[str]] = frozenset(( 86 'Literal', 87 'Union', 88 'Annotated', 89 )) 90 91 def _check_annotation(self, annotation: Optional[ast.expr]) -> None: 92 if not annotation: 93 return 94 95 self._check_nested_annotations(annotation) 96 self._check_literal_none(annotation) 97 98 def _check_nested_annotations(self, annotation: ast.expr) -> None: 99 annotation_string = self._get_annotation(annotation) 100 for flat_type in self._flat_types: 101 if annotation_string.count(flat_type) > 1: 102 self.add_violation(NestedAnnotationsViolation(annotation)) 103 104 def _check_literal_none(self, annotation: ast.expr) -> None: 105 annotation_string = self._get_annotation(annotation) 106 if 'Literal[None]' in annotation_string: 107 self.add_violation(LiteralNoneViolation(annotation)) 108 109 110 @final 111 @alias('visit_any_function', ( 112 'visit_FunctionDef', 113 'visit_AsyncFunctionDef', 114 )) 115 class WrongAnnotationVisitor(BaseNodeVisitor): 116 """Ensures that annotations are used correctly.""" 117 118 def visit_any_function(self, node: AnyFunctionDef) -> None: 119 """ 120 Checks return type annotations. 
121 122 Raises: 123 MultilineFunctionAnnotationViolation 124 LiteralNoneAnnotation 125 126 """ 127 self._check_return_annotation(node) 128 self.generic_visit(node) 129 130 def visit_arg(self, node: ast.arg) -> None: 131 """ 132 Checks arguments annotations. 133 134 Raises: 135 MultilineFunctionAnnotationViolation 136 LiteralNoneAnnotation 137 138 """ 139 self._check_arg_annotation(node) 140 self.generic_visit(node) 141 142 def _check_arg_annotation(self, node: ast.arg) -> None: 143 for sub_node in ast.walk(node): 144 lineno = getattr(sub_node, 'lineno', None) 145 if lineno and lineno != node.lineno: 146 self.add_violation(MultilineFunctionAnnotationViolation(node)) 147 return 148 149 def _check_return_annotation(self, node: AnyFunctionDef) -> None: 150 if not node.returns: 151 return 152 153 for sub_node in ast.walk(node.returns): 154 lineno = getattr(sub_node, 'lineno', None) 155 if lineno and lineno != node.returns.lineno: 156 self.add_violation(MultilineFunctionAnnotationViolation(node)) 157 return 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wemake_python_styleguide/violations/annotations.py b/wemake_python_styleguide/violations/annotations.py --- a/wemake_python_styleguide/violations/annotations.py +++ b/wemake_python_styleguide/violations/annotations.py @@ -18,12 +18,14 @@ LiteralNoneViolation NestedAnnotationsViolation + UnionNestedInOptionalViolation Annotation checks ------------------ .. autoclass:: LiteralNoneViolation .. autoclass:: NestedAnnotationsViolation +.. autoclass:: UnionNestedInOptionalViolation """ @@ -77,6 +79,7 @@ Flatten consecutively nested ``typing.Literal`` and ``typing.Union``. Example:: + # Correct: Literal[1, 2, 3, "foo", 5, None] Union[int, str, float] @@ -91,3 +94,31 @@ error_template = 'Found redundant nested typing annotation' code = 702 + + +@final +class UnionNestedInOptionalViolation(ASTViolation): + """ + Forbids to use ``Optional[Union[int, str]]`` annotation. + + Reasoning: + Optional[Union[int, str]] equals to Union[int, str, None]. + Use Union[int, str, None] version for consistency. + + Solution: + Replace ``Optional[Union[int, str]]`` with ``Union[int, str, None]``. + + Example:: + + # Correct: + Union[int, str, None] + + # Wrong: + Optional[Union[int, str]] + + .. versionadded:: 0.13.0 + + """ + + error_template = 'Found typing annotation with `Union` nested in `Optional`' + code = 704 diff --git a/wemake_python_styleguide/visitors/ast/annotations.py b/wemake_python_styleguide/visitors/ast/annotations.py --- a/wemake_python_styleguide/visitors/ast/annotations.py +++ b/wemake_python_styleguide/visitors/ast/annotations.py @@ -10,6 +10,7 @@ from wemake_python_styleguide.violations.annotations import ( LiteralNoneViolation, NestedAnnotationsViolation, + UnionNestedInOptionalViolation, ) from wemake_python_styleguide.violations.consistency import ( MultilineFunctionAnnotationViolation, @@ -94,6 +95,7 @@ self._check_nested_annotations(annotation) self._check_literal_none(annotation) + self._check_union_nested_in_optional(annotation) def _check_nested_annotations(self, annotation: ast.expr) -> None: annotation_string = self._get_annotation(annotation) @@ -106,6 +108,11 @@ if 'Literal[None]' in annotation_string: self.add_violation(LiteralNoneViolation(annotation)) + def _check_union_nested_in_optional(self, annotation: ast.expr) -> None: + annotation_string = self._get_annotation(annotation) + if 'Optional[Union[' in annotation_string: + self.add_violation(UnionNestedInOptionalViolation(annotation)) + @final @alias('visit_any_function', (
{"golden_diff": "diff --git a/wemake_python_styleguide/violations/annotations.py b/wemake_python_styleguide/violations/annotations.py\n--- a/wemake_python_styleguide/violations/annotations.py\n+++ b/wemake_python_styleguide/violations/annotations.py\n@@ -18,12 +18,14 @@\n \n LiteralNoneViolation\n NestedAnnotationsViolation\n+ UnionNestedInOptionalViolation\n \n Annotation checks\n ------------------\n \n .. autoclass:: LiteralNoneViolation\n .. autoclass:: NestedAnnotationsViolation\n+.. autoclass:: UnionNestedInOptionalViolation\n \n \"\"\"\n \n@@ -77,6 +79,7 @@\n Flatten consecutively nested ``typing.Literal`` and ``typing.Union``.\n \n Example::\n+\n # Correct:\n Literal[1, 2, 3, \"foo\", 5, None]\n Union[int, str, float]\n@@ -91,3 +94,31 @@\n \n error_template = 'Found redundant nested typing annotation'\n code = 702\n+\n+\n+@final\n+class UnionNestedInOptionalViolation(ASTViolation):\n+ \"\"\"\n+ Forbids to use ``Optional[Union[int, str]]`` annotation.\n+\n+ Reasoning:\n+ Optional[Union[int, str]] equals to Union[int, str, None].\n+ Use Union[int, str, None] version for consistency.\n+\n+ Solution:\n+ Replace ``Optional[Union[int, str]]`` with ``Union[int, str, None]``.\n+\n+ Example::\n+\n+ # Correct:\n+ Union[int, str, None]\n+\n+ # Wrong:\n+ Optional[Union[int, str]]\n+\n+ .. versionadded:: 0.13.0\n+\n+ \"\"\"\n+\n+ error_template = 'Found typing annotation with `Union` nested in `Optional`'\n+ code = 704\ndiff --git a/wemake_python_styleguide/visitors/ast/annotations.py b/wemake_python_styleguide/visitors/ast/annotations.py\n--- a/wemake_python_styleguide/visitors/ast/annotations.py\n+++ b/wemake_python_styleguide/visitors/ast/annotations.py\n@@ -10,6 +10,7 @@\n from wemake_python_styleguide.violations.annotations import (\n LiteralNoneViolation,\n NestedAnnotationsViolation,\n+ UnionNestedInOptionalViolation,\n )\n from wemake_python_styleguide.violations.consistency import (\n MultilineFunctionAnnotationViolation,\n@@ -94,6 +95,7 @@\n \n self._check_nested_annotations(annotation)\n self._check_literal_none(annotation)\n+ self._check_union_nested_in_optional(annotation)\n \n def _check_nested_annotations(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n@@ -106,6 +108,11 @@\n if 'Literal[None]' in annotation_string:\n self.add_violation(LiteralNoneViolation(annotation))\n \n+ def _check_union_nested_in_optional(self, annotation: ast.expr) -> None:\n+ annotation_string = self._get_annotation(annotation)\n+ if 'Optional[Union[' in annotation_string:\n+ self.add_violation(UnionNestedInOptionalViolation(annotation))\n+\n \n @final\n @alias('visit_any_function', (\n", "issue": "Forbid `Optional[Union[A, B]]`, use `Union[A, B, None]` instead\n# Rule request\r\n\r\n<!-- Thanks for coming up with a proposal! -->\r\n\r\n## Thesis\r\n\r\n<!-- What exactly do you propose? Please, provide code examples. -->\r\n\r\nThese two types are equal: `Optional[Union[A, B]] == Union[A, B, None]`\r\n\r\nFor consistency reasons we need to enforce the second form over the first one.\r\nRelated to #792 \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThese checks help to prevent incorrect usage of Python 3 typing annotations.\n\nWhile they maybe of a great help in writing clear\nand concise code, they still can be abused.\n\nOnce again, these rules are highly subjective. But, we love them.\n\n.. currentmodule:: wemake_python_styleguide.violations.annotations\n\nSummary\n-------\n\n.. 
autosummary::\n :nosignatures:\n\n LiteralNoneViolation\n NestedAnnotationsViolation\n\nAnnotation checks\n------------------\n\n.. autoclass:: LiteralNoneViolation\n.. autoclass:: NestedAnnotationsViolation\n\n\"\"\"\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.base import ASTViolation\n\n\n@final\nclass LiteralNoneViolation(ASTViolation):\n \"\"\"\n Forbids to use ``Literal[None]`` typing annotation.\n\n Reasoning:\n Literal[None] is just the same as None.\n There's no need to use the first version.\n It is not type related, it is a consistency rule.\n\n Solution:\n Replace ``Literal[None]`` with ``None``.\n\n Example::\n\n # Correct:\n def func(empty: None):\n '''Empty function.'''\n\n # Wrong:\n def func(empty: Literal[None]):\n '''Empty function.'''\n\n .. versionadded:: 0.13.0\n\n \"\"\"\n\n code = 701\n error_template = 'Found useless `Literal[None]` typing annotation'\n\n\n@final\nclass NestedAnnotationsViolation(ASTViolation):\n \"\"\"\n Forbids use of nested Literal and Union Annotation.\n\n Reasoning:\n There is no need to nest certain annotations of the same type.\n They are exactly equivalent to the flattened version.\n Use the flattened version for consistency.\n\n Solution:\n Flatten consecutively nested ``typing.Literal`` and ``typing.Union``.\n\n Example::\n # Correct:\n Literal[1, 2, 3, \"foo\", 5, None]\n Union[int, str, float]\n\n # Wrong:\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\n Union[Union[int, str], float]\n\n .. versionadded:: 0.13.0\n\n \"\"\"\n\n error_template = 'Found redundant nested typing annotation'\n code = 702\n", "path": "wemake_python_styleguide/violations/annotations.py"}, {"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, FrozenSet, Optional\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logic import source\nfrom wemake_python_styleguide.types import AnyFunctionDef\nfrom wemake_python_styleguide.violations.annotations import (\n LiteralNoneViolation,\n NestedAnnotationsViolation,\n)\nfrom wemake_python_styleguide.violations.consistency import (\n MultilineFunctionAnnotationViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\n\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 'visit_AsyncFunctionDef',\n))\nclass _GenericAnnotationVisitor(BaseNodeVisitor):\n \"\"\"Base class for all annotations visitors.\"\"\"\n\n _possible_prefixes: ClassVar[FrozenSet[str]] = frozenset((\n 'typing.',\n 'typing_extensions.',\n ))\n\n @final\n def visit_AnnAssign(self, node: ast.AnnAssign) -> None:\n \"\"\"\n Checks for assigned value annotation.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.annotation)\n self.generic_visit(node)\n\n @final\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks return type annotations.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.returns)\n self.generic_visit(node)\n\n @final\n def visit_arg(self, node: ast.arg) -> None:\n \"\"\"\n Checks arguments annotations.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.annotation)\n self.generic_visit(node)\n\n @final\n def _get_annotation(self, node: ast.AST) -> str:\n \"\"\"Smartly turns annotation node to string.\"\"\"\n full_annotation = source.node_to_string(node)\n for prefix in self._possible_prefixes:\n full_annotation = full_annotation.replace(prefix, '')\n return 
full_annotation\n\n def _check_annotation(self, annotation: Optional[ast.expr]) -> None:\n \"\"\"The only method that need to me implemented in child visitors.\"\"\"\n\n\n@final\nclass SemanticAnnotationVisitor(_GenericAnnotationVisitor):\n \"\"\"Ensures that nested annotations are used correctly.\"\"\"\n\n _flat_types: ClassVar[FrozenSet[str]] = frozenset((\n 'Literal',\n 'Union',\n 'Annotated',\n ))\n\n def _check_annotation(self, annotation: Optional[ast.expr]) -> None:\n if not annotation:\n return\n\n self._check_nested_annotations(annotation)\n self._check_literal_none(annotation)\n\n def _check_nested_annotations(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n for flat_type in self._flat_types:\n if annotation_string.count(flat_type) > 1:\n self.add_violation(NestedAnnotationsViolation(annotation))\n\n def _check_literal_none(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n if 'Literal[None]' in annotation_string:\n self.add_violation(LiteralNoneViolation(annotation))\n\n\n@final\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 'visit_AsyncFunctionDef',\n))\nclass WrongAnnotationVisitor(BaseNodeVisitor):\n \"\"\"Ensures that annotations are used correctly.\"\"\"\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks return type annotations.\n\n Raises:\n MultilineFunctionAnnotationViolation\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_return_annotation(node)\n self.generic_visit(node)\n\n def visit_arg(self, node: ast.arg) -> None:\n \"\"\"\n Checks arguments annotations.\n\n Raises:\n MultilineFunctionAnnotationViolation\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_arg_annotation(node)\n self.generic_visit(node)\n\n def _check_arg_annotation(self, node: ast.arg) -> None:\n for sub_node in ast.walk(node):\n lineno = getattr(sub_node, 'lineno', None)\n if lineno and lineno != node.lineno:\n self.add_violation(MultilineFunctionAnnotationViolation(node))\n return\n\n def _check_return_annotation(self, node: AnyFunctionDef) -> None:\n if not node.returns:\n return\n\n for sub_node in ast.walk(node.returns):\n lineno = getattr(sub_node, 'lineno', None)\n if lineno and lineno != node.returns.lineno:\n self.add_violation(MultilineFunctionAnnotationViolation(node))\n return\n", "path": "wemake_python_styleguide/visitors/ast/annotations.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThese checks help to prevent incorrect usage of Python 3 typing annotations.\n\nWhile they maybe of a great help in writing clear\nand concise code, they still can be abused.\n\nOnce again, these rules are highly subjective. But, we love them.\n\n.. currentmodule:: wemake_python_styleguide.violations.annotations\n\nSummary\n-------\n\n.. autosummary::\n :nosignatures:\n\n LiteralNoneViolation\n NestedAnnotationsViolation\n UnionNestedInOptionalViolation\n\nAnnotation checks\n------------------\n\n.. autoclass:: LiteralNoneViolation\n.. autoclass:: NestedAnnotationsViolation\n.. 
autoclass:: UnionNestedInOptionalViolation\n\n\"\"\"\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.base import ASTViolation\n\n\n@final\nclass LiteralNoneViolation(ASTViolation):\n \"\"\"\n Forbids to use ``Literal[None]`` typing annotation.\n\n Reasoning:\n Literal[None] is just the same as None.\n There's no need to use the first version.\n It is not type related, it is a consistency rule.\n\n Solution:\n Replace ``Literal[None]`` with ``None``.\n\n Example::\n\n # Correct:\n def func(empty: None):\n '''Empty function.'''\n\n # Wrong:\n def func(empty: Literal[None]):\n '''Empty function.'''\n\n .. versionadded:: 0.13.0\n\n \"\"\"\n\n code = 701\n error_template = 'Found useless `Literal[None]` typing annotation'\n\n\n@final\nclass NestedAnnotationsViolation(ASTViolation):\n \"\"\"\n Forbids use of nested Literal and Union Annotation.\n\n Reasoning:\n There is no need to nest certain annotations of the same type.\n They are exactly equivalent to the flattened version.\n Use the flattened version for consistency.\n\n Solution:\n Flatten consecutively nested ``typing.Literal`` and ``typing.Union``.\n\n Example::\n\n # Correct:\n Literal[1, 2, 3, \"foo\", 5, None]\n Union[int, str, float]\n\n # Wrong:\n Literal[Literal[Literal[1, 2, 3], \"foo\"], 5, None]\n Union[Union[int, str], float]\n\n .. versionadded:: 0.13.0\n\n \"\"\"\n\n error_template = 'Found redundant nested typing annotation'\n code = 702\n\n\n@final\nclass UnionNestedInOptionalViolation(ASTViolation):\n \"\"\"\n Forbids to use ``Optional[Union[int, str]]`` annotation.\n\n Reasoning:\n Optional[Union[int, str]] equals to Union[int, str, None].\n Use Union[int, str, None] version for consistency.\n\n Solution:\n Replace ``Optional[Union[int, str]]`` with ``Union[int, str, None]``.\n\n Example::\n\n # Correct:\n Union[int, str, None]\n\n # Wrong:\n Optional[Union[int, str]]\n\n .. 
versionadded:: 0.13.0\n\n \"\"\"\n\n error_template = 'Found typing annotation with `Union` nested in `Optional`'\n code = 704\n", "path": "wemake_python_styleguide/violations/annotations.py"}, {"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import ClassVar, FrozenSet, Optional\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.logic import source\nfrom wemake_python_styleguide.types import AnyFunctionDef\nfrom wemake_python_styleguide.violations.annotations import (\n LiteralNoneViolation,\n NestedAnnotationsViolation,\n UnionNestedInOptionalViolation,\n)\nfrom wemake_python_styleguide.violations.consistency import (\n MultilineFunctionAnnotationViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\n\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 'visit_AsyncFunctionDef',\n))\nclass _GenericAnnotationVisitor(BaseNodeVisitor):\n \"\"\"Base class for all annotations visitors.\"\"\"\n\n _possible_prefixes: ClassVar[FrozenSet[str]] = frozenset((\n 'typing.',\n 'typing_extensions.',\n ))\n\n @final\n def visit_AnnAssign(self, node: ast.AnnAssign) -> None:\n \"\"\"\n Checks for assigned value annotation.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.annotation)\n self.generic_visit(node)\n\n @final\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks return type annotations.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.returns)\n self.generic_visit(node)\n\n @final\n def visit_arg(self, node: ast.arg) -> None:\n \"\"\"\n Checks arguments annotations.\n\n Raises:\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_annotation(node.annotation)\n self.generic_visit(node)\n\n @final\n def _get_annotation(self, node: ast.AST) -> str:\n \"\"\"Smartly turns annotation node to string.\"\"\"\n full_annotation = source.node_to_string(node)\n for prefix in self._possible_prefixes:\n full_annotation = full_annotation.replace(prefix, '')\n return full_annotation\n\n def _check_annotation(self, annotation: Optional[ast.expr]) -> None:\n \"\"\"The only method that need to me implemented in child visitors.\"\"\"\n\n\n@final\nclass SemanticAnnotationVisitor(_GenericAnnotationVisitor):\n \"\"\"Ensures that nested annotations are used correctly.\"\"\"\n\n _flat_types: ClassVar[FrozenSet[str]] = frozenset((\n 'Literal',\n 'Union',\n 'Annotated',\n ))\n\n def _check_annotation(self, annotation: Optional[ast.expr]) -> None:\n if not annotation:\n return\n\n self._check_nested_annotations(annotation)\n self._check_literal_none(annotation)\n self._check_union_nested_in_optional(annotation)\n\n def _check_nested_annotations(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n for flat_type in self._flat_types:\n if annotation_string.count(flat_type) > 1:\n self.add_violation(NestedAnnotationsViolation(annotation))\n\n def _check_literal_none(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n if 'Literal[None]' in annotation_string:\n self.add_violation(LiteralNoneViolation(annotation))\n\n def _check_union_nested_in_optional(self, annotation: ast.expr) -> None:\n annotation_string = self._get_annotation(annotation)\n if 'Optional[Union[' in annotation_string:\n self.add_violation(UnionNestedInOptionalViolation(annotation))\n\n\n@final\n@alias('visit_any_function', (\n 'visit_FunctionDef',\n 
'visit_AsyncFunctionDef',\n))\nclass WrongAnnotationVisitor(BaseNodeVisitor):\n \"\"\"Ensures that annotations are used correctly.\"\"\"\n\n def visit_any_function(self, node: AnyFunctionDef) -> None:\n \"\"\"\n Checks return type annotations.\n\n Raises:\n MultilineFunctionAnnotationViolation\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_return_annotation(node)\n self.generic_visit(node)\n\n def visit_arg(self, node: ast.arg) -> None:\n \"\"\"\n Checks arguments annotations.\n\n Raises:\n MultilineFunctionAnnotationViolation\n LiteralNoneAnnotation\n\n \"\"\"\n self._check_arg_annotation(node)\n self.generic_visit(node)\n\n def _check_arg_annotation(self, node: ast.arg) -> None:\n for sub_node in ast.walk(node):\n lineno = getattr(sub_node, 'lineno', None)\n if lineno and lineno != node.lineno:\n self.add_violation(MultilineFunctionAnnotationViolation(node))\n return\n\n def _check_return_annotation(self, node: AnyFunctionDef) -> None:\n if not node.returns:\n return\n\n for sub_node in ast.walk(node.returns):\n lineno = getattr(sub_node, 'lineno', None)\n if lineno and lineno != node.returns.lineno:\n self.add_violation(MultilineFunctionAnnotationViolation(node))\n return\n", "path": "wemake_python_styleguide/visitors/ast/annotations.py"}]}
2,439
723
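The wemake-python-styleguide record above reduces to a plain substring test on the stringified annotation, which is easy to try in isolation. The sketch below is not project code; `nests_union_in_optional` is an illustrative stand-in for the `_check_union_nested_in_optional` method added in the golden diff, and the `assert` simply confirms the equality claimed in the issue text.

```python
from typing import Optional, Union

# The typing module already flattens the nested spelling at runtime, which is
# why the issue calls the two forms equal and asks for the flat one.
assert Optional[Union[int, str]] == Union[int, str, None]


def nests_union_in_optional(annotation_string: str) -> bool:
    """Return True when an annotation string spells ``Optional[Union[...]]``.

    Mirrors the substring check from the golden diff; prefixes are stripped
    the same way the record's ``_get_annotation`` helper does.
    """
    for prefix in ("typing.", "typing_extensions."):
        annotation_string = annotation_string.replace(prefix, "")
    return "Optional[Union[" in annotation_string


print(nests_union_in_optional("typing.Optional[typing.Union[int, str]]"))  # True: nested form, flagged
print(nests_union_in_optional("Union[int, str, None]"))                    # False: flat form, preferred
```

Because the runtime types are identical, the rule is purely a source-level consistency check, which is why a string comparison on the rendered annotation is enough in the patch.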
gh_patches_debug_9889
rasdani/github-patches
git_diff
pulp__pulpcore-299
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [PR #5443/c7079c89 backport][3.54] Pin the pytest-redis requirement for unit tests **This is a backport of PR #5443 as merged into main (c7079c891b87ecd9073260db5baa35826803f635).** [noissue] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pulpcore/app/serializers/content.py` Content: ``` 1 import hashlib 2 from gettext import gettext as _ 3 4 from django.db import transaction 5 from rest_framework import serializers 6 from rest_framework.validators import UniqueValidator 7 8 from pulpcore.app import models 9 from pulpcore.app.serializers import base, fields 10 11 UNIQUE_ALGORITHMS = ['sha256', 'sha384', 'sha512'] 12 13 14 class BaseContentSerializer(base.MasterModelSerializer): 15 _href = base.DetailIdentityField() 16 17 class Meta: 18 model = models.Content 19 fields = base.MasterModelSerializer.Meta.fields 20 21 22 class NoArtifactContentSerializer(BaseContentSerializer): 23 24 class Meta: 25 model = models.Content 26 fields = BaseContentSerializer.Meta.fields 27 28 29 class SingleArtifactContentSerializer(BaseContentSerializer): 30 artifact = fields.SingleContentArtifactField( 31 help_text=_("Artifact file representing the physical content"), 32 ) 33 34 relative_path = serializers.CharField( 35 help_text=_("Path where the artifact is located relative to distributions base_path"), 36 validators=[fields.relative_path_validator], 37 write_only=True, 38 ) 39 40 def __init__(self, *args, **kwargs): 41 """ 42 Initializer for SingleArtifactContentSerializer 43 """ 44 super().__init__(*args, **kwargs) 45 46 # If the content model has its own database field 'relative_path', 47 # we should not mark the field write_only 48 if hasattr(self.Meta.model, 'relative_path') and "relative_path" in self.fields: 49 self.fields["relative_path"].write_only = False 50 51 @transaction.atomic 52 def create(self, validated_data): 53 """ 54 Create the content and associate it with its Artifact. 55 56 Args: 57 validated_data (dict): Data to save to the database 58 """ 59 artifact = validated_data.pop('artifact') 60 if "relative_path" in self.fields and self.fields["relative_path"].write_only: 61 relative_path = validated_data.pop('relative_path') 62 else: 63 relative_path = validated_data.get('relative_path') 64 content = self.Meta.model.objects.create(**validated_data) 65 models.ContentArtifact.objects.create( 66 artifact=artifact, 67 content=content, 68 relative_path=relative_path, 69 ) 70 return content 71 72 class Meta: 73 model = models.Content 74 fields = BaseContentSerializer.Meta.fields + ('artifact', 'relative_path') 75 76 77 class MultipleArtifactContentSerializer(BaseContentSerializer): 78 artifacts = fields.ContentArtifactsField( 79 help_text=_("A dict mapping relative paths inside the Content to the corresponding" 80 "Artifact URLs. E.g.: {'relative/path': " 81 "'/artifacts/1/'"), 82 ) 83 84 @transaction.atomic 85 def create(self, validated_data): 86 """ 87 Create the content and associate it with all its Artifacts. 
88 89 Args: 90 validated_data (dict): Data to save to the database 91 """ 92 artifacts = validated_data.pop('artifacts') 93 content = self.Meta.model.objects.create(**validated_data) 94 for relative_path, artifact in artifacts.items(): 95 models.ContentArtifact.objects.create( 96 artifact=artifact, 97 content=content, 98 relative_path=relative_path, 99 ) 100 return content 101 102 class Meta: 103 model = models.Content 104 fields = BaseContentSerializer.Meta.fields + ('artifacts',) 105 106 107 class ContentChecksumSerializer(serializers.Serializer): 108 """ 109 Provide a serializer with artifact checksum fields for single artifact content. 110 111 If you use this serializer, it's recommended that you prefetch artifacts: 112 113 Content.objects.prefetch_related("_artifacts").all() 114 """ 115 116 md5 = fields.ContentArtifactChecksumField( 117 help_text=_("The MD5 checksum if available."), 118 checksum='md5', 119 ) 120 121 sha1 = fields.ContentArtifactChecksumField( 122 help_text=_("The SHA-1 checksum if available."), 123 checksum='sha1', 124 ) 125 126 sha224 = fields.ContentArtifactChecksumField( 127 help_text=_("The SHA-224 checksum if available."), 128 checksum='sha224', 129 ) 130 131 sha256 = fields.ContentArtifactChecksumField( 132 help_text=_("The SHA-256 checksum if available."), 133 checksum='sha256', 134 ) 135 136 sha384 = fields.ContentArtifactChecksumField( 137 help_text=_("The SHA-384 checksum if available."), 138 checksum='sha384', 139 ) 140 141 sha512 = fields.ContentArtifactChecksumField( 142 help_text=_("The SHA-512 checksum if available."), 143 checksum='sha512', 144 ) 145 146 class Meta: 147 model = models.Content 148 fields = base.ModelSerializer.Meta.fields + ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 149 'sha512') 150 151 152 class ArtifactSerializer(base.ModelSerializer): 153 _href = base.IdentityField( 154 view_name='artifacts-detail', 155 ) 156 157 file = serializers.FileField( 158 help_text=_("The stored file."), 159 allow_empty_file=True, 160 required=False 161 ) 162 163 size = serializers.IntegerField( 164 help_text=_("The size of the file in bytes."), 165 required=False 166 ) 167 168 md5 = serializers.CharField( 169 help_text=_("The MD5 checksum of the file if available."), 170 required=False, 171 allow_null=True, 172 ) 173 174 sha1 = serializers.CharField( 175 help_text=_("The SHA-1 checksum of the file if available."), 176 required=False, 177 allow_null=True, 178 ) 179 180 sha224 = serializers.CharField( 181 help_text=_("The SHA-224 checksum of the file if available."), 182 required=False, 183 allow_null=True, 184 ) 185 186 sha256 = serializers.CharField( 187 help_text=_("The SHA-256 checksum of the file if available."), 188 required=False, 189 allow_null=True, 190 ) 191 192 sha384 = serializers.CharField( 193 help_text=_("The SHA-384 checksum of the file if available."), 194 required=False, 195 allow_null=True, 196 ) 197 198 sha512 = serializers.CharField( 199 help_text=_("The SHA-512 checksum of the file if available."), 200 required=False, 201 allow_null=True, 202 ) 203 204 def validate(self, data): 205 """ 206 Validate file by size and by all checksums provided. 207 208 Args: 209 data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their 210 values 211 212 Raises: 213 :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any 214 of the checksums don't match their actual values. 
215 """ 216 super().validate(data) 217 218 if 'size' in data: 219 if data['file'].size != int(data['size']): 220 raise serializers.ValidationError(_("The size did not match actual size of file.")) 221 else: 222 data['size'] = data['file'].size 223 224 for algorithm in hashlib.algorithms_guaranteed: 225 if algorithm in models.Artifact.DIGEST_FIELDS: 226 digest = data['file'].hashers[algorithm].hexdigest() 227 228 if algorithm in data and digest != data[algorithm]: 229 raise serializers.ValidationError(_("The %s checksum did not match.") 230 % algorithm) 231 else: 232 data[algorithm] = digest 233 if algorithm in UNIQUE_ALGORITHMS: 234 validator = UniqueValidator(models.Artifact.objects.all(), 235 message=_("{0} checksum must be " 236 "unique.").format(algorithm)) 237 validator.field_name = algorithm 238 validator.instance = None 239 validator(digest) 240 return data 241 242 class Meta: 243 model = models.Artifact 244 fields = base.ModelSerializer.Meta.fields + ('file', 'size', 'md5', 'sha1', 'sha224', 245 'sha256', 'sha384', 'sha512') 246 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py --- a/pulpcore/app/serializers/content.py +++ b/pulpcore/app/serializers/content.py @@ -57,7 +57,7 @@ validated_data (dict): Data to save to the database """ artifact = validated_data.pop('artifact') - if "relative_path" in self.fields and self.fields["relative_path"].write_only: + if "relative_path" not in self.fields or self.fields["relative_path"].write_only: relative_path = validated_data.pop('relative_path') else: relative_path = validated_data.get('relative_path')
{"golden_diff": "diff --git a/pulpcore/app/serializers/content.py b/pulpcore/app/serializers/content.py\n--- a/pulpcore/app/serializers/content.py\n+++ b/pulpcore/app/serializers/content.py\n@@ -57,7 +57,7 @@\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop('artifact')\n- if \"relative_path\" in self.fields and self.fields[\"relative_path\"].write_only:\n+ if \"relative_path\" not in self.fields or self.fields[\"relative_path\"].write_only:\n relative_path = validated_data.pop('relative_path')\n else:\n relative_path = validated_data.get('relative_path')\n", "issue": "[PR #5443/c7079c89 backport][3.54] Pin the pytest-redis requirement for unit tests\n**This is a backport of PR #5443 as merged into main (c7079c891b87ecd9073260db5baa35826803f635).**\n\n[noissue]\n", "before_files": [{"content": "import hashlib\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom pulpcore.app import models\nfrom pulpcore.app.serializers import base, fields\n\nUNIQUE_ALGORITHMS = ['sha256', 'sha384', 'sha512']\n\n\nclass BaseContentSerializer(base.MasterModelSerializer):\n _href = base.DetailIdentityField()\n\n class Meta:\n model = models.Content\n fields = base.MasterModelSerializer.Meta.fields\n\n\nclass NoArtifactContentSerializer(BaseContentSerializer):\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields\n\n\nclass SingleArtifactContentSerializer(BaseContentSerializer):\n artifact = fields.SingleContentArtifactField(\n help_text=_(\"Artifact file representing the physical content\"),\n )\n\n relative_path = serializers.CharField(\n help_text=_(\"Path where the artifact is located relative to distributions base_path\"),\n validators=[fields.relative_path_validator],\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializer for SingleArtifactContentSerializer\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # If the content model has its own database field 'relative_path',\n # we should not mark the field write_only\n if hasattr(self.Meta.model, 'relative_path') and \"relative_path\" in self.fields:\n self.fields[\"relative_path\"].write_only = False\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with its Artifact.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop('artifact')\n if \"relative_path\" in self.fields and self.fields[\"relative_path\"].write_only:\n relative_path = validated_data.pop('relative_path')\n else:\n relative_path = validated_data.get('relative_path')\n content = self.Meta.model.objects.create(**validated_data)\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('artifact', 'relative_path')\n\n\nclass MultipleArtifactContentSerializer(BaseContentSerializer):\n artifacts = fields.ContentArtifactsField(\n help_text=_(\"A dict mapping relative paths inside the Content to the corresponding\"\n \"Artifact URLs. 
E.g.: {'relative/path': \"\n \"'/artifacts/1/'\"),\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with all its Artifacts.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifacts = validated_data.pop('artifacts')\n content = self.Meta.model.objects.create(**validated_data)\n for relative_path, artifact in artifacts.items():\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('artifacts',)\n\n\nclass ContentChecksumSerializer(serializers.Serializer):\n \"\"\"\n Provide a serializer with artifact checksum fields for single artifact content.\n\n If you use this serializer, it's recommended that you prefetch artifacts:\n\n Content.objects.prefetch_related(\"_artifacts\").all()\n \"\"\"\n\n md5 = fields.ContentArtifactChecksumField(\n help_text=_(\"The MD5 checksum if available.\"),\n checksum='md5',\n )\n\n sha1 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-1 checksum if available.\"),\n checksum='sha1',\n )\n\n sha224 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-224 checksum if available.\"),\n checksum='sha224',\n )\n\n sha256 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-256 checksum if available.\"),\n checksum='sha256',\n )\n\n sha384 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-384 checksum if available.\"),\n checksum='sha384',\n )\n\n sha512 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-512 checksum if available.\"),\n checksum='sha512',\n )\n\n class Meta:\n model = models.Content\n fields = base.ModelSerializer.Meta.fields + ('md5', 'sha1', 'sha224', 'sha256', 'sha384',\n 'sha512')\n\n\nclass ArtifactSerializer(base.ModelSerializer):\n _href = base.IdentityField(\n view_name='artifacts-detail',\n )\n\n file = serializers.FileField(\n help_text=_(\"The stored file.\"),\n allow_empty_file=True,\n required=False\n )\n\n size = serializers.IntegerField(\n help_text=_(\"The size of the file in bytes.\"),\n required=False\n )\n\n md5 = serializers.CharField(\n help_text=_(\"The MD5 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha1 = serializers.CharField(\n help_text=_(\"The SHA-1 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha224 = serializers.CharField(\n help_text=_(\"The SHA-224 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha256 = serializers.CharField(\n help_text=_(\"The SHA-256 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha384 = serializers.CharField(\n help_text=_(\"The SHA-384 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha512 = serializers.CharField(\n help_text=_(\"The SHA-512 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n def validate(self, data):\n \"\"\"\n Validate file by size and by all checksums provided.\n\n Args:\n data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their\n values\n\n Raises:\n :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any\n of the checksums don't match their actual values.\n \"\"\"\n super().validate(data)\n\n if 'size' in data:\n if data['file'].size != int(data['size']):\n raise 
serializers.ValidationError(_(\"The size did not match actual size of file.\"))\n else:\n data['size'] = data['file'].size\n\n for algorithm in hashlib.algorithms_guaranteed:\n if algorithm in models.Artifact.DIGEST_FIELDS:\n digest = data['file'].hashers[algorithm].hexdigest()\n\n if algorithm in data and digest != data[algorithm]:\n raise serializers.ValidationError(_(\"The %s checksum did not match.\")\n % algorithm)\n else:\n data[algorithm] = digest\n if algorithm in UNIQUE_ALGORITHMS:\n validator = UniqueValidator(models.Artifact.objects.all(),\n message=_(\"{0} checksum must be \"\n \"unique.\").format(algorithm))\n validator.field_name = algorithm\n validator.instance = None\n validator(digest)\n return data\n\n class Meta:\n model = models.Artifact\n fields = base.ModelSerializer.Meta.fields + ('file', 'size', 'md5', 'sha1', 'sha224',\n 'sha256', 'sha384', 'sha512')\n", "path": "pulpcore/app/serializers/content.py"}], "after_files": [{"content": "import hashlib\nfrom gettext import gettext as _\n\nfrom django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueValidator\n\nfrom pulpcore.app import models\nfrom pulpcore.app.serializers import base, fields\n\nUNIQUE_ALGORITHMS = ['sha256', 'sha384', 'sha512']\n\n\nclass BaseContentSerializer(base.MasterModelSerializer):\n _href = base.DetailIdentityField()\n\n class Meta:\n model = models.Content\n fields = base.MasterModelSerializer.Meta.fields\n\n\nclass NoArtifactContentSerializer(BaseContentSerializer):\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields\n\n\nclass SingleArtifactContentSerializer(BaseContentSerializer):\n artifact = fields.SingleContentArtifactField(\n help_text=_(\"Artifact file representing the physical content\"),\n )\n\n relative_path = serializers.CharField(\n help_text=_(\"Path where the artifact is located relative to distributions base_path\"),\n validators=[fields.relative_path_validator],\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initializer for SingleArtifactContentSerializer\n \"\"\"\n super().__init__(*args, **kwargs)\n\n # If the content model has its own database field 'relative_path',\n # we should not mark the field write_only\n if hasattr(self.Meta.model, 'relative_path') and \"relative_path\" in self.fields:\n self.fields[\"relative_path\"].write_only = False\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with its Artifact.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifact = validated_data.pop('artifact')\n if \"relative_path\" not in self.fields or self.fields[\"relative_path\"].write_only:\n relative_path = validated_data.pop('relative_path')\n else:\n relative_path = validated_data.get('relative_path')\n content = self.Meta.model.objects.create(**validated_data)\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('artifact', 'relative_path')\n\n\nclass MultipleArtifactContentSerializer(BaseContentSerializer):\n artifacts = fields.ContentArtifactsField(\n help_text=_(\"A dict mapping relative paths inside the Content to the corresponding\"\n \"Artifact URLs. 
E.g.: {'relative/path': \"\n \"'/artifacts/1/'\"),\n )\n\n @transaction.atomic\n def create(self, validated_data):\n \"\"\"\n Create the content and associate it with all its Artifacts.\n\n Args:\n validated_data (dict): Data to save to the database\n \"\"\"\n artifacts = validated_data.pop('artifacts')\n content = self.Meta.model.objects.create(**validated_data)\n for relative_path, artifact in artifacts.items():\n models.ContentArtifact.objects.create(\n artifact=artifact,\n content=content,\n relative_path=relative_path,\n )\n return content\n\n class Meta:\n model = models.Content\n fields = BaseContentSerializer.Meta.fields + ('artifacts',)\n\n\nclass ContentChecksumSerializer(serializers.Serializer):\n \"\"\"\n Provide a serializer with artifact checksum fields for single artifact content.\n\n If you use this serializer, it's recommended that you prefetch artifacts:\n\n Content.objects.prefetch_related(\"_artifacts\").all()\n \"\"\"\n\n md5 = fields.ContentArtifactChecksumField(\n help_text=_(\"The MD5 checksum if available.\"),\n checksum='md5',\n )\n\n sha1 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-1 checksum if available.\"),\n checksum='sha1',\n )\n\n sha224 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-224 checksum if available.\"),\n checksum='sha224',\n )\n\n sha256 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-256 checksum if available.\"),\n checksum='sha256',\n )\n\n sha384 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-384 checksum if available.\"),\n checksum='sha384',\n )\n\n sha512 = fields.ContentArtifactChecksumField(\n help_text=_(\"The SHA-512 checksum if available.\"),\n checksum='sha512',\n )\n\n class Meta:\n model = models.Content\n fields = base.ModelSerializer.Meta.fields + ('md5', 'sha1', 'sha224', 'sha256', 'sha384',\n 'sha512')\n\n\nclass ArtifactSerializer(base.ModelSerializer):\n _href = base.IdentityField(\n view_name='artifacts-detail',\n )\n\n file = serializers.FileField(\n help_text=_(\"The stored file.\"),\n allow_empty_file=True,\n required=False\n )\n\n size = serializers.IntegerField(\n help_text=_(\"The size of the file in bytes.\"),\n required=False\n )\n\n md5 = serializers.CharField(\n help_text=_(\"The MD5 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha1 = serializers.CharField(\n help_text=_(\"The SHA-1 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha224 = serializers.CharField(\n help_text=_(\"The SHA-224 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha256 = serializers.CharField(\n help_text=_(\"The SHA-256 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha384 = serializers.CharField(\n help_text=_(\"The SHA-384 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n sha512 = serializers.CharField(\n help_text=_(\"The SHA-512 checksum of the file if available.\"),\n required=False,\n allow_null=True,\n )\n\n def validate(self, data):\n \"\"\"\n Validate file by size and by all checksums provided.\n\n Args:\n data (:class:`django.http.QueryDict`): QueryDict mapping Artifact model fields to their\n values\n\n Raises:\n :class:`rest_framework.exceptions.ValidationError`: When the expected file size or any\n of the checksums don't match their actual values.\n \"\"\"\n super().validate(data)\n\n if 'size' in data:\n if data['file'].size != int(data['size']):\n raise 
serializers.ValidationError(_(\"The size did not match actual size of file.\"))\n else:\n data['size'] = data['file'].size\n\n for algorithm in hashlib.algorithms_guaranteed:\n if algorithm in models.Artifact.DIGEST_FIELDS:\n digest = data['file'].hashers[algorithm].hexdigest()\n\n if algorithm in data and digest != data[algorithm]:\n raise serializers.ValidationError(_(\"The %s checksum did not match.\")\n % algorithm)\n else:\n data[algorithm] = digest\n if algorithm in UNIQUE_ALGORITHMS:\n validator = UniqueValidator(models.Artifact.objects.all(),\n message=_(\"{0} checksum must be \"\n \"unique.\").format(algorithm))\n validator.field_name = algorithm\n validator.instance = None\n validator(digest)\n return data\n\n class Meta:\n model = models.Artifact\n fields = base.ModelSerializer.Meta.fields + ('file', 'size', 'md5', 'sha1', 'sha224',\n 'sha256', 'sha384', 'sha512')\n", "path": "pulpcore/app/serializers/content.py"}]}
2,616
150
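For the pulpcore record just above, the whole fix is the flip from `in ... and` to `not in ... or` in `SingleArtifactContentSerializer.create`. The two conditions disagree only when the serializer has no `relative_path` field at all; in that case the patched code still pops the value out of `validated_data` instead of leaving it behind for `self.Meta.model.objects.create(**validated_data)`. A small truth-table sketch, using a plain dict of write_only flags as a stand-in for `serializer.fields` (nothing here is pulpcore code):

```python
def old_condition(fields: dict) -> bool:
    # `fields` maps field name -> write_only flag, standing in for serializer.fields.
    return "relative_path" in fields and fields["relative_path"]


def new_condition(fields: dict) -> bool:
    return "relative_path" not in fields or fields["relative_path"]


scenarios = {
    "no relative_path field":    {},
    "field present, write_only": {"relative_path": True},
    "field present, readable":   {"relative_path": False},
}
for name, fields in scenarios.items():
    old, new = old_condition(fields), new_condition(fields)
    flag = "  <- behaviour changes here" if old != new else ""
    print(f"{name:28} old={old!s:5} new={new!s:5}{flag}")
```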
gh_patches_debug_21325
rasdani/github-patches
git_diff
akvo__akvo-rsr-3020
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect mandatory validation for qualitative indicators The indicator measure for qualitative indicators is hidden, but the code checking for completion of sections doesn't ignore that field, and considers it mandatory. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/iati/checks/fields/results.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 8 def results(project): 9 """ 10 Check if result has a type, title and at least one indicator. 11 Check if indicator has a measure and title. 12 Check if indicator baseline has year and value. 13 Check if indicator period has a start and end date, and start before end. 14 15 :param project: Project object 16 :return: All checks passed boolean, [Check results] 17 """ 18 checks = [] 19 all_checks_passed = True 20 21 for result in project.results.all(): 22 if not result.type: 23 all_checks_passed = False 24 checks.append((u'error', u'result (id: %s) has no type specified' % str(result.pk))) 25 26 if not result.title: 27 all_checks_passed = False 28 checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk))) 29 30 if not result.indicators.all(): 31 all_checks_passed = False 32 checks.append((u'error', u'result (id: %s) has no indicator(s)' % str(result.pk))) 33 34 for indicator in result.indicators.all(): 35 if not indicator.measure: 36 all_checks_passed = False 37 checks.append((u'error', u'indicator (id: %s) has no measure specified' % 38 str(indicator.pk))) 39 40 if not indicator.title: 41 all_checks_passed = False 42 checks.append((u'error', u'indicator (id: %s) has no title specified' % 43 str(indicator.pk))) 44 45 if (indicator.baseline_value and not indicator.baseline_year) or \ 46 (not indicator.baseline_value and indicator.baseline_year): 47 all_checks_passed = False 48 checks.append((u'error', u'indicator (id: %s) baseline has no value or year ' 49 u'specified' % str(indicator.pk))) 50 51 for reference in indicator.references.all(): 52 if not reference.reference: 53 all_checks_passed = False 54 checks.append((u'error', u'indicator reference (id: %s) has no code ' 55 u'specified' % str(reference.pk))) 56 57 if not reference.vocabulary: 58 all_checks_passed = False 59 checks.append((u'error', u'indicator reference (id: %s) has no vocabulary ' 60 u'specified' % str(reference.pk))) 61 62 if reference.vocabulary == '99' and not reference.vocabulary_uri: 63 all_checks_passed = False 64 checks.append((u'error', u'indicator reference (id: %s) has vocabulary 99 ' 65 u'(reporting organisation) but no vocabulary URI ' 66 u'specified' % str(reference.pk))) 67 68 for period in indicator.periods.all(): 69 if not period.period_start: 70 all_checks_passed = False 71 checks.append((u'error', u'indicator period (id: %s) has no start date ' 72 u'specified' % str(period.pk))) 73 74 if not period.period_end: 75 all_checks_passed = False 76 checks.append((u'error', u'indicator period (id: %s) has no end date ' 77 u'specified' % str(period.pk))) 78 79 if period.period_start and period.period_end and \ 80 period.period_start > period.period_end: 81 all_checks_passed = False 
82 checks.append((u'error', u'indicator period (id: %s) has a start date ' 83 u'later than the end date' % str(period.pk))) 84 85 if not period.target_value and (period.target_comment or 86 period.target_locations.all() or 87 period.target_dimensions.all()): 88 all_checks_passed = False 89 checks.append((u'error', u'indicator period (id: %s) has no target value, but ' 90 u'does have a target comment, target location(s) or ' 91 u'target dimension(s)' % str(period.pk))) 92 93 if not period.actual_value and (period.actual_comment or 94 period.actual_locations.all() or 95 period.actual_dimensions.all()): 96 all_checks_passed = False 97 checks.append((u'error', u'indicator period (id: %s) has no actual value, but ' 98 u'does have an actual comment, actual location(s) or ' 99 u'actual dimension(s)' % str(period.pk))) 100 101 if project.results.all() and all_checks_passed: 102 checks.append((u'success', u'has valid result(s)')) 103 104 return all_checks_passed, checks 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/iati/checks/fields/results.py b/akvo/iati/checks/fields/results.py --- a/akvo/iati/checks/fields/results.py +++ b/akvo/iati/checks/fields/results.py @@ -4,6 +4,8 @@ # See more details in the license.txt file located at the root folder of the Akvo RSR module. # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. +from akvo.rsr.models.result.utils import QUALITATIVE + def results(project): """ @@ -32,7 +34,7 @@ checks.append((u'error', u'result (id: %s) has no indicator(s)' % str(result.pk))) for indicator in result.indicators.all(): - if not indicator.measure: + if indicator.type != QUALITATIVE and not indicator.measure: all_checks_passed = False checks.append((u'error', u'indicator (id: %s) has no measure specified' % str(indicator.pk)))
{"golden_diff": "diff --git a/akvo/iati/checks/fields/results.py b/akvo/iati/checks/fields/results.py\n--- a/akvo/iati/checks/fields/results.py\n+++ b/akvo/iati/checks/fields/results.py\n@@ -4,6 +4,8 @@\n # See more details in the license.txt file located at the root folder of the Akvo RSR module.\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n+from akvo.rsr.models.result.utils import QUALITATIVE\n+\n \n def results(project):\n \"\"\"\n@@ -32,7 +34,7 @@\n checks.append((u'error', u'result (id: %s) has no indicator(s)' % str(result.pk)))\n \n for indicator in result.indicators.all():\n- if not indicator.measure:\n+ if indicator.type != QUALITATIVE and not indicator.measure:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no measure specified' %\n str(indicator.pk)))\n", "issue": "Incorrect mandatory validation for qualitative indicators\nThe indicator measure for qualitative indicators is hidden, but the code checking for completion of sections doesn't ignore that field, and considers it mandatory. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\ndef results(project):\n \"\"\"\n Check if result has a type, title and at least one indicator.\n Check if indicator has a measure and title.\n Check if indicator baseline has year and value.\n Check if indicator period has a start and end date, and start before end.\n\n :param project: Project object\n :return: All checks passed boolean, [Check results]\n \"\"\"\n checks = []\n all_checks_passed = True\n\n for result in project.results.all():\n if not result.type:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no type specified' % str(result.pk)))\n\n if not result.title:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))\n\n if not result.indicators.all():\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no indicator(s)' % str(result.pk)))\n\n for indicator in result.indicators.all():\n if not indicator.measure:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no measure specified' %\n str(indicator.pk)))\n\n if not indicator.title:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no title specified' %\n str(indicator.pk)))\n\n if (indicator.baseline_value and not indicator.baseline_year) or \\\n (not indicator.baseline_value and indicator.baseline_year):\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) baseline has no value or year '\n u'specified' % str(indicator.pk)))\n\n for reference in indicator.references.all():\n if not reference.reference:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has no code '\n u'specified' % str(reference.pk)))\n\n if not reference.vocabulary:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has no vocabulary '\n u'specified' % str(reference.pk)))\n\n if reference.vocabulary == '99' and not reference.vocabulary_uri:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has vocabulary 99 '\n u'(reporting organisation) but no vocabulary URI '\n 
u'specified' % str(reference.pk)))\n\n for period in indicator.periods.all():\n if not period.period_start:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no start date '\n u'specified' % str(period.pk)))\n\n if not period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no end date '\n u'specified' % str(period.pk)))\n\n if period.period_start and period.period_end and \\\n period.period_start > period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has a start date '\n u'later than the end date' % str(period.pk)))\n\n if not period.target_value and (period.target_comment or\n period.target_locations.all() or\n period.target_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no target value, but '\n u'does have a target comment, target location(s) or '\n u'target dimension(s)' % str(period.pk)))\n\n if not period.actual_value and (period.actual_comment or\n period.actual_locations.all() or\n period.actual_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no actual value, but '\n u'does have an actual comment, actual location(s) or '\n u'actual dimension(s)' % str(period.pk)))\n\n if project.results.all() and all_checks_passed:\n checks.append((u'success', u'has valid result(s)'))\n\n return all_checks_passed, checks\n", "path": "akvo/iati/checks/fields/results.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom akvo.rsr.models.result.utils import QUALITATIVE\n\n\ndef results(project):\n \"\"\"\n Check if result has a type, title and at least one indicator.\n Check if indicator has a measure and title.\n Check if indicator baseline has year and value.\n Check if indicator period has a start and end date, and start before end.\n\n :param project: Project object\n :return: All checks passed boolean, [Check results]\n \"\"\"\n checks = []\n all_checks_passed = True\n\n for result in project.results.all():\n if not result.type:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no type specified' % str(result.pk)))\n\n if not result.title:\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no title specified' % str(result.pk)))\n\n if not result.indicators.all():\n all_checks_passed = False\n checks.append((u'error', u'result (id: %s) has no indicator(s)' % str(result.pk)))\n\n for indicator in result.indicators.all():\n if indicator.type != QUALITATIVE and not indicator.measure:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no measure specified' %\n str(indicator.pk)))\n\n if not indicator.title:\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) has no title specified' %\n str(indicator.pk)))\n\n if (indicator.baseline_value and not indicator.baseline_year) or \\\n (not indicator.baseline_value and indicator.baseline_year):\n all_checks_passed = False\n checks.append((u'error', u'indicator (id: %s) baseline has no value or year '\n u'specified' % str(indicator.pk)))\n\n for reference in indicator.references.all():\n if not reference.reference:\n all_checks_passed = 
False\n checks.append((u'error', u'indicator reference (id: %s) has no code '\n u'specified' % str(reference.pk)))\n\n if not reference.vocabulary:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has no vocabulary '\n u'specified' % str(reference.pk)))\n\n if reference.vocabulary == '99' and not reference.vocabulary_uri:\n all_checks_passed = False\n checks.append((u'error', u'indicator reference (id: %s) has vocabulary 99 '\n u'(reporting organisation) but no vocabulary URI '\n u'specified' % str(reference.pk)))\n\n for period in indicator.periods.all():\n if not period.period_start:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no start date '\n u'specified' % str(period.pk)))\n\n if not period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no end date '\n u'specified' % str(period.pk)))\n\n if period.period_start and period.period_end and \\\n period.period_start > period.period_end:\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has a start date '\n u'later than the end date' % str(period.pk)))\n\n if not period.target_value and (period.target_comment or\n period.target_locations.all() or\n period.target_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no target value, but '\n u'does have a target comment, target location(s) or '\n u'target dimension(s)' % str(period.pk)))\n\n if not period.actual_value and (period.actual_comment or\n period.actual_locations.all() or\n period.actual_dimensions.all()):\n all_checks_passed = False\n checks.append((u'error', u'indicator period (id: %s) has no actual value, but '\n u'does have an actual comment, actual location(s) or '\n u'actual dimension(s)' % str(period.pk)))\n\n if project.results.all() and all_checks_passed:\n checks.append((u'success', u'has valid result(s)'))\n\n return all_checks_passed, checks\n", "path": "akvo/iati/checks/fields/results.py"}]}
1,526
240
gh_patches_debug_14323
rasdani/github-patches
git_diff
zalando__patroni-698
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Could not load library ".../libpqwalreceiver.so" We compile and install Postgres in a non-standard location (/opt/custom/share/postgresql-9.6.2_7 in the example below) which requires that we specify the `LD_LIBRARY_PATH` env var when starting up postgres and when working with postgres utilities. When starting up Postgres, Patroni currently passes along `PATH`, `LC_ALL` and `LANG` env vars if they are set, but does not do the same with `LD_LIBRARY_PATH`. This results in the following FATAL replication error on the replica node: ``` 2018-06-05 21:10:26,675 INFO: Lock owner: postgresdb-01; I am postgresdb-02 2018-06-05 21:10:26,675 INFO: does not have lock 2018-06-05 21:10:26,676 INFO: no action. i am a secondary and i am following a leader FATAL: could not load library "/opt/custom/share/postgresql-9.6.2_7/lib/libpqwalreceiver.so": libpq.so.5: cannot open shared object file: No such file or directory FATAL: could not load library "/opt/custom/share/postgresql-9.6.2_7/lib/libpqwalreceiver.so": libpq.so.5: cannot open shared object file: No such file or directory ``` The fix for this is to pass `LD_LIBRARY_PATH` along with the other env vars, which results in replication behaving as expected: ``` 2018-06-05 22:03:08,722 INFO: Lock owner: postgresdb-01; I am postgresdb-02 2018-06-05 22:03:08,722 INFO: does not have lock 2018-06-05 22:03:08,727 DEBUG: Sending request(xid=49): SetData(path='/service/postgres_cluster/members/postgresdb-02', data='{"conn_url":"postgres://postgresdb-02:5432/postgres","api_url":"http://postgresdb-02:8008/patroni","timeline":1,"state":"running","role":"replica","xlog_location":184664496}', version=-1) 2018-06-05 22:03:08,737 DEBUG: Received response(xid=49): ZnodeStat(czxid=4294967413, mzxid=4294967497, ctime=1528235848173, mtime=1528236188727, version=28, cversion=0, aversion=0, ephemeralOwner=216194533074731009, dataLength=169, numChildren=0, pzxid=4294967413) 2018-06-05 22:03:08,741 INFO: no action. i am a secondary and i am following a leader ``` I will submit a pull request shortly that addresses this problem. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `patroni/postmaster.py` Content: ``` 1 import logging 2 import os 3 import psutil 4 import re 5 import signal 6 import subprocess 7 8 from patroni import call_self 9 10 logger = logging.getLogger(__name__) 11 12 STOP_SIGNALS = { 13 'smart': signal.SIGTERM, 14 'fast': signal.SIGINT, 15 'immediate': signal.SIGQUIT, 16 } 17 18 19 class PostmasterProcess(psutil.Process): 20 21 def __init__(self, pid): 22 self.is_single_user = False 23 if pid < 0: 24 pid = -pid 25 self.is_single_user = True 26 super(PostmasterProcess, self).__init__(pid) 27 28 @staticmethod 29 def _read_postmaster_pidfile(data_dir): 30 """Reads and parses postmaster.pid from the data directory 31 32 :returns dictionary of values if successful, empty dictionary otherwise 33 """ 34 pid_line_names = ['pid', 'data_dir', 'start_time', 'port', 'socket_dir', 'listen_addr', 'shmem_key'] 35 try: 36 with open(os.path.join(data_dir, 'postmaster.pid')) as f: 37 return {name: line.rstrip('\n') for name, line in zip(pid_line_names, f)} 38 except IOError: 39 return {} 40 41 def _is_postmaster_process(self): 42 try: 43 start_time = int(self._postmaster_pid.get('start_time', 0)) 44 if start_time and abs(self.create_time() - start_time) > 3: 45 logger.info('Too much difference between %s and %s', self.create_time(), start_time) 46 return False 47 except ValueError: 48 logger.warning('Garbage start time value in pid file: %r', self._postmaster_pid.get('start_time')) 49 50 # Extra safety check. The process can't be ourselves, our parent or our direct child. 51 if self.pid == os.getpid() or self.pid == os.getppid() or self.ppid() == os.getpid(): 52 logger.info('Patroni (pid=%s, ppid=%s), "fake postmaster" (pid=%s, ppid=%s)', 53 os.getpid(), os.getppid(), self.pid, self.ppid()) 54 return False 55 56 return True 57 58 @classmethod 59 def _from_pidfile(cls, data_dir): 60 postmaster_pid = PostmasterProcess._read_postmaster_pidfile(data_dir) 61 try: 62 pid = int(postmaster_pid.get('pid', 0)) 63 if pid: 64 proc = cls(pid) 65 proc._postmaster_pid = postmaster_pid 66 return proc 67 except ValueError: 68 pass 69 70 @staticmethod 71 def from_pidfile(data_dir): 72 try: 73 proc = PostmasterProcess._from_pidfile(data_dir) 74 return proc if proc and proc._is_postmaster_process() else None 75 except psutil.NoSuchProcess: 76 return None 77 78 @classmethod 79 def from_pid(cls, pid): 80 try: 81 return cls(pid) 82 except psutil.NoSuchProcess: 83 return None 84 85 def signal_stop(self, mode): 86 """Signal postmaster process to stop 87 88 :returns None if signaled, True if process is already gone, False if error 89 """ 90 if self.is_single_user: 91 logger.warning("Cannot stop server; single-user server is running (PID: {0})".format(self.pid)) 92 return False 93 try: 94 self.send_signal(STOP_SIGNALS[mode]) 95 except psutil.NoSuchProcess: 96 return True 97 except psutil.AccessDenied as e: 98 logger.warning("Could not send stop signal to PostgreSQL (error: {0})".format(e)) 99 return False 100 101 return None 102 103 def wait_for_user_backends_to_close(self): 104 # These regexps are cross checked against versions PostgreSQL 9.1 .. 9.6 105 aux_proc_re = re.compile("(?:postgres:)( .*:)? 
(?:""(?:startup|logger|checkpointer|writer|wal writer|" 106 "autovacuum launcher|autovacuum worker|stats collector|wal receiver|archiver|" 107 "wal sender) process|bgworker: )") 108 109 try: 110 user_backends = [] 111 user_backends_cmdlines = [] 112 for child in self.children(): 113 try: 114 cmdline = child.cmdline()[0] 115 if not aux_proc_re.match(cmdline): 116 user_backends.append(child) 117 user_backends_cmdlines.append(cmdline) 118 except psutil.NoSuchProcess: 119 pass 120 if user_backends: 121 logger.debug('Waiting for user backends %s to close', ', '.join(user_backends_cmdlines)) 122 psutil.wait_procs(user_backends) 123 logger.debug("Backends closed") 124 except psutil.Error: 125 logger.exception('wait_for_user_backends_to_close') 126 127 @staticmethod 128 def start(pgcommand, data_dir, conf, options): 129 # Unfortunately `pg_ctl start` does not return postmaster pid to us. Without this information 130 # it is hard to know the current state of postgres startup, so we had to reimplement pg_ctl start 131 # in python. It will start postgres, wait for port to be open and wait until postgres will start 132 # accepting connections. 133 # Important!!! We can't just start postgres using subprocess.Popen, because in this case it 134 # will be our child for the rest of our live and we will have to take care of it (`waitpid`). 135 # So we will use the same approach as pg_ctl uses: start a new process, which will start postgres. 136 # This process will write postmaster pid to stdout and exit immediately. Now it's responsibility 137 # of init process to take care about postmaster. 138 # In order to make everything portable we can't use fork&exec approach here, so we will call 139 # ourselves and pass list of arguments which must be used to start postgres. 140 env = {p: os.environ[p] for p in ('PATH', 'LC_ALL', 'LANG') if p in os.environ} 141 try: 142 proc = PostmasterProcess._from_pidfile(data_dir) 143 if proc and not proc._is_postmaster_process(): 144 # Upon start postmaster process performs various safety checks if there is a postmaster.pid 145 # file in the data directory. Although Patroni already detected that the running process 146 # corresponding to the postmaster.pid is not a postmaster, the new postmaster might fail 147 # to start, because it thinks that postmaster.pid is already locked. 148 # Important!!! Unlink of postmaster.pid isn't an option, because it has a lot of nasty race conditions. 149 # Luckily there is a workaround to this problem, we can pass the pid from postmaster.pid 150 # in the `PG_GRANDPARENT_PID` environment variable and postmaster will ignore it. 151 env['PG_GRANDPARENT_PID'] = str(proc.pid) 152 except psutil.NoSuchProcess: 153 pass 154 155 proc = call_self(['pg_ctl_start', pgcommand, '-D', data_dir, 156 '--config-file={}'.format(conf)] + options, close_fds=True, 157 preexec_fn=os.setsid, stdout=subprocess.PIPE, env=env) 158 pid = int(proc.stdout.readline().strip()) 159 proc.wait() 160 logger.info('postmaster pid=%s', pid) 161 162 # TODO: In an extremely unlikely case, the process could have exited and the pid reassigned. The start 163 # initiation time is not accurate enough to compare to create time as start time would also likely 164 # be relatively close. We need the subprocess extract pid+start_time in a race free manner. 165 return PostmasterProcess.from_pid(pid) 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/patroni/postmaster.py b/patroni/postmaster.py --- a/patroni/postmaster.py +++ b/patroni/postmaster.py @@ -137,7 +137,7 @@ # of init process to take care about postmaster. # In order to make everything portable we can't use fork&exec approach here, so we will call # ourselves and pass list of arguments which must be used to start postgres. - env = {p: os.environ[p] for p in ('PATH', 'LC_ALL', 'LANG') if p in os.environ} + env = {p: os.environ[p] for p in ('PATH', 'LD_LIBRARY_PATH', 'LC_ALL', 'LANG') if p in os.environ} try: proc = PostmasterProcess._from_pidfile(data_dir) if proc and not proc._is_postmaster_process():
{"golden_diff": "diff --git a/patroni/postmaster.py b/patroni/postmaster.py\n--- a/patroni/postmaster.py\n+++ b/patroni/postmaster.py\n@@ -137,7 +137,7 @@\n # of init process to take care about postmaster.\n # In order to make everything portable we can't use fork&exec approach here, so we will call\n # ourselves and pass list of arguments which must be used to start postgres.\n- env = {p: os.environ[p] for p in ('PATH', 'LC_ALL', 'LANG') if p in os.environ}\n+ env = {p: os.environ[p] for p in ('PATH', 'LD_LIBRARY_PATH', 'LC_ALL', 'LANG') if p in os.environ}\n try:\n proc = PostmasterProcess._from_pidfile(data_dir)\n if proc and not proc._is_postmaster_process():\n", "issue": "Could not load library \".../libpqwalreceiver.so\"\nWe compile and install Postgres in a non-standard location (/opt/custom/share/postgresql-9.6.2_7 in the example below) which requires that we specify the `LD_LIBRARY_PATH` env var when starting up postgres and when working with postgres utilities. When starting up Postgres, Patroni currently passes along `PATH`, `LC_ALL` and `LANG` env vars if they are set, but does not do the same with `LD_LIBRARY_PATH`. This results in the following FATAL replication error on the replica node:\r\n```\r\n2018-06-05 21:10:26,675 INFO: Lock owner: postgresdb-01; I am postgresdb-02\r\n2018-06-05 21:10:26,675 INFO: does not have lock\r\n2018-06-05 21:10:26,676 INFO: no action. i am a secondary and i am following a leader\r\nFATAL: could not load library \"/opt/custom/share/postgresql-9.6.2_7/lib/libpqwalreceiver.so\": libpq.so.5: cannot open shared object file: No such file or directory\r\nFATAL: could not load library \"/opt/custom/share/postgresql-9.6.2_7/lib/libpqwalreceiver.so\": libpq.so.5: cannot open shared object file: No such file or directory\r\n```\r\n\r\nThe fix for this is to pass `LD_LIBRARY_PATH` along with the other env vars, which results in replication behaving as expected:\r\n```\r\n2018-06-05 22:03:08,722 INFO: Lock owner: postgresdb-01; I am postgresdb-02\r\n2018-06-05 22:03:08,722 INFO: does not have lock\r\n2018-06-05 22:03:08,727 DEBUG: Sending request(xid=49): SetData(path='/service/postgres_cluster/members/postgresdb-02', data='{\"conn_url\":\"postgres://postgresdb-02:5432/postgres\",\"api_url\":\"http://postgresdb-02:8008/patroni\",\"timeline\":1,\"state\":\"running\",\"role\":\"replica\",\"xlog_location\":184664496}', version=-1)\r\n2018-06-05 22:03:08,737 DEBUG: Received response(xid=49): ZnodeStat(czxid=4294967413, mzxid=4294967497, ctime=1528235848173, mtime=1528236188727, version=28, cversion=0, aversion=0, ephemeralOwner=216194533074731009, dataLength=169, numChildren=0, pzxid=4294967413)\r\n2018-06-05 22:03:08,741 INFO: no action. 
i am a secondary and i am following a leader\r\n```\r\n\r\nI will submit a pull request shortly that addresses this problem.\n", "before_files": [{"content": "import logging\nimport os\nimport psutil\nimport re\nimport signal\nimport subprocess\n\nfrom patroni import call_self\n\nlogger = logging.getLogger(__name__)\n\nSTOP_SIGNALS = {\n 'smart': signal.SIGTERM,\n 'fast': signal.SIGINT,\n 'immediate': signal.SIGQUIT,\n}\n\n\nclass PostmasterProcess(psutil.Process):\n\n def __init__(self, pid):\n self.is_single_user = False\n if pid < 0:\n pid = -pid\n self.is_single_user = True\n super(PostmasterProcess, self).__init__(pid)\n\n @staticmethod\n def _read_postmaster_pidfile(data_dir):\n \"\"\"Reads and parses postmaster.pid from the data directory\n\n :returns dictionary of values if successful, empty dictionary otherwise\n \"\"\"\n pid_line_names = ['pid', 'data_dir', 'start_time', 'port', 'socket_dir', 'listen_addr', 'shmem_key']\n try:\n with open(os.path.join(data_dir, 'postmaster.pid')) as f:\n return {name: line.rstrip('\\n') for name, line in zip(pid_line_names, f)}\n except IOError:\n return {}\n\n def _is_postmaster_process(self):\n try:\n start_time = int(self._postmaster_pid.get('start_time', 0))\n if start_time and abs(self.create_time() - start_time) > 3:\n logger.info('Too much difference between %s and %s', self.create_time(), start_time)\n return False\n except ValueError:\n logger.warning('Garbage start time value in pid file: %r', self._postmaster_pid.get('start_time'))\n\n # Extra safety check. The process can't be ourselves, our parent or our direct child.\n if self.pid == os.getpid() or self.pid == os.getppid() or self.ppid() == os.getpid():\n logger.info('Patroni (pid=%s, ppid=%s), \"fake postmaster\" (pid=%s, ppid=%s)',\n os.getpid(), os.getppid(), self.pid, self.ppid())\n return False\n\n return True\n\n @classmethod\n def _from_pidfile(cls, data_dir):\n postmaster_pid = PostmasterProcess._read_postmaster_pidfile(data_dir)\n try:\n pid = int(postmaster_pid.get('pid', 0))\n if pid:\n proc = cls(pid)\n proc._postmaster_pid = postmaster_pid\n return proc\n except ValueError:\n pass\n\n @staticmethod\n def from_pidfile(data_dir):\n try:\n proc = PostmasterProcess._from_pidfile(data_dir)\n return proc if proc and proc._is_postmaster_process() else None\n except psutil.NoSuchProcess:\n return None\n\n @classmethod\n def from_pid(cls, pid):\n try:\n return cls(pid)\n except psutil.NoSuchProcess:\n return None\n\n def signal_stop(self, mode):\n \"\"\"Signal postmaster process to stop\n\n :returns None if signaled, True if process is already gone, False if error\n \"\"\"\n if self.is_single_user:\n logger.warning(\"Cannot stop server; single-user server is running (PID: {0})\".format(self.pid))\n return False\n try:\n self.send_signal(STOP_SIGNALS[mode])\n except psutil.NoSuchProcess:\n return True\n except psutil.AccessDenied as e:\n logger.warning(\"Could not send stop signal to PostgreSQL (error: {0})\".format(e))\n return False\n\n return None\n\n def wait_for_user_backends_to_close(self):\n # These regexps are cross checked against versions PostgreSQL 9.1 .. 9.6\n aux_proc_re = re.compile(\"(?:postgres:)( .*:)? 
(?:\"\"(?:startup|logger|checkpointer|writer|wal writer|\"\n \"autovacuum launcher|autovacuum worker|stats collector|wal receiver|archiver|\"\n \"wal sender) process|bgworker: )\")\n\n try:\n user_backends = []\n user_backends_cmdlines = []\n for child in self.children():\n try:\n cmdline = child.cmdline()[0]\n if not aux_proc_re.match(cmdline):\n user_backends.append(child)\n user_backends_cmdlines.append(cmdline)\n except psutil.NoSuchProcess:\n pass\n if user_backends:\n logger.debug('Waiting for user backends %s to close', ', '.join(user_backends_cmdlines))\n psutil.wait_procs(user_backends)\n logger.debug(\"Backends closed\")\n except psutil.Error:\n logger.exception('wait_for_user_backends_to_close')\n\n @staticmethod\n def start(pgcommand, data_dir, conf, options):\n # Unfortunately `pg_ctl start` does not return postmaster pid to us. Without this information\n # it is hard to know the current state of postgres startup, so we had to reimplement pg_ctl start\n # in python. It will start postgres, wait for port to be open and wait until postgres will start\n # accepting connections.\n # Important!!! We can't just start postgres using subprocess.Popen, because in this case it\n # will be our child for the rest of our live and we will have to take care of it (`waitpid`).\n # So we will use the same approach as pg_ctl uses: start a new process, which will start postgres.\n # This process will write postmaster pid to stdout and exit immediately. Now it's responsibility\n # of init process to take care about postmaster.\n # In order to make everything portable we can't use fork&exec approach here, so we will call\n # ourselves and pass list of arguments which must be used to start postgres.\n env = {p: os.environ[p] for p in ('PATH', 'LC_ALL', 'LANG') if p in os.environ}\n try:\n proc = PostmasterProcess._from_pidfile(data_dir)\n if proc and not proc._is_postmaster_process():\n # Upon start postmaster process performs various safety checks if there is a postmaster.pid\n # file in the data directory. Although Patroni already detected that the running process\n # corresponding to the postmaster.pid is not a postmaster, the new postmaster might fail\n # to start, because it thinks that postmaster.pid is already locked.\n # Important!!! Unlink of postmaster.pid isn't an option, because it has a lot of nasty race conditions.\n # Luckily there is a workaround to this problem, we can pass the pid from postmaster.pid\n # in the `PG_GRANDPARENT_PID` environment variable and postmaster will ignore it.\n env['PG_GRANDPARENT_PID'] = str(proc.pid)\n except psutil.NoSuchProcess:\n pass\n\n proc = call_self(['pg_ctl_start', pgcommand, '-D', data_dir,\n '--config-file={}'.format(conf)] + options, close_fds=True,\n preexec_fn=os.setsid, stdout=subprocess.PIPE, env=env)\n pid = int(proc.stdout.readline().strip())\n proc.wait()\n logger.info('postmaster pid=%s', pid)\n\n # TODO: In an extremely unlikely case, the process could have exited and the pid reassigned. The start\n # initiation time is not accurate enough to compare to create time as start time would also likely\n # be relatively close. 
We need the subprocess extract pid+start_time in a race free manner.\n return PostmasterProcess.from_pid(pid)\n", "path": "patroni/postmaster.py"}], "after_files": [{"content": "import logging\nimport os\nimport psutil\nimport re\nimport signal\nimport subprocess\n\nfrom patroni import call_self\n\nlogger = logging.getLogger(__name__)\n\nSTOP_SIGNALS = {\n 'smart': signal.SIGTERM,\n 'fast': signal.SIGINT,\n 'immediate': signal.SIGQUIT,\n}\n\n\nclass PostmasterProcess(psutil.Process):\n\n def __init__(self, pid):\n self.is_single_user = False\n if pid < 0:\n pid = -pid\n self.is_single_user = True\n super(PostmasterProcess, self).__init__(pid)\n\n @staticmethod\n def _read_postmaster_pidfile(data_dir):\n \"\"\"Reads and parses postmaster.pid from the data directory\n\n :returns dictionary of values if successful, empty dictionary otherwise\n \"\"\"\n pid_line_names = ['pid', 'data_dir', 'start_time', 'port', 'socket_dir', 'listen_addr', 'shmem_key']\n try:\n with open(os.path.join(data_dir, 'postmaster.pid')) as f:\n return {name: line.rstrip('\\n') for name, line in zip(pid_line_names, f)}\n except IOError:\n return {}\n\n def _is_postmaster_process(self):\n try:\n start_time = int(self._postmaster_pid.get('start_time', 0))\n if start_time and abs(self.create_time() - start_time) > 3:\n logger.info('Too much difference between %s and %s', self.create_time(), start_time)\n return False\n except ValueError:\n logger.warning('Garbage start time value in pid file: %r', self._postmaster_pid.get('start_time'))\n\n # Extra safety check. The process can't be ourselves, our parent or our direct child.\n if self.pid == os.getpid() or self.pid == os.getppid() or self.ppid() == os.getpid():\n logger.info('Patroni (pid=%s, ppid=%s), \"fake postmaster\" (pid=%s, ppid=%s)',\n os.getpid(), os.getppid(), self.pid, self.ppid())\n return False\n\n return True\n\n @classmethod\n def _from_pidfile(cls, data_dir):\n postmaster_pid = PostmasterProcess._read_postmaster_pidfile(data_dir)\n try:\n pid = int(postmaster_pid.get('pid', 0))\n if pid:\n proc = cls(pid)\n proc._postmaster_pid = postmaster_pid\n return proc\n except ValueError:\n pass\n\n @staticmethod\n def from_pidfile(data_dir):\n try:\n proc = PostmasterProcess._from_pidfile(data_dir)\n return proc if proc and proc._is_postmaster_process() else None\n except psutil.NoSuchProcess:\n return None\n\n @classmethod\n def from_pid(cls, pid):\n try:\n return cls(pid)\n except psutil.NoSuchProcess:\n return None\n\n def signal_stop(self, mode):\n \"\"\"Signal postmaster process to stop\n\n :returns None if signaled, True if process is already gone, False if error\n \"\"\"\n if self.is_single_user:\n logger.warning(\"Cannot stop server; single-user server is running (PID: {0})\".format(self.pid))\n return False\n try:\n self.send_signal(STOP_SIGNALS[mode])\n except psutil.NoSuchProcess:\n return True\n except psutil.AccessDenied as e:\n logger.warning(\"Could not send stop signal to PostgreSQL (error: {0})\".format(e))\n return False\n\n return None\n\n def wait_for_user_backends_to_close(self):\n # These regexps are cross checked against versions PostgreSQL 9.1 .. 9.6\n aux_proc_re = re.compile(\"(?:postgres:)( .*:)? 
(?:\"\"(?:startup|logger|checkpointer|writer|wal writer|\"\n \"autovacuum launcher|autovacuum worker|stats collector|wal receiver|archiver|\"\n \"wal sender) process|bgworker: )\")\n\n try:\n user_backends = []\n user_backends_cmdlines = []\n for child in self.children():\n try:\n cmdline = child.cmdline()[0]\n if not aux_proc_re.match(cmdline):\n user_backends.append(child)\n user_backends_cmdlines.append(cmdline)\n except psutil.NoSuchProcess:\n pass\n if user_backends:\n logger.debug('Waiting for user backends %s to close', ', '.join(user_backends_cmdlines))\n psutil.wait_procs(user_backends)\n logger.debug(\"Backends closed\")\n except psutil.Error:\n logger.exception('wait_for_user_backends_to_close')\n\n @staticmethod\n def start(pgcommand, data_dir, conf, options):\n # Unfortunately `pg_ctl start` does not return postmaster pid to us. Without this information\n # it is hard to know the current state of postgres startup, so we had to reimplement pg_ctl start\n # in python. It will start postgres, wait for port to be open and wait until postgres will start\n # accepting connections.\n # Important!!! We can't just start postgres using subprocess.Popen, because in this case it\n # will be our child for the rest of our live and we will have to take care of it (`waitpid`).\n # So we will use the same approach as pg_ctl uses: start a new process, which will start postgres.\n # This process will write postmaster pid to stdout and exit immediately. Now it's responsibility\n # of init process to take care about postmaster.\n # In order to make everything portable we can't use fork&exec approach here, so we will call\n # ourselves and pass list of arguments which must be used to start postgres.\n env = {p: os.environ[p] for p in ('PATH', 'LD_LIBRARY_PATH', 'LC_ALL', 'LANG') if p in os.environ}\n try:\n proc = PostmasterProcess._from_pidfile(data_dir)\n if proc and not proc._is_postmaster_process():\n # Upon start postmaster process performs various safety checks if there is a postmaster.pid\n # file in the data directory. Although Patroni already detected that the running process\n # corresponding to the postmaster.pid is not a postmaster, the new postmaster might fail\n # to start, because it thinks that postmaster.pid is already locked.\n # Important!!! Unlink of postmaster.pid isn't an option, because it has a lot of nasty race conditions.\n # Luckily there is a workaround to this problem, we can pass the pid from postmaster.pid\n # in the `PG_GRANDPARENT_PID` environment variable and postmaster will ignore it.\n env['PG_GRANDPARENT_PID'] = str(proc.pid)\n except psutil.NoSuchProcess:\n pass\n\n proc = call_self(['pg_ctl_start', pgcommand, '-D', data_dir,\n '--config-file={}'.format(conf)] + options, close_fds=True,\n preexec_fn=os.setsid, stdout=subprocess.PIPE, env=env)\n pid = int(proc.stdout.readline().strip())\n proc.wait()\n logger.info('postmaster pid=%s', pid)\n\n # TODO: In an extremely unlikely case, the process could have exited and the pid reassigned. The start\n # initiation time is not accurate enough to compare to create time as start time would also likely\n # be relatively close. We need the subprocess extract pid+start_time in a race free manner.\n return PostmasterProcess.from_pid(pid)\n", "path": "patroni/postmaster.py"}]}
3,044
195
gh_patches_debug_16340
rasdani/github-patches
git_diff
qutip__qutip-2335
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typos in qutip.expect()'s docstring ### Bug Description Two words in the docstring contain typos. See below. ### Code to Reproduce the Bug ```shell from qutip import expect print(expect.__doc__) ``` ### Code Output ```shell Parameters ---------- oper : qobj/array-like A single or a `list` or operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. Returns ------- expt : float/complex/array-like Expectation value. ``real`` if `oper` is Hermitian, ``complex`` otherwise. A (nested) array of expectaction values of state or operator are arrays. ``` ### Expected Behaviour Parameters ---------- oper : qobj/array-like A single or a `list` **~~or~~** **_of_** operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. Returns ------- expt : float/complex/array-like Expectation value. ``real`` if `oper` is Hermitian, ``complex`` otherwise. A (nested) array of expectaction values **~~of~~** **_if_** state or operator are arrays. ### Your Environment ```shell QuTiP Version: 4.7.5 Numpy Version: 1.26.4 Scipy Version: 1.11.4 Cython Version: None Matplotlib Version: 3.8.3 Python Version: 3.12.2 Number of CPUs: 8 BLAS Info: Generic OPENMP Installed: False INTEL MKL Ext: False Platform Info: Windows (AMD64) ``` ### Additional Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qutip/core/expect.py` Content: ``` 1 __all__ = ['expect', 'variance'] 2 3 import numpy as np 4 5 from .qobj import Qobj 6 from . import data as _data 7 8 9 def expect(oper, state): 10 """ 11 Calculate the expectation value for operator(s) and state(s). The 12 expectation of state ``k`` on operator ``A`` is defined as 13 ``k.dag() @ A @ k``, and for density matrix ``R`` on operator ``A`` it is 14 ``trace(A @ R)``. 15 16 Parameters 17 ---------- 18 oper : qobj/array-like 19 A single or a `list` or operators for expectation value. 20 21 state : qobj/array-like 22 A single or a `list` of quantum states or density matrices. 23 24 Returns 25 ------- 26 expt : float/complex/array-like 27 Expectation value. ``real`` if ``oper`` is Hermitian, ``complex`` 28 otherwise. A (nested) array of expectaction values of state or operator 29 are arrays. 30 31 Examples 32 -------- 33 >>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE 34 True 35 36 """ 37 if isinstance(state, Qobj) and isinstance(oper, Qobj): 38 return _single_qobj_expect(oper, state) 39 40 elif isinstance(oper, (list, np.ndarray)): 41 if isinstance(state, Qobj): 42 dtype = np.complex128 43 if all(op.isherm for op in oper) and (state.isket or state.isherm): 44 dtype = np.float64 45 return np.array([_single_qobj_expect(op, state) for op in oper], 46 dtype=dtype) 47 return [expect(op, state) for op in oper] 48 49 elif isinstance(state, (list, np.ndarray)): 50 dtype = np.complex128 51 if oper.isherm and all(op.isherm or op.isket for op in state): 52 dtype = np.float64 53 return np.array([_single_qobj_expect(oper, x) for x in state], 54 dtype=dtype) 55 raise TypeError('Arguments must be quantum objects') 56 57 58 def _single_qobj_expect(oper, state): 59 """ 60 Private function used by expect to calculate expectation values of Qobjs. 
61 """ 62 if not oper.isoper or not (state.isket or state.isoper): 63 raise TypeError('invalid operand types') 64 if oper.dims[1] != state.dims[0]: 65 msg = ( 66 "incompatible dimensions " 67 + str(oper.dims[1]) + " and " + str(state.dims[0]) 68 ) 69 raise ValueError(msg) 70 out = _data.expect(oper.data, state.data) 71 72 # This ensures that expect can return something that is not a number such 73 # as a `tensorflow.Tensor` in qutip-tensorflow. 74 return out.real if (oper.isherm 75 and (state.isket or state.isherm) 76 and hasattr(out, "real") 77 ) else out 78 79 80 def variance(oper, state): 81 """ 82 Variance of an operator for the given state vector or density matrix. 83 84 Parameters 85 ---------- 86 oper : qobj 87 Operator for expectation value. 88 89 state : qobj/list 90 A single or ``list`` of quantum states or density matrices.. 91 92 Returns 93 ------- 94 var : float 95 Variance of operator 'oper' for given state. 96 97 """ 98 return expect(oper**2, state) - expect(oper, state)**2 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qutip/core/expect.py b/qutip/core/expect.py --- a/qutip/core/expect.py +++ b/qutip/core/expect.py @@ -16,7 +16,7 @@ Parameters ---------- oper : qobj/array-like - A single or a `list` or operators for expectation value. + A single or a `list` of operators for expectation value. state : qobj/array-like A single or a `list` of quantum states or density matrices. @@ -25,8 +25,8 @@ ------- expt : float/complex/array-like Expectation value. ``real`` if ``oper`` is Hermitian, ``complex`` - otherwise. A (nested) array of expectaction values of state or operator - are arrays. + otherwise. A (nested) array of expectaction values if ``state`` or + ``oper`` are arrays. Examples --------
{"golden_diff": "diff --git a/qutip/core/expect.py b/qutip/core/expect.py\n--- a/qutip/core/expect.py\n+++ b/qutip/core/expect.py\n@@ -16,7 +16,7 @@\n Parameters\n ----------\n oper : qobj/array-like\n- A single or a `list` or operators for expectation value.\n+ A single or a `list` of operators for expectation value.\n \n state : qobj/array-like\n A single or a `list` of quantum states or density matrices.\n@@ -25,8 +25,8 @@\n -------\n expt : float/complex/array-like\n Expectation value. ``real`` if ``oper`` is Hermitian, ``complex``\n- otherwise. A (nested) array of expectaction values of state or operator\n- are arrays.\n+ otherwise. A (nested) array of expectaction values if ``state`` or\n+ ``oper`` are arrays.\n \n Examples\n --------\n", "issue": "Typos in qutip.expect()'s docstring\n### Bug Description\r\n\r\nTwo words in the docstring contain typos. See below.\r\n\r\n### Code to Reproduce the Bug\r\n\r\n```shell\r\nfrom qutip import expect\r\nprint(expect.__doc__)\r\n```\r\n\r\n\r\n### Code Output\r\n\r\n```shell\r\nParameters\r\n----------\r\noper : qobj/array-like\r\n A single or a `list` or operators for expectation value.\r\n\r\nstate : qobj/array-like\r\n A single or a `list` of quantum states or density matrices.\r\n\r\nReturns\r\n-------\r\nexpt : float/complex/array-like\r\n Expectation value. ``real`` if `oper` is Hermitian, ``complex``\r\n otherwise. A (nested) array of expectaction values of state or operator\r\n are arrays.\r\n```\r\n\r\n\r\n### Expected Behaviour\r\n\r\nParameters\r\n----------\r\noper : qobj/array-like\r\n A single or a `list` **~~or~~** **_of_** operators for expectation value.\r\n\r\nstate : qobj/array-like\r\n A single or a `list` of quantum states or density matrices.\r\n\r\nReturns\r\n-------\r\nexpt : float/complex/array-like\r\n Expectation value. ``real`` if `oper` is Hermitian, ``complex``\r\n otherwise. A (nested) array of expectaction values **~~of~~** **_if_** state or operator\r\n are arrays.\r\n\r\n### Your Environment\r\n\r\n```shell\r\nQuTiP Version: 4.7.5\r\nNumpy Version: 1.26.4\r\nScipy Version: 1.11.4\r\nCython Version: None\r\nMatplotlib Version: 3.8.3\r\nPython Version: 3.12.2\r\nNumber of CPUs: 8\r\nBLAS Info: Generic\r\nOPENMP Installed: False\r\nINTEL MKL Ext: False\r\nPlatform Info: Windows (AMD64)\r\n```\r\n\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "__all__ = ['expect', 'variance']\n\nimport numpy as np\n\nfrom .qobj import Qobj\nfrom . import data as _data\n\n\ndef expect(oper, state):\n \"\"\"\n Calculate the expectation value for operator(s) and state(s). The\n expectation of state ``k`` on operator ``A`` is defined as\n ``k.dag() @ A @ k``, and for density matrix ``R`` on operator ``A`` it is\n ``trace(A @ R)``.\n\n Parameters\n ----------\n oper : qobj/array-like\n A single or a `list` or operators for expectation value.\n\n state : qobj/array-like\n A single or a `list` of quantum states or density matrices.\n\n Returns\n -------\n expt : float/complex/array-like\n Expectation value. ``real`` if ``oper`` is Hermitian, ``complex``\n otherwise. 
A (nested) array of expectaction values of state or operator\n are arrays.\n\n Examples\n --------\n >>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE\n True\n\n \"\"\"\n if isinstance(state, Qobj) and isinstance(oper, Qobj):\n return _single_qobj_expect(oper, state)\n\n elif isinstance(oper, (list, np.ndarray)):\n if isinstance(state, Qobj):\n dtype = np.complex128\n if all(op.isherm for op in oper) and (state.isket or state.isherm):\n dtype = np.float64\n return np.array([_single_qobj_expect(op, state) for op in oper],\n dtype=dtype)\n return [expect(op, state) for op in oper]\n\n elif isinstance(state, (list, np.ndarray)):\n dtype = np.complex128\n if oper.isherm and all(op.isherm or op.isket for op in state):\n dtype = np.float64\n return np.array([_single_qobj_expect(oper, x) for x in state],\n dtype=dtype)\n raise TypeError('Arguments must be quantum objects')\n\n\ndef _single_qobj_expect(oper, state):\n \"\"\"\n Private function used by expect to calculate expectation values of Qobjs.\n \"\"\"\n if not oper.isoper or not (state.isket or state.isoper):\n raise TypeError('invalid operand types')\n if oper.dims[1] != state.dims[0]:\n msg = (\n \"incompatible dimensions \"\n + str(oper.dims[1]) + \" and \" + str(state.dims[0])\n )\n raise ValueError(msg)\n out = _data.expect(oper.data, state.data)\n\n # This ensures that expect can return something that is not a number such\n # as a `tensorflow.Tensor` in qutip-tensorflow.\n return out.real if (oper.isherm\n and (state.isket or state.isherm)\n and hasattr(out, \"real\")\n ) else out\n\n\ndef variance(oper, state):\n \"\"\"\n Variance of an operator for the given state vector or density matrix.\n\n Parameters\n ----------\n oper : qobj\n Operator for expectation value.\n\n state : qobj/list\n A single or ``list`` of quantum states or density matrices..\n\n Returns\n -------\n var : float\n Variance of operator 'oper' for given state.\n\n \"\"\"\n return expect(oper**2, state) - expect(oper, state)**2\n", "path": "qutip/core/expect.py"}], "after_files": [{"content": "__all__ = ['expect', 'variance']\n\nimport numpy as np\n\nfrom .qobj import Qobj\nfrom . import data as _data\n\n\ndef expect(oper, state):\n \"\"\"\n Calculate the expectation value for operator(s) and state(s). The\n expectation of state ``k`` on operator ``A`` is defined as\n ``k.dag() @ A @ k``, and for density matrix ``R`` on operator ``A`` it is\n ``trace(A @ R)``.\n\n Parameters\n ----------\n oper : qobj/array-like\n A single or a `list` of operators for expectation value.\n\n state : qobj/array-like\n A single or a `list` of quantum states or density matrices.\n\n Returns\n -------\n expt : float/complex/array-like\n Expectation value. ``real`` if ``oper`` is Hermitian, ``complex``\n otherwise. 
A (nested) array of expectaction values if ``state`` or\n ``oper`` are arrays.\n\n Examples\n --------\n >>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE\n True\n\n \"\"\"\n if isinstance(state, Qobj) and isinstance(oper, Qobj):\n return _single_qobj_expect(oper, state)\n\n elif isinstance(oper, (list, np.ndarray)):\n if isinstance(state, Qobj):\n dtype = np.complex128\n if all(op.isherm for op in oper) and (state.isket or state.isherm):\n dtype = np.float64\n return np.array([_single_qobj_expect(op, state) for op in oper],\n dtype=dtype)\n return [expect(op, state) for op in oper]\n\n elif isinstance(state, (list, np.ndarray)):\n dtype = np.complex128\n if oper.isherm and all(op.isherm or op.isket for op in state):\n dtype = np.float64\n return np.array([_single_qobj_expect(oper, x) for x in state],\n dtype=dtype)\n raise TypeError('Arguments must be quantum objects')\n\n\ndef _single_qobj_expect(oper, state):\n \"\"\"\n Private function used by expect to calculate expectation values of Qobjs.\n \"\"\"\n if not oper.isoper or not (state.isket or state.isoper):\n raise TypeError('invalid operand types')\n if oper.dims[1] != state.dims[0]:\n msg = (\n \"incompatible dimensions \"\n + str(oper.dims[1]) + \" and \" + str(state.dims[0])\n )\n raise ValueError(msg)\n out = _data.expect(oper.data, state.data)\n\n # This ensures that expect can return something that is not a number such\n # as a `tensorflow.Tensor` in qutip-tensorflow.\n return out.real if (oper.isherm\n and (state.isket or state.isherm)\n and hasattr(out, \"real\")\n ) else out\n\n\ndef variance(oper, state):\n \"\"\"\n Variance of an operator for the given state vector or density matrix.\n\n Parameters\n ----------\n oper : qobj\n Operator for expectation value.\n\n state : qobj/list\n A single or ``list`` of quantum states or density matrices..\n\n Returns\n -------\n var : float\n Variance of operator 'oper' for given state.\n\n \"\"\"\n return expect(oper**2, state) - expect(oper, state)**2\n", "path": "qutip/core/expect.py"}]}
1,644
220
gh_patches_debug_22545
rasdani/github-patches
git_diff
learningequality__kolibri-3179
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Rename Buildkite assets for clarity ## Summary When downloading builds from `buildkite/kolibri/assets` details link, filenames are too complex and difficult to distinguish. ![selection_655](https://user-images.githubusercontent.com/1457929/31338580-259d1370-ad00-11e7-9c8a-f890d2515631.png) These links do save some time, but filenames are longer then those downloaded individually from Buildkite (green). Could we make them shorter? For example, get rid of the useless prefix and various `%2F` in between in order to make this: `kolibri%2Fbuildkite%2Fbuild-2313%2F2755%2Fkolibri-0.6.dev020170929214748-git.pex` (red) into this: `build-2313-kolibri-0.6.dev020170929214748-git.pex` (yellow) Or just `kolibri-0.6.dev020170929214748-git.pex` if those build details are not necessary. ![selection_653](https://user-images.githubusercontent.com/1457929/31339729-198fddf2-ad04-11e7-8b85-d1e0125600e4.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `.buildkite/upload_artifacts.py` Content: ``` 1 """ 2 # Requirements: 3 * Generate access token in your Github account, then create environment variable GITHUB_ACCESS_TOKEN. 4 - e.g export GITHUB_ACCESS_TOKEN=1ns3rt-my-t0k3n-h3re. 5 6 * Generate a service account key for your Google API credentials, then create environment variable GOOGLE_APPLICATION_CREDENTIALS. 7 - e.g export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json. 8 9 # Environment Variable/s: 10 * IS_KOLIBRI_RELEASE = Upload artifacts to the Google Cloud as a release candidate. 11 * GITHUB_ACCESS_TOKEN = Personal access token used to authenticate in your Github account via API. 12 * BUILDKITE_BUILD_NUMBER = Build identifier for each directory created. 13 * BUILDKITE_PULL_REQUEST = Pull request issue or the value is false. 14 * BUILDKITE_TAG = Tag identifier if this build was built from a tag. 15 * BUILDKITE_COMMIT = Git commit hash that the build was made from. 16 * GOOGLE_APPLICATION_CREDENTIALS = Your service account key. 
17 """ 18 import logging 19 import os 20 import sys 21 from os import listdir 22 23 import requests 24 from gcloud import storage 25 from github3 import login 26 27 logging.getLogger().setLevel(logging.INFO) 28 29 ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN") 30 REPO_OWNER = "learningequality" 31 REPO_NAME = "kolibri" 32 ISSUE_ID = os.getenv("BUILDKITE_PULL_REQUEST") 33 BUILD_ID = os.getenv("BUILDKITE_BUILD_NUMBER") 34 TAG = os.getenv("BUILDKITE_TAG") 35 COMMIT = os.getenv("BUILDKITE_COMMIT") 36 37 38 RELEASE_DIR = 'release' 39 PROJECT_PATH = os.path.join(os.getcwd()) 40 41 # Python packages artifact location 42 DIST_DIR = os.path.join(PROJECT_PATH, "dist") 43 44 # Installer artifact location 45 INSTALLER_DIR = os.path.join(PROJECT_PATH, "installer") 46 47 headers = {'Authorization': 'token %s' % ACCESS_TOKEN} 48 49 INSTALLER_CAT = 'Installers' 50 51 PYTHON_PKG_CAT = 'Python packages' 52 53 # Manifest of files, keyed by extension 54 file_manifest = { 55 'exe': { 56 'extension': 'exe', 57 'description': 'Windows Installer', 58 'category': INSTALLER_CAT, 59 'content_type': 'application/x-ms-dos-executable', 60 }, 61 'pex': { 62 'extension': 'pex', 63 'description': 'Pex file', 64 'category': PYTHON_PKG_CAT, 65 'content_type': 'application/octet-stream', 66 }, 67 'whl': { 68 'extension': 'whl', 69 'description': 'Whl file', 70 'category': PYTHON_PKG_CAT, 71 'content_type': 'application/zip', 72 }, 73 'gz': { 74 'extension': 'gz', 75 'description': 'Tar file', 76 'category': PYTHON_PKG_CAT, 77 'content_type': 'application/gzip', 78 }, 79 'apk': { 80 'extension': 'apk', 81 'description': 'Android Installer', 82 'category': INSTALLER_CAT, 83 'content_type': 'application/vnd.android.package-archive', 84 }, 85 } 86 87 file_order = [ 88 'exe', 89 'apk', 90 'pex', 91 'whl', 92 'gz', 93 ] 94 95 gh = login(token=ACCESS_TOKEN) 96 repository = gh.repository(REPO_OWNER, REPO_NAME) 97 98 99 def create_status_report_html(artifacts): 100 """ 101 Create html page to list build artifacts for linking from github status. 102 """ 103 html = "<html>\n<body>\n<h1>Build Artifacts</h1>\n" 104 current_heading = None 105 for ext in file_order: 106 artifact = artifacts[ext] 107 if artifact['category'] != current_heading: 108 current_heading = artifact['category'] 109 html += "<h2>{heading}</h2>\n".format(heading=current_heading) 110 html += "<p>{description}: <a href='{media_url}'>{name}</a></p>\n".format( 111 **artifact 112 ) 113 html += "</body>\n</html>" 114 return html 115 116 def create_github_status(report_url): 117 """ 118 Create a github status with a link to the report URL, 119 only do this once buildkite has been successful, so only report success here. 120 """ 121 status = repository.create_status( 122 COMMIT, 123 "success", 124 target_url=report_url, 125 description="Kolibri Buildkite assets", 126 context="buildkite/kolibri/assets" 127 ) 128 if status: 129 logging.info('Successfully created Github status for commit %s.' % COMMIT) 130 else: 131 logging.info('Error encounter. Now exiting!') 132 sys.exit(1) 133 134 135 def collect_local_artifacts(): 136 """ 137 Create a dict of the artifact name and the location. 138 """ 139 140 artifacts_dict = {} 141 142 def create_artifact_data(artifact_dir): 143 for artifact in listdir(artifact_dir): 144 filename, file_extension = os.path.splitext(artifact) 145 # Remove leading '.' 
146 file_extension = file_extension[1:] 147 if file_extension in file_manifest: 148 data = {"name": artifact, 149 "file_location": "%s/%s" % (artifact_dir, artifact)} 150 data.update(file_manifest[file_extension]) 151 logging.info("Collect file data: (%s)" % data) 152 artifacts_dict[file_extension] = data 153 create_artifact_data(DIST_DIR) 154 create_artifact_data(INSTALLER_DIR) 155 return artifacts_dict 156 157 158 def upload_artifacts(): 159 """ 160 Upload the artifacts on the Google Cloud Storage. 161 Create a comment on the pull requester with artifact media link. 162 """ 163 client = storage.Client() 164 bucket = client.bucket("le-downloads") 165 artifacts = collect_local_artifacts() 166 is_release = os.getenv("IS_KOLIBRI_RELEASE") 167 for file_data in artifacts.values(): 168 logging.info("Uploading file (%s)" % (file_data.get("name"))) 169 if is_release: 170 blob = bucket.blob('kolibri/%s/%s/%s' % (RELEASE_DIR, BUILD_ID, file_data.get("name"))) 171 else: 172 blob = bucket.blob('kolibri/buildkite/build-%s/%s/%s' % (ISSUE_ID, BUILD_ID, file_data.get("name"))) 173 blob.upload_from_filename(filename=file_data.get("file_location")) 174 blob.make_public() 175 file_data.update({'media_url': blob.media_link}) 176 177 html = create_status_report_html(artifacts) 178 179 blob = bucket.blob('kolibri/%s/%s/report.html' % (RELEASE_DIR, BUILD_ID)) 180 181 blob.upload_from_string(html, content_type='text/html') 182 183 blob.make_public() 184 185 create_github_status(blob.public_url) 186 187 if TAG: 188 # Building from a tag, this is probably a release! 189 # Have to do this with requests because github3 does not support this interface yet 190 get_release_asset_url = requests.get("https://api.github.com/repos/{owner}/{repo}/releases/tags/{tag}".format( 191 owner=REPO_OWNER, 192 repo=REPO_NAME, 193 tag=TAG, 194 )) 195 if get_release_asset_url.status_code == 200: 196 # Definitely a release! 197 release_id = get_release_asset_url.json()['id'] 198 release_name = get_release_asset_url.json()['name'] 199 release = repository.release(id=release_id) 200 logging.info("Uploading built assets to Github Release: %s" % release_name) 201 for file_extension in file_order: 202 artifact = artifacts[file_extension] 203 logging.info("Uploading release asset: %s" % (artifact.get("name"))) 204 # For some reason github3 does not let us set a label at initial upload 205 asset = release.upload_asset( 206 content_type=artifact['content_type'], 207 name=artifact['name'], 208 asset=open(artifact['file_location'], 'rb') 209 ) 210 if asset: 211 # So do it after the initial upload instead 212 asset.edit(artifact['name'], label=artifact['description']) 213 logging.info("Successfully uploaded release asset: %s" % (artifact.get('name'))) 214 else: 215 logging.error("Error uploading release asset: %s" % (artifact.get('name'))) 216 217 218 def main(): 219 upload_artifacts() 220 221 222 if __name__ == "__main__": 223 main() 224 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/.buildkite/upload_artifacts.py b/.buildkite/upload_artifacts.py --- a/.buildkite/upload_artifacts.py +++ b/.buildkite/upload_artifacts.py @@ -167,16 +167,16 @@ for file_data in artifacts.values(): logging.info("Uploading file (%s)" % (file_data.get("name"))) if is_release: - blob = bucket.blob('kolibri/%s/%s/%s' % (RELEASE_DIR, BUILD_ID, file_data.get("name"))) + blob = bucket.blob('kolibri-%s-%s-%s' % (RELEASE_DIR, BUILD_ID, file_data.get("name"))) else: - blob = bucket.blob('kolibri/buildkite/build-%s/%s/%s' % (ISSUE_ID, BUILD_ID, file_data.get("name"))) + blob = bucket.blob('kolibri-buildkite-build-%s-%s-%s' % (ISSUE_ID, BUILD_ID, file_data.get("name"))) blob.upload_from_filename(filename=file_data.get("file_location")) blob.make_public() file_data.update({'media_url': blob.media_link}) html = create_status_report_html(artifacts) - blob = bucket.blob('kolibri/%s/%s/report.html' % (RELEASE_DIR, BUILD_ID)) + blob = bucket.blob('kolibri-%s-%s-report.html' % (RELEASE_DIR, BUILD_ID)) blob.upload_from_string(html, content_type='text/html')
{"golden_diff": "diff --git a/.buildkite/upload_artifacts.py b/.buildkite/upload_artifacts.py\n--- a/.buildkite/upload_artifacts.py\n+++ b/.buildkite/upload_artifacts.py\n@@ -167,16 +167,16 @@\n for file_data in artifacts.values():\n logging.info(\"Uploading file (%s)\" % (file_data.get(\"name\")))\n if is_release:\n- blob = bucket.blob('kolibri/%s/%s/%s' % (RELEASE_DIR, BUILD_ID, file_data.get(\"name\")))\n+ blob = bucket.blob('kolibri-%s-%s-%s' % (RELEASE_DIR, BUILD_ID, file_data.get(\"name\")))\n else:\n- blob = bucket.blob('kolibri/buildkite/build-%s/%s/%s' % (ISSUE_ID, BUILD_ID, file_data.get(\"name\")))\n+ blob = bucket.blob('kolibri-buildkite-build-%s-%s-%s' % (ISSUE_ID, BUILD_ID, file_data.get(\"name\")))\n blob.upload_from_filename(filename=file_data.get(\"file_location\"))\n blob.make_public()\n file_data.update({'media_url': blob.media_link})\n \n html = create_status_report_html(artifacts)\n \n- blob = bucket.blob('kolibri/%s/%s/report.html' % (RELEASE_DIR, BUILD_ID))\n+ blob = bucket.blob('kolibri-%s-%s-report.html' % (RELEASE_DIR, BUILD_ID))\n \n blob.upload_from_string(html, content_type='text/html')\n", "issue": "Rename Buildkite assets for clarity\n## Summary\r\n\r\nWhen downloading builds from `buildkite/kolibri/assets` details link, filenames are too complex and difficult to distinguish. \r\n\r\n![selection_655](https://user-images.githubusercontent.com/1457929/31338580-259d1370-ad00-11e7-9c8a-f890d2515631.png)\r\n\r\nThese links do save some time, but filenames are longer then those downloaded individually from Buildkite (green). Could we make them shorter? For example, get rid of the useless prefix and various `%2F` in between in order to make this:\r\n\r\n`kolibri%2Fbuildkite%2Fbuild-2313%2F2755%2Fkolibri-0.6.dev020170929214748-git.pex` (red)\r\n\r\ninto this:\r\n\r\n`build-2313-kolibri-0.6.dev020170929214748-git.pex` (yellow)\r\n\r\nOr just `kolibri-0.6.dev020170929214748-git.pex` if those build details are not necessary.\r\n\r\n![selection_653](https://user-images.githubusercontent.com/1457929/31339729-198fddf2-ad04-11e7-8b85-d1e0125600e4.png)\n", "before_files": [{"content": "\"\"\"\n# Requirements:\n * Generate access token in your Github account, then create environment variable GITHUB_ACCESS_TOKEN.\n - e.g export GITHUB_ACCESS_TOKEN=1ns3rt-my-t0k3n-h3re.\n\n * Generate a service account key for your Google API credentials, then create environment variable GOOGLE_APPLICATION_CREDENTIALS.\n - e.g export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json.\n\n# Environment Variable/s:\n * IS_KOLIBRI_RELEASE = Upload artifacts to the Google Cloud as a release candidate.\n * GITHUB_ACCESS_TOKEN = Personal access token used to authenticate in your Github account via API.\n * BUILDKITE_BUILD_NUMBER = Build identifier for each directory created.\n * BUILDKITE_PULL_REQUEST = Pull request issue or the value is false.\n * BUILDKITE_TAG = Tag identifier if this build was built from a tag.\n * BUILDKITE_COMMIT = Git commit hash that the build was made from.\n * GOOGLE_APPLICATION_CREDENTIALS = Your service account key.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom os import listdir\n\nimport requests\nfrom gcloud import storage\nfrom github3 import login\n\nlogging.getLogger().setLevel(logging.INFO)\n\nACCESS_TOKEN = os.getenv(\"GITHUB_ACCESS_TOKEN\")\nREPO_OWNER = \"learningequality\"\nREPO_NAME = \"kolibri\"\nISSUE_ID = os.getenv(\"BUILDKITE_PULL_REQUEST\")\nBUILD_ID = os.getenv(\"BUILDKITE_BUILD_NUMBER\")\nTAG = os.getenv(\"BUILDKITE_TAG\")\nCOMMIT = 
os.getenv(\"BUILDKITE_COMMIT\")\n\n\nRELEASE_DIR = 'release'\nPROJECT_PATH = os.path.join(os.getcwd())\n\n# Python packages artifact location\nDIST_DIR = os.path.join(PROJECT_PATH, \"dist\")\n\n# Installer artifact location\nINSTALLER_DIR = os.path.join(PROJECT_PATH, \"installer\")\n\nheaders = {'Authorization': 'token %s' % ACCESS_TOKEN}\n\nINSTALLER_CAT = 'Installers'\n\nPYTHON_PKG_CAT = 'Python packages'\n\n# Manifest of files, keyed by extension\nfile_manifest = {\n 'exe': {\n 'extension': 'exe',\n 'description': 'Windows Installer',\n 'category': INSTALLER_CAT,\n 'content_type': 'application/x-ms-dos-executable',\n },\n 'pex': {\n 'extension': 'pex',\n 'description': 'Pex file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/octet-stream',\n },\n 'whl': {\n 'extension': 'whl',\n 'description': 'Whl file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/zip',\n },\n 'gz': {\n 'extension': 'gz',\n 'description': 'Tar file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/gzip',\n },\n 'apk': {\n 'extension': 'apk',\n 'description': 'Android Installer',\n 'category': INSTALLER_CAT,\n 'content_type': 'application/vnd.android.package-archive',\n },\n}\n\nfile_order = [\n 'exe',\n 'apk',\n 'pex',\n 'whl',\n 'gz',\n]\n\ngh = login(token=ACCESS_TOKEN)\nrepository = gh.repository(REPO_OWNER, REPO_NAME)\n\n\ndef create_status_report_html(artifacts):\n \"\"\"\n Create html page to list build artifacts for linking from github status.\n \"\"\"\n html = \"<html>\\n<body>\\n<h1>Build Artifacts</h1>\\n\"\n current_heading = None\n for ext in file_order:\n artifact = artifacts[ext]\n if artifact['category'] != current_heading:\n current_heading = artifact['category']\n html += \"<h2>{heading}</h2>\\n\".format(heading=current_heading)\n html += \"<p>{description}: <a href='{media_url}'>{name}</a></p>\\n\".format(\n **artifact\n )\n html += \"</body>\\n</html>\"\n return html\n\ndef create_github_status(report_url):\n \"\"\"\n Create a github status with a link to the report URL,\n only do this once buildkite has been successful, so only report success here.\n \"\"\"\n status = repository.create_status(\n COMMIT,\n \"success\",\n target_url=report_url,\n description=\"Kolibri Buildkite assets\",\n context=\"buildkite/kolibri/assets\"\n )\n if status:\n logging.info('Successfully created Github status for commit %s.' % COMMIT)\n else:\n logging.info('Error encounter. 
Now exiting!')\n sys.exit(1)\n\n\ndef collect_local_artifacts():\n \"\"\"\n Create a dict of the artifact name and the location.\n \"\"\"\n\n artifacts_dict = {}\n\n def create_artifact_data(artifact_dir):\n for artifact in listdir(artifact_dir):\n filename, file_extension = os.path.splitext(artifact)\n # Remove leading '.'\n file_extension = file_extension[1:]\n if file_extension in file_manifest:\n data = {\"name\": artifact,\n \"file_location\": \"%s/%s\" % (artifact_dir, artifact)}\n data.update(file_manifest[file_extension])\n logging.info(\"Collect file data: (%s)\" % data)\n artifacts_dict[file_extension] = data\n create_artifact_data(DIST_DIR)\n create_artifact_data(INSTALLER_DIR)\n return artifacts_dict\n\n\ndef upload_artifacts():\n \"\"\"\n Upload the artifacts on the Google Cloud Storage.\n Create a comment on the pull requester with artifact media link.\n \"\"\"\n client = storage.Client()\n bucket = client.bucket(\"le-downloads\")\n artifacts = collect_local_artifacts()\n is_release = os.getenv(\"IS_KOLIBRI_RELEASE\")\n for file_data in artifacts.values():\n logging.info(\"Uploading file (%s)\" % (file_data.get(\"name\")))\n if is_release:\n blob = bucket.blob('kolibri/%s/%s/%s' % (RELEASE_DIR, BUILD_ID, file_data.get(\"name\")))\n else:\n blob = bucket.blob('kolibri/buildkite/build-%s/%s/%s' % (ISSUE_ID, BUILD_ID, file_data.get(\"name\")))\n blob.upload_from_filename(filename=file_data.get(\"file_location\"))\n blob.make_public()\n file_data.update({'media_url': blob.media_link})\n\n html = create_status_report_html(artifacts)\n\n blob = bucket.blob('kolibri/%s/%s/report.html' % (RELEASE_DIR, BUILD_ID))\n\n blob.upload_from_string(html, content_type='text/html')\n\n blob.make_public()\n\n create_github_status(blob.public_url)\n\n if TAG:\n # Building from a tag, this is probably a release!\n # Have to do this with requests because github3 does not support this interface yet\n get_release_asset_url = requests.get(\"https://api.github.com/repos/{owner}/{repo}/releases/tags/{tag}\".format(\n owner=REPO_OWNER,\n repo=REPO_NAME,\n tag=TAG,\n ))\n if get_release_asset_url.status_code == 200:\n # Definitely a release!\n release_id = get_release_asset_url.json()['id']\n release_name = get_release_asset_url.json()['name']\n release = repository.release(id=release_id)\n logging.info(\"Uploading built assets to Github Release: %s\" % release_name)\n for file_extension in file_order:\n artifact = artifacts[file_extension]\n logging.info(\"Uploading release asset: %s\" % (artifact.get(\"name\")))\n # For some reason github3 does not let us set a label at initial upload\n asset = release.upload_asset(\n content_type=artifact['content_type'],\n name=artifact['name'],\n asset=open(artifact['file_location'], 'rb')\n )\n if asset:\n # So do it after the initial upload instead\n asset.edit(artifact['name'], label=artifact['description'])\n logging.info(\"Successfully uploaded release asset: %s\" % (artifact.get('name')))\n else:\n logging.error(\"Error uploading release asset: %s\" % (artifact.get('name')))\n\n\ndef main():\n upload_artifacts()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": ".buildkite/upload_artifacts.py"}], "after_files": [{"content": "\"\"\"\n# Requirements:\n * Generate access token in your Github account, then create environment variable GITHUB_ACCESS_TOKEN.\n - e.g export GITHUB_ACCESS_TOKEN=1ns3rt-my-t0k3n-h3re.\n\n * Generate a service account key for your Google API credentials, then create environment variable GOOGLE_APPLICATION_CREDENTIALS.\n - e.g export 
GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json.\n\n# Environment Variable/s:\n * IS_KOLIBRI_RELEASE = Upload artifacts to the Google Cloud as a release candidate.\n * GITHUB_ACCESS_TOKEN = Personal access token used to authenticate in your Github account via API.\n * BUILDKITE_BUILD_NUMBER = Build identifier for each directory created.\n * BUILDKITE_PULL_REQUEST = Pull request issue or the value is false.\n * BUILDKITE_TAG = Tag identifier if this build was built from a tag.\n * BUILDKITE_COMMIT = Git commit hash that the build was made from.\n * GOOGLE_APPLICATION_CREDENTIALS = Your service account key.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom os import listdir\n\nimport requests\nfrom gcloud import storage\nfrom github3 import login\n\nlogging.getLogger().setLevel(logging.INFO)\n\nACCESS_TOKEN = os.getenv(\"GITHUB_ACCESS_TOKEN\")\nREPO_OWNER = \"learningequality\"\nREPO_NAME = \"kolibri\"\nISSUE_ID = os.getenv(\"BUILDKITE_PULL_REQUEST\")\nBUILD_ID = os.getenv(\"BUILDKITE_BUILD_NUMBER\")\nTAG = os.getenv(\"BUILDKITE_TAG\")\nCOMMIT = os.getenv(\"BUILDKITE_COMMIT\")\n\n\nRELEASE_DIR = 'release'\nPROJECT_PATH = os.path.join(os.getcwd())\n\n# Python packages artifact location\nDIST_DIR = os.path.join(PROJECT_PATH, \"dist\")\n\n# Installer artifact location\nINSTALLER_DIR = os.path.join(PROJECT_PATH, \"installer\")\n\nheaders = {'Authorization': 'token %s' % ACCESS_TOKEN}\n\nINSTALLER_CAT = 'Installers'\n\nPYTHON_PKG_CAT = 'Python packages'\n\n# Manifest of files, keyed by extension\nfile_manifest = {\n 'exe': {\n 'extension': 'exe',\n 'description': 'Windows Installer',\n 'category': INSTALLER_CAT,\n 'content_type': 'application/x-ms-dos-executable',\n },\n 'pex': {\n 'extension': 'pex',\n 'description': 'Pex file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/octet-stream',\n },\n 'whl': {\n 'extension': 'whl',\n 'description': 'Whl file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/zip',\n },\n 'gz': {\n 'extension': 'gz',\n 'description': 'Tar file',\n 'category': PYTHON_PKG_CAT,\n 'content_type': 'application/gzip',\n },\n 'apk': {\n 'extension': 'apk',\n 'description': 'Android Installer',\n 'category': INSTALLER_CAT,\n 'content_type': 'application/vnd.android.package-archive',\n },\n}\n\nfile_order = [\n 'exe',\n 'apk',\n 'pex',\n 'whl',\n 'gz',\n]\n\ngh = login(token=ACCESS_TOKEN)\nrepository = gh.repository(REPO_OWNER, REPO_NAME)\n\n\ndef create_status_report_html(artifacts):\n \"\"\"\n Create html page to list build artifacts for linking from github status.\n \"\"\"\n html = \"<html>\\n<body>\\n<h1>Build Artifacts</h1>\\n\"\n current_heading = None\n for ext in file_order:\n artifact = artifacts[ext]\n if artifact['category'] != current_heading:\n current_heading = artifact['category']\n html += \"<h2>{heading}</h2>\\n\".format(heading=current_heading)\n html += \"<p>{description}: <a href='{media_url}'>{name}</a></p>\\n\".format(\n **artifact\n )\n html += \"</body>\\n</html>\"\n return html\n\ndef create_github_status(report_url):\n \"\"\"\n Create a github status with a link to the report URL,\n only do this once buildkite has been successful, so only report success here.\n \"\"\"\n status = repository.create_status(\n COMMIT,\n \"success\",\n target_url=report_url,\n description=\"Kolibri Buildkite assets\",\n context=\"buildkite/kolibri/assets\"\n )\n if status:\n logging.info('Successfully created Github status for commit %s.' % COMMIT)\n else:\n logging.info('Error encounter. 
Now exiting!')\n sys.exit(1)\n\n\ndef collect_local_artifacts():\n \"\"\"\n Create a dict of the artifact name and the location.\n \"\"\"\n\n artifacts_dict = {}\n\n def create_artifact_data(artifact_dir):\n for artifact in listdir(artifact_dir):\n filename, file_extension = os.path.splitext(artifact)\n # Remove leading '.'\n file_extension = file_extension[1:]\n if file_extension in file_manifest:\n data = {\"name\": artifact,\n \"file_location\": \"%s/%s\" % (artifact_dir, artifact)}\n data.update(file_manifest[file_extension])\n logging.info(\"Collect file data: (%s)\" % data)\n artifacts_dict[file_extension] = data\n create_artifact_data(DIST_DIR)\n create_artifact_data(INSTALLER_DIR)\n return artifacts_dict\n\n\ndef upload_artifacts():\n \"\"\"\n Upload the artifacts on the Google Cloud Storage.\n Create a comment on the pull requester with artifact media link.\n \"\"\"\n client = storage.Client()\n bucket = client.bucket(\"le-downloads\")\n artifacts = collect_local_artifacts()\n is_release = os.getenv(\"IS_KOLIBRI_RELEASE\")\n for file_data in artifacts.values():\n logging.info(\"Uploading file (%s)\" % (file_data.get(\"name\")))\n if is_release:\n blob = bucket.blob('kolibri-%s-%s-%s' % (RELEASE_DIR, BUILD_ID, file_data.get(\"name\")))\n else:\n blob = bucket.blob('kolibri-buildkite-build-%s-%s-%s' % (ISSUE_ID, BUILD_ID, file_data.get(\"name\")))\n blob.upload_from_filename(filename=file_data.get(\"file_location\"))\n blob.make_public()\n file_data.update({'media_url': blob.media_link})\n\n html = create_status_report_html(artifacts)\n\n blob = bucket.blob('kolibri-%s-%s-report.html' % (RELEASE_DIR, BUILD_ID))\n\n blob.upload_from_string(html, content_type='text/html')\n\n blob.make_public()\n\n create_github_status(blob.public_url)\n\n if TAG:\n # Building from a tag, this is probably a release!\n # Have to do this with requests because github3 does not support this interface yet\n get_release_asset_url = requests.get(\"https://api.github.com/repos/{owner}/{repo}/releases/tags/{tag}\".format(\n owner=REPO_OWNER,\n repo=REPO_NAME,\n tag=TAG,\n ))\n if get_release_asset_url.status_code == 200:\n # Definitely a release!\n release_id = get_release_asset_url.json()['id']\n release_name = get_release_asset_url.json()['name']\n release = repository.release(id=release_id)\n logging.info(\"Uploading built assets to Github Release: %s\" % release_name)\n for file_extension in file_order:\n artifact = artifacts[file_extension]\n logging.info(\"Uploading release asset: %s\" % (artifact.get(\"name\")))\n # For some reason github3 does not let us set a label at initial upload\n asset = release.upload_asset(\n content_type=artifact['content_type'],\n name=artifact['name'],\n asset=open(artifact['file_location'], 'rb')\n )\n if asset:\n # So do it after the initial upload instead\n asset.edit(artifact['name'], label=artifact['description'])\n logging.info(\"Successfully uploaded release asset: %s\" % (artifact.get('name')))\n else:\n logging.error(\"Error uploading release asset: %s\" % (artifact.get('name')))\n\n\ndef main():\n upload_artifacts()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": ".buildkite/upload_artifacts.py"}]}
2,924
329
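For a quick sense of what the renaming in the diff above does to the download links quoted in the issue, the sketch below rebuilds the two blob-name patterns from the patch; `ISSUE_ID` and `BUILD_ID` are stand-ins for the Buildkite environment variables the script reads, and the file name is the pex example quoted in the issue.

```python
# Illustrative sketch of the flattened blob names produced by the patch above.
# ISSUE_ID / BUILD_ID stand in for BUILDKITE_PULL_REQUEST / BUILDKITE_BUILD_NUMBER.
RELEASE_DIR = "release"
ISSUE_ID = "2313"   # placeholder pull-request number
BUILD_ID = "2755"   # placeholder build number


def release_blob_name(file_name):
    # old: 'kolibri/%s/%s/%s'  -> nested path segments in the bucket
    # new: 'kolibri-%s-%s-%s'  -> single flat object name
    return "kolibri-%s-%s-%s" % (RELEASE_DIR, BUILD_ID, file_name)


def pr_blob_name(file_name):
    return "kolibri-buildkite-build-%s-%s-%s" % (ISSUE_ID, BUILD_ID, file_name)


print(pr_blob_name("kolibri-0.6.dev020170929214748-git.pex"))
# -> kolibri-buildkite-build-2313-2755-kolibri-0.6.dev020170929214748-git.pex
```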
gh_patches_debug_505
rasdani/github-patches
git_diff
googleapis__google-cloud-python-3693
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Increment threadsafety on BigQuery DB-API interface when httplib2 is removed From https://github.com/GoogleCloudPlatform/google-cloud-python/pull/2921/files/5b29a9e42a40f7c74ced0b7a7e9db0add52a20a1#r123596828 Blocked by https://github.com/GoogleCloudPlatform/google-cloud-python/issues/1998 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bigquery/google/cloud/bigquery/dbapi/__init__.py` Content: ``` 1 # Copyright 2017 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Google BigQuery implementation of the Database API Specification v2.0. 16 17 This module implements the `Python Database API Specification v2.0 (DB-API)`_ 18 for Google BigQuery. 19 20 .. _Python Database API Specification v2.0 (DB-API): 21 https://www.python.org/dev/peps/pep-0249/ 22 23 .. warning:: 24 The ``dbapi`` module is **alpha**. The implementation is not complete. It 25 might be changed in backward-incompatible ways and is not subject to any SLA 26 or deprecation policy. 27 """ 28 29 from google.cloud.bigquery.dbapi.connection import connect 30 from google.cloud.bigquery.dbapi.connection import Connection 31 from google.cloud.bigquery.dbapi.cursor import Cursor 32 from google.cloud.bigquery.dbapi.exceptions import Warning 33 from google.cloud.bigquery.dbapi.exceptions import Error 34 from google.cloud.bigquery.dbapi.exceptions import InterfaceError 35 from google.cloud.bigquery.dbapi.exceptions import DatabaseError 36 from google.cloud.bigquery.dbapi.exceptions import DataError 37 from google.cloud.bigquery.dbapi.exceptions import OperationalError 38 from google.cloud.bigquery.dbapi.exceptions import IntegrityError 39 from google.cloud.bigquery.dbapi.exceptions import InternalError 40 from google.cloud.bigquery.dbapi.exceptions import ProgrammingError 41 from google.cloud.bigquery.dbapi.exceptions import NotSupportedError 42 from google.cloud.bigquery.dbapi.types import Binary 43 from google.cloud.bigquery.dbapi.types import Date 44 from google.cloud.bigquery.dbapi.types import DateFromTicks 45 from google.cloud.bigquery.dbapi.types import Time 46 from google.cloud.bigquery.dbapi.types import TimeFromTicks 47 from google.cloud.bigquery.dbapi.types import Timestamp 48 from google.cloud.bigquery.dbapi.types import TimestampFromTicks 49 from google.cloud.bigquery.dbapi.types import BINARY 50 from google.cloud.bigquery.dbapi.types import DATETIME 51 from google.cloud.bigquery.dbapi.types import NUMBER 52 from google.cloud.bigquery.dbapi.types import ROWID 53 from google.cloud.bigquery.dbapi.types import STRING 54 55 56 apilevel = '2.0' 57 58 # Threads may share the module, but not connections. 
59 threadsafety = 1 60 61 paramstyle = 'pyformat' 62 63 __all__ = [ 64 'apilevel', 'threadsafety', 'paramstyle', 'connect', 'Connection', 65 'Cursor', 'Warning', 'Error', 'InterfaceError', 'DatabaseError', 66 'DataError', 'OperationalError', 'IntegrityError', 'InternalError', 67 'ProgrammingError', 'NotSupportedError', 'Binary', 'Date', 'DateFromTicks', 68 'Time', 'TimeFromTicks', 'Timestamp', 'TimestampFromTicks', 'BINARY', 69 'DATETIME', 'NUMBER', 'ROWID', 'STRING', 70 ] 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bigquery/google/cloud/bigquery/dbapi/__init__.py b/bigquery/google/cloud/bigquery/dbapi/__init__.py --- a/bigquery/google/cloud/bigquery/dbapi/__init__.py +++ b/bigquery/google/cloud/bigquery/dbapi/__init__.py @@ -55,8 +55,8 @@ apilevel = '2.0' -# Threads may share the module, but not connections. -threadsafety = 1 +# Threads may share the module and connections, but not cursors. +threadsafety = 2 paramstyle = 'pyformat'
{"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/dbapi/__init__.py b/bigquery/google/cloud/bigquery/dbapi/__init__.py\n--- a/bigquery/google/cloud/bigquery/dbapi/__init__.py\n+++ b/bigquery/google/cloud/bigquery/dbapi/__init__.py\n@@ -55,8 +55,8 @@\n \n apilevel = '2.0'\n \n-# Threads may share the module, but not connections.\n-threadsafety = 1\n+# Threads may share the module and connections, but not cursors.\n+threadsafety = 2\n \n paramstyle = 'pyformat'\n", "issue": "Increment threadsafety on BigQuery DB-API interface when httplib2 is removed\nFrom https://github.com/GoogleCloudPlatform/google-cloud-python/pull/2921/files/5b29a9e42a40f7c74ced0b7a7e9db0add52a20a1#r123596828\r\n\r\nBlocked by https://github.com/GoogleCloudPlatform/google-cloud-python/issues/1998\n", "before_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google BigQuery implementation of the Database API Specification v2.0.\n\nThis module implements the `Python Database API Specification v2.0 (DB-API)`_\nfor Google BigQuery.\n\n.. _Python Database API Specification v2.0 (DB-API):\n https://www.python.org/dev/peps/pep-0249/\n\n.. warning::\n The ``dbapi`` module is **alpha**. The implementation is not complete. 
It\n might be changed in backward-incompatible ways and is not subject to any SLA\n or deprecation policy.\n\"\"\"\n\nfrom google.cloud.bigquery.dbapi.connection import connect\nfrom google.cloud.bigquery.dbapi.connection import Connection\nfrom google.cloud.bigquery.dbapi.cursor import Cursor\nfrom google.cloud.bigquery.dbapi.exceptions import Warning\nfrom google.cloud.bigquery.dbapi.exceptions import Error\nfrom google.cloud.bigquery.dbapi.exceptions import InterfaceError\nfrom google.cloud.bigquery.dbapi.exceptions import DatabaseError\nfrom google.cloud.bigquery.dbapi.exceptions import DataError\nfrom google.cloud.bigquery.dbapi.exceptions import OperationalError\nfrom google.cloud.bigquery.dbapi.exceptions import IntegrityError\nfrom google.cloud.bigquery.dbapi.exceptions import InternalError\nfrom google.cloud.bigquery.dbapi.exceptions import ProgrammingError\nfrom google.cloud.bigquery.dbapi.exceptions import NotSupportedError\nfrom google.cloud.bigquery.dbapi.types import Binary\nfrom google.cloud.bigquery.dbapi.types import Date\nfrom google.cloud.bigquery.dbapi.types import DateFromTicks\nfrom google.cloud.bigquery.dbapi.types import Time\nfrom google.cloud.bigquery.dbapi.types import TimeFromTicks\nfrom google.cloud.bigquery.dbapi.types import Timestamp\nfrom google.cloud.bigquery.dbapi.types import TimestampFromTicks\nfrom google.cloud.bigquery.dbapi.types import BINARY\nfrom google.cloud.bigquery.dbapi.types import DATETIME\nfrom google.cloud.bigquery.dbapi.types import NUMBER\nfrom google.cloud.bigquery.dbapi.types import ROWID\nfrom google.cloud.bigquery.dbapi.types import STRING\n\n\napilevel = '2.0'\n\n# Threads may share the module, but not connections.\nthreadsafety = 1\n\nparamstyle = 'pyformat'\n\n__all__ = [\n 'apilevel', 'threadsafety', 'paramstyle', 'connect', 'Connection',\n 'Cursor', 'Warning', 'Error', 'InterfaceError', 'DatabaseError',\n 'DataError', 'OperationalError', 'IntegrityError', 'InternalError',\n 'ProgrammingError', 'NotSupportedError', 'Binary', 'Date', 'DateFromTicks',\n 'Time', 'TimeFromTicks', 'Timestamp', 'TimestampFromTicks', 'BINARY',\n 'DATETIME', 'NUMBER', 'ROWID', 'STRING',\n]\n", "path": "bigquery/google/cloud/bigquery/dbapi/__init__.py"}], "after_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google BigQuery implementation of the Database API Specification v2.0.\n\nThis module implements the `Python Database API Specification v2.0 (DB-API)`_\nfor Google BigQuery.\n\n.. _Python Database API Specification v2.0 (DB-API):\n https://www.python.org/dev/peps/pep-0249/\n\n.. warning::\n The ``dbapi`` module is **alpha**. The implementation is not complete. 
It\n might be changed in backward-incompatible ways and is not subject to any SLA\n or deprecation policy.\n\"\"\"\n\nfrom google.cloud.bigquery.dbapi.connection import connect\nfrom google.cloud.bigquery.dbapi.connection import Connection\nfrom google.cloud.bigquery.dbapi.cursor import Cursor\nfrom google.cloud.bigquery.dbapi.exceptions import Warning\nfrom google.cloud.bigquery.dbapi.exceptions import Error\nfrom google.cloud.bigquery.dbapi.exceptions import InterfaceError\nfrom google.cloud.bigquery.dbapi.exceptions import DatabaseError\nfrom google.cloud.bigquery.dbapi.exceptions import DataError\nfrom google.cloud.bigquery.dbapi.exceptions import OperationalError\nfrom google.cloud.bigquery.dbapi.exceptions import IntegrityError\nfrom google.cloud.bigquery.dbapi.exceptions import InternalError\nfrom google.cloud.bigquery.dbapi.exceptions import ProgrammingError\nfrom google.cloud.bigquery.dbapi.exceptions import NotSupportedError\nfrom google.cloud.bigquery.dbapi.types import Binary\nfrom google.cloud.bigquery.dbapi.types import Date\nfrom google.cloud.bigquery.dbapi.types import DateFromTicks\nfrom google.cloud.bigquery.dbapi.types import Time\nfrom google.cloud.bigquery.dbapi.types import TimeFromTicks\nfrom google.cloud.bigquery.dbapi.types import Timestamp\nfrom google.cloud.bigquery.dbapi.types import TimestampFromTicks\nfrom google.cloud.bigquery.dbapi.types import BINARY\nfrom google.cloud.bigquery.dbapi.types import DATETIME\nfrom google.cloud.bigquery.dbapi.types import NUMBER\nfrom google.cloud.bigquery.dbapi.types import ROWID\nfrom google.cloud.bigquery.dbapi.types import STRING\n\n\napilevel = '2.0'\n\n# Threads may share the module and connections, but not cursors.\nthreadsafety = 2\n\nparamstyle = 'pyformat'\n\n__all__ = [\n 'apilevel', 'threadsafety', 'paramstyle', 'connect', 'Connection',\n 'Cursor', 'Warning', 'Error', 'InterfaceError', 'DatabaseError',\n 'DataError', 'OperationalError', 'IntegrityError', 'InternalError',\n 'ProgrammingError', 'NotSupportedError', 'Binary', 'Date', 'DateFromTicks',\n 'Time', 'TimeFromTicks', 'Timestamp', 'TimestampFromTicks', 'BINARY',\n 'DATETIME', 'NUMBER', 'ROWID', 'STRING',\n]\n", "path": "bigquery/google/cloud/bigquery/dbapi/__init__.py"}]}
1,213
130
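As a hedged sketch of what the bumped `threadsafety = 2` level means in PEP 249 terms — the module and a connection may be shared across threads, while each thread opens its own cursor — assuming default Google credentials are available for `connect()`:

```python
# Hypothetical sketch of DB-API threadsafety level 2: a shared Connection,
# per-thread Cursors. Assumes default credentials for dbapi.connect().
import threading

from google.cloud.bigquery import dbapi

conn = dbapi.connect()  # one Connection shared by all threads


def run_query(sql):
    cursor = conn.cursor()  # cursors are not shared between threads
    cursor.execute(sql)
    print(cursor.fetchall())
    cursor.close()


threads = [threading.Thread(target=run_query, args=(q,))
           for q in ("SELECT 1", "SELECT 2")]
for t in threads:
    t.start()
for t in threads:
    t.join()
```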
gh_patches_debug_25995
rasdani/github-patches
git_diff
pytorch__vision-2979
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOC] Documentation for sigmoid focal loss ## 📚 Documentation Sigmoid focal loss was added to torchvision, but I think docs are not generated for it It was initalized in `init` as well [here](https://github.com/pytorch/vision/blob/v0.8.0/torchvision/ops/__init__.py#L11) I think it's really minor fix, we might need `..autodoc::sigmoid_focal_loss` and it should be fine. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/ops/focal_loss.py` Content: ``` 1 import torch 2 import torch.nn.functional as F 3 4 5 def sigmoid_focal_loss( 6 inputs, 7 targets, 8 alpha: float = 0.25, 9 gamma: float = 2, 10 reduction: str = "none", 11 ): 12 """ 13 Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . 14 Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. 15 Args: 16 inputs: A float tensor of arbitrary shape. 17 The predictions for each example. 18 targets: A float tensor with the same shape as inputs. Stores the binary 19 classification label for each element in inputs 20 (0 for the negative class and 1 for the positive class). 21 alpha: (optional) Weighting factor in range (0,1) to balance 22 positive vs negative examples or -1 for ignore. Default = 0.25 23 gamma: Exponent of the modulating factor (1 - p_t) to 24 balance easy vs hard examples. 25 reduction: 'none' | 'mean' | 'sum' 26 'none': No reduction will be applied to the output. 27 'mean': The output will be averaged. 28 'sum': The output will be summed. 29 Returns: 30 Loss tensor with the reduction option applied. 31 """ 32 p = torch.sigmoid(inputs) 33 ce_loss = F.binary_cross_entropy_with_logits( 34 inputs, targets, reduction="none" 35 ) 36 p_t = p * targets + (1 - p) * (1 - targets) 37 loss = ce_loss * ((1 - p_t) ** gamma) 38 39 if alpha >= 0: 40 alpha_t = alpha * targets + (1 - alpha) * (1 - targets) 41 loss = alpha_t * loss 42 43 if reduction == "mean": 44 loss = loss.mean() 45 elif reduction == "sum": 46 loss = loss.sum() 47 48 return loss 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py --- a/torchvision/ops/focal_loss.py +++ b/torchvision/ops/focal_loss.py @@ -3,8 +3,8 @@ def sigmoid_focal_loss( - inputs, - targets, + inputs: torch.Tensor, + targets: torch.Tensor, alpha: float = 0.25, gamma: float = 2, reduction: str = "none", @@ -12,11 +12,12 @@ """ Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: + + Arguments: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs + classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples or -1 for ignore. Default = 0.25
{"golden_diff": "diff --git a/torchvision/ops/focal_loss.py b/torchvision/ops/focal_loss.py\n--- a/torchvision/ops/focal_loss.py\n+++ b/torchvision/ops/focal_loss.py\n@@ -3,8 +3,8 @@\n \n \n def sigmoid_focal_loss(\n- inputs,\n- targets,\n+ inputs: torch.Tensor,\n+ targets: torch.Tensor,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n@@ -12,11 +12,12 @@\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n- Args:\n+\n+ Arguments:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n- classification label for each element in inputs\n+ classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. Default = 0.25\n", "issue": "[DOC] Documentation for sigmoid focal loss\n## \ud83d\udcda Documentation\r\n\r\nSigmoid focal loss was added to torchvision, but I think docs are not generated for it\r\n\r\nIt was initalized in `init` as well [here](https://github.com/pytorch/vision/blob/v0.8.0/torchvision/ops/__init__.py#L11)\r\n\r\nI think it's really minor fix, we might need `..autodoc::sigmoid_focal_loss` and it should be fine.\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\n\n\ndef sigmoid_focal_loss(\n inputs,\n targets,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n):\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. 
Default = 0.25\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n p = torch.sigmoid(inputs)\n ce_loss = F.binary_cross_entropy_with_logits(\n inputs, targets, reduction=\"none\"\n )\n p_t = p * targets + (1 - p) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n\n return loss\n", "path": "torchvision/ops/focal_loss.py"}], "after_files": [{"content": "import torch\nimport torch.nn.functional as F\n\n\ndef sigmoid_focal_loss(\n inputs: torch.Tensor,\n targets: torch.Tensor,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n):\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n\n Arguments:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. Default = 0.25\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n p = torch.sigmoid(inputs)\n ce_loss = F.binary_cross_entropy_with_logits(\n inputs, targets, reduction=\"none\"\n )\n p_t = p * targets + (1 - p) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n\n return loss\n", "path": "torchvision/ops/focal_loss.py"}]}
880
301
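Since the record above is about documenting `sigmoid_focal_loss`, a short usage sketch of the function as exported from `torchvision.ops` (shapes and values are arbitrary examples):

```python
# Usage sketch for the sigmoid_focal_loss defined in the record above.
import torch
from torchvision.ops import sigmoid_focal_loss

inputs = torch.randn(8, 4)                      # raw logits, arbitrary shape
targets = torch.randint(0, 2, (8, 4)).float()   # binary labels, same shape

loss = sigmoid_focal_loss(inputs, targets, alpha=0.25, gamma=2, reduction="mean")
print(loss.item())
```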
gh_patches_debug_19341
rasdani/github-patches
git_diff
praw-dev__praw-1737
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Assertion error on 429 HTTP status code **Describe the bug** From time to time, I found my codes reported 429 errors. **To Reproduce** Steps to reproduce the behavior: `submission.comments.replace_more(limit=limit)` with a higher limit for the query, running multiple processes for the same key. **Expected behavior** Expect to handle this error as ratelimit error **Code/Logs** <!-- include your code, without the Reddit() initialization, so as to not leak private credentials. --> submission.comments.replace_more(limit=limit) File "/usr/local/lib/python3.7/site-packages/praw/models/comment_forest.py", line 178, in replace_more new_comments = item.comments(update=False) File "/usr/local/lib/python3.7/site-packages/praw/models/reddit/more.py", line 74, in comments self._comments = self._reddit.post(API_PATH["morechildren"], data=data) File "/usr/local/lib/python3.7/site-packages/praw/reddit.py", line 758, in post path=path, File "/usr/local/lib/python3.7/site-packages/praw/reddit.py", line 672, in _objectify_request path=path, File "/usr/local/lib/python3.7/site-packages/praw/reddit.py", line 855, in request json=json, File "/usr/local/lib/python3.7/site-packages/prawcore/sessions.py", line 331, in request url=url, File "/usr/local/lib/python3.7/site-packages/prawcore/sessions.py", line 265, in _request_with_retries ), f"Unexpected status code: {response.status_code}" AssertionError: Unexpected status code: 429 **System Info** root@1be190a235da:/aitrading# uname -a Linux 1be190a235da 4.19.0-14-cloud-amd64 #1 SMP Debian 4.19.171-2 (2021-01-30) x86_64 GNU/Linux root@1be190a235da:/aitrading# python --version Python 3.7.10 root@1be190a235da:/aitrading# pip show praw Name: praw Version: 7.2.0 Summary: PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that allows for simple access to reddit's API. Home-page: UNKNOWN Author: Bryce Boe Author-email: [email protected] License: Simplified BSD License Location: /usr/local/lib/python3.7/site-packages Requires: prawcore, update-checker, websocket-client Required-by: root@1be190a235da:/aitrading# - OS: - Python: python3.7 - PRAW Version: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `praw/models/comment_forest.py` Content: ``` 1 """Provide CommentForest for Submission comments.""" 2 from heapq import heappop, heappush 3 from typing import TYPE_CHECKING, List, Optional, Union 4 5 from ..exceptions import DuplicateReplaceException 6 from .reddit.more import MoreComments 7 8 if TYPE_CHECKING: # pragma: no cover 9 from ... import praw 10 11 12 class CommentForest: 13 """A forest of comments starts with multiple top-level comments. 14 15 Each of these comments can be a tree of replies. 16 17 """ 18 19 @staticmethod 20 def _gather_more_comments(tree, parent_tree=None): 21 """Return a list of MoreComments objects obtained from tree.""" 22 more_comments = [] 23 queue = [(None, x) for x in tree] 24 while queue: 25 parent, comment = queue.pop(0) 26 if isinstance(comment, MoreComments): 27 heappush(more_comments, comment) 28 if parent: 29 comment._remove_from = parent.replies._comments 30 else: 31 comment._remove_from = parent_tree or tree 32 else: 33 for item in comment.replies: 34 queue.append((comment, item)) 35 return more_comments 36 37 def __getitem__(self, index: int): 38 """Return the comment at position ``index`` in the list. 
39 40 This method is to be used like an array access, such as: 41 42 .. code-block:: python 43 44 first_comment = submission.comments[0] 45 46 Alternatively, the presence of this method enables one to iterate over all top 47 level comments, like so: 48 49 .. code-block:: python 50 51 for comment in submission.comments: 52 print(comment.body) 53 54 """ 55 return self._comments[index] 56 57 def __init__( 58 self, 59 submission: "praw.models.Submission", 60 comments: Optional[List["praw.models.Comment"]] = None, 61 ): 62 """Initialize a CommentForest instance. 63 64 :param submission: An instance of :class:`~.Subreddit` that is the parent of the 65 comments. 66 :param comments: Initialize the Forest with a list of comments (default: None). 67 68 """ 69 self._comments = comments 70 self._submission = submission 71 72 def __len__(self) -> int: 73 """Return the number of top-level comments in the forest.""" 74 return len(self._comments) 75 76 def _insert_comment(self, comment): 77 if comment.name in self._submission._comments_by_id: 78 raise DuplicateReplaceException 79 comment.submission = self._submission 80 if isinstance(comment, MoreComments) or comment.is_root: 81 self._comments.append(comment) 82 else: 83 assert comment.parent_id in self._submission._comments_by_id, ( 84 "PRAW Error occurred. Please file a bug report and include the code" 85 " that caused the error." 86 ) 87 parent = self._submission._comments_by_id[comment.parent_id] 88 parent.replies._comments.append(comment) 89 90 def _update(self, comments): 91 self._comments = comments 92 for comment in comments: 93 comment.submission = self._submission 94 95 def list(self) -> List[Union["praw.models.Comment", "praw.models.MoreComments"]]: 96 """Return a flattened list of all Comments. 97 98 This list may contain :class:`.MoreComments` instances if :meth:`.replace_more` 99 was not called first. 100 101 """ 102 comments = [] 103 queue = list(self) 104 while queue: 105 comment = queue.pop(0) 106 comments.append(comment) 107 if not isinstance(comment, MoreComments): 108 queue.extend(comment.replies) 109 return comments 110 111 def replace_more( 112 self, limit: int = 32, threshold: int = 0 113 ) -> List["praw.models.MoreComments"]: 114 """Update the comment forest by resolving instances of MoreComments. 115 116 :param limit: The maximum number of :class:`.MoreComments` instances to replace. 117 Each replacement requires 1 API request. Set to ``None`` to have no limit, 118 or to ``0`` to remove all :class:`.MoreComments` instances without 119 additional requests (default: 32). 120 :param threshold: The minimum number of children comments a 121 :class:`.MoreComments` instance must have in order to be replaced. 122 :class:`.MoreComments` instances that represent "continue this thread" links 123 unfortunately appear to have 0 children. (default: 0). 124 125 :returns: A list of :class:`.MoreComments` instances that were not replaced. 126 127 For example, to replace up to 32 :class:`.MoreComments` instances of a 128 submission try: 129 130 .. code-block:: python 131 132 submission = reddit.submission("3hahrw") 133 submission.comments.replace_more() 134 135 Alternatively, to replace :class:`.MoreComments` instances within the replies of 136 a single comment try: 137 138 .. code-block:: python 139 140 comment = reddit.comment("d8r4im1") 141 comment.refresh() 142 comment.replies.replace_more() 143 144 .. 
note:: 145 146 This method can take a long time as each replacement will discover at most 147 20 new :class:`.Comment` or :class:`.MoreComments` instances. As a result, 148 consider looping and handling exceptions until the method returns 149 successfully. For example: 150 151 .. code-block:: python 152 153 while True: 154 try: 155 submission.comments.replace_more() 156 break 157 except PossibleExceptions: 158 print("Handling replace_more exception") 159 sleep(1) 160 161 .. warning:: 162 163 If this method is called, and the comments are refreshed, calling this 164 method again will result in a :class:`.DuplicateReplaceException`. 165 166 """ 167 remaining = limit 168 more_comments = self._gather_more_comments(self._comments) 169 skipped = [] 170 171 # Fetch largest more_comments until reaching the limit or the threshold 172 while more_comments: 173 item = heappop(more_comments) 174 if remaining is not None and remaining <= 0 or item.count < threshold: 175 skipped.append(item) 176 item._remove_from.remove(item) 177 continue 178 179 new_comments = item.comments(update=False) 180 if remaining is not None: 181 remaining -= 1 182 183 # Add new MoreComment objects to the heap of more_comments 184 for more in self._gather_more_comments(new_comments, self._comments): 185 more.submission = self._submission 186 heappush(more_comments, more) 187 # Insert all items into the tree 188 for comment in new_comments: 189 self._insert_comment(comment) 190 191 # Remove from forest 192 item._remove_from.remove(item) 193 194 return more_comments + skipped 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/praw/models/comment_forest.py b/praw/models/comment_forest.py --- a/praw/models/comment_forest.py +++ b/praw/models/comment_forest.py @@ -124,6 +124,8 @@ :returns: A list of :class:`.MoreComments` instances that were not replaced. + :raises: ``prawcore.TooManyRequests`` when used concurrently. + For example, to replace up to 32 :class:`.MoreComments` instances of a submission try: @@ -144,9 +146,8 @@ .. note:: This method can take a long time as each replacement will discover at most - 20 new :class:`.Comment` or :class:`.MoreComments` instances. As a result, - consider looping and handling exceptions until the method returns - successfully. For example: + 100 new :class:`.Comment` instances. As a result, consider looping and + handling exceptions until the method returns successfully. For example: .. code-block:: python
{"golden_diff": "diff --git a/praw/models/comment_forest.py b/praw/models/comment_forest.py\n--- a/praw/models/comment_forest.py\n+++ b/praw/models/comment_forest.py\n@@ -124,6 +124,8 @@\n \n :returns: A list of :class:`.MoreComments` instances that were not replaced.\n \n+ :raises: ``prawcore.TooManyRequests`` when used concurrently.\n+\n For example, to replace up to 32 :class:`.MoreComments` instances of a\n submission try:\n \n@@ -144,9 +146,8 @@\n .. note::\n \n This method can take a long time as each replacement will discover at most\n- 20 new :class:`.Comment` or :class:`.MoreComments` instances. As a result,\n- consider looping and handling exceptions until the method returns\n- successfully. For example:\n+ 100 new :class:`.Comment` instances. As a result, consider looping and\n+ handling exceptions until the method returns successfully. For example:\n \n .. code-block:: python\n", "issue": "Assertion error on 429 HTTP status code\n**Describe the bug**\r\nFrom time to time, I found my codes reported 429 errors.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n`submission.comments.replace_more(limit=limit)` with a higher limit for the query, running multiple processes for the same key.\r\n\r\n**Expected behavior**\r\nExpect to handle this error as ratelimit error\r\n\r\n**Code/Logs**\r\n<!-- include your code, without the Reddit() initialization, so as to not leak private credentials. -->\r\n submission.comments.replace_more(limit=limit)\r\n File \"/usr/local/lib/python3.7/site-packages/praw/models/comment_forest.py\", line 178, in replace_more\r\n new_comments = item.comments(update=False)\r\n File \"/usr/local/lib/python3.7/site-packages/praw/models/reddit/more.py\", line 74, in comments\r\n self._comments = self._reddit.post(API_PATH[\"morechildren\"], data=data)\r\n File \"/usr/local/lib/python3.7/site-packages/praw/reddit.py\", line 758, in post\r\n path=path,\r\n File \"/usr/local/lib/python3.7/site-packages/praw/reddit.py\", line 672, in _objectify_request\r\n path=path,\r\n File \"/usr/local/lib/python3.7/site-packages/praw/reddit.py\", line 855, in request\r\n json=json,\r\n File \"/usr/local/lib/python3.7/site-packages/prawcore/sessions.py\", line 331, in request\r\n url=url,\r\n File \"/usr/local/lib/python3.7/site-packages/prawcore/sessions.py\", line 265, in _request_with_retries\r\n ), f\"Unexpected status code: {response.status_code}\"\r\nAssertionError: Unexpected status code: 429\r\n**System Info**\r\n\r\nroot@1be190a235da:/aitrading# uname -a\r\nLinux 1be190a235da 4.19.0-14-cloud-amd64 #1 SMP Debian 4.19.171-2 (2021-01-30) x86_64 GNU/Linux\r\nroot@1be190a235da:/aitrading# python --version\r\nPython 3.7.10\r\nroot@1be190a235da:/aitrading# pip show praw\r\nName: praw\r\nVersion: 7.2.0\r\nSummary: PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that allows for simple access to reddit's API.\r\nHome-page: UNKNOWN\r\nAuthor: Bryce Boe\r\nAuthor-email: [email protected]\r\nLicense: Simplified BSD License\r\nLocation: /usr/local/lib/python3.7/site-packages\r\nRequires: prawcore, update-checker, websocket-client\r\nRequired-by:\r\nroot@1be190a235da:/aitrading#\r\n\r\n - OS: \r\n - Python: python3.7\r\n - PRAW Version:\r\n\n", "before_files": [{"content": "\"\"\"Provide CommentForest for Submission comments.\"\"\"\nfrom heapq import heappop, heappush\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom ..exceptions import DuplicateReplaceException\nfrom .reddit.more import MoreComments\n\nif TYPE_CHECKING: # pragma: no 
cover\n from ... import praw\n\n\nclass CommentForest:\n \"\"\"A forest of comments starts with multiple top-level comments.\n\n Each of these comments can be a tree of replies.\n\n \"\"\"\n\n @staticmethod\n def _gather_more_comments(tree, parent_tree=None):\n \"\"\"Return a list of MoreComments objects obtained from tree.\"\"\"\n more_comments = []\n queue = [(None, x) for x in tree]\n while queue:\n parent, comment = queue.pop(0)\n if isinstance(comment, MoreComments):\n heappush(more_comments, comment)\n if parent:\n comment._remove_from = parent.replies._comments\n else:\n comment._remove_from = parent_tree or tree\n else:\n for item in comment.replies:\n queue.append((comment, item))\n return more_comments\n\n def __getitem__(self, index: int):\n \"\"\"Return the comment at position ``index`` in the list.\n\n This method is to be used like an array access, such as:\n\n .. code-block:: python\n\n first_comment = submission.comments[0]\n\n Alternatively, the presence of this method enables one to iterate over all top\n level comments, like so:\n\n .. code-block:: python\n\n for comment in submission.comments:\n print(comment.body)\n\n \"\"\"\n return self._comments[index]\n\n def __init__(\n self,\n submission: \"praw.models.Submission\",\n comments: Optional[List[\"praw.models.Comment\"]] = None,\n ):\n \"\"\"Initialize a CommentForest instance.\n\n :param submission: An instance of :class:`~.Subreddit` that is the parent of the\n comments.\n :param comments: Initialize the Forest with a list of comments (default: None).\n\n \"\"\"\n self._comments = comments\n self._submission = submission\n\n def __len__(self) -> int:\n \"\"\"Return the number of top-level comments in the forest.\"\"\"\n return len(self._comments)\n\n def _insert_comment(self, comment):\n if comment.name in self._submission._comments_by_id:\n raise DuplicateReplaceException\n comment.submission = self._submission\n if isinstance(comment, MoreComments) or comment.is_root:\n self._comments.append(comment)\n else:\n assert comment.parent_id in self._submission._comments_by_id, (\n \"PRAW Error occurred. Please file a bug report and include the code\"\n \" that caused the error.\"\n )\n parent = self._submission._comments_by_id[comment.parent_id]\n parent.replies._comments.append(comment)\n\n def _update(self, comments):\n self._comments = comments\n for comment in comments:\n comment.submission = self._submission\n\n def list(self) -> List[Union[\"praw.models.Comment\", \"praw.models.MoreComments\"]]:\n \"\"\"Return a flattened list of all Comments.\n\n This list may contain :class:`.MoreComments` instances if :meth:`.replace_more`\n was not called first.\n\n \"\"\"\n comments = []\n queue = list(self)\n while queue:\n comment = queue.pop(0)\n comments.append(comment)\n if not isinstance(comment, MoreComments):\n queue.extend(comment.replies)\n return comments\n\n def replace_more(\n self, limit: int = 32, threshold: int = 0\n ) -> List[\"praw.models.MoreComments\"]:\n \"\"\"Update the comment forest by resolving instances of MoreComments.\n\n :param limit: The maximum number of :class:`.MoreComments` instances to replace.\n Each replacement requires 1 API request. 
Set to ``None`` to have no limit,\n or to ``0`` to remove all :class:`.MoreComments` instances without\n additional requests (default: 32).\n :param threshold: The minimum number of children comments a\n :class:`.MoreComments` instance must have in order to be replaced.\n :class:`.MoreComments` instances that represent \"continue this thread\" links\n unfortunately appear to have 0 children. (default: 0).\n\n :returns: A list of :class:`.MoreComments` instances that were not replaced.\n\n For example, to replace up to 32 :class:`.MoreComments` instances of a\n submission try:\n\n .. code-block:: python\n\n submission = reddit.submission(\"3hahrw\")\n submission.comments.replace_more()\n\n Alternatively, to replace :class:`.MoreComments` instances within the replies of\n a single comment try:\n\n .. code-block:: python\n\n comment = reddit.comment(\"d8r4im1\")\n comment.refresh()\n comment.replies.replace_more()\n\n .. note::\n\n This method can take a long time as each replacement will discover at most\n 20 new :class:`.Comment` or :class:`.MoreComments` instances. As a result,\n consider looping and handling exceptions until the method returns\n successfully. For example:\n\n .. code-block:: python\n\n while True:\n try:\n submission.comments.replace_more()\n break\n except PossibleExceptions:\n print(\"Handling replace_more exception\")\n sleep(1)\n\n .. warning::\n\n If this method is called, and the comments are refreshed, calling this\n method again will result in a :class:`.DuplicateReplaceException`.\n\n \"\"\"\n remaining = limit\n more_comments = self._gather_more_comments(self._comments)\n skipped = []\n\n # Fetch largest more_comments until reaching the limit or the threshold\n while more_comments:\n item = heappop(more_comments)\n if remaining is not None and remaining <= 0 or item.count < threshold:\n skipped.append(item)\n item._remove_from.remove(item)\n continue\n\n new_comments = item.comments(update=False)\n if remaining is not None:\n remaining -= 1\n\n # Add new MoreComment objects to the heap of more_comments\n for more in self._gather_more_comments(new_comments, self._comments):\n more.submission = self._submission\n heappush(more_comments, more)\n # Insert all items into the tree\n for comment in new_comments:\n self._insert_comment(comment)\n\n # Remove from forest\n item._remove_from.remove(item)\n\n return more_comments + skipped\n", "path": "praw/models/comment_forest.py"}], "after_files": [{"content": "\"\"\"Provide CommentForest for Submission comments.\"\"\"\nfrom heapq import heappop, heappush\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom ..exceptions import DuplicateReplaceException\nfrom .reddit.more import MoreComments\n\nif TYPE_CHECKING: # pragma: no cover\n from ... 
import praw\n\n\nclass CommentForest:\n \"\"\"A forest of comments starts with multiple top-level comments.\n\n Each of these comments can be a tree of replies.\n\n \"\"\"\n\n @staticmethod\n def _gather_more_comments(tree, parent_tree=None):\n \"\"\"Return a list of MoreComments objects obtained from tree.\"\"\"\n more_comments = []\n queue = [(None, x) for x in tree]\n while queue:\n parent, comment = queue.pop(0)\n if isinstance(comment, MoreComments):\n heappush(more_comments, comment)\n if parent:\n comment._remove_from = parent.replies._comments\n else:\n comment._remove_from = parent_tree or tree\n else:\n for item in comment.replies:\n queue.append((comment, item))\n return more_comments\n\n def __getitem__(self, index: int):\n \"\"\"Return the comment at position ``index`` in the list.\n\n This method is to be used like an array access, such as:\n\n .. code-block:: python\n\n first_comment = submission.comments[0]\n\n Alternatively, the presence of this method enables one to iterate over all top\n level comments, like so:\n\n .. code-block:: python\n\n for comment in submission.comments:\n print(comment.body)\n\n \"\"\"\n return self._comments[index]\n\n def __init__(\n self,\n submission: \"praw.models.Submission\",\n comments: Optional[List[\"praw.models.Comment\"]] = None,\n ):\n \"\"\"Initialize a CommentForest instance.\n\n :param submission: An instance of :class:`~.Subreddit` that is the parent of the\n comments.\n :param comments: Initialize the Forest with a list of comments (default: None).\n\n \"\"\"\n self._comments = comments\n self._submission = submission\n\n def __len__(self) -> int:\n \"\"\"Return the number of top-level comments in the forest.\"\"\"\n return len(self._comments)\n\n def _insert_comment(self, comment):\n if comment.name in self._submission._comments_by_id:\n raise DuplicateReplaceException\n comment.submission = self._submission\n if isinstance(comment, MoreComments) or comment.is_root:\n self._comments.append(comment)\n else:\n assert comment.parent_id in self._submission._comments_by_id, (\n \"PRAW Error occurred. Please file a bug report and include the code\"\n \" that caused the error.\"\n )\n parent = self._submission._comments_by_id[comment.parent_id]\n parent.replies._comments.append(comment)\n\n def _update(self, comments):\n self._comments = comments\n for comment in comments:\n comment.submission = self._submission\n\n def list(self) -> List[Union[\"praw.models.Comment\", \"praw.models.MoreComments\"]]:\n \"\"\"Return a flattened list of all Comments.\n\n This list may contain :class:`.MoreComments` instances if :meth:`.replace_more`\n was not called first.\n\n \"\"\"\n comments = []\n queue = list(self)\n while queue:\n comment = queue.pop(0)\n comments.append(comment)\n if not isinstance(comment, MoreComments):\n queue.extend(comment.replies)\n return comments\n\n def replace_more(\n self, limit: int = 32, threshold: int = 0\n ) -> List[\"praw.models.MoreComments\"]:\n \"\"\"Update the comment forest by resolving instances of MoreComments.\n\n :param limit: The maximum number of :class:`.MoreComments` instances to replace.\n Each replacement requires 1 API request. 
Set to ``None`` to have no limit,\n or to ``0`` to remove all :class:`.MoreComments` instances without\n additional requests (default: 32).\n :param threshold: The minimum number of children comments a\n :class:`.MoreComments` instance must have in order to be replaced.\n :class:`.MoreComments` instances that represent \"continue this thread\" links\n unfortunately appear to have 0 children. (default: 0).\n\n :returns: A list of :class:`.MoreComments` instances that were not replaced.\n\n :raises: ``prawcore.TooManyRequests`` when used concurrently.\n\n For example, to replace up to 32 :class:`.MoreComments` instances of a\n submission try:\n\n .. code-block:: python\n\n submission = reddit.submission(\"3hahrw\")\n submission.comments.replace_more()\n\n Alternatively, to replace :class:`.MoreComments` instances within the replies of\n a single comment try:\n\n .. code-block:: python\n\n comment = reddit.comment(\"d8r4im1\")\n comment.refresh()\n comment.replies.replace_more()\n\n .. note::\n\n This method can take a long time as each replacement will discover at most\n 100 new :class:`.Comment` instances. As a result, consider looping and\n handling exceptions until the method returns successfully. For example:\n\n .. code-block:: python\n\n while True:\n try:\n submission.comments.replace_more()\n break\n except PossibleExceptions:\n print(\"Handling replace_more exception\")\n sleep(1)\n\n .. warning::\n\n If this method is called, and the comments are refreshed, calling this\n method again will result in a :class:`.DuplicateReplaceException`.\n\n \"\"\"\n remaining = limit\n more_comments = self._gather_more_comments(self._comments)\n skipped = []\n\n # Fetch largest more_comments until reaching the limit or the threshold\n while more_comments:\n item = heappop(more_comments)\n if remaining is not None and remaining <= 0 or item.count < threshold:\n skipped.append(item)\n item._remove_from.remove(item)\n continue\n\n new_comments = item.comments(update=False)\n if remaining is not None:\n remaining -= 1\n\n # Add new MoreComment objects to the heap of more_comments\n for more in self._gather_more_comments(new_comments, self._comments):\n more.submission = self._submission\n heappush(more_comments, more)\n # Insert all items into the tree\n for comment in new_comments:\n self._insert_comment(comment)\n\n # Remove from forest\n item._remove_from.remove(item)\n\n return more_comments + skipped\n", "path": "praw/models/comment_forest.py"}]}
num_tokens: 2,821
num_tokens_diff: 246

problem_id: gh_patches_debug_41570
source: rasdani/github-patches
task_type: git_diff
in_source_id: genialis__resolwe-328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 500 error returned when permission does not exist We should return 400 Bad request response. Failing unit test: https://github.com/genialis/resolwe/pull/309 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `resolwe/permissions/utils.py` Content: ``` 1 """.. Ignore pydocstyle D400. 2 3 ================= 4 Permissions utils 5 ================= 6 7 .. autofunction:: copy_permissions 8 9 """ 10 from __future__ import absolute_import, division, print_function, unicode_literals 11 12 import copy 13 14 from django.contrib.auth import get_user_model 15 from django.contrib.auth.models import AnonymousUser, Group 16 from django.contrib.contenttypes.models import ContentType 17 18 from guardian.models import GroupObjectPermission, UserObjectPermission 19 from guardian.shortcuts import assign_perm, remove_perm 20 from rest_framework import exceptions 21 22 23 def copy_permissions(src_obj, dest_obj): 24 """Copy permissions form ``src_obj`` to ``dest_obj``.""" 25 src_obj_ctype = ContentType.objects.get_for_model(src_obj) 26 dest_obj_ctype = ContentType.objects.get_for_model(dest_obj) 27 28 if src_obj_ctype != dest_obj_ctype: 29 raise AssertionError('Content types of source and destination objects are not equal.') 30 31 for perm in UserObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype): 32 assign_perm(perm.permission.codename, perm.user, dest_obj) 33 for perm in GroupObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype): 34 assign_perm(perm.permission.codename, perm.group, dest_obj) 35 36 37 def fetch_user(query): 38 """Get user by ``pk`` or ``username``. Return ``None`` if doesn't exist.""" 39 user_filter = {'pk': query} if query.isdigit() else {'username': query} 40 return get_user_model().objects.get(**user_filter) 41 42 43 def fetch_group(query): 44 """Get group by ``pk`` or ``name``. 
Return ``None`` if doesn't exist.""" 45 group_filter = {'pk': query} if query.isdigit() else {'name': query} 46 return Group.objects.get(**group_filter) 47 48 49 def check_owner_permission(payload, allow_user_owner): 50 """Raise ``PermissionDenied``if ``owner`` found in ``data``.""" 51 for entity_type in ['users', 'groups']: 52 for perm_type in ['add', 'remove']: 53 for perms in payload.get(entity_type, {}).get(perm_type, {}).values(): 54 if 'owner' in perms: 55 if entity_type == 'users' and allow_user_owner: 56 continue 57 58 if entity_type == 'groups': 59 raise exceptions.ParseError("Owner permission cannot be assigned to a group") 60 61 raise exceptions.PermissionDenied("Only owners can grant/revoke owner permission") 62 63 64 def check_public_permissions(payload): 65 """Raise ``PermissionDenied`` if public permissions are too open.""" 66 allowed_public_permissions = ['view', 'add', 'download'] 67 for perm_type in ['add', 'remove']: 68 for perm in payload.get('public', {}).get(perm_type, []): 69 if perm not in allowed_public_permissions: 70 raise exceptions.PermissionDenied("Permissions for public users are too open") 71 72 73 def check_user_permissions(payload, user_pk): 74 """Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``.""" 75 for perm_type in ['add', 'remove']: 76 user_pks = payload.get('users', {}).get(perm_type, {}).keys() 77 if user_pk in user_pks: 78 raise exceptions.PermissionDenied("You cannot change your own permissions") 79 80 81 def remove_permission(payload, permission): 82 """Remove all occurrences of ``permission`` from ``payload``.""" 83 payload = copy.deepcopy(payload) 84 85 for entity_type in ['users', 'groups']: 86 for perm_type in ['add', 'remove']: 87 for perms in payload.get(entity_type, {}).get(perm_type, {}).values(): 88 if permission in perms: 89 perms.remove(permission) 90 91 for perm_type in ['add', 'remove']: 92 perms = payload.get('public', {}).get(perm_type, []) 93 if permission in perms: 94 perms.remove(permission) 95 96 return payload 97 98 99 def update_permission(obj, data): 100 """Update object permissions.""" 101 content_type = ContentType.objects.get_for_model(obj) 102 full_permissions = list(zip(*obj._meta.permissions))[0] # pylint: disable=protected-access 103 104 def set_permissions(entity_type, perm_type): 105 """Set object permissions.""" 106 perm_func = assign_perm if perm_type == 'add' else remove_perm 107 fetch_fn = fetch_user if entity_type == 'users' else fetch_group 108 109 for entity_id in data.get(entity_type, {}).get(perm_type, []): 110 entity = fetch_fn(entity_id) 111 if entity: 112 perms = data[entity_type][perm_type][entity_id] 113 if perms == u'ALL': 114 perms = full_permissions 115 for perm in perms: 116 perm_func('{}_{}'.format(perm.lower(), content_type), entity, obj) 117 118 set_permissions('users', 'add') 119 set_permissions('users', 'remove') 120 set_permissions('groups', 'add') 121 set_permissions('groups', 'remove') 122 123 def set_public_permissions(perm_type): 124 """Set public permissions.""" 125 perm_func = assign_perm if perm_type == 'add' else remove_perm 126 user = AnonymousUser() 127 perms = data.get('public', {}).get(perm_type, []) 128 if perms == u'ALL': 129 perms = full_permissions 130 for perm in perms: 131 perm_func('{}_{}'.format(perm.lower(), content_type), user, obj) 132 133 set_public_permissions('add') 134 set_public_permissions('remove') 135 136 137 def assign_contributor_permissions(obj): 138 """Assign all permissions to object's contributor.""" 139 for permission in 
list(zip(*obj._meta.permissions))[0]: # pylint: disable=protected-access 140 assign_perm(permission, obj.contributor, obj) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
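Localizing the bug mostly comes down to how Django REST framework turns exceptions into HTTP responses: subclasses of `APIException` carry an explicit status code, while any other exception escaping the view is reported as a 500. A small check of the relevant constants (standard DRF behaviour, not code from this repository):

```python
from rest_framework import exceptions, status

# APIException subclasses become responses with their declared status code;
# exceptions that are not APIException subclasses surface as HTTP 500.
assert exceptions.ParseError.status_code == status.HTTP_400_BAD_REQUEST
assert exceptions.PermissionDenied.status_code == status.HTTP_403_FORBIDDEN
```

So the unknown-permission case has to be detected inside `update_permission` and reported through `ParseError` (or a similar `APIException`) to produce the desired 400 response.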
diff --git a/resolwe/permissions/utils.py b/resolwe/permissions/utils.py --- a/resolwe/permissions/utils.py +++ b/resolwe/permissions/utils.py @@ -14,6 +14,7 @@ from django.contrib.auth import get_user_model from django.contrib.auth.models import AnonymousUser, Group from django.contrib.contenttypes.models import ContentType +from django.db import transaction from guardian.models import GroupObjectPermission, UserObjectPermission from guardian.shortcuts import assign_perm, remove_perm @@ -101,6 +102,35 @@ content_type = ContentType.objects.get_for_model(obj) full_permissions = list(zip(*obj._meta.permissions))[0] # pylint: disable=protected-access + def apply_perm(perm_func, perms, entity): + """Apply permissions using given ``perm_func``. + + ``perm_func`` is intended to be ``assign_perms`` or + ``remove_perms`` shortcut function from ``django-guardian``, but + can be any function that accepts permission codename, + user/group and object parameters (in this order). + + If given permission does not exist, ``exceptions.ParseError`` is + raised. + + "ALL" passed as ``perms`` parameter, will call ``perm_function`` + with ``full_permissions`` list. + + :param func perm_func: Permissions function to be applied + :param list params: list of params to be allpied + :param entity: user or group to be passed to ``perm_func`` + :type entity: `~django.contrib.auth.models.User` or + `~django.contrib.auth.models.Group` + + """ + if perms == u'ALL': + perms = full_permissions + for perm in perms: + perm_codename = '{}_{}'.format(perm.lower(), content_type) + if perm_codename not in full_permissions: + raise exceptions.ParseError("Unknown permission: {}".format(perm)) + perm_func(perm_codename, entity, obj) + def set_permissions(entity_type, perm_type): """Set object permissions.""" perm_func = assign_perm if perm_type == 'add' else remove_perm @@ -110,28 +140,22 @@ entity = fetch_fn(entity_id) if entity: perms = data[entity_type][perm_type][entity_id] - if perms == u'ALL': - perms = full_permissions - for perm in perms: - perm_func('{}_{}'.format(perm.lower(), content_type), entity, obj) - - set_permissions('users', 'add') - set_permissions('users', 'remove') - set_permissions('groups', 'add') - set_permissions('groups', 'remove') + apply_perm(perm_func, perms, entity) def set_public_permissions(perm_type): """Set public permissions.""" perm_func = assign_perm if perm_type == 'add' else remove_perm user = AnonymousUser() perms = data.get('public', {}).get(perm_type, []) - if perms == u'ALL': - perms = full_permissions - for perm in perms: - perm_func('{}_{}'.format(perm.lower(), content_type), user, obj) - - set_public_permissions('add') - set_public_permissions('remove') + apply_perm(perm_func, perms, user) + + with transaction.atomic(): + set_permissions('users', 'add') + set_permissions('users', 'remove') + set_permissions('groups', 'add') + set_permissions('groups', 'remove') + set_public_permissions('add') + set_public_permissions('remove') def assign_contributor_permissions(obj):
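To make the intent of the patch explicit, the validation it introduces can be sketched in isolation. The helper name and arguments below are illustrative and do not exist in the codebase:

```python
from rest_framework import exceptions


def to_codenames(perms, full_permissions, content_type):
    """Map requested permission names to codenames, rejecting unknown ones.

    Raising ParseError here yields an HTTP 400 response instead of letting a
    failed permission lookup escape the view as a 500.
    """
    if perms == "ALL":
        return list(full_permissions)

    codenames = []
    for perm in perms:
        codename = "{}_{}".format(perm.lower(), content_type)
        if codename not in full_permissions:
            raise exceptions.ParseError("Unknown permission: {}".format(perm))
        codenames.append(codename)
    return codenames
```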
{"golden_diff": "diff --git a/resolwe/permissions/utils.py b/resolwe/permissions/utils.py\n--- a/resolwe/permissions/utils.py\n+++ b/resolwe/permissions/utils.py\n@@ -14,6 +14,7 @@\n from django.contrib.auth import get_user_model\n from django.contrib.auth.models import AnonymousUser, Group\n from django.contrib.contenttypes.models import ContentType\n+from django.db import transaction\n \n from guardian.models import GroupObjectPermission, UserObjectPermission\n from guardian.shortcuts import assign_perm, remove_perm\n@@ -101,6 +102,35 @@\n content_type = ContentType.objects.get_for_model(obj)\n full_permissions = list(zip(*obj._meta.permissions))[0] # pylint: disable=protected-access\n \n+ def apply_perm(perm_func, perms, entity):\n+ \"\"\"Apply permissions using given ``perm_func``.\n+\n+ ``perm_func`` is intended to be ``assign_perms`` or\n+ ``remove_perms`` shortcut function from ``django-guardian``, but\n+ can be any function that accepts permission codename,\n+ user/group and object parameters (in this order).\n+\n+ If given permission does not exist, ``exceptions.ParseError`` is\n+ raised.\n+\n+ \"ALL\" passed as ``perms`` parameter, will call ``perm_function``\n+ with ``full_permissions`` list.\n+\n+ :param func perm_func: Permissions function to be applied\n+ :param list params: list of params to be allpied\n+ :param entity: user or group to be passed to ``perm_func``\n+ :type entity: `~django.contrib.auth.models.User` or\n+ `~django.contrib.auth.models.Group`\n+\n+ \"\"\"\n+ if perms == u'ALL':\n+ perms = full_permissions\n+ for perm in perms:\n+ perm_codename = '{}_{}'.format(perm.lower(), content_type)\n+ if perm_codename not in full_permissions:\n+ raise exceptions.ParseError(\"Unknown permission: {}\".format(perm))\n+ perm_func(perm_codename, entity, obj)\n+\n def set_permissions(entity_type, perm_type):\n \"\"\"Set object permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n@@ -110,28 +140,22 @@\n entity = fetch_fn(entity_id)\n if entity:\n perms = data[entity_type][perm_type][entity_id]\n- if perms == u'ALL':\n- perms = full_permissions\n- for perm in perms:\n- perm_func('{}_{}'.format(perm.lower(), content_type), entity, obj)\n-\n- set_permissions('users', 'add')\n- set_permissions('users', 'remove')\n- set_permissions('groups', 'add')\n- set_permissions('groups', 'remove')\n+ apply_perm(perm_func, perms, entity)\n \n def set_public_permissions(perm_type):\n \"\"\"Set public permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n user = AnonymousUser()\n perms = data.get('public', {}).get(perm_type, [])\n- if perms == u'ALL':\n- perms = full_permissions\n- for perm in perms:\n- perm_func('{}_{}'.format(perm.lower(), content_type), user, obj)\n-\n- set_public_permissions('add')\n- set_public_permissions('remove')\n+ apply_perm(perm_func, perms, user)\n+\n+ with transaction.atomic():\n+ set_permissions('users', 'add')\n+ set_permissions('users', 'remove')\n+ set_permissions('groups', 'add')\n+ set_permissions('groups', 'remove')\n+ set_public_permissions('add')\n+ set_public_permissions('remove')\n \n \n def assign_contributor_permissions(obj):\n", "issue": "500 error returned when permission does not exist\nWe should return 400 Bad request response.\r\n\r\nFailing unit test: https://github.com/genialis/resolwe/pull/309\n", "before_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n=================\nPermissions utils\n=================\n\n.. 
autofunction:: copy_permissions\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AnonymousUser, Group\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom guardian.models import GroupObjectPermission, UserObjectPermission\nfrom guardian.shortcuts import assign_perm, remove_perm\nfrom rest_framework import exceptions\n\n\ndef copy_permissions(src_obj, dest_obj):\n \"\"\"Copy permissions form ``src_obj`` to ``dest_obj``.\"\"\"\n src_obj_ctype = ContentType.objects.get_for_model(src_obj)\n dest_obj_ctype = ContentType.objects.get_for_model(dest_obj)\n\n if src_obj_ctype != dest_obj_ctype:\n raise AssertionError('Content types of source and destination objects are not equal.')\n\n for perm in UserObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype):\n assign_perm(perm.permission.codename, perm.user, dest_obj)\n for perm in GroupObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype):\n assign_perm(perm.permission.codename, perm.group, dest_obj)\n\n\ndef fetch_user(query):\n \"\"\"Get user by ``pk`` or ``username``. Return ``None`` if doesn't exist.\"\"\"\n user_filter = {'pk': query} if query.isdigit() else {'username': query}\n return get_user_model().objects.get(**user_filter)\n\n\ndef fetch_group(query):\n \"\"\"Get group by ``pk`` or ``name``. Return ``None`` if doesn't exist.\"\"\"\n group_filter = {'pk': query} if query.isdigit() else {'name': query}\n return Group.objects.get(**group_filter)\n\n\ndef check_owner_permission(payload, allow_user_owner):\n \"\"\"Raise ``PermissionDenied``if ``owner`` found in ``data``.\"\"\"\n for entity_type in ['users', 'groups']:\n for perm_type in ['add', 'remove']:\n for perms in payload.get(entity_type, {}).get(perm_type, {}).values():\n if 'owner' in perms:\n if entity_type == 'users' and allow_user_owner:\n continue\n\n if entity_type == 'groups':\n raise exceptions.ParseError(\"Owner permission cannot be assigned to a group\")\n\n raise exceptions.PermissionDenied(\"Only owners can grant/revoke owner permission\")\n\n\ndef check_public_permissions(payload):\n \"\"\"Raise ``PermissionDenied`` if public permissions are too open.\"\"\"\n allowed_public_permissions = ['view', 'add', 'download']\n for perm_type in ['add', 'remove']:\n for perm in payload.get('public', {}).get(perm_type, []):\n if perm not in allowed_public_permissions:\n raise exceptions.PermissionDenied(\"Permissions for public users are too open\")\n\n\ndef check_user_permissions(payload, user_pk):\n \"\"\"Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``.\"\"\"\n for perm_type in ['add', 'remove']:\n user_pks = payload.get('users', {}).get(perm_type, {}).keys()\n if user_pk in user_pks:\n raise exceptions.PermissionDenied(\"You cannot change your own permissions\")\n\n\ndef remove_permission(payload, permission):\n \"\"\"Remove all occurrences of ``permission`` from ``payload``.\"\"\"\n payload = copy.deepcopy(payload)\n\n for entity_type in ['users', 'groups']:\n for perm_type in ['add', 'remove']:\n for perms in payload.get(entity_type, {}).get(perm_type, {}).values():\n if permission in perms:\n perms.remove(permission)\n\n for perm_type in ['add', 'remove']:\n perms = payload.get('public', {}).get(perm_type, [])\n if permission in perms:\n perms.remove(permission)\n\n return payload\n\n\ndef update_permission(obj, data):\n \"\"\"Update object 
permissions.\"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n full_permissions = list(zip(*obj._meta.permissions))[0] # pylint: disable=protected-access\n\n def set_permissions(entity_type, perm_type):\n \"\"\"Set object permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n fetch_fn = fetch_user if entity_type == 'users' else fetch_group\n\n for entity_id in data.get(entity_type, {}).get(perm_type, []):\n entity = fetch_fn(entity_id)\n if entity:\n perms = data[entity_type][perm_type][entity_id]\n if perms == u'ALL':\n perms = full_permissions\n for perm in perms:\n perm_func('{}_{}'.format(perm.lower(), content_type), entity, obj)\n\n set_permissions('users', 'add')\n set_permissions('users', 'remove')\n set_permissions('groups', 'add')\n set_permissions('groups', 'remove')\n\n def set_public_permissions(perm_type):\n \"\"\"Set public permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n user = AnonymousUser()\n perms = data.get('public', {}).get(perm_type, [])\n if perms == u'ALL':\n perms = full_permissions\n for perm in perms:\n perm_func('{}_{}'.format(perm.lower(), content_type), user, obj)\n\n set_public_permissions('add')\n set_public_permissions('remove')\n\n\ndef assign_contributor_permissions(obj):\n \"\"\"Assign all permissions to object's contributor.\"\"\"\n for permission in list(zip(*obj._meta.permissions))[0]: # pylint: disable=protected-access\n assign_perm(permission, obj.contributor, obj)\n", "path": "resolwe/permissions/utils.py"}], "after_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n=================\nPermissions utils\n=================\n\n.. autofunction:: copy_permissions\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AnonymousUser, Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import transaction\n\nfrom guardian.models import GroupObjectPermission, UserObjectPermission\nfrom guardian.shortcuts import assign_perm, remove_perm\nfrom rest_framework import exceptions\n\n\ndef copy_permissions(src_obj, dest_obj):\n \"\"\"Copy permissions form ``src_obj`` to ``dest_obj``.\"\"\"\n src_obj_ctype = ContentType.objects.get_for_model(src_obj)\n dest_obj_ctype = ContentType.objects.get_for_model(dest_obj)\n\n if src_obj_ctype != dest_obj_ctype:\n raise AssertionError('Content types of source and destination objects are not equal.')\n\n for perm in UserObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype):\n assign_perm(perm.permission.codename, perm.user, dest_obj)\n for perm in GroupObjectPermission.objects.filter(object_pk=src_obj.pk, content_type=src_obj_ctype):\n assign_perm(perm.permission.codename, perm.group, dest_obj)\n\n\ndef fetch_user(query):\n \"\"\"Get user by ``pk`` or ``username``. Return ``None`` if doesn't exist.\"\"\"\n user_filter = {'pk': query} if query.isdigit() else {'username': query}\n return get_user_model().objects.get(**user_filter)\n\n\ndef fetch_group(query):\n \"\"\"Get group by ``pk`` or ``name``. 
Return ``None`` if doesn't exist.\"\"\"\n group_filter = {'pk': query} if query.isdigit() else {'name': query}\n return Group.objects.get(**group_filter)\n\n\ndef check_owner_permission(payload, allow_user_owner):\n \"\"\"Raise ``PermissionDenied``if ``owner`` found in ``data``.\"\"\"\n for entity_type in ['users', 'groups']:\n for perm_type in ['add', 'remove']:\n for perms in payload.get(entity_type, {}).get(perm_type, {}).values():\n if 'owner' in perms:\n if entity_type == 'users' and allow_user_owner:\n continue\n\n if entity_type == 'groups':\n raise exceptions.ParseError(\"Owner permission cannot be assigned to a group\")\n\n raise exceptions.PermissionDenied(\"Only owners can grant/revoke owner permission\")\n\n\ndef check_public_permissions(payload):\n \"\"\"Raise ``PermissionDenied`` if public permissions are too open.\"\"\"\n allowed_public_permissions = ['view', 'add', 'download']\n for perm_type in ['add', 'remove']:\n for perm in payload.get('public', {}).get(perm_type, []):\n if perm not in allowed_public_permissions:\n raise exceptions.PermissionDenied(\"Permissions for public users are too open\")\n\n\ndef check_user_permissions(payload, user_pk):\n \"\"\"Raise ``PermissionDenied`` if ``payload`` includes ``user_pk``.\"\"\"\n for perm_type in ['add', 'remove']:\n user_pks = payload.get('users', {}).get(perm_type, {}).keys()\n if user_pk in user_pks:\n raise exceptions.PermissionDenied(\"You cannot change your own permissions\")\n\n\ndef remove_permission(payload, permission):\n \"\"\"Remove all occurrences of ``permission`` from ``payload``.\"\"\"\n payload = copy.deepcopy(payload)\n\n for entity_type in ['users', 'groups']:\n for perm_type in ['add', 'remove']:\n for perms in payload.get(entity_type, {}).get(perm_type, {}).values():\n if permission in perms:\n perms.remove(permission)\n\n for perm_type in ['add', 'remove']:\n perms = payload.get('public', {}).get(perm_type, [])\n if permission in perms:\n perms.remove(permission)\n\n return payload\n\n\ndef update_permission(obj, data):\n \"\"\"Update object permissions.\"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n full_permissions = list(zip(*obj._meta.permissions))[0] # pylint: disable=protected-access\n\n def apply_perm(perm_func, perms, entity):\n \"\"\"Apply permissions using given ``perm_func``.\n\n ``perm_func`` is intended to be ``assign_perms`` or\n ``remove_perms`` shortcut function from ``django-guardian``, but\n can be any function that accepts permission codename,\n user/group and object parameters (in this order).\n\n If given permission does not exist, ``exceptions.ParseError`` is\n raised.\n\n \"ALL\" passed as ``perms`` parameter, will call ``perm_function``\n with ``full_permissions`` list.\n\n :param func perm_func: Permissions function to be applied\n :param list params: list of params to be allpied\n :param entity: user or group to be passed to ``perm_func``\n :type entity: `~django.contrib.auth.models.User` or\n `~django.contrib.auth.models.Group`\n\n \"\"\"\n if perms == u'ALL':\n perms = full_permissions\n for perm in perms:\n perm_codename = '{}_{}'.format(perm.lower(), content_type)\n if perm_codename not in full_permissions:\n raise exceptions.ParseError(\"Unknown permission: {}\".format(perm))\n perm_func(perm_codename, entity, obj)\n\n def set_permissions(entity_type, perm_type):\n \"\"\"Set object permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n fetch_fn = fetch_user if entity_type == 'users' else fetch_group\n\n for entity_id in 
data.get(entity_type, {}).get(perm_type, []):\n entity = fetch_fn(entity_id)\n if entity:\n perms = data[entity_type][perm_type][entity_id]\n apply_perm(perm_func, perms, entity)\n\n def set_public_permissions(perm_type):\n \"\"\"Set public permissions.\"\"\"\n perm_func = assign_perm if perm_type == 'add' else remove_perm\n user = AnonymousUser()\n perms = data.get('public', {}).get(perm_type, [])\n apply_perm(perm_func, perms, user)\n\n with transaction.atomic():\n set_permissions('users', 'add')\n set_permissions('users', 'remove')\n set_permissions('groups', 'add')\n set_permissions('groups', 'remove')\n set_public_permissions('add')\n set_public_permissions('remove')\n\n\ndef assign_contributor_permissions(obj):\n \"\"\"Assign all permissions to object's contributor.\"\"\"\n for permission in list(zip(*obj._meta.permissions))[0]: # pylint: disable=protected-access\n assign_perm(permission, obj.contributor, obj)\n", "path": "resolwe/permissions/utils.py"}]}
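The patched version above also wraps every permission mutation in `transaction.atomic()`. The design choice is that a rejected permission rolls back any grants or revocations already applied in the same request; a small sketch of that pattern, with illustrative function names that are not part of the codebase:

```python
from django.db import transaction


def apply_changes_atomically(changes, apply_one):
    """Apply a batch of permission changes all-or-nothing.

    If apply_one() raises (for example ParseError for an unknown permission),
    the exception leaves the atomic block and Django rolls back every change
    made earlier in the batch.
    """
    with transaction.atomic():
        for change in changes:
            apply_one(change)
```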
num_tokens: 1,810
num_tokens_diff: 814

problem_id: gh_patches_debug_3415
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyro-ppl__numpyro-1041
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- New versions of sphinx and jinja2 break docs linting This is observed by @tcbegley in #1034 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright Contributors to the Pyro project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 from __future__ import absolute_import, division, print_function 5 6 import os 7 import sys 8 9 from setuptools import find_packages, setup 10 11 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)) 12 13 # Find version 14 for line in open(os.path.join(PROJECT_PATH, "numpyro", "version.py")): 15 if line.startswith("__version__ = "): 16 version = line.strip().split()[2][1:-1] 17 18 # READ README.md for long description on PyPi. 19 try: 20 long_description = open("README.md", encoding="utf-8").read() 21 except Exception as e: 22 sys.stderr.write("Failed to read README.md:\n {}\n".format(e)) 23 sys.stderr.flush() 24 long_description = "" 25 26 27 setup( 28 name="numpyro", 29 version=version, 30 description="Pyro PPL on NumPy", 31 packages=find_packages(include=["numpyro", "numpyro.*"]), 32 url="https://github.com/pyro-ppl/numpyro", 33 author="Uber AI Labs", 34 install_requires=[ 35 "jax>=0.2.11", 36 "jaxlib>=0.1.62", 37 "tqdm", 38 ], 39 extras_require={ 40 "doc": [ 41 "ipython", # sphinx needs this to render codes 42 "jinja2<3.0.0", 43 "nbsphinx", 44 "sphinx<4.0.0", 45 "sphinx_rtd_theme", 46 "sphinx-gallery", 47 ], 48 "test": [ 49 "black", 50 "flake8", 51 "isort>=5.0", 52 "pytest>=4.1", 53 "pyro-api>=0.1.1", 54 "scipy>=1.1", 55 ], 56 "dev": [ 57 "dm-haiku", 58 "flax", 59 # TODO: bump funsor version before the release 60 "funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc", 61 "graphviz", 62 "optax==0.0.6", 63 # TODO: change this to tensorflow_probability>0.12.1 when the next version 64 # of tfp is released. The current release is not compatible with jax>=0.2.12. 65 "tfp-nightly", 66 ], 67 "examples": ["arviz", "jupyter", "matplotlib", "pandas", "seaborn"], 68 }, 69 long_description=long_description, 70 long_description_content_type="text/markdown", 71 keywords="probabilistic machine learning bayesian statistics", 72 license="Apache License 2.0", 73 classifiers=[ 74 "Intended Audience :: Developers", 75 "Intended Audience :: Education", 76 "Intended Audience :: Science/Research", 77 "License :: OSI Approved :: Apache Software License", 78 "Operating System :: POSIX :: Linux", 79 "Operating System :: MacOS :: MacOS X", 80 "Programming Language :: Python :: 3.6", 81 "Programming Language :: Python :: 3.7", 82 "Programming Language :: Python :: 3.8", 83 "Programming Language :: Python :: 3.9", 84 ], 85 ) 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -39,9 +39,8 @@ extras_require={ "doc": [ "ipython", # sphinx needs this to render codes - "jinja2<3.0.0", - "nbsphinx", - "sphinx<4.0.0", + "nbsphinx>=0.8.5", + "sphinx", "sphinx_rtd_theme", "sphinx-gallery", ],
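The patch above turns the documentation pins from ceilings into floors. A minimal `setup.py` showing the same pattern for a hypothetical package (the package name and version are placeholders); the extra would then be installed with `pip install -e ".[doc]"`:

```python
from setuptools import setup

setup(
    name="example-package",  # placeholder name
    version="0.0.1",
    extras_require={
        # Floor constraints (>=) and unpinned entries let compatible newer
        # releases of sphinx and its jinja2 dependency be used, mirroring
        # the change in the diff above.
        "doc": [
            "ipython",
            "nbsphinx>=0.8.5",
            "sphinx",
            "sphinx_rtd_theme",
            "sphinx-gallery",
        ],
    },
)
```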
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,9 +39,8 @@\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n- \"jinja2<3.0.0\",\n- \"nbsphinx\",\n- \"sphinx<4.0.0\",\n+ \"nbsphinx>=0.8.5\",\n+ \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n", "issue": "New versions of sphinx and jinja2 break docs linting\nThis is observed by @tcbegley in #1034\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"jinja2<3.0.0\",\n \"nbsphinx\",\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n \"optax==0.0.6\",\n # TODO: change this to tensorflow_probability>0.12.1 when the next version\n # of tfp is released. 
The current release is not compatible with jax>=0.2.12.\n \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# Find version\nfor line in open(os.path.join(PROJECT_PATH, \"numpyro\", \"version.py\")):\n if line.startswith(\"__version__ = \"):\n version = line.strip().split()[2][1:-1]\n\n# READ README.md for long description on PyPi.\ntry:\n long_description = open(\"README.md\", encoding=\"utf-8\").read()\nexcept Exception as e:\n sys.stderr.write(\"Failed to read README.md:\\n {}\\n\".format(e))\n sys.stderr.flush()\n long_description = \"\"\n\n\nsetup(\n name=\"numpyro\",\n version=version,\n description=\"Pyro PPL on NumPy\",\n packages=find_packages(include=[\"numpyro\", \"numpyro.*\"]),\n url=\"https://github.com/pyro-ppl/numpyro\",\n author=\"Uber AI Labs\",\n install_requires=[\n \"jax>=0.2.11\",\n \"jaxlib>=0.1.62\",\n \"tqdm\",\n ],\n extras_require={\n \"doc\": [\n \"ipython\", # sphinx needs this to render codes\n \"nbsphinx>=0.8.5\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinx-gallery\",\n ],\n \"test\": [\n \"black\",\n \"flake8\",\n \"isort>=5.0\",\n \"pytest>=4.1\",\n \"pyro-api>=0.1.1\",\n \"scipy>=1.1\",\n ],\n \"dev\": [\n \"dm-haiku\",\n \"flax\",\n # TODO: bump funsor version before the release\n \"funsor @ git+https://github.com/pyro-ppl/funsor.git@d5574988665dd822ec64e41f2b54b9dc929959dc\",\n \"graphviz\",\n \"optax==0.0.6\",\n # TODO: change this to tensorflow_probability>0.12.1 when the next version\n # of tfp is released. The current release is not compatible with jax>=0.2.12.\n \"tfp-nightly\",\n ],\n \"examples\": [\"arviz\", \"jupyter\", \"matplotlib\", \"pandas\", \"seaborn\"],\n },\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n keywords=\"probabilistic machine learning bayesian statistics\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n)\n", "path": "setup.py"}]}
num_tokens: 1,179
num_tokens_diff: 123

problem_id: gh_patches_debug_39181
source: rasdani/github-patches
task_type: git_diff
in_source_id: scikit-image__scikit-image-1895
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- denoise_bilateral works separately on channels, but no warning raised for 3-D images The implementation of `restoration.denoise_bilateral` is 2-D only, with the last dimension considered to be for channels. However, this is not documented in the docstring, and no warning is raised if a 3-D image (with last dimension > 3) is passed. I used `denoise_bilateral` in the past on 3-D images thinking that the implementation was 3-D. We could consider making the code 3-D or nD in the future, but for now fixing the docstring + warning looks like a reasonable goal for 0.12. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/restoration/_denoise.py` Content: ``` 1 # coding: utf-8 2 import numpy as np 3 from .. import img_as_float 4 from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman 5 from .._shared.utils import _mode_deprecations 6 7 8 def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1, 9 bins=10000, mode='constant', cval=0): 10 """Denoise image using bilateral filter. 11 12 This is an edge-preserving and noise reducing denoising filter. It averages 13 pixels based on their spatial closeness and radiometric similarity. 14 15 Spatial closeness is measured by the gaussian function of the euclidian 16 distance between two pixels and a certain standard deviation 17 (`sigma_spatial`). 18 19 Radiometric similarity is measured by the gaussian function of the euclidian 20 distance between two color values and a certain standard deviation 21 (`sigma_range`). 22 23 Parameters 24 ---------- 25 image : ndarray, shape (M, N[, 3]) 26 Input image, 2D grayscale or RGB. 27 win_size : int 28 Window size for filtering. 29 sigma_range : float 30 Standard deviation for grayvalue/color distance (radiometric 31 similarity). A larger value results in averaging of pixels with larger 32 radiometric differences. Note, that the image will be converted using 33 the `img_as_float` function and thus the standard deviation is in 34 respect to the range ``[0, 1]``. If the value is ``None`` the standard 35 deviation of the ``image`` will be used. 36 sigma_spatial : float 37 Standard deviation for range distance. A larger value results in 38 averaging of pixels with larger spatial differences. 39 bins : int 40 Number of discrete values for gaussian weights of color filtering. 41 A larger value results in improved accuracy. 42 mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'} 43 How to handle values outside the image borders. See 44 `numpy.pad` for detail. 45 cval : string 46 Used in conjunction with mode 'constant', the value outside 47 the image boundaries. 48 49 Returns 50 ------- 51 denoised : ndarray 52 Denoised image. 53 54 References 55 ---------- 56 .. 
[1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf 57 58 Examples 59 -------- 60 >>> from skimage import data, img_as_float 61 >>> astro = img_as_float(data.astronaut()) 62 >>> astro = astro[220:300, 220:320] 63 >>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape) 64 >>> noisy = np.clip(noisy, 0, 1) 65 >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15) 66 """ 67 mode = _mode_deprecations(mode) 68 return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial, 69 bins, mode, cval) 70 71 72 def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True): 73 """Perform total-variation denoising using split-Bregman optimization. 74 75 Total-variation denoising (also know as total-variation regularization) 76 tries to find an image with less total-variation under the constraint 77 of being similar to the input image, which is controlled by the 78 regularization parameter. 79 80 Parameters 81 ---------- 82 image : ndarray 83 Input data to be denoised (converted using img_as_float`). 84 weight : float 85 Denoising weight. The smaller the `weight`, the more denoising (at 86 the expense of less similarity to the `input`). The regularization 87 parameter `lambda` is chosen as `2 * weight`. 88 eps : float, optional 89 Relative difference of the value of the cost function that determines 90 the stop criterion. The algorithm stops when:: 91 92 SUM((u(n) - u(n-1))**2) < eps 93 94 max_iter : int, optional 95 Maximal number of iterations used for the optimization. 96 isotropic : boolean, optional 97 Switch between isotropic and anisotropic TV denoising. 98 99 Returns 100 ------- 101 u : ndarray 102 Denoised image. 103 104 References 105 ---------- 106 .. [1] http://en.wikipedia.org/wiki/Total_variation_denoising 107 .. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1 108 Regularized Problems", 109 ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf 110 .. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising 111 using Split Bregman" in Image Processing On Line on 2012–05–19, 112 http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf 113 .. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf 114 115 """ 116 return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic) 117 118 119 def _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200): 120 """Perform total-variation denoising on n-dimensional images. 121 122 Parameters 123 ---------- 124 im : ndarray 125 n-D input data to be denoised. 126 weight : float, optional 127 Denoising weight. The greater `weight`, the more denoising (at 128 the expense of fidelity to `input`). 129 eps : float, optional 130 Relative difference of the value of the cost function that determines 131 the stop criterion. The algorithm stops when: 132 133 (E_(n-1) - E_n) < eps * E_0 134 135 n_iter_max : int, optional 136 Maximal number of iterations used for the optimization. 137 138 Returns 139 ------- 140 out : ndarray 141 Denoised array of floats. 142 143 Notes 144 ----- 145 Rudin, Osher and Fatemi algorithm. 
146 147 """ 148 149 ndim = im.ndim 150 p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype) 151 g = np.zeros_like(p) 152 d = np.zeros_like(im) 153 i = 0 154 while i < n_iter_max: 155 if i > 0: 156 # d will be the (negative) divergence of p 157 d = -p.sum(0) 158 slices_d = [slice(None), ] * ndim 159 slices_p = [slice(None), ] * (ndim + 1) 160 for ax in range(ndim): 161 slices_d[ax] = slice(1, None) 162 slices_p[ax+1] = slice(0, -1) 163 slices_p[0] = ax 164 d[slices_d] += p[slices_p] 165 slices_d[ax] = slice(None) 166 slices_p[ax+1] = slice(None) 167 out = im + d 168 else: 169 out = im 170 E = (d ** 2).sum() 171 172 # g stores the gradients of out along each axis 173 # e.g. g[0] is the first order finite difference along axis 0 174 slices_g = [slice(None), ] * (ndim + 1) 175 for ax in range(ndim): 176 slices_g[ax+1] = slice(0, -1) 177 slices_g[0] = ax 178 g[slices_g] = np.diff(out, axis=ax) 179 slices_g[ax+1] = slice(None) 180 181 norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...] 182 E += weight * norm.sum() 183 tau = 1. / (2.*ndim) 184 norm *= tau / weight 185 norm += 1. 186 p -= tau * g 187 p /= norm 188 E /= float(im.size) 189 if i == 0: 190 E_init = E 191 E_previous = E 192 else: 193 if np.abs(E_previous - E) < eps * E_init: 194 break 195 else: 196 E_previous = E 197 i += 1 198 return out 199 200 201 def denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200, 202 multichannel=False): 203 """Perform total-variation denoising on n-dimensional images. 204 205 Parameters 206 ---------- 207 im : ndarray of ints, uints or floats 208 Input data to be denoised. `im` can be of any numeric type, 209 but it is cast into an ndarray of floats for the computation 210 of the denoised image. 211 weight : float, optional 212 Denoising weight. The greater `weight`, the more denoising (at 213 the expense of fidelity to `input`). 214 eps : float, optional 215 Relative difference of the value of the cost function that 216 determines the stop criterion. The algorithm stops when: 217 218 (E_(n-1) - E_n) < eps * E_0 219 220 n_iter_max : int, optional 221 Maximal number of iterations used for the optimization. 222 multichannel : bool, optional 223 Apply total-variation denoising separately for each channel. This 224 option should be true for color images, otherwise the denoising is 225 also applied in the channels dimension. 226 227 Returns 228 ------- 229 out : ndarray 230 Denoised image. 231 232 Notes 233 ----- 234 Make sure to set the multichannel parameter appropriately for color images. 235 236 The principle of total variation denoising is explained in 237 http://en.wikipedia.org/wiki/Total_variation_denoising 238 239 The principle of total variation denoising is to minimize the 240 total variation of the image, which can be roughly described as 241 the integral of the norm of the image gradient. Total variation 242 denoising tends to produce "cartoon-like" images, that is, 243 piecewise-constant images. 244 245 This code is an implementation of the algorithm of Rudin, Fatemi and Osher 246 that was proposed by Chambolle in [1]_. 247 248 References 249 ---------- 250 .. [1] A. Chambolle, An algorithm for total variation minimization and 251 applications, Journal of Mathematical Imaging and Vision, 252 Springer, 2004, 20, 89-97. 
253 254 Examples 255 -------- 256 2D example on astronaut image: 257 258 >>> from skimage import color, data 259 >>> img = color.rgb2gray(data.astronaut())[:50, :50] 260 >>> img += 0.5 * img.std() * np.random.randn(*img.shape) 261 >>> denoised_img = denoise_tv_chambolle(img, weight=60) 262 263 3D example on synthetic data: 264 265 >>> x, y, z = np.ogrid[0:20, 0:20, 0:20] 266 >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2 267 >>> mask = mask.astype(np.float) 268 >>> mask += 0.2*np.random.randn(*mask.shape) 269 >>> res = denoise_tv_chambolle(mask, weight=100) 270 271 """ 272 273 im_type = im.dtype 274 if not im_type.kind == 'f': 275 im = img_as_float(im) 276 277 if multichannel: 278 out = np.zeros_like(im) 279 for c in range(im.shape[-1]): 280 out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps, 281 n_iter_max) 282 else: 283 out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max) 284 return out 285 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
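Before looking at the fix, note the contrast drawn in the file above: `denoise_tv_chambolle` is genuinely n-dimensional and exposes an explicit `multichannel` switch. A short sketch using the signature shown above, with synthetic data mirroring the docstring example:

```python
import numpy as np
from skimage.restoration import denoise_tv_chambolle

# 3-D synthetic volume, as in the docstring example above.
x, y, z = np.ogrid[0:20, 0:20, 0:20]
mask = ((x - 22) ** 2 + (y - 20) ** 2 + (z - 17) ** 2 < 8 ** 2).astype(float)
mask += 0.2 * np.random.randn(*mask.shape)

# multichannel=False (the default): the TV prior couples all three axes.
denoised_volume = denoise_tv_chambolle(mask, weight=0.1)

# For a colour image the channels should be denoised independently instead.
rgb = np.random.rand(64, 64, 3)
denoised_rgb = denoise_tv_chambolle(rgb, weight=0.1, multichannel=True)
```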
diff --git a/skimage/restoration/_denoise.py b/skimage/restoration/_denoise.py --- a/skimage/restoration/_denoise.py +++ b/skimage/restoration/_denoise.py @@ -3,10 +3,11 @@ from .. import img_as_float from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman from .._shared.utils import _mode_deprecations +import warnings def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1, - bins=10000, mode='constant', cval=0): + bins=10000, mode='constant', cval=0, multichannel=True): """Denoise image using bilateral filter. This is an edge-preserving and noise reducing denoising filter. It averages @@ -45,6 +46,9 @@ cval : string Used in conjunction with mode 'constant', the value outside the image boundaries. + multichannel : bool + Whether the last axis of the image is to be interpreted as multiple + channels or another spatial dimension. Returns ------- @@ -64,6 +68,38 @@ >>> noisy = np.clip(noisy, 0, 1) >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15) """ + if multichannel: + if image.ndim != 3: + if image.ndim == 2: + raise ValueError("Use ``multichannel=False`` for 2D grayscale " + "images. The last axis of the input image " + "must be multiple color channels not another " + "spatial dimension.") + else: + raise ValueError("Bilateral filter is only implemented for " + "2D grayscale images (image.ndim == 2) and " + "2D multichannel (image.ndim == 3) images, " + "but the input image has {0} dimensions. " + "".format(image.ndim)) + elif image.shape[2] not in (3, 4): + if image.shape[2] > 4: + warnings.warn("The last axis of the input image is interpreted " + "as channels. Input image with shape {0} has {1} " + "channels in last axis. ``denoise_bilateral`` is " + "implemented for 2D grayscale and color images " + "only.".format(image.shape, image.shape[2])) + else: + msg = "Input image must be grayscale, RGB, or RGBA; but has shape {0}." + warnings.warn(msg.format(image.shape)) + else: + if image.ndim > 2: + raise ValueError("Bilateral filter is not implemented for " + "grayscale images of 3 or more dimensions, " + "but input image has {0} dimension. Use " + "``multichannel=True`` for 2-D RGB " + "images.".format(image.shape)) + + mode = _mode_deprecations(mode) return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial, bins, mode, cval)
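From the caller's side, the behaviour introduced by the diff above looks roughly like this; the sketch assumes the patched signature with its `multichannel` parameter, and the array shapes are illustrative:

```python
import numpy as np
from skimage.restoration import denoise_bilateral

rgb = np.random.rand(64, 64, 3)      # 2-D RGB image
volume = np.random.rand(32, 32, 32)  # 3-D grayscale volume

# Colour image: the default multichannel=True treats the last axis as channels.
denoised_rgb = denoise_bilateral(rgb, sigma_range=0.05, sigma_spatial=15)

# 3-D grayscale volume: multichannel=False now raises ValueError instead of
# silently interpreting the last axis as colour channels.
try:
    denoise_bilateral(volume, multichannel=False)
except ValueError as err:
    print("rejected:", err)
```

With the default `multichannel=True`, a last axis longer than four only triggers a warning, since the function cannot reliably distinguish an extra spatial dimension from an unusual channel count.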
{"golden_diff": "diff --git a/skimage/restoration/_denoise.py b/skimage/restoration/_denoise.py\n--- a/skimage/restoration/_denoise.py\n+++ b/skimage/restoration/_denoise.py\n@@ -3,10 +3,11 @@\n from .. import img_as_float\n from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman\n from .._shared.utils import _mode_deprecations\n+import warnings\n \n \n def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,\n- bins=10000, mode='constant', cval=0):\n+ bins=10000, mode='constant', cval=0, multichannel=True):\n \"\"\"Denoise image using bilateral filter.\n \n This is an edge-preserving and noise reducing denoising filter. It averages\n@@ -45,6 +46,9 @@\n cval : string\n Used in conjunction with mode 'constant', the value outside\n the image boundaries.\n+ multichannel : bool\n+ Whether the last axis of the image is to be interpreted as multiple\n+ channels or another spatial dimension.\n \n Returns\n -------\n@@ -64,6 +68,38 @@\n >>> noisy = np.clip(noisy, 0, 1)\n >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15)\n \"\"\"\n+ if multichannel:\n+ if image.ndim != 3:\n+ if image.ndim == 2:\n+ raise ValueError(\"Use ``multichannel=False`` for 2D grayscale \"\n+ \"images. The last axis of the input image \"\n+ \"must be multiple color channels not another \"\n+ \"spatial dimension.\")\n+ else:\n+ raise ValueError(\"Bilateral filter is only implemented for \"\n+ \"2D grayscale images (image.ndim == 2) and \"\n+ \"2D multichannel (image.ndim == 3) images, \"\n+ \"but the input image has {0} dimensions. \"\n+ \"\".format(image.ndim))\n+ elif image.shape[2] not in (3, 4):\n+ if image.shape[2] > 4:\n+ warnings.warn(\"The last axis of the input image is interpreted \"\n+ \"as channels. Input image with shape {0} has {1} \"\n+ \"channels in last axis. ``denoise_bilateral`` is \"\n+ \"implemented for 2D grayscale and color images \"\n+ \"only.\".format(image.shape, image.shape[2]))\n+ else:\n+ msg = \"Input image must be grayscale, RGB, or RGBA; but has shape {0}.\"\n+ warnings.warn(msg.format(image.shape))\n+ else:\n+ if image.ndim > 2:\n+ raise ValueError(\"Bilateral filter is not implemented for \"\n+ \"grayscale images of 3 or more dimensions, \"\n+ \"but input image has {0} dimension. Use \"\n+ \"``multichannel=True`` for 2-D RGB \"\n+ \"images.\".format(image.shape))\n+\n+\n mode = _mode_deprecations(mode)\n return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,\n bins, mode, cval)\n", "issue": "denoise_bilateral works separately on channels, but no warning raised for 3-D images\nThe implementation of `restoration.denoise_bilateral` is 2-D only, with the last dimension considered to be for channels. However, this is not documented in the docstring, and no warning is raised if a 3-D image (with last dimension > 3) is passed. \n\nI used `denoise_bilateral` in the past on 3-D images thinking that the implementation was 3-D. We could consider making the code 3-D or nD in the future, but for now fixing the docstring + warning looks like a reasonable goal for 0.12.\n\n", "before_files": [{"content": "# coding: utf-8\nimport numpy as np\nfrom .. import img_as_float\nfrom ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman\nfrom .._shared.utils import _mode_deprecations\n\n\ndef denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,\n bins=10000, mode='constant', cval=0):\n \"\"\"Denoise image using bilateral filter.\n\n This is an edge-preserving and noise reducing denoising filter. 
It averages\n pixels based on their spatial closeness and radiometric similarity.\n\n Spatial closeness is measured by the gaussian function of the euclidian\n distance between two pixels and a certain standard deviation\n (`sigma_spatial`).\n\n Radiometric similarity is measured by the gaussian function of the euclidian\n distance between two color values and a certain standard deviation\n (`sigma_range`).\n\n Parameters\n ----------\n image : ndarray, shape (M, N[, 3])\n Input image, 2D grayscale or RGB.\n win_size : int\n Window size for filtering.\n sigma_range : float\n Standard deviation for grayvalue/color distance (radiometric\n similarity). A larger value results in averaging of pixels with larger\n radiometric differences. Note, that the image will be converted using\n the `img_as_float` function and thus the standard deviation is in\n respect to the range ``[0, 1]``. If the value is ``None`` the standard\n deviation of the ``image`` will be used.\n sigma_spatial : float\n Standard deviation for range distance. A larger value results in\n averaging of pixels with larger spatial differences.\n bins : int\n Number of discrete values for gaussian weights of color filtering.\n A larger value results in improved accuracy.\n mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}\n How to handle values outside the image borders. See\n `numpy.pad` for detail.\n cval : string\n Used in conjunction with mode 'constant', the value outside\n the image boundaries.\n\n Returns\n -------\n denoised : ndarray\n Denoised image.\n\n References\n ----------\n .. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf\n\n Examples\n --------\n >>> from skimage import data, img_as_float\n >>> astro = img_as_float(data.astronaut())\n >>> astro = astro[220:300, 220:320]\n >>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)\n >>> noisy = np.clip(noisy, 0, 1)\n >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15)\n \"\"\"\n mode = _mode_deprecations(mode)\n return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,\n bins, mode, cval)\n\n\ndef denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):\n \"\"\"Perform total-variation denoising using split-Bregman optimization.\n\n Total-variation denoising (also know as total-variation regularization)\n tries to find an image with less total-variation under the constraint\n of being similar to the input image, which is controlled by the\n regularization parameter.\n\n Parameters\n ----------\n image : ndarray\n Input data to be denoised (converted using img_as_float`).\n weight : float\n Denoising weight. The smaller the `weight`, the more denoising (at\n the expense of less similarity to the `input`). The regularization\n parameter `lambda` is chosen as `2 * weight`.\n eps : float, optional\n Relative difference of the value of the cost function that determines\n the stop criterion. The algorithm stops when::\n\n SUM((u(n) - u(n-1))**2) < eps\n\n max_iter : int, optional\n Maximal number of iterations used for the optimization.\n isotropic : boolean, optional\n Switch between isotropic and anisotropic TV denoising.\n\n Returns\n -------\n u : ndarray\n Denoised image.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Total_variation_denoising\n .. [2] Tom Goldstein and Stanley Osher, \"The Split Bregman Method For L1\n Regularized Problems\",\n ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf\n .. 
[3] Pascal Getreuer, \"Rudin\u2013Osher\u2013Fatemi Total Variation Denoising\n using Split Bregman\" in Image Processing On Line on 2012\u201305\u201319,\n http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf\n .. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf\n\n \"\"\"\n return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)\n\n\ndef _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200):\n \"\"\"Perform total-variation denoising on n-dimensional images.\n\n Parameters\n ----------\n im : ndarray\n n-D input data to be denoised.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that determines\n the stop criterion. The algorithm stops when:\n\n (E_(n-1) - E_n) < eps * E_0\n\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n\n Returns\n -------\n out : ndarray\n Denoised array of floats.\n\n Notes\n -----\n Rudin, Osher and Fatemi algorithm.\n\n \"\"\"\n\n ndim = im.ndim\n p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype)\n g = np.zeros_like(p)\n d = np.zeros_like(im)\n i = 0\n while i < n_iter_max:\n if i > 0:\n # d will be the (negative) divergence of p\n d = -p.sum(0)\n slices_d = [slice(None), ] * ndim\n slices_p = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_d[ax] = slice(1, None)\n slices_p[ax+1] = slice(0, -1)\n slices_p[0] = ax\n d[slices_d] += p[slices_p]\n slices_d[ax] = slice(None)\n slices_p[ax+1] = slice(None)\n out = im + d\n else:\n out = im\n E = (d ** 2).sum()\n\n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n slices_g = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_g[ax+1] = slice(0, -1)\n slices_g[0] = ax\n g[slices_g] = np.diff(out, axis=ax)\n slices_g[ax+1] = slice(None)\n\n norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]\n E += weight * norm.sum()\n tau = 1. / (2.*ndim)\n norm *= tau / weight\n norm += 1.\n p -= tau * g\n p /= norm\n E /= float(im.size)\n if i == 0:\n E_init = E\n E_previous = E\n else:\n if np.abs(E_previous - E) < eps * E_init:\n break\n else:\n E_previous = E\n i += 1\n return out\n\n\ndef denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200,\n multichannel=False):\n \"\"\"Perform total-variation denoising on n-dimensional images.\n\n Parameters\n ----------\n im : ndarray of ints, uints or floats\n Input data to be denoised. `im` can be of any numeric type,\n but it is cast into an ndarray of floats for the computation\n of the denoised image.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that\n determines the stop criterion. The algorithm stops when:\n\n (E_(n-1) - E_n) < eps * E_0\n\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n multichannel : bool, optional\n Apply total-variation denoising separately for each channel. 
This\n option should be true for color images, otherwise the denoising is\n also applied in the channels dimension.\n\n Returns\n -------\n out : ndarray\n Denoised image.\n\n Notes\n -----\n Make sure to set the multichannel parameter appropriately for color images.\n\n The principle of total variation denoising is explained in\n http://en.wikipedia.org/wiki/Total_variation_denoising\n\n The principle of total variation denoising is to minimize the\n total variation of the image, which can be roughly described as\n the integral of the norm of the image gradient. Total variation\n denoising tends to produce \"cartoon-like\" images, that is,\n piecewise-constant images.\n\n This code is an implementation of the algorithm of Rudin, Fatemi and Osher\n that was proposed by Chambolle in [1]_.\n\n References\n ----------\n .. [1] A. Chambolle, An algorithm for total variation minimization and\n applications, Journal of Mathematical Imaging and Vision,\n Springer, 2004, 20, 89-97.\n\n Examples\n --------\n 2D example on astronaut image:\n\n >>> from skimage import color, data\n >>> img = color.rgb2gray(data.astronaut())[:50, :50]\n >>> img += 0.5 * img.std() * np.random.randn(*img.shape)\n >>> denoised_img = denoise_tv_chambolle(img, weight=60)\n\n 3D example on synthetic data:\n\n >>> x, y, z = np.ogrid[0:20, 0:20, 0:20]\n >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2\n >>> mask = mask.astype(np.float)\n >>> mask += 0.2*np.random.randn(*mask.shape)\n >>> res = denoise_tv_chambolle(mask, weight=100)\n\n \"\"\"\n\n im_type = im.dtype\n if not im_type.kind == 'f':\n im = img_as_float(im)\n\n if multichannel:\n out = np.zeros_like(im)\n for c in range(im.shape[-1]):\n out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps,\n n_iter_max)\n else:\n out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max)\n return out\n", "path": "skimage/restoration/_denoise.py"}], "after_files": [{"content": "# coding: utf-8\nimport numpy as np\nfrom .. import img_as_float\nfrom ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman\nfrom .._shared.utils import _mode_deprecations\nimport warnings\n\n\ndef denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,\n bins=10000, mode='constant', cval=0, multichannel=True):\n \"\"\"Denoise image using bilateral filter.\n\n This is an edge-preserving and noise reducing denoising filter. It averages\n pixels based on their spatial closeness and radiometric similarity.\n\n Spatial closeness is measured by the gaussian function of the euclidian\n distance between two pixels and a certain standard deviation\n (`sigma_spatial`).\n\n Radiometric similarity is measured by the gaussian function of the euclidian\n distance between two color values and a certain standard deviation\n (`sigma_range`).\n\n Parameters\n ----------\n image : ndarray, shape (M, N[, 3])\n Input image, 2D grayscale or RGB.\n win_size : int\n Window size for filtering.\n sigma_range : float\n Standard deviation for grayvalue/color distance (radiometric\n similarity). A larger value results in averaging of pixels with larger\n radiometric differences. Note, that the image will be converted using\n the `img_as_float` function and thus the standard deviation is in\n respect to the range ``[0, 1]``. If the value is ``None`` the standard\n deviation of the ``image`` will be used.\n sigma_spatial : float\n Standard deviation for range distance. 
A larger value results in\n averaging of pixels with larger spatial differences.\n bins : int\n Number of discrete values for gaussian weights of color filtering.\n A larger value results in improved accuracy.\n mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}\n How to handle values outside the image borders. See\n `numpy.pad` for detail.\n cval : string\n Used in conjunction with mode 'constant', the value outside\n the image boundaries.\n multichannel : bool\n Whether the last axis of the image is to be interpreted as multiple\n channels or another spatial dimension.\n\n Returns\n -------\n denoised : ndarray\n Denoised image.\n\n References\n ----------\n .. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf\n\n Example\n -------\n >>> from skimage import data, img_as_float\n >>> astro = img_as_float(data.astronaut())\n >>> astro = astro[220:300, 220:320]\n >>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)\n >>> noisy = np.clip(noisy, 0, 1)\n >>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15)\n \"\"\"\n if multichannel:\n if image.ndim != 3:\n if image.ndim == 2:\n raise ValueError(\"Use ``multichannel=False`` for 2D grayscale \"\n \"images. The last axis of the input image \"\n \"must be multiple color channels not another \"\n \"spatial dimension.\")\n else:\n raise ValueError(\"Bilateral filter is only implemented for \"\n \"2D grayscale images (image.ndim == 2) and \"\n \"2D multichannel (image.ndim == 3) images, \"\n \"but the input image has {0} dimensions. \"\n \"\".format(image.ndim))\n elif image.shape[2] not in (3, 4):\n if image.shape[2] > 4:\n warnings.warn(\"The last axis of the input image is interpreted \"\n \"as channels. Input image with shape {0} has {1} \"\n \"channels in last axis. ``denoise_bilateral`` is \"\n \"implemented for 2D grayscale and color images \"\n \"only.\".format(image.shape, image.shape[2]))\n else:\n msg = \"Input image must be grayscale, RGB, or RGBA; but has shape {0}.\"\n warnings.warn(msg.format(image.shape))\n else:\n if image.ndim > 2:\n raise ValueError(\"Bilateral filter is not implemented for \"\n \"grayscale images of 3 or more dimensions, \"\n \"but input image has {0} dimension. Use \"\n \"``multichannel=True`` for 2-D RGB \"\n \"images.\".format(image.shape))\n\n\n mode = _mode_deprecations(mode)\n return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,\n bins, mode, cval)\n\n\ndef denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):\n \"\"\"Perform total-variation denoising using split-Bregman optimization.\n\n Total-variation denoising (also know as total-variation regularization)\n tries to find an image with less total-variation under the constraint\n of being similar to the input image, which is controlled by the\n regularization parameter.\n\n Parameters\n ----------\n image : ndarray\n Input data to be denoised (converted using img_as_float`).\n weight : float\n Denoising weight. The smaller the `weight`, the more denoising (at\n the expense of less similarity to the `input`). The regularization\n parameter `lambda` is chosen as `2 * weight`.\n eps : float, optional\n Relative difference of the value of the cost function that determines\n the stop criterion. 
The algorithm stops when::\n\n SUM((u(n) - u(n-1))**2) < eps\n\n max_iter : int, optional\n Maximal number of iterations used for the optimization.\n isotropic : boolean, optional\n Switch between isotropic and anisotropic TV denoising.\n\n Returns\n -------\n u : ndarray\n Denoised image.\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Total_variation_denoising\n .. [2] Tom Goldstein and Stanley Osher, \"The Split Bregman Method For L1\n Regularized Problems\",\n ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf\n .. [3] Pascal Getreuer, \"Rudin\u2013Osher\u2013Fatemi Total Variation Denoising\n using Split Bregman\" in Image Processing On Line on 2012\u201305\u201319,\n http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf\n .. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf\n\n \"\"\"\n return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)\n\n\ndef _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200):\n \"\"\"Perform total-variation denoising on n-dimensional images.\n\n Parameters\n ----------\n im : ndarray\n n-D input data to be denoised.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that determines\n the stop criterion. The algorithm stops when:\n\n (E_(n-1) - E_n) < eps * E_0\n\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n\n Returns\n -------\n out : ndarray\n Denoised array of floats.\n\n Notes\n -----\n Rudin, Osher and Fatemi algorithm.\n\n \"\"\"\n\n ndim = im.ndim\n p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype)\n g = np.zeros_like(p)\n d = np.zeros_like(im)\n i = 0\n while i < n_iter_max:\n if i > 0:\n # d will be the (negative) divergence of p\n d = -p.sum(0)\n slices_d = [slice(None), ] * ndim\n slices_p = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_d[ax] = slice(1, None)\n slices_p[ax+1] = slice(0, -1)\n slices_p[0] = ax\n d[slices_d] += p[slices_p]\n slices_d[ax] = slice(None)\n slices_p[ax+1] = slice(None)\n out = im + d\n else:\n out = im\n E = (d ** 2).sum()\n\n # g stores the gradients of out along each axis\n # e.g. g[0] is the first order finite difference along axis 0\n slices_g = [slice(None), ] * (ndim + 1)\n for ax in range(ndim):\n slices_g[ax+1] = slice(0, -1)\n slices_g[0] = ax\n g[slices_g] = np.diff(out, axis=ax)\n slices_g[ax+1] = slice(None)\n\n norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]\n E += weight * norm.sum()\n tau = 1. / (2.*ndim)\n norm *= tau / weight\n norm += 1.\n p -= tau * g\n p /= norm\n E /= float(im.size)\n if i == 0:\n E_init = E\n E_previous = E\n else:\n if np.abs(E_previous - E) < eps * E_init:\n break\n else:\n E_previous = E\n i += 1\n return out\n\n\ndef denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200,\n multichannel=False):\n \"\"\"Perform total-variation denoising on n-dimensional images.\n\n Parameters\n ----------\n im : ndarray of ints, uints or floats\n Input data to be denoised. `im` can be of any numeric type,\n but it is cast into an ndarray of floats for the computation\n of the denoised image.\n weight : float, optional\n Denoising weight. The greater `weight`, the more denoising (at\n the expense of fidelity to `input`).\n eps : float, optional\n Relative difference of the value of the cost function that\n determines the stop criterion. 
The algorithm stops when:\n\n (E_(n-1) - E_n) < eps * E_0\n\n n_iter_max : int, optional\n Maximal number of iterations used for the optimization.\n multichannel : bool, optional\n Apply total-variation denoising separately for each channel. This\n option should be true for color images, otherwise the denoising is\n also applied in the channels dimension.\n\n Returns\n -------\n out : ndarray\n Denoised image.\n\n Notes\n -----\n Make sure to set the multichannel parameter appropriately for color images.\n\n The principle of total variation denoising is explained in\n http://en.wikipedia.org/wiki/Total_variation_denoising\n\n The principle of total variation denoising is to minimize the\n total variation of the image, which can be roughly described as\n the integral of the norm of the image gradient. Total variation\n denoising tends to produce \"cartoon-like\" images, that is,\n piecewise-constant images.\n\n This code is an implementation of the algorithm of Rudin, Fatemi and Osher\n that was proposed by Chambolle in [1]_.\n\n References\n ----------\n .. [1] A. Chambolle, An algorithm for total variation minimization and\n applications, Journal of Mathematical Imaging and Vision,\n Springer, 2004, 20, 89-97.\n\n Examples\n --------\n 2D example on astronaut image:\n\n >>> from skimage import color, data\n >>> img = color.rgb2gray(data.astronaut())[:50, :50]\n >>> img += 0.5 * img.std() * np.random.randn(*img.shape)\n >>> denoised_img = denoise_tv_chambolle(img, weight=60)\n\n 3D example on synthetic data:\n\n >>> x, y, z = np.ogrid[0:20, 0:20, 0:20]\n >>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2\n >>> mask = mask.astype(np.float)\n >>> mask += 0.2*np.random.randn(*mask.shape)\n >>> res = denoise_tv_chambolle(mask, weight=100)\n\n \"\"\"\n\n im_type = im.dtype\n if not im_type.kind == 'f':\n im = img_as_float(im)\n\n if multichannel:\n out = np.zeros_like(im)\n for c in range(im.shape[-1]):\n out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps,\n n_iter_max)\n else:\n out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max)\n return out\n", "path": "skimage/restoration/_denoise.py"}]}
3,796
737
gh_patches_debug_15692
rasdani/github-patches
git_diff
ethereum__web3.py-1198
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop repeated rpc endpoints * Version: 4.6 ### What was wrong? web3.py has a few repeated endpoints, for example: `web3.version.net` and `web3.net.version` ### How can it be fixed? Deprecate the endpoint api's that don't mirror the json-rpc spec namespacing, in favor of those that do. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/version.py` Content: ``` 1 from web3.module import ( 2 Module, 3 ) 4 5 6 class Version(Module): 7 @property 8 def api(self): 9 from web3 import __version__ 10 return __version__ 11 12 @property 13 def node(self): 14 return self.web3.manager.request_blocking("web3_clientVersion", []) 15 16 @property 17 def network(self): 18 return self.web3.manager.request_blocking("net_version", []) 19 20 @property 21 def ethereum(self): 22 return self.web3.manager.request_blocking("eth_protocolVersion", []) 23 ``` Path: `web3/miner.py` Content: ``` 1 from web3.module import ( 2 Module, 3 ) 4 5 6 class Miner(Module): 7 @property 8 def hashrate(self): 9 return self.web3.manager.request_blocking("eth_hashrate", []) 10 11 def makeDAG(self, number): 12 return self.web3.manager.request_blocking("miner_makeDag", [number]) 13 14 def setExtra(self, extra): 15 return self.web3.manager.request_blocking("miner_setExtra", [extra]) 16 17 def setEtherBase(self, etherbase): 18 return self.web3.manager.request_blocking("miner_setEtherbase", [etherbase]) 19 20 def setGasPrice(self, gas_price): 21 return self.web3.manager.request_blocking( 22 "miner_setGasPrice", [gas_price], 23 ) 24 25 def start(self, num_threads): 26 return self.web3.manager.request_blocking( 27 "miner_start", [num_threads], 28 ) 29 30 def stop(self): 31 return self.web3.manager.request_blocking("miner_stop", []) 32 33 def startAutoDAG(self): 34 return self.web3.manager.request_blocking("miner_startAutoDag", []) 35 36 def stopAutoDAG(self): 37 return self.web3.manager.request_blocking("miner_stopAutoDag", []) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/miner.py b/web3/miner.py --- a/web3/miner.py +++ b/web3/miner.py @@ -4,10 +4,6 @@ class Miner(Module): - @property - def hashrate(self): - return self.web3.manager.request_blocking("eth_hashrate", []) - def makeDAG(self, number): return self.web3.manager.request_blocking("miner_makeDag", [number]) diff --git a/web3/version.py b/web3/version.py --- a/web3/version.py +++ b/web3/version.py @@ -13,10 +13,6 @@ def node(self): return self.web3.manager.request_blocking("web3_clientVersion", []) - @property - def network(self): - return self.web3.manager.request_blocking("net_version", []) - @property def ethereum(self): return self.web3.manager.request_blocking("eth_protocolVersion", [])
{"golden_diff": "diff --git a/web3/miner.py b/web3/miner.py\n--- a/web3/miner.py\n+++ b/web3/miner.py\n@@ -4,10 +4,6 @@\n \n \n class Miner(Module):\n- @property\n- def hashrate(self):\n- return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n-\n def makeDAG(self, number):\n return self.web3.manager.request_blocking(\"miner_makeDag\", [number])\n \ndiff --git a/web3/version.py b/web3/version.py\n--- a/web3/version.py\n+++ b/web3/version.py\n@@ -13,10 +13,6 @@\n def node(self):\n return self.web3.manager.request_blocking(\"web3_clientVersion\", [])\n \n- @property\n- def network(self):\n- return self.web3.manager.request_blocking(\"net_version\", [])\n-\n @property\n def ethereum(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n", "issue": "Drop repeated rpc endpoints\n* Version: 4.6\r\n\r\n### What was wrong?\r\n\r\nweb3.py has a few repeated endpoints, for example: `web3.version.net` and `web3.net.version`\r\n\r\n\r\n### How can it be fixed?\r\nDeprecate the endpoint api's that don't mirror the json-rpc spec namespacing, in favor of those that do.\n", "before_files": [{"content": "from web3.module import (\n Module,\n)\n\n\nclass Version(Module):\n @property\n def api(self):\n from web3 import __version__\n return __version__\n\n @property\n def node(self):\n return self.web3.manager.request_blocking(\"web3_clientVersion\", [])\n\n @property\n def network(self):\n return self.web3.manager.request_blocking(\"net_version\", [])\n\n @property\n def ethereum(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n", "path": "web3/version.py"}, {"content": "from web3.module import (\n Module,\n)\n\n\nclass Miner(Module):\n @property\n def hashrate(self):\n return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n\n def makeDAG(self, number):\n return self.web3.manager.request_blocking(\"miner_makeDag\", [number])\n\n def setExtra(self, extra):\n return self.web3.manager.request_blocking(\"miner_setExtra\", [extra])\n\n def setEtherBase(self, etherbase):\n return self.web3.manager.request_blocking(\"miner_setEtherbase\", [etherbase])\n\n def setGasPrice(self, gas_price):\n return self.web3.manager.request_blocking(\n \"miner_setGasPrice\", [gas_price],\n )\n\n def start(self, num_threads):\n return self.web3.manager.request_blocking(\n \"miner_start\", [num_threads],\n )\n\n def stop(self):\n return self.web3.manager.request_blocking(\"miner_stop\", [])\n\n def startAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_startAutoDag\", [])\n\n def stopAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_stopAutoDag\", [])\n", "path": "web3/miner.py"}], "after_files": [{"content": "from web3.module import (\n Module,\n)\n\n\nclass Version(Module):\n @property\n def api(self):\n from web3 import __version__\n return __version__\n\n @property\n def node(self):\n return self.web3.manager.request_blocking(\"web3_clientVersion\", [])\n\n @property\n def ethereum(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n", "path": "web3/version.py"}, {"content": "from web3.module import (\n Module,\n)\n\n\nclass Miner(Module):\n def makeDAG(self, number):\n return self.web3.manager.request_blocking(\"miner_makeDag\", [number])\n\n def setExtra(self, extra):\n return self.web3.manager.request_blocking(\"miner_setExtra\", [extra])\n\n def setEtherBase(self, etherbase):\n return self.web3.manager.request_blocking(\"miner_setEtherbase\", [etherbase])\n\n def setGasPrice(self, 
gas_price):\n return self.web3.manager.request_blocking(\n \"miner_setGasPrice\", [gas_price],\n )\n\n def start(self, num_threads):\n return self.web3.manager.request_blocking(\n \"miner_start\", [num_threads],\n )\n\n def stop(self):\n return self.web3.manager.request_blocking(\"miner_stop\", [])\n\n def startAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_startAutoDag\", [])\n\n def stopAutoDAG(self):\n return self.web3.manager.request_blocking(\"miner_stopAutoDag\", [])\n", "path": "web3/miner.py"}]}
820
215
gh_patches_debug_41673
rasdani/github-patches
git_diff
rucio__rucio-3087
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- check global quota in running RSESelector Motivation ---------- check global quota not only when initializing the RSESelector but also when calling the select_rse method. Modification ------------ update select_rse/__update_quota method in the RSESelector --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/core/rse_selector.py` Content: ``` 1 # Copyright European Organization for Nuclear Research (CERN) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # You may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Authors: 9 # - Martin Barisits, <[email protected]>, 2013-2017 10 # - Cedric Serfon, <[email protected]>, 2015-2019 11 # - Robert Illingworth, <[email protected]>, 2019 12 # - Andrew Lister, <[email protected]>, 2019 13 # - Hannes Hansen, <[email protected]>, 2019 14 # 15 # PY3K COMPATIBLE 16 17 from random import uniform, shuffle 18 19 from rucio.common.exception import InsufficientAccountLimit, InsufficientTargetRSEs, InvalidRuleWeight, RSEOverQuota 20 from rucio.core.account import has_account_attribute, get_usage, get_all_rse_usages_per_account 21 from rucio.core.account_limit import get_local_account_limit, get_global_account_limits 22 from rucio.core.rse import list_rse_attributes, has_rse_attribute, get_rse_limits 23 from rucio.core.rse_counter import get_counter as get_rse_counter 24 from rucio.db.sqla.session import read_session 25 26 27 class RSESelector(): 28 """ 29 Representation of the RSE selector 30 """ 31 32 @read_session 33 def __init__(self, account, rses, weight, copies, ignore_account_limit=False, session=None): 34 """ 35 Initialize the RSE Selector. 36 37 :param account: Account owning the rule. 38 :param rses: List of rse dictionaries. 39 :param weight: Weighting to use. 40 :param copies: Number of copies to create. 41 :param ignore_account_limit: Flag if the quota should be ignored. 42 :param session: DB Session in use. 
43 :raises: InvalidRuleWeight, InsufficientAccountLimit, InsufficientTargetRSEs 44 """ 45 self.account = account 46 self.rses = [] # [{'rse_id':, 'weight':, 'staging_area'}] 47 self.copies = copies 48 if weight is not None: 49 for rse in rses: 50 attributes = list_rse_attributes(rse_id=rse['id'], session=session) 51 availability_write = True if rse.get('availability', 7) & 2 else False 52 if weight not in attributes: 53 continue # The RSE does not have the required weight set, therefore it is ignored 54 try: 55 self.rses.append({'rse_id': rse['id'], 56 'weight': float(attributes[weight]), 57 'mock_rse': attributes.get('mock', False), 58 'availability_write': availability_write, 59 'staging_area': rse['staging_area']}) 60 except ValueError: 61 raise InvalidRuleWeight('The RSE \'%s\' has a non-number specified for the weight \'%s\'' % (rse['rse'], weight)) 62 else: 63 for rse in rses: 64 mock_rse = has_rse_attribute(rse['id'], 'mock', session=session) 65 availability_write = True if rse.get('availability', 7) & 2 else False 66 self.rses.append({'rse_id': rse['id'], 67 'weight': 1, 68 'mock_rse': mock_rse, 69 'availability_write': availability_write, 70 'staging_area': rse['staging_area']}) 71 72 if len(self.rses) < self.copies: 73 raise InsufficientTargetRSEs('Target RSE set not sufficient for number of copies. (%s copies requested, RSE set size %s)' % (self.copies, len(self.rses))) 74 75 rses_with_enough_quota = [] 76 if has_account_attribute(account=account, key='admin', session=session) or ignore_account_limit: 77 for rse in self.rses: 78 rse['quota_left'] = float('inf') 79 rse['space_left'] = float('inf') 80 rses_with_enough_quota.append(rse) 81 else: 82 global_quota_limit = get_global_account_limits(account=account, session=session) 83 all_rse_usages = {usage['rse_id']: usage['bytes'] for usage in get_all_rse_usages_per_account(account=account, session=session)} 84 for rse in self.rses: 85 if rse['mock_rse']: 86 rse['quota_left'] = float('inf') 87 rse['space_left'] = float('inf') 88 rses_with_enough_quota.append(rse) 89 else: 90 # check local quota 91 local_quota_left = None 92 quota_limit = get_local_account_limit(account=account, rse_id=rse['rse_id'], session=session) 93 if quota_limit is None: 94 local_quota_left = 0 95 else: 96 local_quota_left = quota_limit - get_usage(rse_id=rse['rse_id'], account=account, session=session)['bytes'] 97 # check global quota 98 all_global_quota_enough = True 99 for rse_expression, limit in global_quota_limit.items(): 100 if rse['rse_id'] in limit['resolved_rse_ids']: 101 quota_limit = limit['limit'] 102 global_quota_left = None 103 if quota_limit is None: 104 global_quota_left = 0 105 else: 106 rse_expression_usage = 0 107 for rse_id in limit['resolved_rse_ids']: 108 rse_expression_usage += all_rse_usages.get(rse_id, 0) 109 global_quota_left = quota_limit - rse_expression_usage 110 if global_quota_left <= 0: 111 all_global_quota_enough = False 112 break 113 if local_quota_left > 0 and all_global_quota_enough: 114 rse['quota_left'] = local_quota_left 115 space_limit = get_rse_limits(name='MaxSpaceAvailable', rse_id=rse['rse_id'], session=session).get('MaxSpaceAvailable') 116 if space_limit is None or space_limit < 0: 117 rse['space_left'] = float('inf') 118 else: 119 rse['space_left'] = space_limit - get_rse_counter(rse_id=rse['rse_id'], session=session)['bytes'] 120 rses_with_enough_quota.append(rse) 121 122 self.rses = rses_with_enough_quota 123 if len(self.rses) < self.copies: 124 raise InsufficientAccountLimit('There is insufficient quota on any 
of the target RSE\'s to fullfill the operation.') 125 126 # don't consider removing rses based on the total space here - because files already on the RSE are taken into account 127 # it is possible to have no space but still be able to fulfil the rule 128 129 def select_rse(self, size, preferred_rse_ids, copies=0, blacklist=[], prioritize_order_over_weight=False, existing_rse_size=None): 130 """ 131 Select n RSEs to replicate data to. 132 133 :param size: Size of the block being replicated. 134 :param preferred_rse_ids: Ordered list of preferred rses. (If possible replicate to them) 135 :param copies: Select this amount of copies, if 0 use the pre-defined rule value. 136 :param blacklist: List of blacklisted rses. (Do not put replicas on these sites) 137 :param prioritze_order_over_weight: Prioritize the order of the preferred_rse_ids list over the picking done by weight. 138 :existing_rse_size: Dictionary of size of files already present at each rse 139 :returns: List of (RSE_id, staging_area, availability_write) tuples. 140 :raises: InsufficientAccountLimit, InsufficientTargetRSEs 141 """ 142 143 result = [] 144 rses = self.rses 145 count = self.copies if copies == 0 else copies 146 147 # Remove blacklisted rses 148 if blacklist: 149 rses = [rse for rse in self.rses if rse['rse_id'] not in blacklist] 150 if len(rses) < count: 151 raise InsufficientTargetRSEs('There are not enough target RSEs to fulfil the request at this time.') 152 153 # Remove rses which do not have enough space, accounting for the files already at each rse 154 if existing_rse_size is None: 155 existing_rse_size = {} 156 rses = [rse for rse in rses if rse['space_left'] >= size - existing_rse_size.get(rse['rse_id'], 0)] 157 if len(rses) < count: 158 raise RSEOverQuota('There is insufficient space on any of the target RSE\'s to fullfill the operation.') 159 160 # Remove rses which do not have enough quota 161 rses = [rse for rse in rses if rse['quota_left'] > size] 162 if len(rses) < count: 163 raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\'s to fullfill the operation.') 164 165 for copy in range(count): 166 # Remove rses already in the result set 167 rses = [rse for rse in rses if rse['rse_id'] not in [item[0] for item in result]] 168 rses_dict = {} 169 for rse in rses: 170 rses_dict[rse['rse_id']] = rse 171 # Prioritize the preffered rses 172 preferred_rses = [rses_dict[rse_id] for rse_id in preferred_rse_ids if rse_id in rses_dict] 173 if prioritize_order_over_weight and preferred_rses: 174 rse = (preferred_rses[0]['rse_id'], preferred_rses[0]['staging_area'], preferred_rses[0]['availability_write']) 175 elif preferred_rses: 176 rse = self.__choose_rse(preferred_rses) 177 else: 178 rse = self.__choose_rse(rses) 179 result.append(rse) 180 self.__update_quota(rse, size) 181 return result 182 183 def get_rse_dictionary(self): 184 """ 185 Return the current dictionary of potential RSEs stored in the RSE selector 186 187 :returns: List of RSE dictionaries 188 """ 189 rse_dict = {} 190 for rse in self.rses: 191 rse_dict[rse['rse_id']] = rse 192 return rse_dict 193 194 def __update_quota(self, rse, size): 195 """ 196 Update the internal quota value. 197 198 :param rse: RSE tuple to update. 199 :param size: Size to substract. 200 """ 201 202 for element in self.rses: 203 if element['rse_id'] == rse[0]: 204 element['quota_left'] -= size 205 return 206 207 def __choose_rse(self, rses): 208 """ 209 Choose an RSE based on weighting. 
210 211 :param rses: The rses to be considered for the choose. 212 :return: The (rse_id, staging_area) tuple of the chosen RSE. 213 """ 214 215 shuffle(rses) 216 pick = uniform(0, sum([rse['weight'] for rse in rses])) 217 weight = 0 218 for rse in rses: 219 weight += rse['weight'] 220 if pick <= weight: 221 return (rse['rse_id'], rse['staging_area'], rse['availability_write']) 222 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/core/rse_selector.py b/lib/rucio/core/rse_selector.py --- a/lib/rucio/core/rse_selector.py +++ b/lib/rucio/core/rse_selector.py @@ -94,7 +94,9 @@ local_quota_left = 0 else: local_quota_left = quota_limit - get_usage(rse_id=rse['rse_id'], account=account, session=session)['bytes'] + # check global quota + rse['global_quota_left'] = {} all_global_quota_enough = True for rse_expression, limit in global_quota_limit.items(): if rse['rse_id'] in limit['resolved_rse_ids']: @@ -110,6 +112,8 @@ if global_quota_left <= 0: all_global_quota_enough = False break + else: + rse['global_quota_left'][rse_expression] = global_quota_left if local_quota_left > 0 and all_global_quota_enough: rse['quota_left'] = local_quota_left space_limit = get_rse_limits(name='MaxSpaceAvailable', rse_id=rse['rse_id'], session=session).get('MaxSpaceAvailable') @@ -157,11 +161,25 @@ if len(rses) < count: raise RSEOverQuota('There is insufficient space on any of the target RSE\'s to fullfill the operation.') - # Remove rses which do not have enough quota + # Remove rses which do not have enough local quota rses = [rse for rse in rses if rse['quota_left'] > size] if len(rses) < count: raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\'s to fullfill the operation.') + # Remove rses which do not have enough global quota + rses_with_enough_quota = [] + for rse in rses: + enough_global_quota = True + for rse_expression in rse.get('global_quota_left', []): + if rse['global_quota_left'][rse_expression] < size: + enough_global_quota = False + break + if enough_global_quota: + rses_with_enough_quota.append(rse) + rses = rses_with_enough_quota + if len(rses) < count: + raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\'s to fullfill the operation.') + for copy in range(count): # Remove rses already in the result set rses = [rse for rse in rses if rse['rse_id'] not in [item[0] for item in result]] @@ -202,6 +220,8 @@ for element in self.rses: if element['rse_id'] == rse[0]: element['quota_left'] -= size + for rse_expression in element.get('global_quota_left', []): + element['global_quota_left'][rse_expression] -= size return def __choose_rse(self, rses):
{"golden_diff": "diff --git a/lib/rucio/core/rse_selector.py b/lib/rucio/core/rse_selector.py\n--- a/lib/rucio/core/rse_selector.py\n+++ b/lib/rucio/core/rse_selector.py\n@@ -94,7 +94,9 @@\n local_quota_left = 0\n else:\n local_quota_left = quota_limit - get_usage(rse_id=rse['rse_id'], account=account, session=session)['bytes']\n+\n # check global quota\n+ rse['global_quota_left'] = {}\n all_global_quota_enough = True\n for rse_expression, limit in global_quota_limit.items():\n if rse['rse_id'] in limit['resolved_rse_ids']:\n@@ -110,6 +112,8 @@\n if global_quota_left <= 0:\n all_global_quota_enough = False\n break\n+ else:\n+ rse['global_quota_left'][rse_expression] = global_quota_left\n if local_quota_left > 0 and all_global_quota_enough:\n rse['quota_left'] = local_quota_left\n space_limit = get_rse_limits(name='MaxSpaceAvailable', rse_id=rse['rse_id'], session=session).get('MaxSpaceAvailable')\n@@ -157,11 +161,25 @@\n if len(rses) < count:\n raise RSEOverQuota('There is insufficient space on any of the target RSE\\'s to fullfill the operation.')\n \n- # Remove rses which do not have enough quota\n+ # Remove rses which do not have enough local quota\n rses = [rse for rse in rses if rse['quota_left'] > size]\n if len(rses) < count:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n \n+ # Remove rses which do not have enough global quota\n+ rses_with_enough_quota = []\n+ for rse in rses:\n+ enough_global_quota = True\n+ for rse_expression in rse.get('global_quota_left', []):\n+ if rse['global_quota_left'][rse_expression] < size:\n+ enough_global_quota = False\n+ break\n+ if enough_global_quota:\n+ rses_with_enough_quota.append(rse)\n+ rses = rses_with_enough_quota\n+ if len(rses) < count:\n+ raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n+\n for copy in range(count):\n # Remove rses already in the result set\n rses = [rse for rse in rses if rse['rse_id'] not in [item[0] for item in result]]\n@@ -202,6 +220,8 @@\n for element in self.rses:\n if element['rse_id'] == rse[0]:\n element['quota_left'] -= size\n+ for rse_expression in element.get('global_quota_left', []):\n+ element['global_quota_left'][rse_expression] -= size\n return\n \n def __choose_rse(self, rses):\n", "issue": "check global quota in running RSESelector\nMotivation\r\n----------\r\ncheck global quota not only when initializing the RSESelector but also when calling the select_rse method.\r\n\r\n\r\nModification\r\n------------\r\nupdate select_rse/__update_quota method in the RSESelector\r\n\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Martin Barisits, <[email protected]>, 2013-2017\n# - Cedric Serfon, <[email protected]>, 2015-2019\n# - Robert Illingworth, <[email protected]>, 2019\n# - Andrew Lister, <[email protected]>, 2019\n# - Hannes Hansen, <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nfrom random import uniform, shuffle\n\nfrom rucio.common.exception import InsufficientAccountLimit, InsufficientTargetRSEs, InvalidRuleWeight, RSEOverQuota\nfrom rucio.core.account import has_account_attribute, get_usage, get_all_rse_usages_per_account\nfrom rucio.core.account_limit import 
get_local_account_limit, get_global_account_limits\nfrom rucio.core.rse import list_rse_attributes, has_rse_attribute, get_rse_limits\nfrom rucio.core.rse_counter import get_counter as get_rse_counter\nfrom rucio.db.sqla.session import read_session\n\n\nclass RSESelector():\n \"\"\"\n Representation of the RSE selector\n \"\"\"\n\n @read_session\n def __init__(self, account, rses, weight, copies, ignore_account_limit=False, session=None):\n \"\"\"\n Initialize the RSE Selector.\n\n :param account: Account owning the rule.\n :param rses: List of rse dictionaries.\n :param weight: Weighting to use.\n :param copies: Number of copies to create.\n :param ignore_account_limit: Flag if the quota should be ignored.\n :param session: DB Session in use.\n :raises: InvalidRuleWeight, InsufficientAccountLimit, InsufficientTargetRSEs\n \"\"\"\n self.account = account\n self.rses = [] # [{'rse_id':, 'weight':, 'staging_area'}]\n self.copies = copies\n if weight is not None:\n for rse in rses:\n attributes = list_rse_attributes(rse_id=rse['id'], session=session)\n availability_write = True if rse.get('availability', 7) & 2 else False\n if weight not in attributes:\n continue # The RSE does not have the required weight set, therefore it is ignored\n try:\n self.rses.append({'rse_id': rse['id'],\n 'weight': float(attributes[weight]),\n 'mock_rse': attributes.get('mock', False),\n 'availability_write': availability_write,\n 'staging_area': rse['staging_area']})\n except ValueError:\n raise InvalidRuleWeight('The RSE \\'%s\\' has a non-number specified for the weight \\'%s\\'' % (rse['rse'], weight))\n else:\n for rse in rses:\n mock_rse = has_rse_attribute(rse['id'], 'mock', session=session)\n availability_write = True if rse.get('availability', 7) & 2 else False\n self.rses.append({'rse_id': rse['id'],\n 'weight': 1,\n 'mock_rse': mock_rse,\n 'availability_write': availability_write,\n 'staging_area': rse['staging_area']})\n\n if len(self.rses) < self.copies:\n raise InsufficientTargetRSEs('Target RSE set not sufficient for number of copies. 
(%s copies requested, RSE set size %s)' % (self.copies, len(self.rses)))\n\n rses_with_enough_quota = []\n if has_account_attribute(account=account, key='admin', session=session) or ignore_account_limit:\n for rse in self.rses:\n rse['quota_left'] = float('inf')\n rse['space_left'] = float('inf')\n rses_with_enough_quota.append(rse)\n else:\n global_quota_limit = get_global_account_limits(account=account, session=session)\n all_rse_usages = {usage['rse_id']: usage['bytes'] for usage in get_all_rse_usages_per_account(account=account, session=session)}\n for rse in self.rses:\n if rse['mock_rse']:\n rse['quota_left'] = float('inf')\n rse['space_left'] = float('inf')\n rses_with_enough_quota.append(rse)\n else:\n # check local quota\n local_quota_left = None\n quota_limit = get_local_account_limit(account=account, rse_id=rse['rse_id'], session=session)\n if quota_limit is None:\n local_quota_left = 0\n else:\n local_quota_left = quota_limit - get_usage(rse_id=rse['rse_id'], account=account, session=session)['bytes']\n # check global quota\n all_global_quota_enough = True\n for rse_expression, limit in global_quota_limit.items():\n if rse['rse_id'] in limit['resolved_rse_ids']:\n quota_limit = limit['limit']\n global_quota_left = None\n if quota_limit is None:\n global_quota_left = 0\n else:\n rse_expression_usage = 0\n for rse_id in limit['resolved_rse_ids']:\n rse_expression_usage += all_rse_usages.get(rse_id, 0)\n global_quota_left = quota_limit - rse_expression_usage\n if global_quota_left <= 0:\n all_global_quota_enough = False\n break\n if local_quota_left > 0 and all_global_quota_enough:\n rse['quota_left'] = local_quota_left\n space_limit = get_rse_limits(name='MaxSpaceAvailable', rse_id=rse['rse_id'], session=session).get('MaxSpaceAvailable')\n if space_limit is None or space_limit < 0:\n rse['space_left'] = float('inf')\n else:\n rse['space_left'] = space_limit - get_rse_counter(rse_id=rse['rse_id'], session=session)['bytes']\n rses_with_enough_quota.append(rse)\n\n self.rses = rses_with_enough_quota\n if len(self.rses) < self.copies:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n\n # don't consider removing rses based on the total space here - because files already on the RSE are taken into account\n # it is possible to have no space but still be able to fulfil the rule\n\n def select_rse(self, size, preferred_rse_ids, copies=0, blacklist=[], prioritize_order_over_weight=False, existing_rse_size=None):\n \"\"\"\n Select n RSEs to replicate data to.\n\n :param size: Size of the block being replicated.\n :param preferred_rse_ids: Ordered list of preferred rses. (If possible replicate to them)\n :param copies: Select this amount of copies, if 0 use the pre-defined rule value.\n :param blacklist: List of blacklisted rses. 
(Do not put replicas on these sites)\n :param prioritze_order_over_weight: Prioritize the order of the preferred_rse_ids list over the picking done by weight.\n :existing_rse_size: Dictionary of size of files already present at each rse\n :returns: List of (RSE_id, staging_area, availability_write) tuples.\n :raises: InsufficientAccountLimit, InsufficientTargetRSEs\n \"\"\"\n\n result = []\n rses = self.rses\n count = self.copies if copies == 0 else copies\n\n # Remove blacklisted rses\n if blacklist:\n rses = [rse for rse in self.rses if rse['rse_id'] not in blacklist]\n if len(rses) < count:\n raise InsufficientTargetRSEs('There are not enough target RSEs to fulfil the request at this time.')\n\n # Remove rses which do not have enough space, accounting for the files already at each rse\n if existing_rse_size is None:\n existing_rse_size = {}\n rses = [rse for rse in rses if rse['space_left'] >= size - existing_rse_size.get(rse['rse_id'], 0)]\n if len(rses) < count:\n raise RSEOverQuota('There is insufficient space on any of the target RSE\\'s to fullfill the operation.')\n\n # Remove rses which do not have enough quota\n rses = [rse for rse in rses if rse['quota_left'] > size]\n if len(rses) < count:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n\n for copy in range(count):\n # Remove rses already in the result set\n rses = [rse for rse in rses if rse['rse_id'] not in [item[0] for item in result]]\n rses_dict = {}\n for rse in rses:\n rses_dict[rse['rse_id']] = rse\n # Prioritize the preffered rses\n preferred_rses = [rses_dict[rse_id] for rse_id in preferred_rse_ids if rse_id in rses_dict]\n if prioritize_order_over_weight and preferred_rses:\n rse = (preferred_rses[0]['rse_id'], preferred_rses[0]['staging_area'], preferred_rses[0]['availability_write'])\n elif preferred_rses:\n rse = self.__choose_rse(preferred_rses)\n else:\n rse = self.__choose_rse(rses)\n result.append(rse)\n self.__update_quota(rse, size)\n return result\n\n def get_rse_dictionary(self):\n \"\"\"\n Return the current dictionary of potential RSEs stored in the RSE selector\n\n :returns: List of RSE dictionaries\n \"\"\"\n rse_dict = {}\n for rse in self.rses:\n rse_dict[rse['rse_id']] = rse\n return rse_dict\n\n def __update_quota(self, rse, size):\n \"\"\"\n Update the internal quota value.\n\n :param rse: RSE tuple to update.\n :param size: Size to substract.\n \"\"\"\n\n for element in self.rses:\n if element['rse_id'] == rse[0]:\n element['quota_left'] -= size\n return\n\n def __choose_rse(self, rses):\n \"\"\"\n Choose an RSE based on weighting.\n\n :param rses: The rses to be considered for the choose.\n :return: The (rse_id, staging_area) tuple of the chosen RSE.\n \"\"\"\n\n shuffle(rses)\n pick = uniform(0, sum([rse['weight'] for rse in rses]))\n weight = 0\n for rse in rses:\n weight += rse['weight']\n if pick <= weight:\n return (rse['rse_id'], rse['staging_area'], rse['availability_write'])\n", "path": "lib/rucio/core/rse_selector.py"}], "after_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Martin Barisits, <[email protected]>, 2013-2017\n# - Cedric Serfon, <[email protected]>, 2015-2019\n# - Robert Illingworth, <[email protected]>, 2019\n# - Andrew 
Lister, <[email protected]>, 2019\n# - Hannes Hansen, <[email protected]>, 2019\n#\n# PY3K COMPATIBLE\n\nfrom random import uniform, shuffle\n\nfrom rucio.common.exception import InsufficientAccountLimit, InsufficientTargetRSEs, InvalidRuleWeight, RSEOverQuota\nfrom rucio.core.account import has_account_attribute, get_usage, get_all_rse_usages_per_account\nfrom rucio.core.account_limit import get_local_account_limit, get_global_account_limits\nfrom rucio.core.rse import list_rse_attributes, has_rse_attribute, get_rse_limits\nfrom rucio.core.rse_counter import get_counter as get_rse_counter\nfrom rucio.db.sqla.session import read_session\n\n\nclass RSESelector():\n \"\"\"\n Representation of the RSE selector\n \"\"\"\n\n @read_session\n def __init__(self, account, rses, weight, copies, ignore_account_limit=False, session=None):\n \"\"\"\n Initialize the RSE Selector.\n\n :param account: Account owning the rule.\n :param rses: List of rse dictionaries.\n :param weight: Weighting to use.\n :param copies: Number of copies to create.\n :param ignore_account_limit: Flag if the quota should be ignored.\n :param session: DB Session in use.\n :raises: InvalidRuleWeight, InsufficientAccountLimit, InsufficientTargetRSEs\n \"\"\"\n self.account = account\n self.rses = [] # [{'rse_id':, 'weight':, 'staging_area'}]\n self.copies = copies\n if weight is not None:\n for rse in rses:\n attributes = list_rse_attributes(rse_id=rse['id'], session=session)\n availability_write = True if rse.get('availability', 7) & 2 else False\n if weight not in attributes:\n continue # The RSE does not have the required weight set, therefore it is ignored\n try:\n self.rses.append({'rse_id': rse['id'],\n 'weight': float(attributes[weight]),\n 'mock_rse': attributes.get('mock', False),\n 'availability_write': availability_write,\n 'staging_area': rse['staging_area']})\n except ValueError:\n raise InvalidRuleWeight('The RSE \\'%s\\' has a non-number specified for the weight \\'%s\\'' % (rse['rse'], weight))\n else:\n for rse in rses:\n mock_rse = has_rse_attribute(rse['id'], 'mock', session=session)\n availability_write = True if rse.get('availability', 7) & 2 else False\n self.rses.append({'rse_id': rse['id'],\n 'weight': 1,\n 'mock_rse': mock_rse,\n 'availability_write': availability_write,\n 'staging_area': rse['staging_area']})\n\n if len(self.rses) < self.copies:\n raise InsufficientTargetRSEs('Target RSE set not sufficient for number of copies. 
(%s copies requested, RSE set size %s)' % (self.copies, len(self.rses)))\n\n rses_with_enough_quota = []\n if has_account_attribute(account=account, key='admin', session=session) or ignore_account_limit:\n for rse in self.rses:\n rse['quota_left'] = float('inf')\n rse['space_left'] = float('inf')\n rses_with_enough_quota.append(rse)\n else:\n global_quota_limit = get_global_account_limits(account=account, session=session)\n all_rse_usages = {usage['rse_id']: usage['bytes'] for usage in get_all_rse_usages_per_account(account=account, session=session)}\n for rse in self.rses:\n if rse['mock_rse']:\n rse['quota_left'] = float('inf')\n rse['space_left'] = float('inf')\n rses_with_enough_quota.append(rse)\n else:\n # check local quota\n local_quota_left = None\n quota_limit = get_local_account_limit(account=account, rse_id=rse['rse_id'], session=session)\n if quota_limit is None:\n local_quota_left = 0\n else:\n local_quota_left = quota_limit - get_usage(rse_id=rse['rse_id'], account=account, session=session)['bytes']\n\n # check global quota\n rse['global_quota_left'] = {}\n all_global_quota_enough = True\n for rse_expression, limit in global_quota_limit.items():\n if rse['rse_id'] in limit['resolved_rse_ids']:\n quota_limit = limit['limit']\n global_quota_left = None\n if quota_limit is None:\n global_quota_left = 0\n else:\n rse_expression_usage = 0\n for rse_id in limit['resolved_rse_ids']:\n rse_expression_usage += all_rse_usages.get(rse_id, 0)\n global_quota_left = quota_limit - rse_expression_usage\n if global_quota_left <= 0:\n all_global_quota_enough = False\n break\n else:\n rse['global_quota_left'][rse_expression] = global_quota_left\n if local_quota_left > 0 and all_global_quota_enough:\n rse['quota_left'] = local_quota_left\n space_limit = get_rse_limits(name='MaxSpaceAvailable', rse_id=rse['rse_id'], session=session).get('MaxSpaceAvailable')\n if space_limit is None or space_limit < 0:\n rse['space_left'] = float('inf')\n else:\n rse['space_left'] = space_limit - get_rse_counter(rse_id=rse['rse_id'], session=session)['bytes']\n rses_with_enough_quota.append(rse)\n\n self.rses = rses_with_enough_quota\n if len(self.rses) < self.copies:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n\n # don't consider removing rses based on the total space here - because files already on the RSE are taken into account\n # it is possible to have no space but still be able to fulfil the rule\n\n def select_rse(self, size, preferred_rse_ids, copies=0, blacklist=[], prioritize_order_over_weight=False, existing_rse_size=None):\n \"\"\"\n Select n RSEs to replicate data to.\n\n :param size: Size of the block being replicated.\n :param preferred_rse_ids: Ordered list of preferred rses. (If possible replicate to them)\n :param copies: Select this amount of copies, if 0 use the pre-defined rule value.\n :param blacklist: List of blacklisted rses. 
(Do not put replicas on these sites)\n :param prioritze_order_over_weight: Prioritize the order of the preferred_rse_ids list over the picking done by weight.\n :existing_rse_size: Dictionary of size of files already present at each rse\n :returns: List of (RSE_id, staging_area, availability_write) tuples.\n :raises: InsufficientAccountLimit, InsufficientTargetRSEs\n \"\"\"\n\n result = []\n rses = self.rses\n count = self.copies if copies == 0 else copies\n\n # Remove blacklisted rses\n if blacklist:\n rses = [rse for rse in self.rses if rse['rse_id'] not in blacklist]\n if len(rses) < count:\n raise InsufficientTargetRSEs('There are not enough target RSEs to fulfil the request at this time.')\n\n # Remove rses which do not have enough space, accounting for the files already at each rse\n if existing_rse_size is None:\n existing_rse_size = {}\n rses = [rse for rse in rses if rse['space_left'] >= size - existing_rse_size.get(rse['rse_id'], 0)]\n if len(rses) < count:\n raise RSEOverQuota('There is insufficient space on any of the target RSE\\'s to fullfill the operation.')\n\n # Remove rses which do not have enough local quota\n rses = [rse for rse in rses if rse['quota_left'] > size]\n if len(rses) < count:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n\n # Remove rses which do not have enough global quota\n rses_with_enough_quota = []\n for rse in rses:\n enough_global_quota = True\n for rse_expression in rse.get('global_quota_left', []):\n if rse['global_quota_left'][rse_expression] < size:\n enough_global_quota = False\n break\n if enough_global_quota:\n rses_with_enough_quota.append(rse)\n rses = rses_with_enough_quota\n if len(rses) < count:\n raise InsufficientAccountLimit('There is insufficient quota on any of the target RSE\\'s to fullfill the operation.')\n\n for copy in range(count):\n # Remove rses already in the result set\n rses = [rse for rse in rses if rse['rse_id'] not in [item[0] for item in result]]\n rses_dict = {}\n for rse in rses:\n rses_dict[rse['rse_id']] = rse\n # Prioritize the preffered rses\n preferred_rses = [rses_dict[rse_id] for rse_id in preferred_rse_ids if rse_id in rses_dict]\n if prioritize_order_over_weight and preferred_rses:\n rse = (preferred_rses[0]['rse_id'], preferred_rses[0]['staging_area'], preferred_rses[0]['availability_write'])\n elif preferred_rses:\n rse = self.__choose_rse(preferred_rses)\n else:\n rse = self.__choose_rse(rses)\n result.append(rse)\n self.__update_quota(rse, size)\n return result\n\n def get_rse_dictionary(self):\n \"\"\"\n Return the current dictionary of potential RSEs stored in the RSE selector\n\n :returns: List of RSE dictionaries\n \"\"\"\n rse_dict = {}\n for rse in self.rses:\n rse_dict[rse['rse_id']] = rse\n return rse_dict\n\n def __update_quota(self, rse, size):\n \"\"\"\n Update the internal quota value.\n\n :param rse: RSE tuple to update.\n :param size: Size to substract.\n \"\"\"\n\n for element in self.rses:\n if element['rse_id'] == rse[0]:\n element['quota_left'] -= size\n for rse_expression in element.get('global_quota_left', []):\n element['global_quota_left'][rse_expression] -= size\n return\n\n def __choose_rse(self, rses):\n \"\"\"\n Choose an RSE based on weighting.\n\n :param rses: The rses to be considered for the choose.\n :return: The (rse_id, staging_area) tuple of the chosen RSE.\n \"\"\"\n\n shuffle(rses)\n pick = uniform(0, sum([rse['weight'] for rse in rses]))\n weight = 0\n for rse in rses:\n weight += 
rse['weight']\n if pick <= weight:\n return (rse['rse_id'], rse['staging_area'], rse['availability_write'])\n", "path": "lib/rucio/core/rse_selector.py"}]}
3,423
710
gh_patches_debug_20807
rasdani/github-patches
git_diff
google__flax-2540
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make RNG name configurable in Dropout Currently `'dropout'` is hard-coded as an argument of `make_rng` inside `Dropout`. However, when implementing support for "recurrent dropout" in an LSTMCell or similar you need two kinds of dropout: 1. A regular dropout which is applied to the inputs with a different mask at each step. 2. A "recurrent dropout" that is applied to the state with the same mask at each step. To implement `2` a possibility is to set the RNG name to `'recurrent_dropout'` on the Dropout layer applied to the state and guarantee that each step uses the same random state. From `nn.scan`'s perspective the would look like this: ```python nn.scan(..., split_rngs={'dropout': True, 'recurrent_dropout': False}) ``` The proposal is to add an `rng_name` (or similar) attribute to `Dropout` so we are able support these kind of use-cases. The alternative would be to create a separate `RecurrentDropout` layer with the same code but different hard-coded value. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flax/linen/stochastic.py` Content: ``` 1 # Copyright 2022 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Stochastic modules.""" 16 17 from typing import Optional, Sequence 18 19 from flax.linen.module import compact 20 from flax.linen.module import merge_param 21 from flax.linen.module import Module 22 from jax import lax 23 from jax import random 24 import jax.numpy as jnp 25 26 27 class Dropout(Module): 28 """Create a dropout layer. 29 30 Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure 31 to include an RNG seed named `'dropout'`. For example:: 32 33 model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})` 34 35 Attributes: 36 rate: the dropout probability. (_not_ the keep rate!) 37 broadcast_dims: dimensions that will share the same dropout mask 38 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and 39 masked, whereas if true, no mask is applied and the inputs are returned 40 as is. 41 """ 42 rate: float 43 broadcast_dims: Sequence[int] = () 44 deterministic: Optional[bool] = None 45 46 @compact 47 def __call__(self, inputs, deterministic: Optional[bool] = None): 48 """Applies a random dropout mask to the input. 49 50 Args: 51 inputs: the inputs that should be randomly masked. 52 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and 53 masked, whereas if true, no mask is applied and the inputs are returned 54 as is. 55 56 Returns: 57 The masked inputs reweighted to preserve mean. 58 """ 59 deterministic = merge_param( 60 'deterministic', self.deterministic, deterministic) 61 if self.rate == 0.: 62 return inputs 63 # Prevent gradient NaNs in 1.0 edge-case. 64 if self.rate == 1.0: 65 return jnp.zeros_like(inputs) 66 keep_prob = 1. 
- self.rate 67 if deterministic: 68 return inputs 69 else: 70 rng = self.make_rng('dropout') 71 broadcast_shape = list(inputs.shape) 72 for dim in self.broadcast_dims: 73 broadcast_shape[dim] = 1 74 mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape) 75 mask = jnp.broadcast_to(mask, inputs.shape) 76 return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs)) 77 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py --- a/flax/linen/stochastic.py +++ b/flax/linen/stochastic.py @@ -38,10 +38,12 @@ deterministic: if false the inputs are scaled by `1 / (1 - rate)` and masked, whereas if true, no mask is applied and the inputs are returned as is. + rng_collection: the rng collection name to use when requesting an rng key. """ rate: float broadcast_dims: Sequence[int] = () deterministic: Optional[bool] = None + rng_collection: str = 'dropout' @compact def __call__(self, inputs, deterministic: Optional[bool] = None): @@ -67,7 +69,7 @@ if deterministic: return inputs else: - rng = self.make_rng('dropout') + rng = self.make_rng(self.rng_collection) broadcast_shape = list(inputs.shape) for dim in self.broadcast_dims: broadcast_shape[dim] = 1
{"golden_diff": "diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py\n--- a/flax/linen/stochastic.py\n+++ b/flax/linen/stochastic.py\n@@ -38,10 +38,12 @@\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n+ rng_collection: the rng collection name to use when requesting an rng key.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n+ rng_collection: str = 'dropout'\n \n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n@@ -67,7 +69,7 @@\n if deterministic:\n return inputs\n else:\n- rng = self.make_rng('dropout')\n+ rng = self.make_rng(self.rng_collection)\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n", "issue": "Make RNG name configurable in Dropout\nCurrently `'dropout'` is hard-coded as an argument of `make_rng` inside `Dropout`. However, when implementing support for \"recurrent dropout\" in an LSTMCell or similar you need two kinds of dropout:\r\n1. A regular dropout which is applied to the inputs with a different mask at each step.\r\n2. A \"recurrent dropout\" that is applied to the state with the same mask at each step.\r\n\r\nTo implement `2` a possibility is to set the RNG name to `'recurrent_dropout'` on the Dropout layer applied to the state and guarantee that each step uses the same random state. From `nn.scan`'s perspective the would look like this:\r\n\r\n```python\r\nnn.scan(..., split_rngs={'dropout': True, 'recurrent_dropout': False})\r\n```\r\nThe proposal is to add an `rng_name` (or similar) attribute to `Dropout` so we are able support these kind of use-cases. The alternative would be to create a separate `RecurrentDropout` layer with the same code but different hard-coded value.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic modules.\"\"\"\n\nfrom typing import Optional, Sequence\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import merge_param\nfrom flax.linen.module import Module\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass Dropout(Module):\n \"\"\"Create a dropout layer.\n\n Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure\n to include an RNG seed named `'dropout'`. For example::\n\n model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`\n\n Attributes:\n rate: the dropout probability. 
(_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n\n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n \"\"\"Applies a random dropout mask to the input.\n\n Args:\n inputs: the inputs that should be randomly masked.\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n\n Returns:\n The masked inputs reweighted to preserve mean.\n \"\"\"\n deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic)\n if self.rate == 0.:\n return inputs\n # Prevent gradient NaNs in 1.0 edge-case.\n if self.rate == 1.0:\n return jnp.zeros_like(inputs)\n keep_prob = 1. - self.rate\n if deterministic:\n return inputs\n else:\n rng = self.make_rng('dropout')\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)\n mask = jnp.broadcast_to(mask, inputs.shape)\n return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))\n", "path": "flax/linen/stochastic.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic modules.\"\"\"\n\nfrom typing import Optional, Sequence\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import merge_param\nfrom flax.linen.module import Module\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass Dropout(Module):\n \"\"\"Create a dropout layer.\n\n Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure\n to include an RNG seed named `'dropout'`. For example::\n\n model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`\n\n Attributes:\n rate: the dropout probability. 
(_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n rng_collection: the rng collection name to use when requesting an rng key.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n rng_collection: str = 'dropout'\n\n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n \"\"\"Applies a random dropout mask to the input.\n\n Args:\n inputs: the inputs that should be randomly masked.\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n\n Returns:\n The masked inputs reweighted to preserve mean.\n \"\"\"\n deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic)\n if self.rate == 0.:\n return inputs\n # Prevent gradient NaNs in 1.0 edge-case.\n if self.rate == 1.0:\n return jnp.zeros_like(inputs)\n keep_prob = 1. - self.rate\n if deterministic:\n return inputs\n else:\n rng = self.make_rng(self.rng_collection)\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)\n mask = jnp.broadcast_to(mask, inputs.shape)\n return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))\n", "path": "flax/linen/stochastic.py"}]}
1,281
245
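The flax diff in the row above makes the Dropout RNG stream name configurable through an `rng_collection` attribute. A minimal sketch of how calling code could use two differently named streams, assuming a Flax version that includes that change; the module name, RNG names, and shapes here are illustrative and not part of the dataset row:

```python
import jax
import jax.numpy as jnp
import flax.linen as nn


class TwoDropouts(nn.Module):
    rate: float = 0.5

    @nn.compact
    def __call__(self, x, deterministic: bool = False):
        # Input dropout: draws from the default 'dropout' RNG stream.
        x = nn.Dropout(rate=self.rate)(x, deterministic=deterministic)
        # "Recurrent" dropout: draws from a separately named stream, so a
        # wrapping nn.scan could keep it fixed across steps via
        # split_rngs={'dropout': True, 'recurrent_dropout': False}.
        x = nn.Dropout(rate=self.rate, rng_collection='recurrent_dropout')(
            x, deterministic=deterministic)
        return x


x = jnp.ones((2, 4))
model = TwoDropouts()
variables = model.init(jax.random.PRNGKey(0), x, deterministic=True)
y = model.apply(
    variables, x,
    rngs={'dropout': jax.random.PRNGKey(1),
          'recurrent_dropout': jax.random.PRNGKey(2)})
```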
gh_patches_debug_19569
rasdani/github-patches
git_diff
mozilla__telemetry-analysis-service-989
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sort EMR versions naturally The EMR versions are currently not sorted naturally but by alphabet, which breaks for example with EMR 5.11.0. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `atmo/clusters/queries.py` Content: ``` 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 from django.db import models 5 6 7 class EMRReleaseQuerySet(models.QuerySet): 8 """ 9 A Django queryset for the :class:`~atmo.clusters.models.EMRRelease` model. 10 """ 11 12 def active(self): 13 return self.filter( 14 is_active=True, 15 ) 16 17 def stable(self): 18 """ 19 The EMR releases that are considered stable. 20 """ 21 return self.filter( 22 is_experimental=False, 23 is_deprecated=False, 24 is_active=True, 25 ) 26 27 def experimental(self): 28 """ 29 The EMR releases that are considered experimental. 30 """ 31 return self.filter( 32 is_experimental=True, 33 is_active=True, 34 ) 35 36 def deprecated(self): 37 """ 38 The EMR releases that are deprecated. 39 """ 40 return self.filter( 41 is_deprecated=True, 42 is_active=True, 43 ) 44 45 46 class ClusterQuerySet(models.QuerySet): 47 """A Django queryset that filters by cluster status. 48 49 Used by the :class:`~atmo.clusters.models.Cluster` model. 50 """ 51 52 def active(self): 53 """ 54 The clusters that have an active status. 55 """ 56 return self.filter( 57 most_recent_status__in=self.model.ACTIVE_STATUS_LIST, 58 ) 59 60 def terminated(self): 61 """ 62 The clusters that have an terminated status. 63 """ 64 return self.filter( 65 most_recent_status__in=self.model.TERMINATED_STATUS_LIST, 66 ) 67 68 def failed(self): 69 """ 70 The clusters that have an failed status. 71 """ 72 return self.filter( 73 most_recent_status__in=self.model.FAILED_STATUS_LIST, 74 ) 75 ``` Path: `atmo/clusters/forms.py` Content: ``` 1 # This Source Code Form is subject to the terms of the Mozilla Public 2 # License, v. 2.0. If a copy of the MPL was not distributed with this 3 # file, you can obtain one at http://mozilla.org/MPL/2.0/. 4 from django import forms 5 from django.conf import settings 6 from django.core.urlresolvers import reverse 7 from django.utils.safestring import mark_safe 8 9 from . import models 10 from ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin 11 from ..keys.models import SSHKey 12 13 14 class EMRReleaseChoiceField(forms.ModelChoiceField): 15 """ 16 A :class:`~django.forms.ModelChoiceField` subclass that uses 17 :class:`~atmo.clusters.models.EMRRelease` objects for the choices 18 and automatically uses a "radioset" rendering -- a horizontal button 19 group for easier selection. 20 """ 21 def __init__(self, *args, **kwargs): 22 super().__init__( 23 label='EMR release', 24 queryset=models.EMRRelease.objects.active(), 25 required=True, 26 empty_label=None, 27 widget=forms.RadioSelect(attrs={ 28 'required': 'required', 29 'class': 'radioset', 30 }), 31 help_text=models.Cluster.EMR_RELEASE_HELP, 32 ) 33 34 def label_from_instance(self, obj): 35 """ 36 Append the status of the EMR release if it's 37 experimental or deprecated. 
38 """ 39 label = obj.version 40 extra = [] 41 if obj.is_experimental: 42 extra.append('<span class="label label-info">experimental</span>') 43 elif obj.is_deprecated: 44 extra.append('<span class="label label-warning">deprecated</span>') 45 if extra: 46 label = mark_safe('%s %s' % (label, ''.join(extra))) 47 return label 48 49 50 class NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin, 51 forms.ModelForm): 52 """ 53 A form used for creating new clusters. 54 """ 55 prefix = 'new' 56 57 identifier = forms.RegexField( 58 required=True, 59 label='Identifier', 60 regex=r'^[a-z0-9-]{1,100}$', 61 widget=forms.TextInput(attrs={ 62 'pattern': r'[a-z0-9-]{1,100}', 63 'data-parsley-pattern-message': 'Identifier contains invalid characters.', 64 }), 65 help_text='A unique identifier for your cluster, visible in ' 66 'the AWS management console. (Lowercase, use hyphens ' 67 'instead of spaces.)' 68 ) 69 size = forms.IntegerField( 70 label='Size', 71 required=True, 72 min_value=1, 73 max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'], 74 widget=forms.NumberInput(attrs={ 75 'min': '1', 76 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']), 77 }), 78 help_text=('Number of workers to use in the cluster, between 1 and %s. ' 79 'For testing or development 1 is recommended.' % 80 settings.AWS_CONFIG['MAX_CLUSTER_SIZE']) 81 ) 82 lifetime = forms.IntegerField( 83 label='Lifetime', 84 required=True, 85 min_value=2, 86 max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'], 87 widget=forms.NumberInput(attrs={ 88 'min': '2', 89 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']), 90 }), 91 help_text=('Lifetime in hours after which the cluster is automatically ' 92 'terminated, between 2 and %s.' % 93 settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']) 94 ) 95 ssh_key = forms.ModelChoiceField( 96 label='SSH key', 97 queryset=SSHKey.objects.all(), 98 required=True, 99 empty_label=None, 100 ) 101 emr_release = EMRReleaseChoiceField() 102 103 class Meta: 104 model = models.Cluster 105 fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release'] 106 107 def __init__(self, *args, **kwargs): 108 super().__init__(*args, **kwargs) 109 user_sshkeys = self.created_by.created_sshkeys.all() 110 self.fields['ssh_key'].queryset = user_sshkeys.all() 111 self.fields['ssh_key'].help_text = ( 112 'The SSH key to deploy to the cluster. ' 113 'See <a href="%s">your keys</a> or ' 114 '<a href="%s">add a new one</a>.' % 115 (reverse('keys-list'), reverse('keys-new')) 116 ) 117 # if there are fewer options we just show radio select buttons 118 if user_sshkeys.count() <= 6: 119 self.fields['ssh_key'].widget = forms.RadioSelect( 120 choices=self.fields['ssh_key'].choices, 121 attrs={ 122 'class': 'radioset', 123 }, 124 ) 125 126 127 class ExtendClusterForm(AutoClassFormMixin, forms.Form): 128 prefix = 'extend' 129 extension = forms.IntegerField( 130 label='Lifetime extension in hours', 131 required=True, 132 min_value=2, 133 max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'], 134 widget=forms.NumberInput(attrs={ 135 'min': '2', 136 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']), 137 }), 138 help_text=("Number of hours to extend the cluster's lifetime with, between 2 and %s." % 139 settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']) 140 ) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/atmo/clusters/forms.py b/atmo/clusters/forms.py --- a/atmo/clusters/forms.py +++ b/atmo/clusters/forms.py @@ -21,7 +21,7 @@ def __init__(self, *args, **kwargs): super().__init__( label='EMR release', - queryset=models.EMRRelease.objects.active(), + queryset=models.EMRRelease.objects.active().natural_sort_by_version(), required=True, empty_label=None, widget=forms.RadioSelect(attrs={ diff --git a/atmo/clusters/queries.py b/atmo/clusters/queries.py --- a/atmo/clusters/queries.py +++ b/atmo/clusters/queries.py @@ -8,6 +8,15 @@ """ A Django queryset for the :class:`~atmo.clusters.models.EMRRelease` model. """ + def natural_sort_by_version(self): + """ + Sorts this queryset by the EMR version naturally (human-readable). + """ + return self.extra( + select={ + 'natural_version': "string_to_array(version, '.')::int[]", + }, + ).order_by('-natural_version') def active(self): return self.filter(
{"golden_diff": "diff --git a/atmo/clusters/forms.py b/atmo/clusters/forms.py\n--- a/atmo/clusters/forms.py\n+++ b/atmo/clusters/forms.py\n@@ -21,7 +21,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n- queryset=models.EMRRelease.objects.active(),\n+ queryset=models.EMRRelease.objects.active().natural_sort_by_version(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\ndiff --git a/atmo/clusters/queries.py b/atmo/clusters/queries.py\n--- a/atmo/clusters/queries.py\n+++ b/atmo/clusters/queries.py\n@@ -8,6 +8,15 @@\n \"\"\"\n A Django queryset for the :class:`~atmo.clusters.models.EMRRelease` model.\n \"\"\"\n+ def natural_sort_by_version(self):\n+ \"\"\"\n+ Sorts this queryset by the EMR version naturally (human-readable).\n+ \"\"\"\n+ return self.extra(\n+ select={\n+ 'natural_version': \"string_to_array(version, '.')::int[]\",\n+ },\n+ ).order_by('-natural_version')\n \n def active(self):\n return self.filter(\n", "issue": "Sort EMR versions naturally\nThe EMR versions are currently not sorted naturally but by alphabet, which breaks for example with EMR 5.11.0.\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.db import models\n\n\nclass EMRReleaseQuerySet(models.QuerySet):\n \"\"\"\n A Django queryset for the :class:`~atmo.clusters.models.EMRRelease` model.\n \"\"\"\n\n def active(self):\n return self.filter(\n is_active=True,\n )\n\n def stable(self):\n \"\"\"\n The EMR releases that are considered stable.\n \"\"\"\n return self.filter(\n is_experimental=False,\n is_deprecated=False,\n is_active=True,\n )\n\n def experimental(self):\n \"\"\"\n The EMR releases that are considered experimental.\n \"\"\"\n return self.filter(\n is_experimental=True,\n is_active=True,\n )\n\n def deprecated(self):\n \"\"\"\n The EMR releases that are deprecated.\n \"\"\"\n return self.filter(\n is_deprecated=True,\n is_active=True,\n )\n\n\nclass ClusterQuerySet(models.QuerySet):\n \"\"\"A Django queryset that filters by cluster status.\n\n Used by the :class:`~atmo.clusters.models.Cluster` model.\n \"\"\"\n\n def active(self):\n \"\"\"\n The clusters that have an active status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.ACTIVE_STATUS_LIST,\n )\n\n def terminated(self):\n \"\"\"\n The clusters that have an terminated status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.TERMINATED_STATUS_LIST,\n )\n\n def failed(self):\n \"\"\"\n The clusters that have an failed status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.FAILED_STATUS_LIST,\n )\n", "path": "atmo/clusters/queries.py"}, {"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom . 
import models\nfrom ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin\nfrom ..keys.models import SSHKey\n\n\nclass EMRReleaseChoiceField(forms.ModelChoiceField):\n \"\"\"\n A :class:`~django.forms.ModelChoiceField` subclass that uses\n :class:`~atmo.clusters.models.EMRRelease` objects for the choices\n and automatically uses a \"radioset\" rendering -- a horizontal button\n group for easier selection.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n queryset=models.EMRRelease.objects.active(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\n 'required': 'required',\n 'class': 'radioset',\n }),\n help_text=models.Cluster.EMR_RELEASE_HELP,\n )\n\n def label_from_instance(self, obj):\n \"\"\"\n Append the status of the EMR release if it's\n experimental or deprecated.\n \"\"\"\n label = obj.version\n extra = []\n if obj.is_experimental:\n extra.append('<span class=\"label label-info\">experimental</span>')\n elif obj.is_deprecated:\n extra.append('<span class=\"label label-warning\">deprecated</span>')\n if extra:\n label = mark_safe('%s %s' % (label, ''.join(extra)))\n return label\n\n\nclass NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin,\n forms.ModelForm):\n \"\"\"\n A form used for creating new clusters.\n \"\"\"\n prefix = 'new'\n\n identifier = forms.RegexField(\n required=True,\n label='Identifier',\n regex=r'^[a-z0-9-]{1,100}$',\n widget=forms.TextInput(attrs={\n 'pattern': r'[a-z0-9-]{1,100}',\n 'data-parsley-pattern-message': 'Identifier contains invalid characters.',\n }),\n help_text='A unique identifier for your cluster, visible in '\n 'the AWS management console. (Lowercase, use hyphens '\n 'instead of spaces.)'\n )\n size = forms.IntegerField(\n label='Size',\n required=True,\n min_value=1,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),\n }),\n help_text=('Number of workers to use in the cluster, between 1 and %s. '\n 'For testing or development 1 is recommended.' %\n settings.AWS_CONFIG['MAX_CLUSTER_SIZE'])\n )\n lifetime = forms.IntegerField(\n label='Lifetime',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=('Lifetime in hours after which the cluster is automatically '\n 'terminated, between 2 and %s.' %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n ssh_key = forms.ModelChoiceField(\n label='SSH key',\n queryset=SSHKey.objects.all(),\n required=True,\n empty_label=None,\n )\n emr_release = EMRReleaseChoiceField()\n\n class Meta:\n model = models.Cluster\n fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n user_sshkeys = self.created_by.created_sshkeys.all()\n self.fields['ssh_key'].queryset = user_sshkeys.all()\n self.fields['ssh_key'].help_text = (\n 'The SSH key to deploy to the cluster. '\n 'See <a href=\"%s\">your keys</a> or '\n '<a href=\"%s\">add a new one</a>.' 
%\n (reverse('keys-list'), reverse('keys-new'))\n )\n # if there are fewer options we just show radio select buttons\n if user_sshkeys.count() <= 6:\n self.fields['ssh_key'].widget = forms.RadioSelect(\n choices=self.fields['ssh_key'].choices,\n attrs={\n 'class': 'radioset',\n },\n )\n\n\nclass ExtendClusterForm(AutoClassFormMixin, forms.Form):\n prefix = 'extend'\n extension = forms.IntegerField(\n label='Lifetime extension in hours',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=(\"Number of hours to extend the cluster's lifetime with, between 2 and %s.\" %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n", "path": "atmo/clusters/forms.py"}], "after_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django.db import models\n\n\nclass EMRReleaseQuerySet(models.QuerySet):\n \"\"\"\n A Django queryset for the :class:`~atmo.clusters.models.EMRRelease` model.\n \"\"\"\n def natural_sort_by_version(self):\n \"\"\"\n Sorts this queryset by the EMR version naturally (human-readable).\n \"\"\"\n return self.extra(\n select={\n 'natural_version': \"string_to_array(version, '.')::int[]\",\n },\n ).order_by('-natural_version')\n\n def active(self):\n return self.filter(\n is_active=True,\n )\n\n def stable(self):\n \"\"\"\n The EMR releases that are considered stable.\n \"\"\"\n return self.filter(\n is_experimental=False,\n is_deprecated=False,\n is_active=True,\n )\n\n def experimental(self):\n \"\"\"\n The EMR releases that are considered experimental.\n \"\"\"\n return self.filter(\n is_experimental=True,\n is_active=True,\n )\n\n def deprecated(self):\n \"\"\"\n The EMR releases that are deprecated.\n \"\"\"\n return self.filter(\n is_deprecated=True,\n is_active=True,\n )\n\n\nclass ClusterQuerySet(models.QuerySet):\n \"\"\"A Django queryset that filters by cluster status.\n\n Used by the :class:`~atmo.clusters.models.Cluster` model.\n \"\"\"\n\n def active(self):\n \"\"\"\n The clusters that have an active status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.ACTIVE_STATUS_LIST,\n )\n\n def terminated(self):\n \"\"\"\n The clusters that have an terminated status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.TERMINATED_STATUS_LIST,\n )\n\n def failed(self):\n \"\"\"\n The clusters that have an failed status.\n \"\"\"\n return self.filter(\n most_recent_status__in=self.model.FAILED_STATUS_LIST,\n )\n", "path": "atmo/clusters/queries.py"}, {"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.utils.safestring import mark_safe\n\nfrom . 
import models\nfrom ..forms.mixins import AutoClassFormMixin, CreatedByModelFormMixin\nfrom ..keys.models import SSHKey\n\n\nclass EMRReleaseChoiceField(forms.ModelChoiceField):\n \"\"\"\n A :class:`~django.forms.ModelChoiceField` subclass that uses\n :class:`~atmo.clusters.models.EMRRelease` objects for the choices\n and automatically uses a \"radioset\" rendering -- a horizontal button\n group for easier selection.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(\n label='EMR release',\n queryset=models.EMRRelease.objects.active().natural_sort_by_version(),\n required=True,\n empty_label=None,\n widget=forms.RadioSelect(attrs={\n 'required': 'required',\n 'class': 'radioset',\n }),\n help_text=models.Cluster.EMR_RELEASE_HELP,\n )\n\n def label_from_instance(self, obj):\n \"\"\"\n Append the status of the EMR release if it's\n experimental or deprecated.\n \"\"\"\n label = obj.version\n extra = []\n if obj.is_experimental:\n extra.append('<span class=\"label label-info\">experimental</span>')\n elif obj.is_deprecated:\n extra.append('<span class=\"label label-warning\">deprecated</span>')\n if extra:\n label = mark_safe('%s %s' % (label, ''.join(extra)))\n return label\n\n\nclass NewClusterForm(AutoClassFormMixin, CreatedByModelFormMixin,\n forms.ModelForm):\n \"\"\"\n A form used for creating new clusters.\n \"\"\"\n prefix = 'new'\n\n identifier = forms.RegexField(\n required=True,\n label='Identifier',\n regex=r'^[a-z0-9-]{1,100}$',\n widget=forms.TextInput(attrs={\n 'pattern': r'[a-z0-9-]{1,100}',\n 'data-parsley-pattern-message': 'Identifier contains invalid characters.',\n }),\n help_text='A unique identifier for your cluster, visible in '\n 'the AWS management console. (Lowercase, use hyphens '\n 'instead of spaces.)'\n )\n size = forms.IntegerField(\n label='Size',\n required=True,\n min_value=1,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_SIZE'],\n widget=forms.NumberInput(attrs={\n 'min': '1',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_SIZE']),\n }),\n help_text=('Number of workers to use in the cluster, between 1 and %s. '\n 'For testing or development 1 is recommended.' %\n settings.AWS_CONFIG['MAX_CLUSTER_SIZE'])\n )\n lifetime = forms.IntegerField(\n label='Lifetime',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=('Lifetime in hours after which the cluster is automatically '\n 'terminated, between 2 and %s.' %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n ssh_key = forms.ModelChoiceField(\n label='SSH key',\n queryset=SSHKey.objects.all(),\n required=True,\n empty_label=None,\n )\n emr_release = EMRReleaseChoiceField()\n\n class Meta:\n model = models.Cluster\n fields = ['identifier', 'size', 'lifetime', 'ssh_key', 'emr_release']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n user_sshkeys = self.created_by.created_sshkeys.all()\n self.fields['ssh_key'].queryset = user_sshkeys.all()\n self.fields['ssh_key'].help_text = (\n 'The SSH key to deploy to the cluster. '\n 'See <a href=\"%s\">your keys</a> or '\n '<a href=\"%s\">add a new one</a>.' 
%\n (reverse('keys-list'), reverse('keys-new'))\n )\n # if there are fewer options we just show radio select buttons\n if user_sshkeys.count() <= 6:\n self.fields['ssh_key'].widget = forms.RadioSelect(\n choices=self.fields['ssh_key'].choices,\n attrs={\n 'class': 'radioset',\n },\n )\n\n\nclass ExtendClusterForm(AutoClassFormMixin, forms.Form):\n prefix = 'extend'\n extension = forms.IntegerField(\n label='Lifetime extension in hours',\n required=True,\n min_value=2,\n max_value=settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'],\n widget=forms.NumberInput(attrs={\n 'min': '2',\n 'max': str(settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME']),\n }),\n help_text=(\"Number of hours to extend the cluster's lifetime with, between 2 and %s.\" %\n settings.AWS_CONFIG['MAX_CLUSTER_LIFETIME'])\n )\n", "path": "atmo/clusters/forms.py"}]}
2,303
274
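The telemetry-analysis-service diff in the row above sorts EMR versions naturally inside PostgreSQL with `string_to_array(version, '.')::int[]`. The same idea in plain Python, as a quick illustration of why `5.11.0` must compare numerically per component rather than alphabetically; this is a sketch, not part of the patch:

```python
def natural_version_key(version: str) -> tuple:
    # '5.11.0' -> (5, 11, 0), so comparison is numeric per component.
    return tuple(int(part) for part in version.split('.'))


versions = ['5.2.1', '5.11.0', '5.9.0', '4.9.6']

# Alphabetical sort puts '5.9.0' and '5.2.1' ahead of '5.11.0'.
print(sorted(versions, reverse=True))
# Natural sort: ['5.11.0', '5.9.0', '5.2.1', '4.9.6']
print(sorted(versions, key=natural_version_key, reverse=True))
```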
gh_patches_debug_2855
rasdani/github-patches
git_diff
xonsh__xonsh-89
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- setup.py install fails if working directory not location of setup.py C:\Python34>.\python.exe \temp\xonsh-0.1.2\setup.py install ... Traceback (most recent call last): File "\temp\xonsh-0.1.2\setup.py", line 114, in <module> main() File "\temp\xonsh-0.1.2\setup.py", line 50, in main with open('README.rst', 'r') as f: FileNotFoundError: [Errno 2] No such file or directory: 'README.rst' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # coding=utf-8 3 """The xonsh installer.""" 4 from __future__ import print_function, unicode_literals 5 import os 6 import sys 7 try: 8 from setuptools import setup 9 from setuptools.command.sdist import sdist 10 from setuptools.command.install import install 11 HAVE_SETUPTOOLS = True 12 except ImportError: 13 from distutils.core import setup 14 from distutils.command.sdist import sdist as sdist 15 from distutils.command.install import install as install 16 HAVE_SETUPTOOLS = False 17 18 VERSION = '0.1.2' 19 20 TABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py'] 21 22 def clean_tables(): 23 for f in TABLES: 24 if os.path.isfile(f): 25 os.remove(f) 26 print('Remove ' + f) 27 28 def build_tables(): 29 print('Building lexer and parser tables.') 30 sys.path.insert(0, os.path.dirname(__file__)) 31 from xonsh.parser import Parser 32 Parser(lexer_table='lexer_table', yacc_table='parser_table', 33 outputdir='xonsh') 34 sys.path.pop(0) 35 36 class xinstall(install): 37 def run(self): 38 clean_tables() 39 build_tables() 40 install.run(self) 41 42 class xsdist(sdist): 43 def make_release_tree(self, basedir, files): 44 clean_tables() 45 build_tables() 46 sdist.make_release_tree(self, basedir, files) 47 48 def main(): 49 if sys.version_info[0] < 3: 50 sys.exit('xonsh currently requires Python 3.4+') 51 print(logo) 52 with open('README.rst', 'r') as f: 53 readme = f.read() 54 skw = dict( 55 name='xonsh', 56 description='an exotic, usable shell', 57 long_description=readme, 58 license='BSD', 59 version=VERSION, 60 author='Anthony Scopatz', 61 maintainer='Anthony Scopatz', 62 author_email='[email protected]', 63 url='https://github.com/scopatz/xonsh', 64 platforms='Cross Platform', 65 classifiers = ['Programming Language :: Python :: 3'], 66 packages=['xonsh'], 67 scripts=['scripts/xonsh'], 68 cmdclass={'install': xinstall, 'sdist': xsdist}, 69 ) 70 if HAVE_SETUPTOOLS: 71 skw['setup_requires'] = ['ply'] 72 skw['install_requires'] = ['ply'] 73 setup(**skw) 74 75 logo = """ 76 ╓██▄ 77 ╙██▀██╕ 78 ▐██4Φ█▀█▌ 79 ²██▄███▀██^██ 80 -███╩▀ " ╒▄█████▀█ 81 ║██▀▀W╤▄▀ ▐║█╘ ╝█ 82 ▄m▀%Φ▀▀ ╝*" ,α█████▓▄,▄▀Γ"▀╕ 83 "▀██¼" ▄═╦█╟║█▀ ╓ `^` ,▄ ╢╕ 84 ,▀╫M█▐j╓╟▀ ╔▓▄█▀ '║ ╔ ╣║▌ ▀▄ 85 ▄m▀▀███╬█╝▀ █▀^ "ÜM j▐╟╫╨▒ ╙▀≡═╤═m▀╗ 86 █æsæ╓ ╕, ,▄Ä ▐'╕H LU ║║╠╫Å^2=⌐ █ 87 ▄æ%Å███╠█ª╙▄█▀ $1╙ ║║╟╫╩*T▄ ▌ 88 ╙╗%▄,╦██▌█▌█╢M ╕ M║║║║█═⌐ⁿ"^ ╫ 89 ╙╣▀████@█░█ ▌╕╕ ` ▌║▐▐║█D═≈⌐¬ⁿ s ║⌐ 90 ╙╬███▓║█` ▌╚ ╕ ╕▌║▐▐╣▌⌐*▒▒Dù` ▐▌ 91 ╙╬██╨U█ ╟ $ ▌ ▌▌▐▐▐M█▄═≤⌐% ╓⌐ ▌ 92 ║║█▄▌║ ╟ ▌ ▌M▐▐▐M█▀▒▒▒22, ▐▌ 93 ███╙^▌ ║ ▌ ⌐M▐▐▐M█≤⌐⌐¬── ▐M 94 ║██ ▌╙ ╓ H║ ▌╒ M║▐▐M█"^^^^^"ⁿ ║ 95 ██╕╙@▓ ╕ ▌║ H' ║▐▐▐█══=.,,, █ 96 ╙█▓╔╚╚█ ╠ ▌└╒ ▌▐ ╚║║║▀****ⁿ - ╓▌ 97 ╙█▌¼V╚▌ ▌ ╕ ▌ ║╒ ║ ▌▒╠█▀≤≤≤≤≤⌐ █ 98 ╙█▌╔█╚▌ ┘ M ▌║ ╫ UUM██J^^" ▐▌ 99 ╙██╙█╙▌ ╕$j ▐⌐▌ ▌║╝╟█Å%%%≈═ █ 100 ╙╣█╣█^▌ ╠║▐ ║ ▌▐.DU██^[""ⁿ -╒▌ 101 ▀█▄█`▌ ░M▀ ▌▐ Å£╝╝█╜%≈═╓""w ⁿ⌐ █ 102 `▀▄▀`▌ ▌█▐⌐║▐UW╖██%≤═░*─ =z ▄Γ 103 ╙██╙▄▌█ ▌Å╛╣██╨%╤ƒⁿ= -` ▄┘ 104 █▌╢▓▌▌ W 
£6█╤,"ⁿ ` ▄≡▀▀▀ 105 █"█▌▌╟Å╓█╓█▀%` ▄▀ 106 ╙▌██`▒U▓U█%╗* █ 107 ▌╫║ ▌ÅÅ║▀╛¬` `"█ 108 ▌╫ ╫╟ █▄ ~╦%▒╥4^ 109 ▌▌ "M█ `▀╕ X╕"╗▄▀^ 110 █▌ ╓M ╙▀e▀▀^ 111 ╙██▄▄▀ 112 ^^ 113 """ 114 115 if __name__ == '__main__': 116 main() 117 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ if sys.version_info[0] < 3: sys.exit('xonsh currently requires Python 3.4+') print(logo) - with open('README.rst', 'r') as f: + with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f: readme = f.read() skw = dict( name='xonsh',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,7 +49,7 @@\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n print(logo)\n- with open('README.rst', 'r') as f:\n+ with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n", "issue": "setup.py install fails if working directory not location of setup.py\nC:\\Python34>.\\python.exe \\temp\\xonsh-0.1.2\\setup.py install\n...\nTraceback (most recent call last):\n File \"\\temp\\xonsh-0.1.2\\setup.py\", line 114, in <module>\n main()\n File \"\\temp\\xonsh-0.1.2\\setup.py\", line 50, in main\n with open('README.rst', 'r') as f:\nFileNotFoundError: [Errno 2] No such file or directory: 'README.rst'\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"The xonsh installer.\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n HAVE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n HAVE_SETUPTOOLS = False\n\nVERSION = '0.1.2'\n\nTABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']\n\ndef clean_tables():\n for f in TABLES:\n if os.path.isfile(f):\n os.remove(f)\n print('Remove ' + f)\n\ndef build_tables():\n print('Building lexer and parser tables.')\n sys.path.insert(0, os.path.dirname(__file__))\n from xonsh.parser import Parser\n Parser(lexer_table='lexer_table', yacc_table='parser_table',\n outputdir='xonsh')\n sys.path.pop(0)\n\nclass xinstall(install):\n def run(self):\n clean_tables()\n build_tables()\n install.run(self)\n\nclass xsdist(sdist):\n def make_release_tree(self, basedir, files):\n clean_tables()\n build_tables()\n sdist.make_release_tree(self, basedir, files)\n\ndef main():\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n print(logo)\n with open('README.rst', 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n description='an exotic, usable shell',\n long_description=readme,\n license='BSD',\n version=VERSION,\n author='Anthony Scopatz',\n maintainer='Anthony Scopatz',\n author_email='[email protected]',\n url='https://github.com/scopatz/xonsh',\n platforms='Cross Platform',\n classifiers = ['Programming Language :: Python :: 3'],\n packages=['xonsh'],\n scripts=['scripts/xonsh'],\n cmdclass={'install': xinstall, 'sdist': xsdist},\n )\n if HAVE_SETUPTOOLS:\n skw['setup_requires'] = ['ply']\n skw['install_requires'] = ['ply']\n setup(**skw)\n\nlogo = \"\"\"\n \u2553\u2588\u2588\u2584 \n \u2559\u2588\u2588\u2580\u2588\u2588\u2555 \n \u2590\u2588\u25884\u03a6\u2588\u2580\u2588\u258c \n \u00b2\u2588\u2588\u2584\u2588\u2588\u2588\u2580\u2588\u2588^\u2588\u2588 \n -\u2588\u2588\u2588\u2569\u2580 \" \u2552\u2584\u2588\u2588\u2588\u2588\u2588\u2580\u2588 \n \u2551\u2588\u2588\u2580\u2580W\u2564\u2584\u2580 \u2590\u2551\u2588\u2558 \u255d\u2588 \n \u2584m\u2580%\u03a6\u2580\u2580 \u255d*\" ,\u03b1\u2588\u2588\u2588\u2588\u2588\u2593\u2584,\u2584\u2580\u0393\"\u2580\u2555 \n \"\u2580\u2588\u2588\u00bc\" \u2584\u2550\u2566\u2588\u255f\u2551\u2588\u2580 \u2553 `^` ,\u2584 \u2562\u2555 \n ,\u2580\u256bM\u2588\u2590j\u2553\u255f\u2580 \u2554\u2593\u2584\u2588\u2580 '\u2551 \u2554 \u2563\u2551\u258c \u2580\u2584 \n 
\u2584m\u2580\u2580\u2588\u2588\u2588\u256c\u2588\u255d\u2580 \u2588\u2580^ \"\u00dcM j\u2590\u255f\u256b\u2568\u2592 \u2559\u2580\u2261\u2550\u2564\u2550m\u2580\u2557 \n \u2588\u00e6s\u00e6\u2553 \u2555, ,\u2584\u00c4 \u2590'\u2555H LU \u2551\u2551\u2560\u256b\u00c5^2=\u2310 \u2588 \n \u2584\u00e6%\u00c5\u2588\u2588\u2588\u2560\u2588\u00aa\u2559\u2584\u2588\u2580 $1\u2559 \u2551\u2551\u255f\u256b\u2569*T\u2584 \u258c \n \u2559\u2557%\u2584,\u2566\u2588\u2588\u258c\u2588\u258c\u2588\u2562M \u2555 M\u2551\u2551\u2551\u2551\u2588\u2550\u2310\u207f\"^ \u256b \n \u2559\u2563\u2580\u2588\u2588\u2588\u2588@\u2588\u2591\u2588 \u258c\u2555\u2555 ` \u258c\u2551\u2590\u2590\u2551\u2588D\u2550\u2248\u2310\u00ac\u207f s \u2551\u2310 \n \u2559\u256c\u2588\u2588\u2588\u2593\u2551\u2588` \u258c\u255a \u2555 \u2555\u258c\u2551\u2590\u2590\u2563\u258c\u2310*\u2592\u2592D\u00f9` \u2590\u258c \n \u2559\u256c\u2588\u2588\u2568U\u2588 \u255f $ \u258c \u258c\u258c\u2590\u2590\u2590M\u2588\u2584\u2550\u2264\u2310% \u2553\u2310 \u258c \n \u2551\u2551\u2588\u2584\u258c\u2551 \u255f \u258c \u258cM\u2590\u2590\u2590M\u2588\u2580\u2592\u2592\u259222, \u2590\u258c \n \u2588\u2588\u2588\u2559^\u258c \u2551 \u258c \u2310M\u2590\u2590\u2590M\u2588\u2264\u2310\u2310\u00ac\u2500\u2500 \u2590M \n \u2551\u2588\u2588 \u258c\u2559 \u2553 H\u2551 \u258c\u2552 M\u2551\u2590\u2590M\u2588\"^^^^^\"\u207f \u2551 \n \u2588\u2588\u2555\u2559@\u2593 \u2555 \u258c\u2551 H' \u2551\u2590\u2590\u2590\u2588\u2550\u2550=.,,, \u2588 \n \u2559\u2588\u2593\u2554\u255a\u255a\u2588 \u2560 \u258c\u2514\u2552 \u258c\u2590 \u255a\u2551\u2551\u2551\u2580****\u207f - \u2553\u258c \n \u2559\u2588\u258c\u00bcV\u255a\u258c \u258c \u2555 \u258c \u2551\u2552 \u2551 \u258c\u2592\u2560\u2588\u2580\u2264\u2264\u2264\u2264\u2264\u2310 \u2588 \n \u2559\u2588\u258c\u2554\u2588\u255a\u258c \u2518 M \u258c\u2551 \u256b UUM\u2588\u2588J^^\" \u2590\u258c \n \u2559\u2588\u2588\u2559\u2588\u2559\u258c \u2555$j \u2590\u2310\u258c \u258c\u2551\u255d\u255f\u2588\u00c5%%%\u2248\u2550 \u2588 \n \u2559\u2563\u2588\u2563\u2588^\u258c \u2560\u2551\u2590 \u2551 \u258c\u2590.DU\u2588\u2588^[\"\"\u207f -\u2552\u258c \n \u2580\u2588\u2584\u2588`\u258c \u2591M\u2580 \u258c\u2590 \u00c5\u00a3\u255d\u255d\u2588\u255c%\u2248\u2550\u2553\"\"w \u207f\u2310 \u2588 \n `\u2580\u2584\u2580`\u258c \u258c\u2588\u2590\u2310\u2551\u2590UW\u2556\u2588\u2588%\u2264\u2550\u2591*\u2500 =z \u2584\u0393 \n \u2559\u2588\u2588\u2559\u2584\u258c\u2588 \u258c\u00c5\u255b\u2563\u2588\u2588\u2568%\u2564\u0192\u207f= -` \u2584\u2518 \n \u2588\u258c\u2562\u2593\u258c\u258c W \u00a36\u2588\u2564,\"\u207f ` \u2584\u2261\u2580\u2580\u2580 \n \u2588\"\u2588\u258c\u258c\u255f\u00c5\u2553\u2588\u2553\u2588\u2580%` \u2584\u2580 \n \u2559\u258c\u2588\u2588`\u2592U\u2593U\u2588%\u2557* \u2588 \n \u258c\u256b\u2551 \u258c\u00c5\u00c5\u2551\u2580\u255b\u00ac` `\"\u2588 \n \u258c\u256b \u256b\u255f \u2588\u2584 ~\u2566%\u2592\u25654^ \n \u258c\u258c \"M\u2588 `\u2580\u2555 X\u2555\"\u2557\u2584\u2580^ \n \u2588\u258c \u2553M \u2559\u2580e\u2580\u2580^ \n \u2559\u2588\u2588\u2584\u2584\u2580 \n ^^ \n\"\"\"\n\nif __name__ == '__main__':\n main()\n\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\"\"\"The xonsh installer.\"\"\"\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n HAVE_SETUPTOOLS = True\nexcept 
ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n HAVE_SETUPTOOLS = False\n\nVERSION = '0.1.2'\n\nTABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']\n\ndef clean_tables():\n for f in TABLES:\n if os.path.isfile(f):\n os.remove(f)\n print('Remove ' + f)\n\ndef build_tables():\n print('Building lexer and parser tables.')\n sys.path.insert(0, os.path.dirname(__file__))\n from xonsh.parser import Parser\n Parser(lexer_table='lexer_table', yacc_table='parser_table',\n outputdir='xonsh')\n sys.path.pop(0)\n\nclass xinstall(install):\n def run(self):\n clean_tables()\n build_tables()\n install.run(self)\n\nclass xsdist(sdist):\n def make_release_tree(self, basedir, files):\n clean_tables()\n build_tables()\n sdist.make_release_tree(self, basedir, files)\n\ndef main():\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n print(logo)\n with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n description='an exotic, usable shell',\n long_description=readme,\n license='BSD',\n version=VERSION,\n author='Anthony Scopatz',\n maintainer='Anthony Scopatz',\n author_email='[email protected]',\n url='https://github.com/scopatz/xonsh',\n platforms='Cross Platform',\n classifiers = ['Programming Language :: Python :: 3'],\n packages=['xonsh'],\n scripts=['scripts/xonsh'],\n cmdclass={'install': xinstall, 'sdist': xsdist},\n )\n if HAVE_SETUPTOOLS:\n skw['setup_requires'] = ['ply']\n skw['install_requires'] = ['ply']\n setup(**skw)\n\nlogo = \"\"\"\n \u2553\u2588\u2588\u2584 \n \u2559\u2588\u2588\u2580\u2588\u2588\u2555 \n \u2590\u2588\u25884\u03a6\u2588\u2580\u2588\u258c \n \u00b2\u2588\u2588\u2584\u2588\u2588\u2588\u2580\u2588\u2588^\u2588\u2588 \n -\u2588\u2588\u2588\u2569\u2580 \" \u2552\u2584\u2588\u2588\u2588\u2588\u2588\u2580\u2588 \n \u2551\u2588\u2588\u2580\u2580W\u2564\u2584\u2580 \u2590\u2551\u2588\u2558 \u255d\u2588 \n \u2584m\u2580%\u03a6\u2580\u2580 \u255d*\" ,\u03b1\u2588\u2588\u2588\u2588\u2588\u2593\u2584,\u2584\u2580\u0393\"\u2580\u2555 \n \"\u2580\u2588\u2588\u00bc\" \u2584\u2550\u2566\u2588\u255f\u2551\u2588\u2580 \u2553 `^` ,\u2584 \u2562\u2555 \n ,\u2580\u256bM\u2588\u2590j\u2553\u255f\u2580 \u2554\u2593\u2584\u2588\u2580 '\u2551 \u2554 \u2563\u2551\u258c \u2580\u2584 \n \u2584m\u2580\u2580\u2588\u2588\u2588\u256c\u2588\u255d\u2580 \u2588\u2580^ \"\u00dcM j\u2590\u255f\u256b\u2568\u2592 \u2559\u2580\u2261\u2550\u2564\u2550m\u2580\u2557 \n \u2588\u00e6s\u00e6\u2553 \u2555, ,\u2584\u00c4 \u2590'\u2555H LU \u2551\u2551\u2560\u256b\u00c5^2=\u2310 \u2588 \n \u2584\u00e6%\u00c5\u2588\u2588\u2588\u2560\u2588\u00aa\u2559\u2584\u2588\u2580 $1\u2559 \u2551\u2551\u255f\u256b\u2569*T\u2584 \u258c \n \u2559\u2557%\u2584,\u2566\u2588\u2588\u258c\u2588\u258c\u2588\u2562M \u2555 M\u2551\u2551\u2551\u2551\u2588\u2550\u2310\u207f\"^ \u256b \n \u2559\u2563\u2580\u2588\u2588\u2588\u2588@\u2588\u2591\u2588 \u258c\u2555\u2555 ` \u258c\u2551\u2590\u2590\u2551\u2588D\u2550\u2248\u2310\u00ac\u207f s \u2551\u2310 \n \u2559\u256c\u2588\u2588\u2588\u2593\u2551\u2588` \u258c\u255a \u2555 \u2555\u258c\u2551\u2590\u2590\u2563\u258c\u2310*\u2592\u2592D\u00f9` \u2590\u258c \n \u2559\u256c\u2588\u2588\u2568U\u2588 \u255f $ \u258c \u258c\u258c\u2590\u2590\u2590M\u2588\u2584\u2550\u2264\u2310% \u2553\u2310 \u258c \n \u2551\u2551\u2588\u2584\u258c\u2551 \u255f \u258c 
\u258cM\u2590\u2590\u2590M\u2588\u2580\u2592\u2592\u259222, \u2590\u258c \n \u2588\u2588\u2588\u2559^\u258c \u2551 \u258c \u2310M\u2590\u2590\u2590M\u2588\u2264\u2310\u2310\u00ac\u2500\u2500 \u2590M \n \u2551\u2588\u2588 \u258c\u2559 \u2553 H\u2551 \u258c\u2552 M\u2551\u2590\u2590M\u2588\"^^^^^\"\u207f \u2551 \n \u2588\u2588\u2555\u2559@\u2593 \u2555 \u258c\u2551 H' \u2551\u2590\u2590\u2590\u2588\u2550\u2550=.,,, \u2588 \n \u2559\u2588\u2593\u2554\u255a\u255a\u2588 \u2560 \u258c\u2514\u2552 \u258c\u2590 \u255a\u2551\u2551\u2551\u2580****\u207f - \u2553\u258c \n \u2559\u2588\u258c\u00bcV\u255a\u258c \u258c \u2555 \u258c \u2551\u2552 \u2551 \u258c\u2592\u2560\u2588\u2580\u2264\u2264\u2264\u2264\u2264\u2310 \u2588 \n \u2559\u2588\u258c\u2554\u2588\u255a\u258c \u2518 M \u258c\u2551 \u256b UUM\u2588\u2588J^^\" \u2590\u258c \n \u2559\u2588\u2588\u2559\u2588\u2559\u258c \u2555$j \u2590\u2310\u258c \u258c\u2551\u255d\u255f\u2588\u00c5%%%\u2248\u2550 \u2588 \n \u2559\u2563\u2588\u2563\u2588^\u258c \u2560\u2551\u2590 \u2551 \u258c\u2590.DU\u2588\u2588^[\"\"\u207f -\u2552\u258c \n \u2580\u2588\u2584\u2588`\u258c \u2591M\u2580 \u258c\u2590 \u00c5\u00a3\u255d\u255d\u2588\u255c%\u2248\u2550\u2553\"\"w \u207f\u2310 \u2588 \n `\u2580\u2584\u2580`\u258c \u258c\u2588\u2590\u2310\u2551\u2590UW\u2556\u2588\u2588%\u2264\u2550\u2591*\u2500 =z \u2584\u0393 \n \u2559\u2588\u2588\u2559\u2584\u258c\u2588 \u258c\u00c5\u255b\u2563\u2588\u2588\u2568%\u2564\u0192\u207f= -` \u2584\u2518 \n \u2588\u258c\u2562\u2593\u258c\u258c W \u00a36\u2588\u2564,\"\u207f ` \u2584\u2261\u2580\u2580\u2580 \n \u2588\"\u2588\u258c\u258c\u255f\u00c5\u2553\u2588\u2553\u2588\u2580%` \u2584\u2580 \n \u2559\u258c\u2588\u2588`\u2592U\u2593U\u2588%\u2557* \u2588 \n \u258c\u256b\u2551 \u258c\u00c5\u00c5\u2551\u2580\u255b\u00ac` `\"\u2588 \n \u258c\u256b \u256b\u255f \u2588\u2584 ~\u2566%\u2592\u25654^ \n \u258c\u258c \"M\u2588 `\u2580\u2555 X\u2555\"\u2557\u2584\u2580^ \n \u2588\u258c \u2553M \u2559\u2580e\u2580\u2580^ \n \u2559\u2588\u2588\u2584\u2584\u2580 \n ^^ \n\"\"\"\n\nif __name__ == '__main__':\n main()\n\n", "path": "setup.py"}]}
2,122
121
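The xonsh diff in the row above anchors the README path to the directory containing setup.py instead of the current working directory. A small standalone sketch of the same pattern; the helper name is my own and does not appear in the patch:

```python
import os


def read_sibling(filename: str) -> str:
    """Read a file that sits next to this script, independent of the
    directory the interpreter was launched from."""
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        return f.read()


# Typical use inside a setup.py:
#     long_description = read_sibling('README.rst')
```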
gh_patches_debug_21187
rasdani/github-patches
git_diff
pypa__setuptools-2159
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plugins using setuptools entry points resolve to system and not setup_requires Assume that you have e.g. setuptools_scm 3 installed system-wide, and try to to run the following setup.py: ```python from setuptools import setup setup(name="foo", setup_requires=["setuptools_scm>=4"]) ``` you get the warning ``` /usr/lib/python3.8/site-packages/setuptools/dist.py:701: UserWarning: Module setuptools_scm was already imported from /usr/lib/python3.8/site-packages/setuptools_scm/__init__.py, but /tmp/foo/.eggs/setuptools_scm-4.1.1-py3.8.egg is being added to sys.path ``` (i.e. the system-wide setuptools_scm was loaded, even though it is too old). One can confirm that the "correct" newer setuptools_scm indeed doesn't get used by changing setup.py to ```python from setuptools import setup setup(name="foo", setup_requires=["setuptools_scm>=4"], use_scm_version={"parentdir_prefix_version": "foo-"}) ``` in which case one gets the same warning and then a traceback ``` TypeError: __init__() got an unexpected keyword argument 'parentdir_prefix_version' ``` I guess a solution may be e.g. to move the entry point (which is what gets eagerly loaded by setuptools) to a separate package (let's say `_setuptools_scm_helper`) which itself imports setuptools_scm only when actually needed with a pass-through shim so that setuptools only imports `_setuptools_scm_helper` and then the correct version of setuptools_scm can be loaded when needed. Plugins using setuptools entry points resolve to system and not setup_requires Assume that you have e.g. setuptools_scm 3 installed system-wide, and try to to run the following setup.py: ```python from setuptools import setup setup(name="foo", setup_requires=["setuptools_scm>=4"]) ``` you get the warning ``` /usr/lib/python3.8/site-packages/setuptools/dist.py:701: UserWarning: Module setuptools_scm was already imported from /usr/lib/python3.8/site-packages/setuptools_scm/__init__.py, but /tmp/foo/.eggs/setuptools_scm-4.1.1-py3.8.egg is being added to sys.path ``` (i.e. the system-wide setuptools_scm was loaded, even though it is too old). One can confirm that the "correct" newer setuptools_scm indeed doesn't get used by changing setup.py to ```python from setuptools import setup setup(name="foo", setup_requires=["setuptools_scm>=4"], use_scm_version={"parentdir_prefix_version": "foo-"}) ``` in which case one gets the same warning and then a traceback ``` TypeError: __init__() got an unexpected keyword argument 'parentdir_prefix_version' ``` I guess a solution may be e.g. to move the entry point (which is what gets eagerly loaded by setuptools) to a separate package (let's say `_setuptools_scm_helper`) which itself imports setuptools_scm only when actually needed with a pass-through shim so that setuptools only imports `_setuptools_scm_helper` and then the correct version of setuptools_scm can be loaded when needed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `setuptools/__init__.py` Content: ``` 1 """Extensions to the 'distutils' for large or complex distributions""" 2 3 import os 4 import sys 5 import functools 6 import distutils.core 7 import distutils.filelist 8 import re 9 from distutils.errors import DistutilsOptionError 10 from distutils.util import convert_path 11 from fnmatch import fnmatchcase 12 13 from ._deprecation_warning import SetuptoolsDeprecationWarning 14 15 from setuptools.extern.six import PY3, string_types 16 from setuptools.extern.six.moves import filter, map 17 18 import setuptools.version 19 from setuptools.extension import Extension 20 from setuptools.dist import Distribution, Feature 21 from setuptools.depends import Require 22 from . import monkey 23 24 __metaclass__ = type 25 26 27 __all__ = [ 28 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', 29 'SetuptoolsDeprecationWarning', 30 'find_packages' 31 ] 32 33 if PY3: 34 __all__.append('find_namespace_packages') 35 36 __version__ = setuptools.version.__version__ 37 38 bootstrap_install_from = None 39 40 # If we run 2to3 on .py files, should we also convert docstrings? 41 # Default: yes; assume that we can detect doctests reliably 42 run_2to3_on_doctests = True 43 # Standard package names for fixer packages 44 lib2to3_fixer_packages = ['lib2to3.fixes'] 45 46 47 class PackageFinder: 48 """ 49 Generate a list of all Python packages found within a directory 50 """ 51 52 @classmethod 53 def find(cls, where='.', exclude=(), include=('*',)): 54 """Return a list all Python packages found within directory 'where' 55 56 'where' is the root directory which will be searched for packages. It 57 should be supplied as a "cross-platform" (i.e. URL-style) path; it will 58 be converted to the appropriate local path syntax. 59 60 'exclude' is a sequence of package names to exclude; '*' can be used 61 as a wildcard in the names, such that 'foo.*' will exclude all 62 subpackages of 'foo' (but not 'foo' itself). 63 64 'include' is a sequence of package names to include. If it's 65 specified, only the named packages will be included. If it's not 66 specified, all found packages will be included. 'include' can contain 67 shell style wildcard patterns just like 'exclude'. 68 """ 69 70 return list(cls._find_packages_iter( 71 convert_path(where), 72 cls._build_filter('ez_setup', '*__pycache__', *exclude), 73 cls._build_filter(*include))) 74 75 @classmethod 76 def _find_packages_iter(cls, where, exclude, include): 77 """ 78 All the packages found in 'where' that pass the 'include' filter, but 79 not the 'exclude' filter. 80 """ 81 for root, dirs, files in os.walk(where, followlinks=True): 82 # Copy dirs to iterate over it, then empty dirs. 83 all_dirs = dirs[:] 84 dirs[:] = [] 85 86 for dir in all_dirs: 87 full_path = os.path.join(root, dir) 88 rel_path = os.path.relpath(full_path, where) 89 package = rel_path.replace(os.path.sep, '.') 90 91 # Skip directory trees that are not valid packages 92 if ('.' in dir or not cls._looks_like_package(full_path)): 93 continue 94 95 # Should this package be included? 96 if include(package) and not exclude(package): 97 yield package 98 99 # Keep searching subdirectories, as there may be more packages 100 # down there, even if the parent was excluded. 
101 dirs.append(dir) 102 103 @staticmethod 104 def _looks_like_package(path): 105 """Does a directory look like a package?""" 106 return os.path.isfile(os.path.join(path, '__init__.py')) 107 108 @staticmethod 109 def _build_filter(*patterns): 110 """ 111 Given a list of patterns, return a callable that will be true only if 112 the input matches at least one of the patterns. 113 """ 114 return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) 115 116 117 class PEP420PackageFinder(PackageFinder): 118 @staticmethod 119 def _looks_like_package(path): 120 return True 121 122 123 find_packages = PackageFinder.find 124 125 if PY3: 126 find_namespace_packages = PEP420PackageFinder.find 127 128 129 def _install_setup_requires(attrs): 130 # Note: do not use `setuptools.Distribution` directly, as 131 # our PEP 517 backend patch `distutils.core.Distribution`. 132 dist = distutils.core.Distribution(dict( 133 (k, v) for k, v in attrs.items() 134 if k in ('dependency_links', 'setup_requires') 135 )) 136 # Honor setup.cfg's options. 137 dist.parse_config_files(ignore_option_errors=True) 138 if dist.setup_requires: 139 dist.fetch_build_eggs(dist.setup_requires) 140 141 142 def setup(**attrs): 143 # Make sure we have any requirements needed to interpret 'attrs'. 144 _install_setup_requires(attrs) 145 return distutils.core.setup(**attrs) 146 147 setup.__doc__ = distutils.core.setup.__doc__ 148 149 150 _Command = monkey.get_unpatched(distutils.core.Command) 151 152 153 class Command(_Command): 154 __doc__ = _Command.__doc__ 155 156 command_consumes_arguments = False 157 158 def __init__(self, dist, **kw): 159 """ 160 Construct the command for dist, updating 161 vars(self) with any keyword parameters. 162 """ 163 _Command.__init__(self, dist) 164 vars(self).update(kw) 165 166 def _ensure_stringlike(self, option, what, default=None): 167 val = getattr(self, option) 168 if val is None: 169 setattr(self, option, default) 170 return default 171 elif not isinstance(val, string_types): 172 raise DistutilsOptionError("'%s' must be a %s (got `%s`)" 173 % (option, what, val)) 174 return val 175 176 def ensure_string_list(self, option): 177 r"""Ensure that 'option' is a list of strings. If 'option' is 178 currently a string, we split it either on /,\s*/ or /\s+/, so 179 "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become 180 ["foo", "bar", "baz"]. 181 """ 182 val = getattr(self, option) 183 if val is None: 184 return 185 elif isinstance(val, string_types): 186 setattr(self, option, re.split(r',\s*|\s+', val)) 187 else: 188 if isinstance(val, list): 189 ok = all(isinstance(v, string_types) for v in val) 190 else: 191 ok = False 192 if not ok: 193 raise DistutilsOptionError( 194 "'%s' must be a list of strings (got %r)" 195 % (option, val)) 196 197 def reinitialize_command(self, command, reinit_subcommands=0, **kw): 198 cmd = _Command.reinitialize_command(self, command, reinit_subcommands) 199 vars(cmd).update(kw) 200 return cmd 201 202 203 def _find_all_simple(path): 204 """ 205 Find all files under 'path' 206 """ 207 results = ( 208 os.path.join(base, file) 209 for base, dirs, files in os.walk(path, followlinks=True) 210 for file in files 211 ) 212 return filter(os.path.isfile, results) 213 214 215 def findall(dir=os.curdir): 216 """ 217 Find all files under 'dir' and return the list of full filenames. 218 Unless dir is '.', return full filenames with dir prepended. 
219 """ 220 files = _find_all_simple(dir) 221 if dir == os.curdir: 222 make_rel = functools.partial(os.path.relpath, start=dir) 223 files = map(make_rel, files) 224 return list(files) 225 226 227 # Apply monkey patches 228 monkey.patch_all() 229 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/__init__.py b/setuptools/__init__.py --- a/setuptools/__init__.py +++ b/setuptools/__init__.py @@ -129,10 +129,27 @@ def _install_setup_requires(attrs): # Note: do not use `setuptools.Distribution` directly, as # our PEP 517 backend patch `distutils.core.Distribution`. - dist = distutils.core.Distribution(dict( - (k, v) for k, v in attrs.items() - if k in ('dependency_links', 'setup_requires') - )) + class MinimalDistribution(distutils.core.Distribution): + """ + A minimal version of a distribution for supporting the + fetch_build_eggs interface. + """ + def __init__(self, attrs): + _incl = 'dependency_links', 'setup_requires' + filtered = { + k: attrs[k] + for k in set(_incl) & set(attrs) + } + distutils.core.Distribution.__init__(self, filtered) + + def finalize_options(self): + """ + Disable finalize_options to avoid building the working set. + Ref #2158. + """ + + dist = MinimalDistribution(attrs) + # Honor setup.cfg's options. dist.parse_config_files(ignore_option_errors=True) if dist.setup_requires:
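The golden diff above works by handing `fetch_build_eggs` a distribution object that never runs the full option-finalization machinery, so plugin entry points (such as setuptools_scm) are not resolved from the system before the `setup_requires` eggs are fetched. The snippet below is a minimal, self-contained sketch of that pattern only — `Base` and `MinimalDist` are hypothetical stand-ins for `distutils.core.Distribution` and the patch's `MinimalDistribution`, used to show the attribute whitelisting and the no-op `finalize_options` hook, not the actual setuptools internals.

```python
# Minimal sketch of the pattern used in the fix: build a stripped-down
# distribution that only sees the keys needed for fetch_build_eggs and
# skips finalize_options, so plugin entry points are not resolved early.
# `Base` stands in for distutils.core.Distribution in this sketch.


class Base:
    def __init__(self, attrs):
        self.attrs = attrs

    def finalize_options(self):
        # In real distutils this walks config handlers / entry points.
        print("Base.finalize_options called")


class MinimalDist(Base):
    _incl = ("dependency_links", "setup_requires")

    def __init__(self, attrs):
        # Keep only the whitelisted keys, mirroring the patch.
        filtered = {k: attrs[k] for k in set(self._incl) & set(attrs)}
        super().__init__(filtered)

    def finalize_options(self):
        # Intentionally a no-op: avoid building the working set too early.
        pass


if __name__ == "__main__":
    dist = MinimalDist({"name": "foo", "setup_requires": ["setuptools_scm>=4"]})
    print(dist.attrs)        # {'setup_requires': ['setuptools_scm>=4']}
    dist.finalize_options()  # does nothing, unlike Base
```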
{"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -129,10 +129,27 @@\n def _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n- dist = distutils.core.Distribution(dict(\n- (k, v) for k, v in attrs.items()\n- if k in ('dependency_links', 'setup_requires')\n- ))\n+ class MinimalDistribution(distutils.core.Distribution):\n+ \"\"\"\n+ A minimal version of a distribution for supporting the\n+ fetch_build_eggs interface.\n+ \"\"\"\n+ def __init__(self, attrs):\n+ _incl = 'dependency_links', 'setup_requires'\n+ filtered = {\n+ k: attrs[k]\n+ for k in set(_incl) & set(attrs)\n+ }\n+ distutils.core.Distribution.__init__(self, filtered)\n+\n+ def finalize_options(self):\n+ \"\"\"\n+ Disable finalize_options to avoid building the working set.\n+ Ref #2158.\n+ \"\"\"\n+\n+ dist = MinimalDistribution(attrs)\n+\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n", "issue": "Plugins using setuptools entry points resolve to system and not setup_requires\nAssume that you have e.g. setuptools_scm 3 installed system-wide, and try to to run the following setup.py:\r\n```python\r\nfrom setuptools import setup\r\nsetup(name=\"foo\", setup_requires=[\"setuptools_scm>=4\"])\r\n```\r\nyou get the warning\r\n```\r\n/usr/lib/python3.8/site-packages/setuptools/dist.py:701: UserWarning: Module setuptools_scm was already imported from /usr/lib/python3.8/site-packages/setuptools_scm/__init__.py, but /tmp/foo/.eggs/setuptools_scm-4.1.1-py3.8.egg is being added to sys.path\r\n```\r\n(i.e. the system-wide setuptools_scm was loaded, even though it is too old). One can confirm that the \"correct\" newer setuptools_scm indeed doesn't get used by changing setup.py to\r\n```python\r\nfrom setuptools import setup\r\nsetup(name=\"foo\",\r\n setup_requires=[\"setuptools_scm>=4\"],\r\n use_scm_version={\"parentdir_prefix_version\": \"foo-\"})\r\n```\r\nin which case one gets the same warning and then a traceback\r\n```\r\nTypeError: __init__() got an unexpected keyword argument 'parentdir_prefix_version'\r\n```\r\n\r\nI guess a solution may be e.g. to move the entry point (which is what gets eagerly loaded by setuptools) to a separate package (let's say `_setuptools_scm_helper`) which itself imports setuptools_scm only when actually needed with a pass-through shim so that setuptools only imports `_setuptools_scm_helper` and then the correct version of setuptools_scm can be loaded when needed.\nPlugins using setuptools entry points resolve to system and not setup_requires\nAssume that you have e.g. setuptools_scm 3 installed system-wide, and try to to run the following setup.py:\r\n```python\r\nfrom setuptools import setup\r\nsetup(name=\"foo\", setup_requires=[\"setuptools_scm>=4\"])\r\n```\r\nyou get the warning\r\n```\r\n/usr/lib/python3.8/site-packages/setuptools/dist.py:701: UserWarning: Module setuptools_scm was already imported from /usr/lib/python3.8/site-packages/setuptools_scm/__init__.py, but /tmp/foo/.eggs/setuptools_scm-4.1.1-py3.8.egg is being added to sys.path\r\n```\r\n(i.e. the system-wide setuptools_scm was loaded, even though it is too old). 
One can confirm that the \"correct\" newer setuptools_scm indeed doesn't get used by changing setup.py to\r\n```python\r\nfrom setuptools import setup\r\nsetup(name=\"foo\",\r\n setup_requires=[\"setuptools_scm>=4\"],\r\n use_scm_version={\"parentdir_prefix_version\": \"foo-\"})\r\n```\r\nin which case one gets the same warning and then a traceback\r\n```\r\nTypeError: __init__() got an unexpected keyword argument 'parentdir_prefix_version'\r\n```\r\n\r\nI guess a solution may be e.g. to move the entry point (which is what gets eagerly loaded by setuptools) to a separate package (let's say `_setuptools_scm_helper`) which itself imports setuptools_scm only when actually needed with a pass-through shim so that setuptools only imports `_setuptools_scm_helper` and then the correct version of setuptools_scm can be loaded when needed.\n", "before_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport sys\nimport functools\nimport distutils.core\nimport distutils.filelist\nimport re\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nfrom setuptools.extern.six import PY3, string_types\nfrom setuptools.extern.six.moves import filter, map\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution, Feature\nfrom setuptools.depends import Require\nfrom . import monkey\n\n__metaclass__ = type\n\n\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages'\n]\n\nif PY3:\n __all__.append('find_namespace_packages')\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 
'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include)))\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if ('.' in dir or not cls._looks_like_package(full_path)):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\n\nif PY3:\n find_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n dist = distutils.core.Distribution(dict(\n (k, v) for k, v in attrs.items()\n if k in ('dependency_links', 'setup_requires')\n ))\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, string_types):\n raise DistutilsOptionError(\"'%s' must be a %s (got `%s`)\"\n % (option, what, val))\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, string_types):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, string_types) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\"\n % (option, val))\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}], "after_files": [{"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport sys\nimport functools\nimport distutils.core\nimport distutils.filelist\nimport re\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\nfrom fnmatch import fnmatchcase\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nfrom setuptools.extern.six import PY3, string_types\nfrom setuptools.extern.six.moves import filter, map\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution, Feature\nfrom setuptools.depends import Require\nfrom . import monkey\n\n__metaclass__ = type\n\n\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages'\n]\n\nif PY3:\n __all__.append('find_namespace_packages')\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 
'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include)))\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if ('.' in dir or not cls._looks_like_package(full_path)):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\n\nif PY3:\n find_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {\n k: attrs[k]\n for k in set(_incl) & set(attrs)\n }\n distutils.core.Distribution.__init__(self, filtered)\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, string_types):\n raise DistutilsOptionError(\"'%s' must be a %s (got `%s`)\"\n % (option, what, val))\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, string_types):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, string_types) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\"\n % (option, val))\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]}
3,233
307
gh_patches_debug_7091
rasdani/github-patches
git_diff
mkdocs__mkdocs-397
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NoneType object is not callable I'm not sure if this is my own stupidity, related to #283, or something I've ended up doing myself by accident. I'm working on developing a mkdocs theme, and `mkdocs build` and `mkdocs build --clean` both run without any issues. However, if I try and `mkdocs serve` then I get hit by an error. ``` Traceback (most recent call last): File "/home/pi/.virtualenvs/face/bin/mkdocs", line 9, in <module> load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')() File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/main.py", line 60, in run_main main(cmd, args=sys.argv[2:], options=dict(opts)) File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/main.py", line 33, in main serve(config, options=options) File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/serve.py", line 96, in serve observer.start() File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/api.py", line 255, in start emitter.start() File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/utils/__init__.py", line 111, in start self.on_thread_start() File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py", line 77, in on_thread_start self._snapshot = self._take_snapshot() File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py", line 74, in <lambda> self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir) File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/utils/dirsnapshot.py", line 207, in __init__ st = stat(path) OSError: [Errno 2] No such file or directory: '/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/themes/face' Exception in thread Thread-3 (most likely raised during interpreter shutdown): Traceback (most recent call last): File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/api.py", line 146, in run File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py", line 91, in queue_events File "/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py", line 74, in <lambda> <type 'exceptions.TypeError'>: 'NoneType' object is not callable ``` My `mkdocs.yml` file is simple, and I don't think it is the actual cause, but it might be in my own inept way: ``` site_name: Face Theme theme_dir: face theme: face use_directory_urls: true repo_url: https://bitbucket.org/shakna-israel/face dev_addr: 0.0.0.0:8000 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/serve.py` Content: ``` 1 # coding: utf-8 2 from __future__ import print_function 3 4 from watchdog import events 5 from watchdog.observers.polling import PollingObserver 6 from mkdocs.build import build 7 from mkdocs.compat import httpserver, socketserver, urlunquote 8 from mkdocs.config import load_config 9 import os 10 import posixpath 11 import shutil 12 import sys 13 import tempfile 14 15 16 class BuildEventHandler(events.FileSystemEventHandler): 17 """ 18 Perform a rebuild when anything in the theme or docs directory changes. 
19 """ 20 def __init__(self, options): 21 super(BuildEventHandler, self).__init__() 22 self.options = options 23 24 def on_any_event(self, event): 25 if not isinstance(event, events.DirModifiedEvent): 26 print('Rebuilding documentation...', end='') 27 config = load_config(options=self.options) 28 build(config, live_server=True) 29 print(' done') 30 31 32 class ConfigEventHandler(BuildEventHandler): 33 """ 34 Perform a rebuild when the config file changes. 35 """ 36 def on_any_event(self, event): 37 try: 38 if os.path.basename(event.src_path) == 'mkdocs.yml': 39 super(ConfigEventHandler, self).on_any_event(event) 40 except Exception as e: 41 print(e) 42 43 44 class FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler): 45 """ 46 Override the default implementation to allow us to specify the served 47 directory, instead of being hardwired to the current working directory. 48 """ 49 base_dir = os.getcwd() 50 51 def translate_path(self, path): 52 # abandon query parameters 53 path = path.split('?', 1)[0] 54 path = path.split('#', 1)[0] 55 path = posixpath.normpath(urlunquote(path)) 56 words = path.split('/') 57 words = filter(None, words) 58 path = self.base_dir 59 for word in words: 60 drive, word = os.path.splitdrive(word) 61 head, word = os.path.split(word) 62 if word in (os.curdir, os.pardir): 63 continue 64 path = os.path.join(path, word) 65 return path 66 67 def log_message(self, format, *args): 68 date_str = self.log_date_time_string() 69 sys.stderr.write('[%s] %s\n' % (date_str, format % args)) 70 71 72 def serve(config, options=None): 73 """ 74 Start the devserver, and rebuild the docs whenever any changes take effect. 75 """ 76 # Create a temporary build directory, and set some options to serve it 77 tempdir = tempfile.mkdtemp() 78 options['site_dir'] = tempdir 79 80 # Only use user-friendly URLs when running the live server 81 options['use_directory_urls'] = True 82 83 # Perform the initial build 84 config = load_config(options=options) 85 build(config, live_server=True) 86 87 # Note: We pass any command-line options through so that we 88 # can re-apply them if the config file is reloaded. 89 event_handler = BuildEventHandler(options) 90 config_event_handler = ConfigEventHandler(options) 91 92 # We could have used `Observer()`, which can be faster, but 93 # `PollingObserver()` works more universally. 94 observer = PollingObserver() 95 observer.schedule(event_handler, config['docs_dir'], recursive=True) 96 for theme_dir in config['theme_dir']: 97 observer.schedule(event_handler, theme_dir, recursive=True) 98 observer.schedule(config_event_handler, '.') 99 observer.start() 100 101 class TCPServer(socketserver.TCPServer): 102 allow_reuse_address = True 103 104 class DocsDirectoryHandler(FixedDirectoryHandler): 105 base_dir = config['site_dir'] 106 107 host, port = config['dev_addr'].split(':', 1) 108 server = TCPServer((host, int(port)), DocsDirectoryHandler) 109 110 print('Running at: http://%s:%s/' % (host, port)) 111 print('Live reload enabled.') 112 print('Hold ctrl+c to quit.') 113 try: 114 server.serve_forever() 115 except KeyboardInterrupt: 116 print('Stopping server...') 117 118 # Clean up 119 observer.stop() 120 observer.join() 121 shutil.rmtree(tempdir) 122 print('Quit complete') 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/serve.py b/mkdocs/serve.py --- a/mkdocs/serve.py +++ b/mkdocs/serve.py @@ -94,6 +94,8 @@ observer = PollingObserver() observer.schedule(event_handler, config['docs_dir'], recursive=True) for theme_dir in config['theme_dir']: + if not os.path.exists(theme_dir): + continue observer.schedule(event_handler, theme_dir, recursive=True) observer.schedule(config_event_handler, '.') observer.start()
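The fix above boils down to guarding each watched path with `os.path.exists` before handing it to watchdog, since the traceback in the issue shows `observer.start()` failing when the polling emitter tries to stat a missing theme directory. The sketch below illustrates that guard in isolation; `DummyObserver` and `schedule_existing` are made-up names for demonstration and are not part of mkdocs or watchdog.

```python
# Illustrative sketch of the guard added in the patch: only schedule
# watchers for paths that actually exist, so the snapshot step never
# stats a missing directory. DummyObserver is a stand-in here; the
# real code uses watchdog's PollingObserver.
import os


class DummyObserver:
    def __init__(self):
        self.watched = []

    def schedule(self, handler, path, recursive=False):
        # The real PollingObserver raises OSError at start() if `path`
        # is missing (see the traceback in the issue above).
        self.watched.append(path)


def schedule_existing(observer, handler, paths):
    for path in paths:
        if not os.path.exists(path):
            continue  # mirrors the fix: silently skip missing theme dirs
        observer.schedule(handler, path, recursive=True)


if __name__ == "__main__":
    obs = DummyObserver()
    schedule_existing(obs, handler=None, paths=[".", "/nonexistent/theme"])
    print(obs.watched)  # only "." survives the existence check
```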
{"golden_diff": "diff --git a/mkdocs/serve.py b/mkdocs/serve.py\n--- a/mkdocs/serve.py\n+++ b/mkdocs/serve.py\n@@ -94,6 +94,8 @@\n observer = PollingObserver()\n observer.schedule(event_handler, config['docs_dir'], recursive=True)\n for theme_dir in config['theme_dir']:\n+ if not os.path.exists(theme_dir):\n+ continue\n observer.schedule(event_handler, theme_dir, recursive=True)\n observer.schedule(config_event_handler, '.')\n observer.start()\n", "issue": "NoneType object is not callable\nI'm not sure if this is my own stupidity, related to #283, or something I've ended up doing myself by accident.\n\nI'm working on developing a mkdocs theme, and `mkdocs build` and `mkdocs build --clean` both run without any issues.\n\nHowever, if I try and `mkdocs serve` then I get hit by an error.\n\n```\nTraceback (most recent call last):\n File \"/home/pi/.virtualenvs/face/bin/mkdocs\", line 9, in <module>\n load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')()\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/main.py\", line 60, in run_main\n main(cmd, args=sys.argv[2:], options=dict(opts))\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/main.py\", line 33, in main\n serve(config, options=options)\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/serve.py\", line 96, in serve\n observer.start()\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/api.py\", line 255, in start\n emitter.start()\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/utils/__init__.py\", line 111, in start\n self.on_thread_start()\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py\", line 77, in on_thread_start\n self._snapshot = self._take_snapshot()\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py\", line 74, in <lambda>\n self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/utils/dirsnapshot.py\", line 207, in __init__\n st = stat(path)\nOSError: [Errno 2] No such file or directory: '/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/mkdocs/themes/face'\nException in thread Thread-3 (most likely raised during interpreter shutdown):\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/threading.py\", line 552, in __bootstrap_inner\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/api.py\", line 146, in run\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py\", line 91, in queue_events\n File \"/home/pi/.virtualenvs/face/local/lib/python2.7/site-packages/watchdog/observers/polling.py\", line 74, in <lambda>\n<type 'exceptions.TypeError'>: 'NoneType' object is not callable\n```\n\nMy `mkdocs.yml` file is simple, and I don't think it is the actual cause, but it might be in my own inept way:\n\n```\nsite_name: Face Theme\ntheme_dir: face\ntheme: face\nuse_directory_urls: true\nrepo_url: https://bitbucket.org/shakna-israel/face\ndev_addr: 0.0.0.0:8000\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom watchdog import events\nfrom watchdog.observers.polling import PollingObserver\nfrom mkdocs.build import build\nfrom mkdocs.compat import httpserver, socketserver, urlunquote\nfrom mkdocs.config import load_config\nimport 
os\nimport posixpath\nimport shutil\nimport sys\nimport tempfile\n\n\nclass BuildEventHandler(events.FileSystemEventHandler):\n \"\"\"\n Perform a rebuild when anything in the theme or docs directory changes.\n \"\"\"\n def __init__(self, options):\n super(BuildEventHandler, self).__init__()\n self.options = options\n\n def on_any_event(self, event):\n if not isinstance(event, events.DirModifiedEvent):\n print('Rebuilding documentation...', end='')\n config = load_config(options=self.options)\n build(config, live_server=True)\n print(' done')\n\n\nclass ConfigEventHandler(BuildEventHandler):\n \"\"\"\n Perform a rebuild when the config file changes.\n \"\"\"\n def on_any_event(self, event):\n try:\n if os.path.basename(event.src_path) == 'mkdocs.yml':\n super(ConfigEventHandler, self).on_any_event(event)\n except Exception as e:\n print(e)\n\n\nclass FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler):\n \"\"\"\n Override the default implementation to allow us to specify the served\n directory, instead of being hardwired to the current working directory.\n \"\"\"\n base_dir = os.getcwd()\n\n def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = posixpath.normpath(urlunquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = self.base_dir\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir):\n continue\n path = os.path.join(path, word)\n return path\n\n def log_message(self, format, *args):\n date_str = self.log_date_time_string()\n sys.stderr.write('[%s] %s\\n' % (date_str, format % args))\n\n\ndef serve(config, options=None):\n \"\"\"\n Start the devserver, and rebuild the docs whenever any changes take effect.\n \"\"\"\n # Create a temporary build directory, and set some options to serve it\n tempdir = tempfile.mkdtemp()\n options['site_dir'] = tempdir\n\n # Only use user-friendly URLs when running the live server\n options['use_directory_urls'] = True\n\n # Perform the initial build\n config = load_config(options=options)\n build(config, live_server=True)\n\n # Note: We pass any command-line options through so that we\n # can re-apply them if the config file is reloaded.\n event_handler = BuildEventHandler(options)\n config_event_handler = ConfigEventHandler(options)\n\n # We could have used `Observer()`, which can be faster, but\n # `PollingObserver()` works more universally.\n observer = PollingObserver()\n observer.schedule(event_handler, config['docs_dir'], recursive=True)\n for theme_dir in config['theme_dir']:\n observer.schedule(event_handler, theme_dir, recursive=True)\n observer.schedule(config_event_handler, '.')\n observer.start()\n\n class TCPServer(socketserver.TCPServer):\n allow_reuse_address = True\n\n class DocsDirectoryHandler(FixedDirectoryHandler):\n base_dir = config['site_dir']\n\n host, port = config['dev_addr'].split(':', 1)\n server = TCPServer((host, int(port)), DocsDirectoryHandler)\n\n print('Running at: http://%s:%s/' % (host, port))\n print('Live reload enabled.')\n print('Hold ctrl+c to quit.')\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n print('Stopping server...')\n\n # Clean up\n observer.stop()\n observer.join()\n shutil.rmtree(tempdir)\n print('Quit complete')\n", "path": "mkdocs/serve.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nfrom watchdog import events\nfrom watchdog.observers.polling import 
PollingObserver\nfrom mkdocs.build import build\nfrom mkdocs.compat import httpserver, socketserver, urlunquote\nfrom mkdocs.config import load_config\nimport os\nimport posixpath\nimport shutil\nimport sys\nimport tempfile\n\n\nclass BuildEventHandler(events.FileSystemEventHandler):\n \"\"\"\n Perform a rebuild when anything in the theme or docs directory changes.\n \"\"\"\n def __init__(self, options):\n super(BuildEventHandler, self).__init__()\n self.options = options\n\n def on_any_event(self, event):\n if not isinstance(event, events.DirModifiedEvent):\n print('Rebuilding documentation...', end='')\n config = load_config(options=self.options)\n build(config, live_server=True)\n print(' done')\n\n\nclass ConfigEventHandler(BuildEventHandler):\n \"\"\"\n Perform a rebuild when the config file changes.\n \"\"\"\n def on_any_event(self, event):\n try:\n if os.path.basename(event.src_path) == 'mkdocs.yml':\n super(ConfigEventHandler, self).on_any_event(event)\n except Exception as e:\n print(e)\n\n\nclass FixedDirectoryHandler(httpserver.SimpleHTTPRequestHandler):\n \"\"\"\n Override the default implementation to allow us to specify the served\n directory, instead of being hardwired to the current working directory.\n \"\"\"\n base_dir = os.getcwd()\n\n def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = posixpath.normpath(urlunquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = self.base_dir\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir):\n continue\n path = os.path.join(path, word)\n return path\n\n def log_message(self, format, *args):\n date_str = self.log_date_time_string()\n sys.stderr.write('[%s] %s\\n' % (date_str, format % args))\n\n\ndef serve(config, options=None):\n \"\"\"\n Start the devserver, and rebuild the docs whenever any changes take effect.\n \"\"\"\n # Create a temporary build directory, and set some options to serve it\n tempdir = tempfile.mkdtemp()\n options['site_dir'] = tempdir\n\n # Only use user-friendly URLs when running the live server\n options['use_directory_urls'] = True\n\n # Perform the initial build\n config = load_config(options=options)\n build(config, live_server=True)\n\n # Note: We pass any command-line options through so that we\n # can re-apply them if the config file is reloaded.\n event_handler = BuildEventHandler(options)\n config_event_handler = ConfigEventHandler(options)\n\n # We could have used `Observer()`, which can be faster, but\n # `PollingObserver()` works more universally.\n observer = PollingObserver()\n observer.schedule(event_handler, config['docs_dir'], recursive=True)\n for theme_dir in config['theme_dir']:\n if not os.path.exists(theme_dir):\n continue\n observer.schedule(event_handler, theme_dir, recursive=True)\n observer.schedule(config_event_handler, '.')\n observer.start()\n\n class TCPServer(socketserver.TCPServer):\n allow_reuse_address = True\n\n class DocsDirectoryHandler(FixedDirectoryHandler):\n base_dir = config['site_dir']\n\n host, port = config['dev_addr'].split(':', 1)\n server = TCPServer((host, int(port)), DocsDirectoryHandler)\n\n print('Running at: http://%s:%s/' % (host, port))\n print('Live reload enabled.')\n print('Hold ctrl+c to quit.')\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n print('Stopping server...')\n\n # Clean up\n observer.stop()\n observer.join()\n shutil.rmtree(tempdir)\n print('Quit 
complete')\n", "path": "mkdocs/serve.py"}]}
2,202
118
gh_patches_debug_34360
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2137
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DeprecationWarning: The distutils package is deprecated ## 🐛 Bug TorchMetrics still uses distutils, but distutils is deprecated. ### To Reproduce ```console > python3 -Wall >>> import torchmetrics lib/python3.11/site-packages/torchmetrics/utilities/imports.py:18: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives from distutils.version import LooseVersion ``` ### Expected behavior I would not expect TorchMetrics to still be using distutils. ### Environment - TorchMetrics version: 1.1, 1.2, master (spack) - Python & PyTorch Version: 3.11.4 & 2.0.1 - Any other relevant information such as OS: macOS 13.5.2 ### Additional context I also sometimes see a different error message from pytest: ``` DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead. ``` This gives one possible solution. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/utilities/imports.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 """Import utilities.""" 15 import operator 16 import shutil 17 import sys 18 from distutils.version import LooseVersion 19 from typing import Optional 20 21 from lightning_utilities.core.imports import compare_version, package_available 22 23 _PYTHON_VERSION = ".".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro])) 24 _PYTHON_LOWER_3_8 = LooseVersion(_PYTHON_VERSION) < LooseVersion("3.8") 25 _TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version("torch", operator.lt, "1.12.0.dev") 26 _TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version("torch", operator.ge, "1.9.0") 27 _TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version("torch", operator.ge, "1.10.0") 28 _TORCH_GREATER_EQUAL_1_11: Optional[bool] = compare_version("torch", operator.ge, "1.11.0") 29 _TORCH_GREATER_EQUAL_1_12: Optional[bool] = compare_version("torch", operator.ge, "1.12.0") 30 _TORCH_GREATER_EQUAL_1_13: Optional[bool] = compare_version("torch", operator.ge, "1.13.0") 31 32 _JIWER_AVAILABLE: bool = package_available("jiwer") 33 _NLTK_AVAILABLE: bool = package_available("nltk") 34 _ROUGE_SCORE_AVAILABLE: bool = package_available("rouge_score") 35 _BERTSCORE_AVAILABLE: bool = package_available("bert_score") 36 _SCIPY_AVAILABLE: bool = package_available("scipy") 37 _TORCH_FIDELITY_AVAILABLE: bool = package_available("torch_fidelity") 38 _LPIPS_AVAILABLE: bool = package_available("lpips") 39 _PYCOCOTOOLS_AVAILABLE: bool = package_available("pycocotools") 40 _TORCHVISION_AVAILABLE: bool = package_available("torchvision") 41 _TORCHVISION_GREATER_EQUAL_0_8: Optional[bool] = compare_version("torchvision", operator.ge, "0.8.0") 42 _TORCHVISION_GREATER_EQUAL_0_13: Optional[bool] = compare_version("torchvision", operator.ge, "0.13.0") 43 _TQDM_AVAILABLE: bool = package_available("tqdm") 44 _TRANSFORMERS_AVAILABLE: bool = package_available("transformers") 45 _TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version("transformers", operator.ge, "4.4.0") 46 _TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version("transformers", operator.ge, "4.10.0") 47 _PESQ_AVAILABLE: bool = package_available("pesq") 48 _GAMMATONE_AVAILABLE: bool = package_available("gammatone") 49 _TORCHAUDIO_AVAILABLE: bool = package_available("torchaudio") 50 _TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version("torchaudio", operator.ge, "0.10.0") 51 _SACREBLEU_AVAILABLE: bool = package_available("sacrebleu") 52 _REGEX_AVAILABLE: bool = package_available("regex") 53 _PYSTOI_AVAILABLE: bool = package_available("pystoi") 54 _FAST_BSS_EVAL_AVAILABLE: bool = package_available("fast_bss_eval") 55 _MATPLOTLIB_AVAILABLE: bool = package_available("matplotlib") 56 _SCIENCEPLOT_AVAILABLE: bool = package_available("scienceplots") 57 _MULTIPROCESSING_AVAILABLE: bool = package_available("multiprocessing") 58 _XLA_AVAILABLE: bool = package_available("torch_xla") 59 _PIQ_GREATER_EQUAL_0_8: Optional[bool] = compare_version("piq", operator.ge, "0.8.0") 60 _FASTER_COCO_EVAL_AVAILABLE: bool = package_available("faster_coco_eval") 61 62 _LATEX_AVAILABLE: bool = shutil.which("latex") is not None 63 ``` Path: `.github/assistant.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import glob 15 import json 16 import logging 17 import os 18 import re 19 import sys 20 import traceback 21 from distutils.version import LooseVersion 22 from typing import List, Optional, Tuple, Union 23 24 import fire 25 import requests 26 from pkg_resources import parse_requirements 27 28 _REQUEST_TIMEOUT = 10 29 _PATH_ROOT = os.path.dirname(os.path.dirname(__file__)) 30 _PKG_WIDE_SUBPACKAGES = ("utilities", "helpers") 31 LUT_PYTHON_TORCH = { 32 "3.8": "1.4", 33 "3.9": "1.7.1", 34 "3.10": "1.11", 35 "3.11": "1.13", 36 } 37 _path = lambda *ds: os.path.join(_PATH_ROOT, *ds) 38 REQUIREMENTS_FILES = (*glob.glob(_path("requirements", "*.txt")), _path("requirements.txt")) 39 40 41 def request_url(url: str, auth_token: Optional[str] = None) -> Optional[dict]: 42 """General request with checking if request limit was reached.""" 43 auth_header = {"Authorization": f"token {auth_token}"} if auth_token else {} 44 try: 45 req = requests.get(url, headers=auth_header, timeout=_REQUEST_TIMEOUT) 46 except requests.exceptions.Timeout: 47 traceback.print_exc() 48 return None 49 if req.status_code == 403: 50 return None 51 return json.loads(req.content.decode(req.encoding)) 52 53 54 class AssistantCLI: 55 """CLI assistant for local CI.""" 56 57 @staticmethod 58 def prune_packages(req_file: str, *pkgs: str) -> None: 59 """Prune packages from requirement file.""" 60 with open(req_file) as fp: 61 lines = fp.readlines() 62 63 for pkg in pkgs: 64 lines = [ln for ln in lines if not ln.startswith(pkg)] 65 logging.info(lines) 66 67 with open(req_file, "w", encoding="utf-8") as fp: 68 fp.writelines(lines) 69 70 @staticmethod 71 def set_min_torch_by_python(fpath: str = "requirements/base.txt") -> None: 72 """Set minimal torch version according to Python actual version. 
73 74 >>> AssistantCLI.set_min_torch_by_python("../requirements/base.txt") 75 76 """ 77 py_ver = f"{sys.version_info.major}.{sys.version_info.minor}" 78 if py_ver not in LUT_PYTHON_TORCH: 79 return 80 with open(fpath) as fp: 81 reqs = parse_requirements(fp.readlines()) 82 pkg_ver = next(p for p in reqs if p.name == "torch") 83 pt_ver = min([LooseVersion(v[1]) for v in pkg_ver.specs]) 84 pt_ver = max(LooseVersion(LUT_PYTHON_TORCH[py_ver]), pt_ver) 85 with open(fpath) as fp: 86 requires = fp.read() 87 requires = re.sub(r"torch>=[\d\.]+", f"torch>={pt_ver}", requires) 88 with open(fpath, "w", encoding="utf-8") as fp: 89 fp.write(requires) 90 91 @staticmethod 92 def replace_min_requirements(fpath: str) -> None: 93 """Replace all `>=` by `==` in given file.""" 94 logging.info(f"processing: {fpath}") 95 with open(fpath, encoding="utf-8") as fp: 96 req = fp.read() 97 req = req.replace(">=", "==") 98 with open(fpath, "w", encoding="utf-8") as fp: 99 fp.write(req) 100 101 @staticmethod 102 def set_oldest_versions(req_files: List[str] = REQUIREMENTS_FILES) -> None: 103 """Set the oldest version for requirements.""" 104 AssistantCLI.set_min_torch_by_python() 105 for fpath in req_files: 106 AssistantCLI.replace_min_requirements(fpath) 107 108 @staticmethod 109 def changed_domains( 110 pr: Optional[int] = None, 111 auth_token: Optional[str] = None, 112 as_list: bool = False, 113 general_sub_pkgs: Tuple[str] = _PKG_WIDE_SUBPACKAGES, 114 ) -> Union[str, List[str]]: 115 """Determine what domains were changed in particular PR.""" 116 if not pr: 117 return "unittests" 118 url = f"https://api.github.com/repos/Lightning-AI/torchmetrics/pulls/{pr}/files" 119 logging.debug(url) 120 data = request_url(url, auth_token) 121 if not data: 122 logging.debug("WARNING: No data was received -> test everything.") 123 return "unittests" 124 files = [d["filename"] for d in data] 125 126 # filter out all integrations as they run in separate suit 127 files = [fn for fn in files if not fn.startswith("tests/integrations")] 128 if not files: 129 logging.debug("Only integrations was changed so not reason for deep testing...") 130 return "" 131 # filter only docs files 132 files_ = [fn for fn in files if fn.startswith("docs")] 133 if len(files) == len(files_): 134 logging.debug("Only docs was changed so not reason for deep testing...") 135 return "" 136 137 # filter only package files and skip inits 138 _is_in_test = lambda fn: fn.startswith("tests") 139 _filter_pkg = lambda fn: _is_in_test(fn) or (fn.startswith("src/torchmetrics") and "__init__.py" not in fn) 140 files_pkg = [fn for fn in files if _filter_pkg(fn)] 141 if not files_pkg: 142 return "unittests" 143 144 # parse domains 145 def _crop_path(fname: str, paths: List[str]) -> str: 146 for p in paths: 147 fname = fname.replace(p, "") 148 return fname 149 150 files_pkg = [_crop_path(fn, ["src/torchmetrics/", "tests/unittests/", "functional/"]) for fn in files_pkg] 151 # filter domain names 152 tm_modules = [fn.split("/")[0] for fn in files_pkg if "/" in fn] 153 # filter general (used everywhere) sub-packages 154 tm_modules = [md for md in tm_modules if md not in general_sub_pkgs] 155 if len(files_pkg) > len(tm_modules): 156 logging.debug("Some more files was changed -> rather test everything...") 157 return "unittests" 158 # keep only unique 159 if as_list: 160 return list(tm_modules) 161 tm_modules = [f"unittests/{md}" for md in set(tm_modules)] 162 not_exists = [p for p in tm_modules if os.path.exists(p)] 163 if not_exists: 164 raise ValueError(f"Missing following paths: 
{not_exists}") 165 return " ".join(tm_modules) 166 167 168 if __name__ == "__main__": 169 logging.basicConfig(level=logging.INFO) 170 fire.Fire(AssistantCLI) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/.github/assistant.py b/.github/assistant.py --- a/.github/assistant.py +++ b/.github/assistant.py @@ -18,11 +18,11 @@ import re import sys import traceback -from distutils.version import LooseVersion from typing import List, Optional, Tuple, Union import fire import requests +from packaging.version import parse from pkg_resources import parse_requirements _REQUEST_TIMEOUT = 10 @@ -80,8 +80,8 @@ with open(fpath) as fp: reqs = parse_requirements(fp.readlines()) pkg_ver = next(p for p in reqs if p.name == "torch") - pt_ver = min([LooseVersion(v[1]) for v in pkg_ver.specs]) - pt_ver = max(LooseVersion(LUT_PYTHON_TORCH[py_ver]), pt_ver) + pt_ver = min([parse(v[1]) for v in pkg_ver.specs]) + pt_ver = max(parse(LUT_PYTHON_TORCH[py_ver]), pt_ver) with open(fpath) as fp: requires = fp.read() requires = re.sub(r"torch>=[\d\.]+", f"torch>={pt_ver}", requires) diff --git a/src/torchmetrics/utilities/imports.py b/src/torchmetrics/utilities/imports.py --- a/src/torchmetrics/utilities/imports.py +++ b/src/torchmetrics/utilities/imports.py @@ -15,13 +15,13 @@ import operator import shutil import sys -from distutils.version import LooseVersion from typing import Optional from lightning_utilities.core.imports import compare_version, package_available +from packaging.version import Version, parse _PYTHON_VERSION = ".".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro])) -_PYTHON_LOWER_3_8 = LooseVersion(_PYTHON_VERSION) < LooseVersion("3.8") +_PYTHON_LOWER_3_8 = parse(_PYTHON_VERSION) < Version("3.8") _TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version("torch", operator.lt, "1.12.0.dev") _TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version("torch", operator.ge, "1.9.0") _TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version("torch", operator.ge, "1.10.0")
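The patch replaces `distutils.version.LooseVersion` with the `packaging.version` API. The short example below shows how `parse` and `Version` reproduce the `_PYTHON_LOWER_3_8` comparison from the patched `imports.py`; it assumes the `packaging` package is installed (it is already a dependency in most pip/setuptools environments).

```python
# Small demonstration of the packaging.version API that the patch swaps in
# for distutils' deprecated LooseVersion.
from packaging.version import Version, parse

python_version = "3.11.4"

# parse() turns a version string into a comparable Version object,
# mirroring the `_PYTHON_LOWER_3_8` check in the patched imports.py.
lower_than_3_8 = parse(python_version) < Version("3.8")
print(lower_than_3_8)  # False

# Unlike LooseVersion, Version follows PEP 440 and orders pre-releases
# correctly, which matters for checks like "1.12.0.dev".
print(Version("1.12.0.dev0") < Version("1.12.0"))  # True
```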
{"golden_diff": "diff --git a/.github/assistant.py b/.github/assistant.py\n--- a/.github/assistant.py\n+++ b/.github/assistant.py\n@@ -18,11 +18,11 @@\n import re\n import sys\n import traceback\n-from distutils.version import LooseVersion\n from typing import List, Optional, Tuple, Union\n \n import fire\n import requests\n+from packaging.version import parse\n from pkg_resources import parse_requirements\n \n _REQUEST_TIMEOUT = 10\n@@ -80,8 +80,8 @@\n with open(fpath) as fp:\n reqs = parse_requirements(fp.readlines())\n pkg_ver = next(p for p in reqs if p.name == \"torch\")\n- pt_ver = min([LooseVersion(v[1]) for v in pkg_ver.specs])\n- pt_ver = max(LooseVersion(LUT_PYTHON_TORCH[py_ver]), pt_ver)\n+ pt_ver = min([parse(v[1]) for v in pkg_ver.specs])\n+ pt_ver = max(parse(LUT_PYTHON_TORCH[py_ver]), pt_ver)\n with open(fpath) as fp:\n requires = fp.read()\n requires = re.sub(r\"torch>=[\\d\\.]+\", f\"torch>={pt_ver}\", requires)\ndiff --git a/src/torchmetrics/utilities/imports.py b/src/torchmetrics/utilities/imports.py\n--- a/src/torchmetrics/utilities/imports.py\n+++ b/src/torchmetrics/utilities/imports.py\n@@ -15,13 +15,13 @@\n import operator\n import shutil\n import sys\n-from distutils.version import LooseVersion\n from typing import Optional\n \n from lightning_utilities.core.imports import compare_version, package_available\n+from packaging.version import Version, parse\n \n _PYTHON_VERSION = \".\".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro]))\n-_PYTHON_LOWER_3_8 = LooseVersion(_PYTHON_VERSION) < LooseVersion(\"3.8\")\n+_PYTHON_LOWER_3_8 = parse(_PYTHON_VERSION) < Version(\"3.8\")\n _TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version(\"torch\", operator.lt, \"1.12.0.dev\")\n _TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.9.0\")\n _TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.10.0\")\n", "issue": "DeprecationWarning: The distutils package is deprecated\n## \ud83d\udc1b Bug\r\n\r\nTorchMetrics still uses distutils, but distutils is deprecated.\r\n\r\n### To Reproduce\r\n\r\n```console\r\n> python3 -Wall\r\n>>> import torchmetrics\r\nlib/python3.11/site-packages/torchmetrics/utilities/imports.py:18: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives\r\n from distutils.version import LooseVersion\r\n```\r\n\r\n### Expected behavior\r\n\r\nI would not expect TorchMetrics to still be using distutils.\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 1.1, 1.2, master (spack)\r\n- Python & PyTorch Version: 3.11.4 & 2.0.1\r\n- Any other relevant information such as OS: macOS 13.5.2\r\n\r\n### Additional context\r\n\r\nI also sometimes see a different error message from pytest:\r\n```\r\nDeprecationWarning: distutils Version classes are deprecated. 
Use packaging.version instead.\r\n```\r\nThis gives one possible solution.\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Import utilities.\"\"\"\nimport operator\nimport shutil\nimport sys\nfrom distutils.version import LooseVersion\nfrom typing import Optional\n\nfrom lightning_utilities.core.imports import compare_version, package_available\n\n_PYTHON_VERSION = \".\".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro]))\n_PYTHON_LOWER_3_8 = LooseVersion(_PYTHON_VERSION) < LooseVersion(\"3.8\")\n_TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version(\"torch\", operator.lt, \"1.12.0.dev\")\n_TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.9.0\")\n_TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.10.0\")\n_TORCH_GREATER_EQUAL_1_11: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.11.0\")\n_TORCH_GREATER_EQUAL_1_12: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.12.0\")\n_TORCH_GREATER_EQUAL_1_13: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.13.0\")\n\n_JIWER_AVAILABLE: bool = package_available(\"jiwer\")\n_NLTK_AVAILABLE: bool = package_available(\"nltk\")\n_ROUGE_SCORE_AVAILABLE: bool = package_available(\"rouge_score\")\n_BERTSCORE_AVAILABLE: bool = package_available(\"bert_score\")\n_SCIPY_AVAILABLE: bool = package_available(\"scipy\")\n_TORCH_FIDELITY_AVAILABLE: bool = package_available(\"torch_fidelity\")\n_LPIPS_AVAILABLE: bool = package_available(\"lpips\")\n_PYCOCOTOOLS_AVAILABLE: bool = package_available(\"pycocotools\")\n_TORCHVISION_AVAILABLE: bool = package_available(\"torchvision\")\n_TORCHVISION_GREATER_EQUAL_0_8: Optional[bool] = compare_version(\"torchvision\", operator.ge, \"0.8.0\")\n_TORCHVISION_GREATER_EQUAL_0_13: Optional[bool] = compare_version(\"torchvision\", operator.ge, \"0.13.0\")\n_TQDM_AVAILABLE: bool = package_available(\"tqdm\")\n_TRANSFORMERS_AVAILABLE: bool = package_available(\"transformers\")\n_TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version(\"transformers\", operator.ge, \"4.4.0\")\n_TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version(\"transformers\", operator.ge, \"4.10.0\")\n_PESQ_AVAILABLE: bool = package_available(\"pesq\")\n_GAMMATONE_AVAILABLE: bool = package_available(\"gammatone\")\n_TORCHAUDIO_AVAILABLE: bool = package_available(\"torchaudio\")\n_TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version(\"torchaudio\", operator.ge, \"0.10.0\")\n_SACREBLEU_AVAILABLE: bool = package_available(\"sacrebleu\")\n_REGEX_AVAILABLE: bool = package_available(\"regex\")\n_PYSTOI_AVAILABLE: bool = package_available(\"pystoi\")\n_FAST_BSS_EVAL_AVAILABLE: bool = package_available(\"fast_bss_eval\")\n_MATPLOTLIB_AVAILABLE: bool = package_available(\"matplotlib\")\n_SCIENCEPLOT_AVAILABLE: bool = package_available(\"scienceplots\")\n_MULTIPROCESSING_AVAILABLE: bool = 
package_available(\"multiprocessing\")\n_XLA_AVAILABLE: bool = package_available(\"torch_xla\")\n_PIQ_GREATER_EQUAL_0_8: Optional[bool] = compare_version(\"piq\", operator.ge, \"0.8.0\")\n_FASTER_COCO_EVAL_AVAILABLE: bool = package_available(\"faster_coco_eval\")\n\n_LATEX_AVAILABLE: bool = shutil.which(\"latex\") is not None\n", "path": "src/torchmetrics/utilities/imports.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport traceback\nfrom distutils.version import LooseVersion\nfrom typing import List, Optional, Tuple, Union\n\nimport fire\nimport requests\nfrom pkg_resources import parse_requirements\n\n_REQUEST_TIMEOUT = 10\n_PATH_ROOT = os.path.dirname(os.path.dirname(__file__))\n_PKG_WIDE_SUBPACKAGES = (\"utilities\", \"helpers\")\nLUT_PYTHON_TORCH = {\n \"3.8\": \"1.4\",\n \"3.9\": \"1.7.1\",\n \"3.10\": \"1.11\",\n \"3.11\": \"1.13\",\n}\n_path = lambda *ds: os.path.join(_PATH_ROOT, *ds)\nREQUIREMENTS_FILES = (*glob.glob(_path(\"requirements\", \"*.txt\")), _path(\"requirements.txt\"))\n\n\ndef request_url(url: str, auth_token: Optional[str] = None) -> Optional[dict]:\n \"\"\"General request with checking if request limit was reached.\"\"\"\n auth_header = {\"Authorization\": f\"token {auth_token}\"} if auth_token else {}\n try:\n req = requests.get(url, headers=auth_header, timeout=_REQUEST_TIMEOUT)\n except requests.exceptions.Timeout:\n traceback.print_exc()\n return None\n if req.status_code == 403:\n return None\n return json.loads(req.content.decode(req.encoding))\n\n\nclass AssistantCLI:\n \"\"\"CLI assistant for local CI.\"\"\"\n\n @staticmethod\n def prune_packages(req_file: str, *pkgs: str) -> None:\n \"\"\"Prune packages from requirement file.\"\"\"\n with open(req_file) as fp:\n lines = fp.readlines()\n\n for pkg in pkgs:\n lines = [ln for ln in lines if not ln.startswith(pkg)]\n logging.info(lines)\n\n with open(req_file, \"w\", encoding=\"utf-8\") as fp:\n fp.writelines(lines)\n\n @staticmethod\n def set_min_torch_by_python(fpath: str = \"requirements/base.txt\") -> None:\n \"\"\"Set minimal torch version according to Python actual version.\n\n >>> AssistantCLI.set_min_torch_by_python(\"../requirements/base.txt\")\n\n \"\"\"\n py_ver = f\"{sys.version_info.major}.{sys.version_info.minor}\"\n if py_ver not in LUT_PYTHON_TORCH:\n return\n with open(fpath) as fp:\n reqs = parse_requirements(fp.readlines())\n pkg_ver = next(p for p in reqs if p.name == \"torch\")\n pt_ver = min([LooseVersion(v[1]) for v in pkg_ver.specs])\n pt_ver = max(LooseVersion(LUT_PYTHON_TORCH[py_ver]), pt_ver)\n with open(fpath) as fp:\n requires = fp.read()\n requires = re.sub(r\"torch>=[\\d\\.]+\", f\"torch>={pt_ver}\", requires)\n with open(fpath, \"w\", encoding=\"utf-8\") as fp:\n fp.write(requires)\n\n @staticmethod\n def replace_min_requirements(fpath: str) -> None:\n \"\"\"Replace all `>=` by `==` in given file.\"\"\"\n logging.info(f\"processing: {fpath}\")\n 
with open(fpath, encoding=\"utf-8\") as fp:\n req = fp.read()\n req = req.replace(\">=\", \"==\")\n with open(fpath, \"w\", encoding=\"utf-8\") as fp:\n fp.write(req)\n\n @staticmethod\n def set_oldest_versions(req_files: List[str] = REQUIREMENTS_FILES) -> None:\n \"\"\"Set the oldest version for requirements.\"\"\"\n AssistantCLI.set_min_torch_by_python()\n for fpath in req_files:\n AssistantCLI.replace_min_requirements(fpath)\n\n @staticmethod\n def changed_domains(\n pr: Optional[int] = None,\n auth_token: Optional[str] = None,\n as_list: bool = False,\n general_sub_pkgs: Tuple[str] = _PKG_WIDE_SUBPACKAGES,\n ) -> Union[str, List[str]]:\n \"\"\"Determine what domains were changed in particular PR.\"\"\"\n if not pr:\n return \"unittests\"\n url = f\"https://api.github.com/repos/Lightning-AI/torchmetrics/pulls/{pr}/files\"\n logging.debug(url)\n data = request_url(url, auth_token)\n if not data:\n logging.debug(\"WARNING: No data was received -> test everything.\")\n return \"unittests\"\n files = [d[\"filename\"] for d in data]\n\n # filter out all integrations as they run in separate suit\n files = [fn for fn in files if not fn.startswith(\"tests/integrations\")]\n if not files:\n logging.debug(\"Only integrations was changed so not reason for deep testing...\")\n return \"\"\n # filter only docs files\n files_ = [fn for fn in files if fn.startswith(\"docs\")]\n if len(files) == len(files_):\n logging.debug(\"Only docs was changed so not reason for deep testing...\")\n return \"\"\n\n # filter only package files and skip inits\n _is_in_test = lambda fn: fn.startswith(\"tests\")\n _filter_pkg = lambda fn: _is_in_test(fn) or (fn.startswith(\"src/torchmetrics\") and \"__init__.py\" not in fn)\n files_pkg = [fn for fn in files if _filter_pkg(fn)]\n if not files_pkg:\n return \"unittests\"\n\n # parse domains\n def _crop_path(fname: str, paths: List[str]) -> str:\n for p in paths:\n fname = fname.replace(p, \"\")\n return fname\n\n files_pkg = [_crop_path(fn, [\"src/torchmetrics/\", \"tests/unittests/\", \"functional/\"]) for fn in files_pkg]\n # filter domain names\n tm_modules = [fn.split(\"/\")[0] for fn in files_pkg if \"/\" in fn]\n # filter general (used everywhere) sub-packages\n tm_modules = [md for md in tm_modules if md not in general_sub_pkgs]\n if len(files_pkg) > len(tm_modules):\n logging.debug(\"Some more files was changed -> rather test everything...\")\n return \"unittests\"\n # keep only unique\n if as_list:\n return list(tm_modules)\n tm_modules = [f\"unittests/{md}\" for md in set(tm_modules)]\n not_exists = [p for p in tm_modules if os.path.exists(p)]\n if not_exists:\n raise ValueError(f\"Missing following paths: {not_exists}\")\n return \" \".join(tm_modules)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n fire.Fire(AssistantCLI)\n", "path": ".github/assistant.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Import utilities.\"\"\"\nimport operator\nimport 
shutil\nimport sys\nfrom typing import Optional\n\nfrom lightning_utilities.core.imports import compare_version, package_available\nfrom packaging.version import Version, parse\n\n_PYTHON_VERSION = \".\".join(map(str, [sys.version_info.major, sys.version_info.minor, sys.version_info.micro]))\n_PYTHON_LOWER_3_8 = parse(_PYTHON_VERSION) < Version(\"3.8\")\n_TORCH_LOWER_1_12_DEV: Optional[bool] = compare_version(\"torch\", operator.lt, \"1.12.0.dev\")\n_TORCH_GREATER_EQUAL_1_9: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.9.0\")\n_TORCH_GREATER_EQUAL_1_10: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.10.0\")\n_TORCH_GREATER_EQUAL_1_11: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.11.0\")\n_TORCH_GREATER_EQUAL_1_12: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.12.0\")\n_TORCH_GREATER_EQUAL_1_13: Optional[bool] = compare_version(\"torch\", operator.ge, \"1.13.0\")\n\n_JIWER_AVAILABLE: bool = package_available(\"jiwer\")\n_NLTK_AVAILABLE: bool = package_available(\"nltk\")\n_ROUGE_SCORE_AVAILABLE: bool = package_available(\"rouge_score\")\n_BERTSCORE_AVAILABLE: bool = package_available(\"bert_score\")\n_SCIPY_AVAILABLE: bool = package_available(\"scipy\")\n_TORCH_FIDELITY_AVAILABLE: bool = package_available(\"torch_fidelity\")\n_LPIPS_AVAILABLE: bool = package_available(\"lpips\")\n_PYCOCOTOOLS_AVAILABLE: bool = package_available(\"pycocotools\")\n_TORCHVISION_AVAILABLE: bool = package_available(\"torchvision\")\n_TORCHVISION_GREATER_EQUAL_0_8: Optional[bool] = compare_version(\"torchvision\", operator.ge, \"0.8.0\")\n_TORCHVISION_GREATER_EQUAL_0_13: Optional[bool] = compare_version(\"torchvision\", operator.ge, \"0.13.0\")\n_TQDM_AVAILABLE: bool = package_available(\"tqdm\")\n_TRANSFORMERS_AVAILABLE: bool = package_available(\"transformers\")\n_TRANSFORMERS_GREATER_EQUAL_4_4: Optional[bool] = compare_version(\"transformers\", operator.ge, \"4.4.0\")\n_TRANSFORMERS_GREATER_EQUAL_4_10: Optional[bool] = compare_version(\"transformers\", operator.ge, \"4.10.0\")\n_PESQ_AVAILABLE: bool = package_available(\"pesq\")\n_GAMMATONE_AVAILABLE: bool = package_available(\"gammatone\")\n_TORCHAUDIO_AVAILABLE: bool = package_available(\"torchaudio\")\n_TORCHAUDIO_GREATER_EQUAL_0_10: Optional[bool] = compare_version(\"torchaudio\", operator.ge, \"0.10.0\")\n_SACREBLEU_AVAILABLE: bool = package_available(\"sacrebleu\")\n_REGEX_AVAILABLE: bool = package_available(\"regex\")\n_PYSTOI_AVAILABLE: bool = package_available(\"pystoi\")\n_FAST_BSS_EVAL_AVAILABLE: bool = package_available(\"fast_bss_eval\")\n_MATPLOTLIB_AVAILABLE: bool = package_available(\"matplotlib\")\n_SCIENCEPLOT_AVAILABLE: bool = package_available(\"scienceplots\")\n_MULTIPROCESSING_AVAILABLE: bool = package_available(\"multiprocessing\")\n_XLA_AVAILABLE: bool = package_available(\"torch_xla\")\n_PIQ_GREATER_EQUAL_0_8: Optional[bool] = compare_version(\"piq\", operator.ge, \"0.8.0\")\n_FASTER_COCO_EVAL_AVAILABLE: bool = package_available(\"faster_coco_eval\")\n\n_LATEX_AVAILABLE: bool = shutil.which(\"latex\") is not None\n", "path": "src/torchmetrics/utilities/imports.py"}, {"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport json\nimport logging\nimport os\nimport re\nimport sys\nimport traceback\nfrom typing import List, Optional, Tuple, Union\n\nimport fire\nimport requests\nfrom packaging.version import parse\nfrom pkg_resources import parse_requirements\n\n_REQUEST_TIMEOUT = 10\n_PATH_ROOT = os.path.dirname(os.path.dirname(__file__))\n_PKG_WIDE_SUBPACKAGES = (\"utilities\", \"helpers\")\nLUT_PYTHON_TORCH = {\n \"3.8\": \"1.4\",\n \"3.9\": \"1.7.1\",\n \"3.10\": \"1.11\",\n \"3.11\": \"1.13\",\n}\n_path = lambda *ds: os.path.join(_PATH_ROOT, *ds)\nREQUIREMENTS_FILES = (*glob.glob(_path(\"requirements\", \"*.txt\")), _path(\"requirements.txt\"))\n\n\ndef request_url(url: str, auth_token: Optional[str] = None) -> Optional[dict]:\n \"\"\"General request with checking if request limit was reached.\"\"\"\n auth_header = {\"Authorization\": f\"token {auth_token}\"} if auth_token else {}\n try:\n req = requests.get(url, headers=auth_header, timeout=_REQUEST_TIMEOUT)\n except requests.exceptions.Timeout:\n traceback.print_exc()\n return None\n if req.status_code == 403:\n return None\n return json.loads(req.content.decode(req.encoding))\n\n\nclass AssistantCLI:\n \"\"\"CLI assistant for local CI.\"\"\"\n\n @staticmethod\n def prune_packages(req_file: str, *pkgs: str) -> None:\n \"\"\"Prune packages from requirement file.\"\"\"\n with open(req_file) as fp:\n lines = fp.readlines()\n\n for pkg in pkgs:\n lines = [ln for ln in lines if not ln.startswith(pkg)]\n logging.info(lines)\n\n with open(req_file, \"w\", encoding=\"utf-8\") as fp:\n fp.writelines(lines)\n\n @staticmethod\n def set_min_torch_by_python(fpath: str = \"requirements/base.txt\") -> None:\n \"\"\"Set minimal torch version according to Python actual version.\n\n >>> AssistantCLI.set_min_torch_by_python(\"../requirements/base.txt\")\n\n \"\"\"\n py_ver = f\"{sys.version_info.major}.{sys.version_info.minor}\"\n if py_ver not in LUT_PYTHON_TORCH:\n return\n with open(fpath) as fp:\n reqs = parse_requirements(fp.readlines())\n pkg_ver = next(p for p in reqs if p.name == \"torch\")\n pt_ver = min([parse(v[1]) for v in pkg_ver.specs])\n pt_ver = max(parse(LUT_PYTHON_TORCH[py_ver]), pt_ver)\n with open(fpath) as fp:\n requires = fp.read()\n requires = re.sub(r\"torch>=[\\d\\.]+\", f\"torch>={pt_ver}\", requires)\n with open(fpath, \"w\", encoding=\"utf-8\") as fp:\n fp.write(requires)\n\n @staticmethod\n def replace_min_requirements(fpath: str) -> None:\n \"\"\"Replace all `>=` by `==` in given file.\"\"\"\n logging.info(f\"processing: {fpath}\")\n with open(fpath, encoding=\"utf-8\") as fp:\n req = fp.read()\n req = req.replace(\">=\", \"==\")\n with open(fpath, \"w\", encoding=\"utf-8\") as fp:\n fp.write(req)\n\n @staticmethod\n def set_oldest_versions(req_files: List[str] = REQUIREMENTS_FILES) -> None:\n \"\"\"Set the oldest version for requirements.\"\"\"\n AssistantCLI.set_min_torch_by_python()\n for fpath in req_files:\n AssistantCLI.replace_min_requirements(fpath)\n\n @staticmethod\n def changed_domains(\n pr: Optional[int] = None,\n auth_token: Optional[str] = None,\n as_list: bool = False,\n general_sub_pkgs: Tuple[str] = _PKG_WIDE_SUBPACKAGES,\n ) -> Union[str, List[str]]:\n \"\"\"Determine what domains were changed in particular PR.\"\"\"\n if not pr:\n return \"unittests\"\n url = 
f\"https://api.github.com/repos/Lightning-AI/torchmetrics/pulls/{pr}/files\"\n logging.debug(url)\n data = request_url(url, auth_token)\n if not data:\n logging.debug(\"WARNING: No data was received -> test everything.\")\n return \"unittests\"\n files = [d[\"filename\"] for d in data]\n\n # filter out all integrations as they run in separate suit\n files = [fn for fn in files if not fn.startswith(\"tests/integrations\")]\n if not files:\n logging.debug(\"Only integrations was changed so not reason for deep testing...\")\n return \"\"\n # filter only docs files\n files_ = [fn for fn in files if fn.startswith(\"docs\")]\n if len(files) == len(files_):\n logging.debug(\"Only docs was changed so not reason for deep testing...\")\n return \"\"\n\n # filter only package files and skip inits\n _is_in_test = lambda fn: fn.startswith(\"tests\")\n _filter_pkg = lambda fn: _is_in_test(fn) or (fn.startswith(\"src/torchmetrics\") and \"__init__.py\" not in fn)\n files_pkg = [fn for fn in files if _filter_pkg(fn)]\n if not files_pkg:\n return \"unittests\"\n\n # parse domains\n def _crop_path(fname: str, paths: List[str]) -> str:\n for p in paths:\n fname = fname.replace(p, \"\")\n return fname\n\n files_pkg = [_crop_path(fn, [\"src/torchmetrics/\", \"tests/unittests/\", \"functional/\"]) for fn in files_pkg]\n # filter domain names\n tm_modules = [fn.split(\"/\")[0] for fn in files_pkg if \"/\" in fn]\n # filter general (used everywhere) sub-packages\n tm_modules = [md for md in tm_modules if md not in general_sub_pkgs]\n if len(files_pkg) > len(tm_modules):\n logging.debug(\"Some more files was changed -> rather test everything...\")\n return \"unittests\"\n # keep only unique\n if as_list:\n return list(tm_modules)\n tm_modules = [f\"unittests/{md}\" for md in set(tm_modules)]\n not_exists = [p for p in tm_modules if os.path.exists(p)]\n if not_exists:\n raise ValueError(f\"Missing following paths: {not_exists}\")\n return \" \".join(tm_modules)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n fire.Fire(AssistantCLI)\n", "path": ".github/assistant.py"}]}
3,590
549
gh_patches_debug_3497
rasdani/github-patches
git_diff
ManimCommunity__manim-646
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The config value `html_favicon' has type `PosixPath'; expected `str'. BTW when building the docs now I get ``` WARNING: The config value `html_favicon' has type `PosixPath'; expected `str'. ``` _Originally posted by @leotrs in https://github.com/ManimCommunity/manim/pull/631#issuecomment-719075737_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 13 import os 14 import subprocess 15 import sys 16 from distutils.sysconfig import get_python_lib 17 from pathlib import Path 18 19 sys.path.insert(0, os.path.abspath(".")) 20 21 22 if os.environ.get("READTHEDOCS") == "True": 23 site_path = get_python_lib() 24 # bindings for pangocffi, cairocffi, pangocairocffi need to be generated 25 subprocess.run(["python", "pangocffi/ffi_build.py"], cwd=site_path) 26 subprocess.run(["python", "cairocffi/ffi_build.py"], cwd=site_path) 27 subprocess.run(["python", "pangocairocffi/ffi_build.py"], cwd=site_path) 28 # we need to add ffmpeg to the path 29 ffmpeg_path = os.path.join(site_path, "imageio_ffmpeg", "binaries") 30 # the included binary is named ffmpeg-linux..., create a symlink 31 [ffmpeg_bin] = [ 32 file for file in os.listdir(ffmpeg_path) if file.startswith("ffmpeg-") 33 ] 34 os.symlink( 35 os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, "ffmpeg") 36 ) 37 os.environ["PATH"] += os.pathsep + ffmpeg_path 38 39 40 # -- Project information ----------------------------------------------------- 41 42 project = "Manim" 43 copyright = "2020, The Manim Community Dev Team" 44 author = "The Manim Community Dev Team" 45 46 47 # -- General configuration --------------------------------------------------- 48 49 # Add any Sphinx extension module names here, as strings. They can be 50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 51 # ones. 52 extensions = [ 53 "sphinx.ext.autodoc", 54 "recommonmark", 55 "sphinx_copybutton", 56 "sphinx.ext.napoleon", 57 "sphinx.ext.autosummary", 58 "sphinx.ext.doctest", 59 "manim_directive", 60 ] 61 62 # Automatically generate stub pages when using the .. autosummary directive 63 autosummary_generate = True 64 65 # Add any paths that contain templates here, relative to this directory. 66 templates_path = ["_templates"] 67 68 # List of patterns, relative to source directory, that match files and 69 # directories to ignore when looking for source files. 70 # This pattern also affects html_static_path and html_extra_path. 71 exclude_patterns = [] 72 73 74 # -- Options for HTML output ------------------------------------------------- 75 76 # The theme to use for HTML and HTML Help pages. See the documentation for 77 # a list of builtin themes. 
78 # 79 import guzzle_sphinx_theme 80 81 html_theme_path = guzzle_sphinx_theme.html_theme_path() 82 html_theme = "guzzle_sphinx_theme" 83 html_favicon = Path("_static/favicon.ico") 84 85 # There's a standing issue with Sphinx's new-style sidebars. This is a 86 # workaround. Taken from 87 # https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826 88 html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]} 89 90 # Register the theme as an extension to generate a sitemap.xml 91 extensions.append("guzzle_sphinx_theme") 92 93 # Add any paths that contain custom static files (such as style sheets) here, 94 # relative to this directory. They are copied after the builtin static files, 95 # so a file named "default.css" will overwrite the builtin "default.css". 96 html_static_path = ["_static"] 97 98 # This specifies any additional css files that will override the theme's 99 html_css_files = ["custom.css"] 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -80,7 +80,7 @@ html_theme_path = guzzle_sphinx_theme.html_theme_path() html_theme = "guzzle_sphinx_theme" -html_favicon = Path("_static/favicon.ico") +html_favicon = str(Path("_static/favicon.ico")) # There's a standing issue with Sphinx's new-style sidebars. This is a # workaround. Taken from
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -80,7 +80,7 @@\n \n html_theme_path = guzzle_sphinx_theme.html_theme_path()\n html_theme = \"guzzle_sphinx_theme\"\n-html_favicon = Path(\"_static/favicon.ico\")\n+html_favicon = str(Path(\"_static/favicon.ico\"))\n \n # There's a standing issue with Sphinx's new-style sidebars. This is a\n # workaround. Taken from\n", "issue": "The config value `html_favicon' has type `PosixPath'; expected `str'.\nBTW when building the docs now I get\r\n```\r\nWARNING: The config value `html_favicon' has type `PosixPath'; expected `str'.\r\n```\r\n\r\n_Originally posted by @leotrs in https://github.com/ManimCommunity/manim/pull/631#issuecomment-719075737_\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = Path(\"_static/favicon.ico\")\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport subprocess\nimport sys\nfrom distutils.sysconfig import get_python_lib\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\nif os.environ.get(\"READTHEDOCS\") == \"True\":\n site_path = get_python_lib()\n # bindings for pangocffi, cairocffi, pangocairocffi need to be generated\n subprocess.run([\"python\", \"pangocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"cairocffi/ffi_build.py\"], cwd=site_path)\n subprocess.run([\"python\", \"pangocairocffi/ffi_build.py\"], cwd=site_path)\n # we need to add ffmpeg to the path\n ffmpeg_path = os.path.join(site_path, \"imageio_ffmpeg\", \"binaries\")\n # the included binary is named ffmpeg-linux..., create a symlink\n [ffmpeg_bin] = [\n file for file in os.listdir(ffmpeg_path) if file.startswith(\"ffmpeg-\")\n ]\n os.symlink(\n os.path.join(ffmpeg_path, ffmpeg_bin), os.path.join(ffmpeg_path, \"ffmpeg\")\n )\n os.environ[\"PATH\"] += os.pathsep + ffmpeg_path\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"recommonmark\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"manim_directive\",\n]\n\n# Automatically generate stub pages when using the .. autosummary directive\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nimport guzzle_sphinx_theme\n\nhtml_theme_path = guzzle_sphinx_theme.html_theme_path()\nhtml_theme = \"guzzle_sphinx_theme\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# There's a standing issue with Sphinx's new-style sidebars. This is a\n# workaround. Taken from\n# https://github.com/guzzle/guzzle_sphinx_theme/issues/33#issuecomment-637081826\nhtml_sidebars = {\"**\": [\"logo-text.html\", \"globaltoc.html\", \"searchbox.html\"]}\n\n# Register the theme as an extension to generate a sitemap.xml\nextensions.append(\"guzzle_sphinx_theme\")\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n", "path": "docs/source/conf.py"}]}
1,408
113
gh_patches_debug_10794
rasdani/github-patches
git_diff
jupyterhub__zero-to-jupyterhub-k8s-1564
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Document the fact that access to cloud metadata is disabled by default It's not immediately obvious that `iptables` are overridden with an init container, blocking access to EC2 metadata. This should be documented somewhere. Perhaps in https://zero-to-jupyterhub.readthedocs.io/en/latest/amazon/step-zero-aws.html. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `doc/source/conf.py` Content: ``` 1 2 # Configuration file for the Sphinx documentation builder. 3 # 4 # This file only contains a selection of the most common options. For a full 5 # list see the documentation: 6 # https://www.sphinx-doc.org/en/master/usage/configuration.html 7 8 # -- Path setup -------------------------------------------------------------- 9 10 # If extensions (or modules to document with autodoc) are in another directory, 11 # add these directories to sys.path here. If the directory is relative to the 12 # documentation root, use os.path.abspath to make it absolute, like shown here. 13 # 14 # import os 15 # import sys 16 # sys.path.insert(0, os.path.abspath('.')) 17 18 19 # -- Project specific imports ------------------------------------------------ 20 21 from datetime import date 22 23 import yaml 24 25 26 # -- Sphinx setup function --------------------------------------------------- 27 # ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function 28 29 def setup(app): 30 app.add_css_file('custom.css') 31 32 33 # -- Project information ----------------------------------------------------- 34 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information 35 36 project = 'Zero to JupyterHub with Kubernetes' 37 copyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year) 38 author = 'Project Jupyter Contributors' 39 40 # The version info for the project you're documenting, acts as replacement for 41 # |version| and |release|, also used in various other places throughout the 42 # built documents. 43 # 44 with open('../../jupyterhub/Chart.yaml') as f: 45 chart = yaml.safe_load(f) 46 version = chart['version'].split('-', 1)[0] 47 release = chart['version'] 48 49 # Project specific variables 50 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog 51 rst_epilog = """ 52 .. |hub_version| replace:: {v} 53 """.format(v=chart['appVersion']) 54 55 56 # -- General configuration --------------------------------------------------- 57 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration 58 59 # Set the default role so we can use `foo` instead of ``foo`` 60 default_role = 'literal' 61 62 # Add any Sphinx extension module names here, as strings. They can be 63 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 64 # ones. 65 extensions = ['sphinx.ext.mathjax', 66 'sphinx_copybutton', 67 'myst_parser'] 68 69 # List of patterns, relative to source directory, that match files and 70 # directories to ignore when looking for source files. 71 # This pattern also affects html_static_path and html_extra_path. 72 exclude_patterns = [] 73 74 # The master toctree document. 75 master_doc = 'index' 76 77 # The suffix(es) of source filenames. 
78 source_suffix = ['.rst', '.md'] 79 80 81 # -- Generate the Helm chart configuration reference from a schema file ------ 82 83 # header 84 with open('reference/reference.txt', 'r') as f: 85 header_md = f.readlines() 86 header_md = header_md[1:] 87 header_md = [ln.strip('\n') for ln in header_md] 88 89 # schema 90 with open('../../jupyterhub/schema.yaml', 'r') as f: 91 data = yaml.safe_load(f) 92 def parse_schema(d, md=[], depth=0, pre=''): 93 """ 94 Generate markdown headers from a passed python dictionary created by 95 parsing a schema.yaml file. 96 """ 97 if 'properties' in d: 98 depth += 1 99 # Create markdown headers for each schema level 100 for key, val in d['properties'].items(): 101 md.append('#'*(depth + 1) + ' ' + pre + key) 102 md.append('') 103 if 'description' in val: 104 for ln in val['description'].split('\n'): 105 md.append(ln) 106 md.append('') 107 108 parse_schema(val, md, depth, pre+'{}.'.format(key)) 109 depth -= 1 110 return md 111 schema_md = parse_schema(data) 112 113 # reference = header + schema 114 reference_md = header_md + schema_md 115 with open('reference/reference.md', 'w') as f: 116 f.write('\n'.join(reference_md)) 117 118 119 # -- Options for linkcheck builder ------------------------------------------- 120 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder 121 linkcheck_ignore = [ 122 r'(.*)github\.com(.*)#', # javascript based anchors 123 r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case 124 "https://your-domain.com", # example 125 "https://your-domain-name.com", # example 126 "https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works 127 "https://cloud.ibm.com/kubernetes/catalog/create", # works 128 ] 129 linkcheck_anchors_ignore = [ 130 "/#!", 131 "/#%21", 132 ] 133 134 135 # -- Options for HTML output ------------------------------------------------- 136 # ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output 137 138 # The theme to use for HTML and HTML Help pages. See the documentation for 139 # a list of builtin themes. 140 # 141 html_theme = 'pydata_sphinx_theme' 142 html_favicon = '_static/images/logo/favicon.ico' 143 html_logo = '_static/images/logo/logo.png' 144 145 # Add any paths that contain custom static files (such as style sheets) here, 146 # relative to this directory. They are copied after the builtin static files, 147 # so a file named "default.css" will overwrite the builtin "default.css". 148 html_static_path = ['_static'] 149 150 151 152 153 # -- Below are options for more esoteric output ------------------------------- 154 # ----------------------------------------------------------------------------- 155 156 157 # -- Options for HTML help output --------------------------------------------- 158 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output 159 160 # Output file base name for HTML help builder. 161 htmlhelp_basename = 'ZeroToJupyterhubDoc' 162 163 164 # -- Options for LaTeX output ------------------------------------------------ 165 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output 166 167 latex_elements = { 168 # The paper size ('letterpaper' or 'a4paper'). 169 # 170 # 'papersize': 'letterpaper', 171 172 # The font size ('10pt', '11pt' or '12pt'). 173 # 174 # 'pointsize': '10pt', 175 176 # Additional stuff for the LaTeX preamble. 
177 # 178 # 'preamble': '', 179 180 # Latex figure (float) alignment 181 # 182 # 'figure_align': 'htbp', 183 } 184 185 # Grouping the document tree into LaTeX files. 186 latex_documents = [ 187 ( 188 master_doc, # source start file 189 'ZeroToJupyterhubDoc.tex', # target name 190 'Zero to JupyterHub', # title 191 author, # author 192 'manual' # documentclass [howto, manual, or own class] 193 ), 194 ] 195 196 197 # -- Options for manual page output ------------------------------------------ 198 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output 199 200 # One entry per manual page. 201 man_pages = [ 202 ( 203 master_doc, # source start file 204 'zerotojupyterhub', # name 205 'Zero to JupyterHub', # description 206 [author], # authors 207 1, # manual section 208 ), 209 ] 210 211 212 # -- Options for Texinfo output ---------------------------------------------- 213 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output 214 215 # Grouping the document tree into Texinfo files. 216 texinfo_documents = [ 217 ( 218 master_doc, # source start file 219 'ZeroToJupyterhubDoc', # target name 220 'Zero to JupyterHub', # title 221 author, # author 222 'ZeroToJupyterhubDoc', # dir menu entry 223 'One line description of project.', # description 224 'Miscellaneous' # category 225 ), 226 ] 227 228 229 # -- Options for epub output ------------------------------------------------- 230 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output 231 232 # Bibliographic Dublin Core info. 233 epub_title = project 234 epub_author = author 235 epub_publisher = author 236 epub_copyright = copyright 237 238 # The unique identifier of the text. This can be a ISBN number 239 # or the project homepage. 240 # 241 # epub_identifier = '' 242 243 # A unique identification for the text. 244 # 245 # epub_uid = '' 246 247 # A list of files that should not be packed into the epub file. 248 epub_exclude_files = ['search.html'] 249 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -138,7 +138,19 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # + html_theme = 'pydata_sphinx_theme' +html_theme_options = { + "github_url": "https://github.com/jupyterhub/zero-to-jupyterhub-k8s/", + "use_edit_page_button": True, +} +html_context = { + "github_user": "jupyterhub", + "github_repo": "zero-to-jupyterhub-k8s", + "github_version": "master", + "doc_path":"doc", +} + html_favicon = '_static/images/logo/favicon.ico' html_logo = '_static/images/logo/logo.png'
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -138,7 +138,19 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n+\n html_theme = 'pydata_sphinx_theme'\n+html_theme_options = {\n+ \"github_url\": \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/\",\n+ \"use_edit_page_button\": True,\n+}\n+html_context = {\n+ \"github_user\": \"jupyterhub\",\n+ \"github_repo\": \"zero-to-jupyterhub-k8s\",\n+ \"github_version\": \"master\",\n+ \"doc_path\":\"doc\",\n+}\n+\n html_favicon = '_static/images/logo/favicon.ico'\n html_logo = '_static/images/logo/logo.png'\n", "issue": "Document the fact that access to cloud metadata is disabled by default\nIt's not immediately obvious that `iptables` are overridden with an init container, blocking access to EC2 metadata.\r\n\r\nThis should be documented somewhere. Perhaps in https://zero-to-jupyterhub.readthedocs.io/en/latest/amazon/step-zero-aws.html.\n", "before_files": [{"content": "\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nfrom datetime import date\n\nimport yaml\n\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information\n\nproject = 'Zero to JupyterHub with Kubernetes'\ncopyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)\nauthor = 'Project Jupyter Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nwith open('../../jupyterhub/Chart.yaml') as f:\n chart = yaml.safe_load(f)\nversion = chart['version'].split('-', 1)[0]\nrelease = chart['version']\n\n# Project specific variables\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog\nrst_epilog = \"\"\"\n.. |hub_version| replace:: {v}\n\"\"\".format(v=chart['appVersion'])\n\n\n# -- General configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton',\n 'myst_parser']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\nsource_suffix = ['.rst', '.md']\n\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open('reference/reference.txt', 'r') as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip('\\n') for ln in header_md]\n\n# schema\nwith open('../../jupyterhub/schema.yaml', 'r') as f:\n data = yaml.safe_load(f)\ndef parse_schema(d, md=[], depth=0, pre=''):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if 'properties' in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d['properties'].items():\n md.append('#'*(depth + 1) + ' ' + pre + key)\n md.append('')\n if 'description' in val:\n for ln in val['description'].split('\\n'):\n md.append(ln)\n md.append('')\n\n parse_schema(val, md, depth, pre+'{}.'.format(key))\n depth -= 1\n return md\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open('reference/reference.md', 'w') as f:\n f.write('\\n'.join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r'(.*)github\\.com(.*)#', # javascript based anchors\n r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n\n\n# -- Below are options for more esoteric output -------------------------------\n# -----------------------------------------------------------------------------\n\n\n# -- Options for HTML help output ---------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files.\nlatex_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc.tex', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'manual' # documentclass [howto, manual, or own class]\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output\n\n# One entry per manual page.\nman_pages = [\n (\n master_doc, # source start file\n 'zerotojupyterhub', # name\n 'Zero to JupyterHub', # description\n [author], # authors\n 1, # manual section\n ),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output\n\n# Grouping the document tree into Texinfo files.\ntexinfo_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'ZeroToJupyterhubDoc', # dir menu entry\n 'One line description of project.', # description\n 'Miscellaneous' # category\n ),\n]\n\n\n# -- Options for epub output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}], "after_files": [{"content": "\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nfrom datetime import date\n\nimport yaml\n\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information\n\nproject = 'Zero to JupyterHub with Kubernetes'\ncopyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)\nauthor = 'Project Jupyter Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nwith open('../../jupyterhub/Chart.yaml') as f:\n chart = yaml.safe_load(f)\nversion = chart['version'].split('-', 1)[0]\nrelease = chart['version']\n\n# Project specific variables\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog\nrst_epilog = \"\"\"\n.. |hub_version| replace:: {v}\n\"\"\".format(v=chart['appVersion'])\n\n\n# -- General configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton',\n 'myst_parser']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\nsource_suffix = ['.rst', '.md']\n\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open('reference/reference.txt', 'r') as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip('\\n') for ln in header_md]\n\n# schema\nwith open('../../jupyterhub/schema.yaml', 'r') as f:\n data = yaml.safe_load(f)\ndef parse_schema(d, md=[], depth=0, pre=''):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if 'properties' in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d['properties'].items():\n md.append('#'*(depth + 1) + ' ' + pre + key)\n md.append('')\n if 'description' in val:\n for ln in val['description'].split('\\n'):\n md.append(ln)\n md.append('')\n\n parse_schema(val, md, depth, pre+'{}.'.format(key))\n depth -= 1\n return md\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open('reference/reference.md', 'w') as f:\n f.write('\\n'.join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: 
http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r'(.*)github\\.com(.*)#', # javascript based anchors\n r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = 'pydata_sphinx_theme'\nhtml_theme_options = {\n \"github_url\": \"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/\",\n \"use_edit_page_button\": True,\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"zero-to-jupyterhub-k8s\",\n \"github_version\": \"master\",\n \"doc_path\":\"doc\",\n}\n\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n\n\n# -- Below are options for more esoteric output -------------------------------\n# -----------------------------------------------------------------------------\n\n\n# -- Options for HTML help output ---------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files.\nlatex_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc.tex', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'manual' # documentclass [howto, manual, or own class]\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output\n\n# One entry per manual page.\nman_pages = [\n (\n master_doc, # source start file\n 'zerotojupyterhub', # name\n 'Zero to JupyterHub', # description\n [author], # authors\n 1, # manual section\n ),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output\n\n# Grouping the document tree into Texinfo files.\ntexinfo_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'ZeroToJupyterhubDoc', # dir menu entry\n 
'One line description of project.', # description\n 'Miscellaneous' # category\n ),\n]\n\n\n# -- Options for epub output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}]}
2,811
195
gh_patches_debug_11838
rasdani/github-patches
git_diff
sopel-irc__sopel-419
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tell module errors when you don't give it enough arguments ``` <Flyte> .tell <Lurk> AttributeError: 'NoneType' object has no attribute 'split' (file "/home/flyte/workspace/willie/ve/local/lib/python2.7/site-packages/willie/modules/tell.py", line 93, in f_remind) <Flyte> .tell Flyte <Lurk> ValueError: need more than 1 value to unpack (file "/home/flyte/workspace/willie/ve/local/lib/python2.7/site-packages/willie/modules/tell.py", line 93, in f_remind) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `willie/modules/tell.py` Content: ``` 1 """ 2 tell.py - Willie Tell and Ask Module 3 Copyright 2008, Sean B. Palmer, inamidst.com 4 Licensed under the Eiffel Forum License 2. 5 6 http://willie.dftba.net 7 """ 8 9 import os 10 import time 11 import datetime 12 import pytz 13 import threading 14 from willie.tools import Nick 15 from willie.module import commands, nickname_commands, rule, priority, example 16 17 maximum = 4 18 19 20 def loadReminders(fn, lock): 21 lock.acquire() 22 try: 23 result = {} 24 f = open(fn) 25 for line in f: 26 line = line.strip() 27 if line: 28 try: 29 tellee, teller, verb, timenow, msg = line.split('\t', 4) 30 except ValueError: 31 continue # @@ hmm 32 result.setdefault(tellee, []).append((teller, verb, timenow, msg)) 33 f.close() 34 finally: 35 lock.release() 36 return result 37 38 39 def dumpReminders(fn, data, lock): 40 lock.acquire() 41 try: 42 f = open(fn, 'w') 43 for tellee in data.iterkeys(): 44 for remindon in data[tellee]: 45 line = '\t'.join((tellee,) + remindon) 46 try: 47 f.write((line + '\n').encode('utf-8')) 48 except IOError: 49 break 50 try: 51 f.close() 52 except IOError: 53 pass 54 finally: 55 lock.release() 56 return True 57 58 59 def setup(self): 60 fn = self.nick + '-' + self.config.host + '.tell.db' 61 self.tell_filename = os.path.join(self.config.dotdir, fn) 62 if not os.path.exists(self.tell_filename): 63 try: 64 f = open(self.tell_filename, 'w') 65 except OSError: 66 pass 67 else: 68 f.write('') 69 f.close() 70 self.memory['tell_lock'] = threading.Lock() 71 self.memory['reminders'] = loadReminders(self.tell_filename, self.memory['tell_lock']) 72 73 74 def get_user_time(bot, nick): 75 tz = 'UTC' 76 tformat = None 77 if bot.db and nick in bot.db.preferences: 78 tz = bot.db.preferences.get(nick, 'tz') or 'UTC' 79 tformat = bot.db.preferences.get(nick, 'time_format') 80 if tz not in pytz.all_timezones_set: 81 tz = 'UTC' 82 return (pytz.timezone(tz.strip()), tformat or '%Y-%m-%d %H:%M:%S %Z') 83 84 85 @commands('tell', 'ask') 86 @nickname_commands('tell', 'ask') 87 @example('Willie, tell Embolalia he broke something again.') 88 def f_remind(bot, trigger): 89 """Give someone a message the next time they're seen""" 90 teller = trigger.nick 91 92 verb = trigger.group(1) 93 tellee, msg = trigger.group(2).split(None, 1) 94 95 tellee = Nick(tellee.rstrip('.,:;')) 96 97 if not os.path.exists(bot.tell_filename): 98 return 99 100 if len(tellee) > 20: 101 return bot.reply('That nickname is too long.') 102 if tellee == bot.nick: 103 return bot.reply("I'm here now, you can tell me whatever you want!") 104 105 tz, tformat = get_user_time(bot, tellee) 106 timenow = datetime.datetime.now(tz).strftime(tformat) 107 if not tellee in (Nick(teller), bot.nick, 'me'): 108 bot.memory['tell_lock'].acquire() 109 try: 110 if not tellee in bot.memory['reminders']: 111 
bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)] 112 else: 113 bot.memory['reminders'][tellee].append((teller, verb, timenow, msg)) 114 finally: 115 bot.memory['tell_lock'].release() 116 117 response = "I'll pass that on when %s is around." % tellee 118 119 bot.reply(response) 120 elif Nick(teller) == tellee: 121 bot.say('You can %s yourself that.' % verb) 122 else: 123 bot.say("Hey, I'm not as stupid as Monty you know!") 124 125 dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell 126 127 128 def getReminders(bot, channel, key, tellee): 129 lines = [] 130 template = "%s: %s <%s> %s %s %s" 131 today = time.strftime('%d %b', time.gmtime()) 132 133 bot.memory['tell_lock'].acquire() 134 try: 135 for (teller, verb, datetime, msg) in bot.memory['reminders'][key]: 136 if datetime.startswith(today): 137 datetime = datetime[len(today) + 1:] 138 lines.append(template % (tellee, datetime, teller, verb, tellee, msg)) 139 140 try: 141 del bot.memory['reminders'][key] 142 except KeyError: 143 bot.msg(channel, 'Er...') 144 finally: 145 bot.memory['tell_lock'].release() 146 return lines 147 148 149 @rule('(.*)') 150 @priority('low') 151 def message(bot, trigger): 152 153 tellee = trigger.nick 154 channel = trigger.sender 155 156 if not os.path.exists(bot.tell_filename): 157 return 158 159 reminders = [] 160 remkeys = list(reversed(sorted(bot.memory['reminders'].keys()))) 161 162 for remkey in remkeys: 163 if not remkey.endswith('*') or remkey.endswith(':'): 164 if tellee == remkey: 165 reminders.extend(getReminders(bot, channel, remkey, tellee)) 166 elif tellee.startswith(remkey.rstrip('*:')): 167 reminders.extend(getReminders(bot, channel, remkey, tellee)) 168 169 for line in reminders[:maximum]: 170 bot.say(line) 171 172 if reminders[maximum:]: 173 bot.say('Further messages sent privately') 174 for line in reminders[maximum:]: 175 bot.msg(tellee, line) 176 177 if len(bot.memory['reminders'].keys()) != remkeys: 178 dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/willie/modules/tell.py b/willie/modules/tell.py --- a/willie/modules/tell.py +++ b/willie/modules/tell.py @@ -88,11 +88,20 @@ def f_remind(bot, trigger): """Give someone a message the next time they're seen""" teller = trigger.nick - verb = trigger.group(1) - tellee, msg = trigger.group(2).split(None, 1) - tellee = Nick(tellee.rstrip('.,:;')) + if not trigger.group(3): + bot.reply("%s whom?" % verb) + return + + tellee = trigger.group(3).rstrip('.,:;') + msg = trigger.group(2).lstrip(tellee).lstrip() + + if not msg: + bot.reply("%s %s what?" % (verb, tellee)) + return + + tellee = Nick(tellee) if not os.path.exists(bot.tell_filename): return
{"golden_diff": "diff --git a/willie/modules/tell.py b/willie/modules/tell.py\n--- a/willie/modules/tell.py\n+++ b/willie/modules/tell.py\n@@ -88,11 +88,20 @@\n def f_remind(bot, trigger):\n \"\"\"Give someone a message the next time they're seen\"\"\"\n teller = trigger.nick\n-\n verb = trigger.group(1)\n- tellee, msg = trigger.group(2).split(None, 1)\n \n- tellee = Nick(tellee.rstrip('.,:;'))\n+ if not trigger.group(3):\n+ bot.reply(\"%s whom?\" % verb)\n+ return\n+\n+ tellee = trigger.group(3).rstrip('.,:;')\n+ msg = trigger.group(2).lstrip(tellee).lstrip()\n+\n+ if not msg:\n+ bot.reply(\"%s %s what?\" % (verb, tellee))\n+ return\n+\n+ tellee = Nick(tellee)\n \n if not os.path.exists(bot.tell_filename):\n return\n", "issue": "Tell module errors when you don't give it enough arguments\n```\n<Flyte> .tell\n<Lurk> AttributeError: 'NoneType' object has no attribute 'split' (file \"/home/flyte/workspace/willie/ve/local/lib/python2.7/site-packages/willie/modules/tell.py\", line 93, in f_remind)\n<Flyte> .tell Flyte\n<Lurk> ValueError: need more than 1 value to unpack (file \"/home/flyte/workspace/willie/ve/local/lib/python2.7/site-packages/willie/modules/tell.py\", line 93, in f_remind)\n```\n\n", "before_files": [{"content": "\"\"\"\ntell.py - Willie Tell and Ask Module\nCopyright 2008, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\n\nimport os\nimport time\nimport datetime\nimport pytz\nimport threading\nfrom willie.tools import Nick\nfrom willie.module import commands, nickname_commands, rule, priority, example\n\nmaximum = 4\n\n\ndef loadReminders(fn, lock):\n lock.acquire()\n try:\n result = {}\n f = open(fn)\n for line in f:\n line = line.strip()\n if line:\n try:\n tellee, teller, verb, timenow, msg = line.split('\\t', 4)\n except ValueError:\n continue # @@ hmm\n result.setdefault(tellee, []).append((teller, verb, timenow, msg))\n f.close()\n finally:\n lock.release()\n return result\n\n\ndef dumpReminders(fn, data, lock):\n lock.acquire()\n try:\n f = open(fn, 'w')\n for tellee in data.iterkeys():\n for remindon in data[tellee]:\n line = '\\t'.join((tellee,) + remindon)\n try:\n f.write((line + '\\n').encode('utf-8'))\n except IOError:\n break\n try:\n f.close()\n except IOError:\n pass\n finally:\n lock.release()\n return True\n\n\ndef setup(self):\n fn = self.nick + '-' + self.config.host + '.tell.db'\n self.tell_filename = os.path.join(self.config.dotdir, fn)\n if not os.path.exists(self.tell_filename):\n try:\n f = open(self.tell_filename, 'w')\n except OSError:\n pass\n else:\n f.write('')\n f.close()\n self.memory['tell_lock'] = threading.Lock()\n self.memory['reminders'] = loadReminders(self.tell_filename, self.memory['tell_lock'])\n\n\ndef get_user_time(bot, nick):\n tz = 'UTC'\n tformat = None\n if bot.db and nick in bot.db.preferences:\n tz = bot.db.preferences.get(nick, 'tz') or 'UTC'\n tformat = bot.db.preferences.get(nick, 'time_format')\n if tz not in pytz.all_timezones_set:\n tz = 'UTC'\n return (pytz.timezone(tz.strip()), tformat or '%Y-%m-%d %H:%M:%S %Z')\n\n\n@commands('tell', 'ask')\n@nickname_commands('tell', 'ask')\n@example('Willie, tell Embolalia he broke something again.')\ndef f_remind(bot, trigger):\n \"\"\"Give someone a message the next time they're seen\"\"\"\n teller = trigger.nick\n\n verb = trigger.group(1)\n tellee, msg = trigger.group(2).split(None, 1)\n\n tellee = Nick(tellee.rstrip('.,:;'))\n\n if not os.path.exists(bot.tell_filename):\n return\n\n if len(tellee) > 20:\n return 
bot.reply('That nickname is too long.')\n if tellee == bot.nick:\n return bot.reply(\"I'm here now, you can tell me whatever you want!\")\n\n tz, tformat = get_user_time(bot, tellee)\n timenow = datetime.datetime.now(tz).strftime(tformat)\n if not tellee in (Nick(teller), bot.nick, 'me'):\n bot.memory['tell_lock'].acquire()\n try:\n if not tellee in bot.memory['reminders']:\n bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]\n else:\n bot.memory['reminders'][tellee].append((teller, verb, timenow, msg))\n finally:\n bot.memory['tell_lock'].release()\n\n response = \"I'll pass that on when %s is around.\" % tellee\n\n bot.reply(response)\n elif Nick(teller) == tellee:\n bot.say('You can %s yourself that.' % verb)\n else:\n bot.say(\"Hey, I'm not as stupid as Monty you know!\")\n\n dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell\n\n\ndef getReminders(bot, channel, key, tellee):\n lines = []\n template = \"%s: %s <%s> %s %s %s\"\n today = time.strftime('%d %b', time.gmtime())\n\n bot.memory['tell_lock'].acquire()\n try:\n for (teller, verb, datetime, msg) in bot.memory['reminders'][key]:\n if datetime.startswith(today):\n datetime = datetime[len(today) + 1:]\n lines.append(template % (tellee, datetime, teller, verb, tellee, msg))\n\n try:\n del bot.memory['reminders'][key]\n except KeyError:\n bot.msg(channel, 'Er...')\n finally:\n bot.memory['tell_lock'].release()\n return lines\n\n\n@rule('(.*)')\n@priority('low')\ndef message(bot, trigger):\n\n tellee = trigger.nick\n channel = trigger.sender\n\n if not os.path.exists(bot.tell_filename):\n return\n\n reminders = []\n remkeys = list(reversed(sorted(bot.memory['reminders'].keys())))\n\n for remkey in remkeys:\n if not remkey.endswith('*') or remkey.endswith(':'):\n if tellee == remkey:\n reminders.extend(getReminders(bot, channel, remkey, tellee))\n elif tellee.startswith(remkey.rstrip('*:')):\n reminders.extend(getReminders(bot, channel, remkey, tellee))\n\n for line in reminders[:maximum]:\n bot.say(line)\n\n if reminders[maximum:]:\n bot.say('Further messages sent privately')\n for line in reminders[maximum:]:\n bot.msg(tellee, line)\n\n if len(bot.memory['reminders'].keys()) != remkeys:\n dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell\n", "path": "willie/modules/tell.py"}], "after_files": [{"content": "\"\"\"\ntell.py - Willie Tell and Ask Module\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\n\nimport os\nimport time\nimport datetime\nimport pytz\nimport threading\nfrom willie.tools import Nick\nfrom willie.module import commands, nickname_commands, rule, priority, example\n\nmaximum = 4\n\n\ndef loadReminders(fn, lock):\n lock.acquire()\n try:\n result = {}\n f = open(fn)\n for line in f:\n line = line.strip()\n if line:\n try:\n tellee, teller, verb, timenow, msg = line.split('\\t', 4)\n except ValueError:\n continue # @@ hmm\n result.setdefault(tellee, []).append((teller, verb, timenow, msg))\n f.close()\n finally:\n lock.release()\n return result\n\n\ndef dumpReminders(fn, data, lock):\n lock.acquire()\n try:\n f = open(fn, 'w')\n for tellee in data.iterkeys():\n for remindon in data[tellee]:\n line = '\\t'.join((tellee,) + remindon)\n try:\n f.write((line + '\\n').encode('utf-8'))\n except IOError:\n break\n try:\n f.close()\n except IOError:\n pass\n finally:\n lock.release()\n return True\n\n\ndef setup(self):\n fn = self.nick + '-' + self.config.host + '.tell.db'\n self.tell_filename = os.path.join(self.config.dotdir, fn)\n if not os.path.exists(self.tell_filename):\n try:\n f = open(self.tell_filename, 'w')\n except OSError:\n pass\n else:\n f.write('')\n f.close()\n self.memory['tell_lock'] = threading.Lock()\n self.memory['reminders'] = loadReminders(self.tell_filename, self.memory['tell_lock'])\n\n\ndef get_user_time(bot, nick):\n tz = 'UTC'\n tformat = None\n if bot.db and nick in bot.db.preferences:\n tz = bot.db.preferences.get(nick, 'tz') or 'UTC'\n tformat = bot.db.preferences.get(nick, 'time_format')\n if tz not in pytz.all_timezones_set:\n tz = 'UTC'\n return (pytz.timezone(tz.strip()), tformat or '%Y-%m-%d %H:%M:%S %Z')\n\n\n@commands('tell', 'ask')\n@nickname_commands('tell', 'ask')\n@example('Willie, tell Embolalia he broke something again.')\ndef f_remind(bot, trigger):\n \"\"\"Give someone a message the next time they're seen\"\"\"\n teller = trigger.nick\n verb = trigger.group(1)\n\n if not trigger.group(3):\n bot.reply(\"%s whom?\" % verb)\n return\n\n tellee = trigger.group(3).rstrip('.,:;')\n msg = trigger.group(2).lstrip(tellee).lstrip()\n\n if not msg:\n bot.reply(\"%s %s what?\" % (verb, tellee))\n return\n\n tellee = Nick(tellee)\n\n if not os.path.exists(bot.tell_filename):\n return\n\n if len(tellee) > 20:\n return bot.reply('That nickname is too long.')\n if tellee == bot.nick:\n return bot.reply(\"I'm here now, you can tell me whatever you want!\")\n\n tz, tformat = get_user_time(bot, tellee)\n timenow = datetime.datetime.now(tz).strftime(tformat)\n if not tellee in (Nick(teller), bot.nick, 'me'):\n bot.memory['tell_lock'].acquire()\n try:\n if not tellee in bot.memory['reminders']:\n bot.memory['reminders'][tellee] = [(teller, verb, timenow, msg)]\n else:\n bot.memory['reminders'][tellee].append((teller, verb, timenow, msg))\n finally:\n bot.memory['tell_lock'].release()\n\n response = \"I'll pass that on when %s is around.\" % tellee\n\n bot.reply(response)\n elif Nick(teller) == tellee:\n bot.say('You can %s yourself that.' 
% verb)\n else:\n bot.say(\"Hey, I'm not as stupid as Monty you know!\")\n\n dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell\n\n\ndef getReminders(bot, channel, key, tellee):\n lines = []\n template = \"%s: %s <%s> %s %s %s\"\n today = time.strftime('%d %b', time.gmtime())\n\n bot.memory['tell_lock'].acquire()\n try:\n for (teller, verb, datetime, msg) in bot.memory['reminders'][key]:\n if datetime.startswith(today):\n datetime = datetime[len(today) + 1:]\n lines.append(template % (tellee, datetime, teller, verb, tellee, msg))\n\n try:\n del bot.memory['reminders'][key]\n except KeyError:\n bot.msg(channel, 'Er...')\n finally:\n bot.memory['tell_lock'].release()\n return lines\n\n\n@rule('(.*)')\n@priority('low')\ndef message(bot, trigger):\n\n tellee = trigger.nick\n channel = trigger.sender\n\n if not os.path.exists(bot.tell_filename):\n return\n\n reminders = []\n remkeys = list(reversed(sorted(bot.memory['reminders'].keys())))\n\n for remkey in remkeys:\n if not remkey.endswith('*') or remkey.endswith(':'):\n if tellee == remkey:\n reminders.extend(getReminders(bot, channel, remkey, tellee))\n elif tellee.startswith(remkey.rstrip('*:')):\n reminders.extend(getReminders(bot, channel, remkey, tellee))\n\n for line in reminders[:maximum]:\n bot.say(line)\n\n if reminders[maximum:]:\n bot.say('Further messages sent privately')\n for line in reminders[maximum:]:\n bot.msg(tellee, line)\n\n if len(bot.memory['reminders'].keys()) != remkeys:\n dumpReminders(bot.tell_filename, bot.memory['reminders'], bot.memory['tell_lock']) # @@ tell\n", "path": "willie/modules/tell.py"}]}
2,190
242
gh_patches_debug_41816
rasdani/github-patches
git_diff
fonttools__fonttools-804
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [T2CharStringPen] Rounding coordinates? Currently, [T2CharStringPen](https://github.com/fonttools/fonttools/blob/fcd697a328b439165b3e9d04904eb73f065b6838/Lib/fontTools/pens/t2CharStringPen.py#L78) always rounds glyph coordinates and casts to int, but it does not seem a good idea. I think we should remove this behavior, or make it optional. cf. TTGlyphPen (#526) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Lib/fontTools/pens/t2CharStringPen.py` Content: ``` 1 # Copyright (c) 2009 Type Supply LLC 2 # Author: Tal Leming 3 4 5 from __future__ import print_function, division, absolute_import 6 7 from fontTools.misc.py23 import * 8 from fontTools.misc.psCharStrings import T2CharString 9 from fontTools.pens.basePen import BasePen 10 11 12 def roundInt(v): 13 return int(round(v)) 14 15 16 def roundIntPoint(point): 17 x, y = point 18 return roundInt(x), roundInt(y) 19 20 21 class RelativeCoordinatePen(BasePen): 22 23 def __init__(self, glyphSet): 24 BasePen.__init__(self, glyphSet) 25 self._lastX = None 26 self._lastY = None 27 self._heldAbsoluteMove = None 28 29 def _makePointRelative(self, pt): 30 absX, absY = pt 31 absX = absX 32 absY = absY 33 # no points have been added 34 # so no conversion is needed 35 if self._lastX is None: 36 relX, relY = absX, absY 37 # otherwise calculate the relative coordinates 38 else: 39 relX = absX - self._lastX 40 relY = absY - self._lastY 41 # store the absolute coordinates 42 self._lastX = absX 43 self._lastY = absY 44 # now return the relative coordinates 45 return relX, relY 46 47 def _moveTo(self, pt): 48 self._heldAbsoluteMove = pt 49 50 def _releaseHeldMove(self): 51 if self._heldAbsoluteMove is not None: 52 pt = self._makePointRelative(self._heldAbsoluteMove) 53 self._relativeMoveTo(pt) 54 self._heldAbsoluteMove = None 55 56 def _relativeMoveTo(self, pt): 57 raise NotImplementedError 58 59 def _lineTo(self, pt): 60 self._releaseHeldMove() 61 pt = self._makePointRelative(pt) 62 self._relativeLineTo(pt) 63 64 def _relativeLineTo(self, pt): 65 raise NotImplementedError 66 67 def _curveToOne(self, pt1, pt2, pt3): 68 self._releaseHeldMove() 69 pt1 = self._makePointRelative(pt1) 70 pt2 = self._makePointRelative(pt2) 71 pt3 = self._makePointRelative(pt3) 72 self._relativeCurveToOne(pt1, pt2, pt3) 73 74 def _relativeCurveToOne(self, pt1, pt2, pt3): 75 raise NotImplementedError 76 77 78 class T2CharStringPen(RelativeCoordinatePen): 79 80 def __init__(self, width, glyphSet): 81 RelativeCoordinatePen.__init__(self, glyphSet) 82 self._heldMove = None 83 self._program = [] 84 if width is not None: 85 self._program.append(roundInt(width)) 86 87 def _moveTo(self, pt): 88 RelativeCoordinatePen._moveTo(self, roundIntPoint(pt)) 89 90 def _relativeMoveTo(self, pt): 91 pt = roundIntPoint(pt) 92 x, y = pt 93 self._heldMove = [x, y, "rmoveto"] 94 95 def _storeHeldMove(self): 96 if self._heldMove is not None: 97 self._program.extend(self._heldMove) 98 self._heldMove = None 99 100 def _lineTo(self, pt): 101 RelativeCoordinatePen._lineTo(self, roundIntPoint(pt)) 102 103 def _relativeLineTo(self, pt): 104 self._storeHeldMove() 105 pt = roundIntPoint(pt) 106 x, y = pt 107 self._program.extend([x, y, "rlineto"]) 108 109 def _curveToOne(self, pt1, pt2, pt3): 110 RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3)) 111 112 def 
_relativeCurveToOne(self, pt1, pt2, pt3): 113 self._storeHeldMove() 114 pt1 = roundIntPoint(pt1) 115 pt2 = roundIntPoint(pt2) 116 pt3 = roundIntPoint(pt3) 117 x1, y1 = pt1 118 x2, y2 = pt2 119 x3, y3 = pt3 120 self._program.extend([x1, y1, x2, y2, x3, y3, "rrcurveto"]) 121 122 def _closePath(self): 123 pass 124 125 def _endPath(self): 126 pass 127 128 def getCharString(self, private=None, globalSubrs=None): 129 program = self._program + ["endchar"] 130 charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs) 131 return charString 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Lib/fontTools/pens/t2CharStringPen.py b/Lib/fontTools/pens/t2CharStringPen.py --- a/Lib/fontTools/pens/t2CharStringPen.py +++ b/Lib/fontTools/pens/t2CharStringPen.py @@ -9,15 +9,6 @@ from fontTools.pens.basePen import BasePen -def roundInt(v): - return int(round(v)) - - -def roundIntPoint(point): - x, y = point - return roundInt(x), roundInt(y) - - class RelativeCoordinatePen(BasePen): def __init__(self, glyphSet): @@ -75,20 +66,45 @@ raise NotImplementedError +def makeRoundFunc(tolerance): + if tolerance < 0: + raise ValueError("Rounding tolerance must be positive") + + def _round(number): + if tolerance == 0: + return number # no-op + rounded = round(number) + # return rounded integer if the tolerance >= 0.5, or if the absolute + # difference between the original float and the rounded integer is + # within the tolerance + if tolerance >= .5 or abs(rounded - number) <= tolerance: + return rounded + else: + # else return the value un-rounded + return number + + def roundPoint(point): + x, y = point + return _round(x), _round(y) + + return roundPoint + + class T2CharStringPen(RelativeCoordinatePen): - def __init__(self, width, glyphSet): + def __init__(self, width, glyphSet, roundTolerance=0.5): RelativeCoordinatePen.__init__(self, glyphSet) + self.roundPoint = makeRoundFunc(roundTolerance) self._heldMove = None self._program = [] if width is not None: - self._program.append(roundInt(width)) + self._program.append(round(width)) def _moveTo(self, pt): - RelativeCoordinatePen._moveTo(self, roundIntPoint(pt)) + RelativeCoordinatePen._moveTo(self, self.roundPoint(pt)) def _relativeMoveTo(self, pt): - pt = roundIntPoint(pt) + pt = self.roundPoint(pt) x, y = pt self._heldMove = [x, y, "rmoveto"] @@ -98,22 +114,25 @@ self._heldMove = None def _lineTo(self, pt): - RelativeCoordinatePen._lineTo(self, roundIntPoint(pt)) + RelativeCoordinatePen._lineTo(self, self.roundPoint(pt)) def _relativeLineTo(self, pt): self._storeHeldMove() - pt = roundIntPoint(pt) + pt = self.roundPoint(pt) x, y = pt self._program.extend([x, y, "rlineto"]) def _curveToOne(self, pt1, pt2, pt3): - RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3)) + RelativeCoordinatePen._curveToOne(self, + self.roundPoint(pt1), + self.roundPoint(pt2), + self.roundPoint(pt3)) def _relativeCurveToOne(self, pt1, pt2, pt3): self._storeHeldMove() - pt1 = roundIntPoint(pt1) - pt2 = roundIntPoint(pt2) - pt3 = roundIntPoint(pt3) + pt1 = self.roundPoint(pt1) + pt2 = self.roundPoint(pt2) + pt3 = self.roundPoint(pt3) x1, y1 = pt1 x2, y2 = pt2 x3, y3 = pt3 @@ -127,5 +146,6 @@ def getCharString(self, private=None, globalSubrs=None): program = self._program + ["endchar"] - charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs) + charString = T2CharString( + program=program, private=private, globalSubrs=globalSubrs) return charString
{"golden_diff": "diff --git a/Lib/fontTools/pens/t2CharStringPen.py b/Lib/fontTools/pens/t2CharStringPen.py\n--- a/Lib/fontTools/pens/t2CharStringPen.py\n+++ b/Lib/fontTools/pens/t2CharStringPen.py\n@@ -9,15 +9,6 @@\n from fontTools.pens.basePen import BasePen\n \n \n-def roundInt(v):\n- return int(round(v))\n-\n-\n-def roundIntPoint(point):\n- x, y = point\n- return roundInt(x), roundInt(y)\n-\n-\n class RelativeCoordinatePen(BasePen):\n \n def __init__(self, glyphSet):\n@@ -75,20 +66,45 @@\n raise NotImplementedError\n \n \n+def makeRoundFunc(tolerance):\n+ if tolerance < 0:\n+ raise ValueError(\"Rounding tolerance must be positive\")\n+\n+ def _round(number):\n+ if tolerance == 0:\n+ return number # no-op\n+ rounded = round(number)\n+ # return rounded integer if the tolerance >= 0.5, or if the absolute\n+ # difference between the original float and the rounded integer is\n+ # within the tolerance\n+ if tolerance >= .5 or abs(rounded - number) <= tolerance:\n+ return rounded\n+ else:\n+ # else return the value un-rounded\n+ return number\n+\n+ def roundPoint(point):\n+ x, y = point\n+ return _round(x), _round(y)\n+\n+ return roundPoint\n+\n+\n class T2CharStringPen(RelativeCoordinatePen):\n \n- def __init__(self, width, glyphSet):\n+ def __init__(self, width, glyphSet, roundTolerance=0.5):\n RelativeCoordinatePen.__init__(self, glyphSet)\n+ self.roundPoint = makeRoundFunc(roundTolerance)\n self._heldMove = None\n self._program = []\n if width is not None:\n- self._program.append(roundInt(width))\n+ self._program.append(round(width))\n \n def _moveTo(self, pt):\n- RelativeCoordinatePen._moveTo(self, roundIntPoint(pt))\n+ RelativeCoordinatePen._moveTo(self, self.roundPoint(pt))\n \n def _relativeMoveTo(self, pt):\n- pt = roundIntPoint(pt)\n+ pt = self.roundPoint(pt)\n x, y = pt\n self._heldMove = [x, y, \"rmoveto\"]\n \n@@ -98,22 +114,25 @@\n self._heldMove = None\n \n def _lineTo(self, pt):\n- RelativeCoordinatePen._lineTo(self, roundIntPoint(pt))\n+ RelativeCoordinatePen._lineTo(self, self.roundPoint(pt))\n \n def _relativeLineTo(self, pt):\n self._storeHeldMove()\n- pt = roundIntPoint(pt)\n+ pt = self.roundPoint(pt)\n x, y = pt\n self._program.extend([x, y, \"rlineto\"])\n \n def _curveToOne(self, pt1, pt2, pt3):\n- RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3))\n+ RelativeCoordinatePen._curveToOne(self,\n+ self.roundPoint(pt1),\n+ self.roundPoint(pt2),\n+ self.roundPoint(pt3))\n \n def _relativeCurveToOne(self, pt1, pt2, pt3):\n self._storeHeldMove()\n- pt1 = roundIntPoint(pt1)\n- pt2 = roundIntPoint(pt2)\n- pt3 = roundIntPoint(pt3)\n+ pt1 = self.roundPoint(pt1)\n+ pt2 = self.roundPoint(pt2)\n+ pt3 = self.roundPoint(pt3)\n x1, y1 = pt1\n x2, y2 = pt2\n x3, y3 = pt3\n@@ -127,5 +146,6 @@\n \n def getCharString(self, private=None, globalSubrs=None):\n program = self._program + [\"endchar\"]\n- charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)\n+ charString = T2CharString(\n+ program=program, private=private, globalSubrs=globalSubrs)\n return charString\n", "issue": "[T2CharStringPen] Rounding coordinates?\nCurrently, [T2CharStringPen](https://github.com/fonttools/fonttools/blob/fcd697a328b439165b3e9d04904eb73f065b6838/Lib/fontTools/pens/t2CharStringPen.py#L78) always rounds glyph coordinates and casts to int, but it does not seem a good idea. I think we should remove this behavior, or make it optional.\r\n\r\ncf. 
TTGlyphPen (#526)\n", "before_files": [{"content": "# Copyright (c) 2009 Type Supply LLC\n# Author: Tal Leming\n\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc.psCharStrings import T2CharString\nfrom fontTools.pens.basePen import BasePen\n\n\ndef roundInt(v):\n return int(round(v))\n\n\ndef roundIntPoint(point):\n x, y = point\n return roundInt(x), roundInt(y)\n\n\nclass RelativeCoordinatePen(BasePen):\n\n def __init__(self, glyphSet):\n BasePen.__init__(self, glyphSet)\n self._lastX = None\n self._lastY = None\n self._heldAbsoluteMove = None\n\n def _makePointRelative(self, pt):\n absX, absY = pt\n absX = absX\n absY = absY\n # no points have been added\n # so no conversion is needed\n if self._lastX is None:\n relX, relY = absX, absY\n # otherwise calculate the relative coordinates\n else:\n relX = absX - self._lastX\n relY = absY - self._lastY\n # store the absolute coordinates\n self._lastX = absX\n self._lastY = absY\n # now return the relative coordinates\n return relX, relY\n\n def _moveTo(self, pt):\n self._heldAbsoluteMove = pt\n\n def _releaseHeldMove(self):\n if self._heldAbsoluteMove is not None:\n pt = self._makePointRelative(self._heldAbsoluteMove)\n self._relativeMoveTo(pt)\n self._heldAbsoluteMove = None\n\n def _relativeMoveTo(self, pt):\n raise NotImplementedError\n\n def _lineTo(self, pt):\n self._releaseHeldMove()\n pt = self._makePointRelative(pt)\n self._relativeLineTo(pt)\n\n def _relativeLineTo(self, pt):\n raise NotImplementedError\n\n def _curveToOne(self, pt1, pt2, pt3):\n self._releaseHeldMove()\n pt1 = self._makePointRelative(pt1)\n pt2 = self._makePointRelative(pt2)\n pt3 = self._makePointRelative(pt3)\n self._relativeCurveToOne(pt1, pt2, pt3)\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n raise NotImplementedError\n\n\nclass T2CharStringPen(RelativeCoordinatePen):\n\n def __init__(self, width, glyphSet):\n RelativeCoordinatePen.__init__(self, glyphSet)\n self._heldMove = None\n self._program = []\n if width is not None:\n self._program.append(roundInt(width))\n\n def _moveTo(self, pt):\n RelativeCoordinatePen._moveTo(self, roundIntPoint(pt))\n\n def _relativeMoveTo(self, pt):\n pt = roundIntPoint(pt)\n x, y = pt\n self._heldMove = [x, y, \"rmoveto\"]\n\n def _storeHeldMove(self):\n if self._heldMove is not None:\n self._program.extend(self._heldMove)\n self._heldMove = None\n\n def _lineTo(self, pt):\n RelativeCoordinatePen._lineTo(self, roundIntPoint(pt))\n\n def _relativeLineTo(self, pt):\n self._storeHeldMove()\n pt = roundIntPoint(pt)\n x, y = pt\n self._program.extend([x, y, \"rlineto\"])\n\n def _curveToOne(self, pt1, pt2, pt3):\n RelativeCoordinatePen._curveToOne(self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3))\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n self._storeHeldMove()\n pt1 = roundIntPoint(pt1)\n pt2 = roundIntPoint(pt2)\n pt3 = roundIntPoint(pt3)\n x1, y1 = pt1\n x2, y2 = pt2\n x3, y3 = pt3\n self._program.extend([x1, y1, x2, y2, x3, y3, \"rrcurveto\"])\n\n def _closePath(self):\n pass\n\n def _endPath(self):\n pass\n\n def getCharString(self, private=None, globalSubrs=None):\n program = self._program + [\"endchar\"]\n charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)\n return charString\n", "path": "Lib/fontTools/pens/t2CharStringPen.py"}], "after_files": [{"content": "# Copyright (c) 2009 Type Supply LLC\n# Author: Tal Leming\n\n\nfrom __future__ import print_function, division, 
absolute_import\n\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc.psCharStrings import T2CharString\nfrom fontTools.pens.basePen import BasePen\n\n\nclass RelativeCoordinatePen(BasePen):\n\n def __init__(self, glyphSet):\n BasePen.__init__(self, glyphSet)\n self._lastX = None\n self._lastY = None\n self._heldAbsoluteMove = None\n\n def _makePointRelative(self, pt):\n absX, absY = pt\n absX = absX\n absY = absY\n # no points have been added\n # so no conversion is needed\n if self._lastX is None:\n relX, relY = absX, absY\n # otherwise calculate the relative coordinates\n else:\n relX = absX - self._lastX\n relY = absY - self._lastY\n # store the absolute coordinates\n self._lastX = absX\n self._lastY = absY\n # now return the relative coordinates\n return relX, relY\n\n def _moveTo(self, pt):\n self._heldAbsoluteMove = pt\n\n def _releaseHeldMove(self):\n if self._heldAbsoluteMove is not None:\n pt = self._makePointRelative(self._heldAbsoluteMove)\n self._relativeMoveTo(pt)\n self._heldAbsoluteMove = None\n\n def _relativeMoveTo(self, pt):\n raise NotImplementedError\n\n def _lineTo(self, pt):\n self._releaseHeldMove()\n pt = self._makePointRelative(pt)\n self._relativeLineTo(pt)\n\n def _relativeLineTo(self, pt):\n raise NotImplementedError\n\n def _curveToOne(self, pt1, pt2, pt3):\n self._releaseHeldMove()\n pt1 = self._makePointRelative(pt1)\n pt2 = self._makePointRelative(pt2)\n pt3 = self._makePointRelative(pt3)\n self._relativeCurveToOne(pt1, pt2, pt3)\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n raise NotImplementedError\n\n\ndef makeRoundFunc(tolerance):\n if tolerance < 0:\n raise ValueError(\"Rounding tolerance must be positive\")\n\n def _round(number):\n if tolerance == 0:\n return number # no-op\n rounded = round(number)\n # return rounded integer if the tolerance >= 0.5, or if the absolute\n # difference between the original float and the rounded integer is\n # within the tolerance\n if tolerance >= .5 or abs(rounded - number) <= tolerance:\n return rounded\n else:\n # else return the value un-rounded\n return number\n\n def roundPoint(point):\n x, y = point\n return _round(x), _round(y)\n\n return roundPoint\n\n\nclass T2CharStringPen(RelativeCoordinatePen):\n\n def __init__(self, width, glyphSet, roundTolerance=0.5):\n RelativeCoordinatePen.__init__(self, glyphSet)\n self.roundPoint = makeRoundFunc(roundTolerance)\n self._heldMove = None\n self._program = []\n if width is not None:\n self._program.append(round(width))\n\n def _moveTo(self, pt):\n RelativeCoordinatePen._moveTo(self, self.roundPoint(pt))\n\n def _relativeMoveTo(self, pt):\n pt = self.roundPoint(pt)\n x, y = pt\n self._heldMove = [x, y, \"rmoveto\"]\n\n def _storeHeldMove(self):\n if self._heldMove is not None:\n self._program.extend(self._heldMove)\n self._heldMove = None\n\n def _lineTo(self, pt):\n RelativeCoordinatePen._lineTo(self, self.roundPoint(pt))\n\n def _relativeLineTo(self, pt):\n self._storeHeldMove()\n pt = self.roundPoint(pt)\n x, y = pt\n self._program.extend([x, y, \"rlineto\"])\n\n def _curveToOne(self, pt1, pt2, pt3):\n RelativeCoordinatePen._curveToOne(self,\n self.roundPoint(pt1),\n self.roundPoint(pt2),\n self.roundPoint(pt3))\n\n def _relativeCurveToOne(self, pt1, pt2, pt3):\n self._storeHeldMove()\n pt1 = self.roundPoint(pt1)\n pt2 = self.roundPoint(pt2)\n pt3 = self.roundPoint(pt3)\n x1, y1 = pt1\n x2, y2 = pt2\n x3, y3 = pt3\n self._program.extend([x1, y1, x2, y2, x3, y3, \"rrcurveto\"])\n\n def _closePath(self):\n pass\n\n def _endPath(self):\n pass\n\n def 
getCharString(self, private=None, globalSubrs=None):\n program = self._program + [\"endchar\"]\n charString = T2CharString(\n program=program, private=private, globalSubrs=globalSubrs)\n return charString\n", "path": "Lib/fontTools/pens/t2CharStringPen.py"}]}
1,708
965
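For readers skimming this record, a minimal usage sketch of the patched pen may help: it exercises the `roundTolerance` parameter that the golden diff above adds to `T2CharStringPen`. Only the signature `T2CharStringPen(width, glyphSet, roundTolerance=0.5)` and the rounding semantics of `makeRoundFunc` are taken from the diff; the width value, the sample coordinates, and the empty glyph set are illustrative assumptions, not values from the record.

```python
from fontTools.pens.t2CharStringPen import T2CharStringPen

# Illustrative glyph set; no components are drawn, so an empty dict suffices.
glyph_set = {}

# Default tolerance (0.5): every coordinate is rounded, matching the old behaviour.
pen = T2CharStringPen(width=500, glyphSet=glyph_set)
pen.moveTo((100.4, 0.6))
pen.lineTo((200.0, 300.2))
pen.closePath()
print(pen.getCharString().program)        # relative coordinates come out as integers

# roundTolerance=0 turns rounding into a no-op, so float coordinates are preserved.
pen_float = T2CharStringPen(width=500, glyphSet=glyph_set, roundTolerance=0)
pen_float.moveTo((100.4, 0.6))
pen_float.lineTo((200.0, 300.2))
pen_float.closePath()
print(pen_float.getCharString().program)  # floats such as 100.4 survive
```

The design mirrors the TTGlyphPen change referenced in the issue (#526): rounding stays the default, a zero tolerance disables it entirely, and intermediate tolerances only round coordinates that already lie within the tolerance of an integer.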
gh_patches_debug_21894
rasdani/github-patches
git_diff
freedomofpress__securedrop-644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Location of replies unclear for sources As a source, it it unclear where I should go to see messages from the journalist. When I choose the “Check for response” and choose “continue” it should clearly show me a “Messages” area of the screen, and say “No replies from the journalist”, or list the messages (if there are any). We currently flash a message that says "you received a new reply", but this is confusing. It only appears once, so if the user refreshes the page or navigates away and back again, they will not see that message (they will still see the reply, until it is deleted). Another option would be to make “submit new documents/messages” a separate page from “check for replies from the journalist” page. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/source.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import os 3 from datetime import datetime 4 import uuid 5 from functools import wraps 6 import zipfile 7 from cStringIO import StringIO 8 import subprocess 9 10 import logging 11 # This module's logger is explicitly labeled so the correct logger is used, 12 # even when this is run from the command line (e.g. during development) 13 log = logging.getLogger('source') 14 15 from flask import (Flask, request, render_template, session, redirect, url_for, 16 flash, abort, g, send_file) 17 from flask_wtf.csrf import CsrfProtect 18 19 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound 20 from sqlalchemy.exc import IntegrityError 21 22 import config 23 import version 24 import crypto_util 25 import store 26 import background 27 import template_filters 28 from db import db_session, Source, Submission 29 from jinja2 import evalcontextfilter 30 31 app = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR) 32 app.config.from_object(config.SourceInterfaceFlaskConfig) 33 CsrfProtect(app) 34 35 SUBMIT_DOC_NOTIFY_STR = "Thanks! We received your document" 36 SUBMIT_MSG_NOTIFY_STR = "Thanks! We received your message" 37 SUBMIT_CODENAME_NOTIFY_STR = "Please remember your codename: you can use it to log back into this site to read responses from us and to submit follow-up documents and messages." 
38 39 app.jinja_env.globals['version'] = version.__version__ 40 if getattr(config, 'CUSTOM_HEADER_IMAGE', None): 41 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE 42 app.jinja_env.globals['use_custom_header_image'] = True 43 else: 44 app.jinja_env.globals['header_image'] = 'logo.png' 45 app.jinja_env.globals['use_custom_header_image'] = False 46 47 app.jinja_env.filters['datetimeformat'] = template_filters.datetimeformat 48 app.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br) 49 50 @app.teardown_appcontext 51 def shutdown_session(exception=None): 52 """Automatically remove database sessions at the end of the request, or 53 when the application shuts down""" 54 db_session.remove() 55 56 57 def logged_in(): 58 return 'logged_in' in session 59 60 61 def login_required(f): 62 @wraps(f) 63 def decorated_function(*args, **kwargs): 64 if not logged_in(): 65 return redirect(url_for('login')) 66 return f(*args, **kwargs) 67 return decorated_function 68 69 70 def ignore_static(f): 71 """Only executes the wrapped function if we're not loading a static resource.""" 72 @wraps(f) 73 def decorated_function(*args, **kwargs): 74 if request.path.startswith('/static'): 75 return # don't execute the decorated function 76 return f(*args, **kwargs) 77 return decorated_function 78 79 80 @app.before_request 81 @ignore_static 82 def setup_g(): 83 """Store commonly used values in Flask's special g object""" 84 # ignore_static here because `crypto_util.hash_codename` is scrypt (very 85 # time consuming), and we don't need to waste time running if we're just 86 # serving a static resource that won't need to access these common values. 87 if logged_in(): 88 g.codename = session['codename'] 89 g.sid = crypto_util.hash_codename(g.codename) 90 try: 91 g.source = Source.query.filter(Source.filesystem_id == g.sid).one() 92 except MultipleResultsFound as e: 93 app.logger.error("Found multiple Sources when one was expected: %s" % (e,)) 94 abort(500) 95 except NoResultFound as e: 96 app.logger.error("Found no Sources when one was expected: %s" % (e,)) 97 del session['logged_in'] 98 del session['codename'] 99 return redirect(url_for('index')) 100 g.loc = store.path(g.sid) 101 102 103 @app.before_request 104 @ignore_static 105 def check_tor2web(): 106 # ignore_static here so we only flash a single message warning about Tor2Web, 107 # corresponding to the intial page load. 108 if 'X-tor2web' in request.headers: 109 flash('<strong>WARNING:</strong> You appear to be using Tor2Web. ' 110 'This <strong>does not</strong> provide anonymity. ' 111 '<a href="/tor2web-warning">Why is this dangerous?</a>', 112 "banner-warning") 113 114 115 @app.route('/') 116 def index(): 117 return render_template('index.html') 118 119 120 def generate_unique_codename(num_words): 121 """Generate random codenames until we get an unused one""" 122 while True: 123 codename = crypto_util.genrandomid(num_words) 124 sid = crypto_util.hash_codename(codename) # scrypt (slow) 125 matching_sources = Source.query.filter(Source.filesystem_id == sid).all() 126 if len(matching_sources) == 0: 127 return codename 128 129 130 @app.route('/generate', methods=('GET', 'POST')) 131 def generate(): 132 # Popping this key prevents errors when a logged in user returns to /generate. 133 # TODO: is this the best experience? A logged in user will be automatically 134 # logged out if they navigate to /generate by accident, which could be 135 # confusing. 
It might be better to instead redirect them to the lookup 136 # page, or inform them that they're logged in. 137 session.pop('logged_in', None) 138 139 number_words = 8 140 if request.method == 'POST': 141 number_words = int(request.form['number-words']) 142 if number_words not in range(7, 11): 143 abort(403) 144 145 codename = generate_unique_codename(number_words) 146 session['codename'] = codename 147 return render_template('generate.html', codename=codename) 148 149 150 @app.route('/create', methods=['POST']) 151 def create(): 152 sid = crypto_util.hash_codename(session['codename']) 153 154 source = Source(sid, crypto_util.display_id()) 155 db_session.add(source) 156 try: 157 db_session.commit() 158 except IntegrityError as e: 159 app.logger.error("Attempt to create a source with duplicate codename: %s" % (e,)) 160 else: 161 os.mkdir(store.path(sid)) 162 163 session['logged_in'] = True 164 return redirect(url_for('lookup')) 165 166 167 @app.route('/lookup', methods=('GET',)) 168 @login_required 169 def lookup(): 170 replies = [] 171 for fn in os.listdir(g.loc): 172 if fn.endswith('-reply.gpg'): 173 try: 174 msg = crypto_util.decrypt(g.codename, 175 file(store.path(g.sid, fn)).read()).decode("utf-8") 176 except UnicodeDecodeError: 177 app.logger.error("Could not decode reply %s" % fn) 178 else: 179 date = datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime) 180 replies.append(dict(id=fn, date=date, msg=msg)) 181 182 def async_genkey(sid, codename): 183 with app.app_context(): 184 background.execute(lambda: crypto_util.genkeypair(sid, codename)) 185 186 # Generate a keypair to encrypt replies from the journalist 187 # Only do this if the journalist has flagged the source as one 188 # that they would like to reply to. (Issue #140.) 189 if not crypto_util.getkey(g.sid) and g.source.flagged: 190 async_genkey(g.sid, g.codename) 191 192 # if this was a redirect from the login page, flash a message if there are 193 # no replies to clarify "check for replies" flow (#393) 194 if request.args.get('from_login') == '1' and len(replies) == 0: 195 flash("There are no replies at this time. You can submit more documents from this codename below.", "notification") 196 197 return render_template('lookup.html', codename=g.codename, replies=replies, 198 flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid)) 199 200 201 def normalize_timestamps(sid): 202 """ 203 Update the timestamps on all of the source's submissions to match that of 204 the latest submission. This minimizes metadata that could be useful to 205 investigators. See #301. 206 """ 207 sub_paths = [ store.path(sid, submission.filename) 208 for submission in g.source.submissions ] 209 if len(sub_paths) > 1: 210 args = ["touch"] 211 args.extend(sub_paths[:-1]) 212 rc = subprocess.call(args) 213 if rc != 0: 214 app.logger.warning("Couldn't normalize submission timestamps (touch exited with %d)" % rc) 215 216 217 @app.route('/submit', methods=('POST',)) 218 @login_required 219 def submit(): 220 msg = request.form['msg'] 221 fh = request.files['fh'] 222 223 fnames = [] 224 journalist_filename = g.source.journalist_filename() 225 226 if msg: 227 g.source.interaction_count += 1 228 fnames.append(store.save_message_submission(g.sid, g.source.interaction_count, 229 journalist_filename, msg)) 230 flash("{}. 
{}".format(SUBMIT_MSG_NOTIFY_STR, 231 SUBMIT_CODENAME_NOTIFY_STR), "notification") 232 if fh: 233 g.source.interaction_count += 1 234 fnames.append(store.save_file_submission(g.sid, g.source.interaction_count, 235 journalist_filename, fh.filename, fh.stream)) 236 flash("{} '{}'. {}".format(SUBMIT_DOC_NOTIFY_STR, 237 fh.filename or '[unnamed]', 238 SUBMIT_CODENAME_NOTIFY_STR), "notification") 239 for fname in fnames: 240 submission = Submission(g.source, fname) 241 db_session.add(submission) 242 243 if g.source.pending: 244 g.source.pending = False 245 246 # Generate a keypair now, if there's enough entropy (issue #303) 247 entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read()) 248 if entropy_avail >= 2400: 249 crypto_util.genkeypair(g.sid, g.codename) 250 251 g.source.last_updated = datetime.utcnow() 252 db_session.commit() 253 normalize_timestamps(g.sid) 254 255 return redirect(url_for('lookup')) 256 257 258 @app.route('/delete', methods=('POST',)) 259 @login_required 260 def delete(): 261 msgid = request.form['msgid'] 262 assert '/' not in msgid 263 potential_files = os.listdir(g.loc) 264 if msgid not in potential_files: 265 abort(404) # TODO are the checks necessary? 266 store.secure_unlink(store.path(g.sid, msgid)) 267 flash("Reply deleted.", "notification") 268 269 return redirect(url_for('lookup')) 270 271 272 def valid_codename(codename): 273 return os.path.exists(store.path(crypto_util.hash_codename(codename))) 274 275 @app.route('/login', methods=('GET', 'POST')) 276 def login(): 277 if request.method == 'POST': 278 codename = request.form['codename'] 279 try: 280 valid = valid_codename(codename) 281 except crypto_util.CryptoException: 282 pass 283 else: 284 if valid: 285 session.update(codename=codename, logged_in=True) 286 return redirect(url_for('lookup', from_login='1')) 287 flash("Sorry, that is not a recognized codename.", "error") 288 return render_template('login.html') 289 290 291 @app.route('/howto-disable-js') 292 def howto_disable_js(): 293 return render_template("howto-disable-js.html") 294 295 296 @app.route('/tor2web-warning') 297 def tor2web_warning(): 298 return render_template("tor2web-warning.html") 299 300 301 @app.route('/journalist-key') 302 def download_journalist_pubkey(): 303 journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY) 304 return send_file(StringIO(journalist_pubkey), 305 mimetype="application/pgp-keys", 306 attachment_filename=config.JOURNALIST_KEY + ".asc", 307 as_attachment=True) 308 309 310 @app.route('/why-journalist-key') 311 def why_download_journalist_pubkey(): 312 return render_template("why-journalist-key.html") 313 314 315 @app.errorhandler(404) 316 def page_not_found(error): 317 return render_template('notfound.html'), 404 318 319 @app.errorhandler(500) 320 def internal_error(error): 321 return render_template('error.html'), 500 322 323 def write_pidfile(): 324 pid = str(os.getpid()) 325 with open(config.SOURCE_PIDFILE, 'w') as fp: 326 fp.write(pid) 327 328 if __name__ == "__main__": 329 write_pidfile() 330 # TODO make sure debug is not on in production 331 app.run(debug=True, host='0.0.0.0', port=8080) 332 333 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/source.py b/securedrop/source.py --- a/securedrop/source.py +++ b/securedrop/source.py @@ -189,11 +189,6 @@ if not crypto_util.getkey(g.sid) and g.source.flagged: async_genkey(g.sid, g.codename) - # if this was a redirect from the login page, flash a message if there are - # no replies to clarify "check for replies" flow (#393) - if request.args.get('from_login') == '1' and len(replies) == 0: - flash("There are no replies at this time. You can submit more documents from this codename below.", "notification") - return render_template('lookup.html', codename=g.codename, replies=replies, flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid)) @@ -264,7 +259,7 @@ if msgid not in potential_files: abort(404) # TODO are the checks necessary? store.secure_unlink(store.path(g.sid, msgid)) - flash("Reply deleted.", "notification") + flash("Reply deleted", "notification") return redirect(url_for('lookup'))
{"golden_diff": "diff --git a/securedrop/source.py b/securedrop/source.py\n--- a/securedrop/source.py\n+++ b/securedrop/source.py\n@@ -189,11 +189,6 @@\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n \n- # if this was a redirect from the login page, flash a message if there are\n- # no replies to clarify \"check for replies\" flow (#393)\n- if request.args.get('from_login') == '1' and len(replies) == 0:\n- flash(\"There are no replies at this time. You can submit more documents from this codename below.\", \"notification\")\n-\n return render_template('lookup.html', codename=g.codename, replies=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n \n@@ -264,7 +259,7 @@\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n- flash(\"Reply deleted.\", \"notification\")\n+ flash(\"Reply deleted\", \"notification\")\n \n return redirect(url_for('lookup'))\n", "issue": "Location of replies unclear for sources\nAs a source, it it unclear where I should go to see messages from the journalist.\n\nWhen I choose the \u201cCheck for response\u201d and choose \u201ccontinue\u201d it should clearly show me a \u201cMessages\u201d area of the screen, and say \u201cNo replies from the journalist\u201d, or list the messages (if there are any).\n\nWe currently flash a message that says \"you received a new reply\", but this is confusing. It only appears once, so if the user refreshes the page or navigates away and back again, they will not see that message (they will still see the reply, until it is deleted).\n\nAnother option would be to make \u201csubmit new documents/messages\u201d a separate page from \u201ccheck for replies from the journalist\u201d page.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\nimport subprocess\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\nfrom sqlalchemy.exc import IntegrityError\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nimport template_filters\nfrom db import db_session, Source, Submission\nfrom jinja2 import evalcontextfilter\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.SourceInterfaceFlaskConfig)\nCsrfProtect(app)\n\nSUBMIT_DOC_NOTIFY_STR = \"Thanks! We received your document\"\nSUBMIT_MSG_NOTIFY_STR = \"Thanks! 
We received your message\"\nSUBMIT_CODENAME_NOTIFY_STR = \"Please remember your codename: you can use it to log back into this site to read responses from us and to submit follow-up documents and messages.\"\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\napp.jinja_env.filters['datetimeformat'] = template_filters.datetimeformat\napp.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n return 'logged_in' in session\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('login'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('index'))\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"banner-warning\")\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\ndef generate_unique_codename(num_words):\n \"\"\"Generate random codenames until we get an unused one\"\"\"\n while True:\n codename = crypto_util.genrandomid(num_words)\n sid = crypto_util.hash_codename(codename) # scrypt (slow)\n matching_sources = Source.query.filter(Source.filesystem_id == sid).all()\n if len(matching_sources) == 0:\n return codename\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n # Popping this key prevents errors when a logged in user returns to /generate.\n # TODO: is this the best experience? 
A logged in user will be automatically\n # logged out if they navigate to /generate by accident, which could be\n # confusing. It might be better to instead redirect them to the lookup\n # page, or inform them that they're logged in.\n session.pop('logged_in', None)\n\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n\n codename = generate_unique_codename(number_words)\n session['codename'] = codename\n return render_template('generate.html', codename=codename)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n try:\n db_session.commit()\n except IntegrityError as e: \n app.logger.error(\"Attempt to create a source with duplicate codename: %s\" % (e,))\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.endswith('-reply.gpg'):\n try:\n msg = crypto_util.decrypt(g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime)\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n # if this was a redirect from the login page, flash a message if there are\n # no replies to clarify \"check for replies\" flow (#393)\n if request.args.get('from_login') == '1' and len(replies) == 0:\n flash(\"There are no replies at this time. You can submit more documents from this codename below.\", \"notification\")\n\n return render_template('lookup.html', codename=g.codename, replies=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\ndef normalize_timestamps(sid):\n \"\"\"\n Update the timestamps on all of the source's submissions to match that of\n the latest submission. This minimizes metadata that could be useful to\n investigators. See #301.\n \"\"\"\n sub_paths = [ store.path(sid, submission.filename)\n for submission in g.source.submissions ]\n if len(sub_paths) > 1:\n args = [\"touch\"]\n args.extend(sub_paths[:-1])\n rc = subprocess.call(args)\n if rc != 0:\n app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n\n fnames = []\n journalist_filename = g.source.journalist_filename()\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(store.save_message_submission(g.sid, g.source.interaction_count,\n journalist_filename, msg))\n flash(\"{}. 
{}\".format(SUBMIT_MSG_NOTIFY_STR,\n SUBMIT_CODENAME_NOTIFY_STR), \"notification\")\n if fh:\n g.source.interaction_count += 1\n fnames.append(store.save_file_submission(g.sid, g.source.interaction_count,\n journalist_filename, fh.filename, fh.stream))\n flash(\"{} '{}'. {}\".format(SUBMIT_DOC_NOTIFY_STR,\n fh.filename or '[unnamed]',\n SUBMIT_CODENAME_NOTIFY_STR), \"notification\")\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read())\n if entropy_avail >= 2400:\n crypto_util.genkeypair(g.sid, g.codename)\n\n g.source.last_updated = datetime.utcnow()\n db_session.commit()\n normalize_timestamps(g.sid)\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n try:\n valid = valid_codename(codename)\n except crypto_util.CryptoException:\n pass\n else:\n if valid:\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup', from_login='1'))\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.SOURCE_PIDFILE, 'w') as fp:\n fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n\n", "path": "securedrop/source.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\nimport subprocess\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. 
during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\nfrom sqlalchemy.exc import IntegrityError\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nimport template_filters\nfrom db import db_session, Source, Submission\nfrom jinja2 import evalcontextfilter\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.SourceInterfaceFlaskConfig)\nCsrfProtect(app)\n\nSUBMIT_DOC_NOTIFY_STR = \"Thanks! We received your document\"\nSUBMIT_MSG_NOTIFY_STR = \"Thanks! We received your message\"\nSUBMIT_CODENAME_NOTIFY_STR = \"Please remember your codename: you can use it to log back into this site to read responses from us and to submit follow-up documents and messages.\"\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\napp.jinja_env.filters['datetimeformat'] = template_filters.datetimeformat\napp.jinja_env.filters['nl2br'] = evalcontextfilter(template_filters.nl2br)\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n return 'logged_in' in session\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('login'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n del session['logged_in']\n del session['codename']\n return redirect(url_for('index'))\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"banner-warning\")\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\ndef generate_unique_codename(num_words):\n \"\"\"Generate random codenames until we get an unused one\"\"\"\n while True:\n codename = crypto_util.genrandomid(num_words)\n sid = crypto_util.hash_codename(codename) # scrypt (slow)\n matching_sources = Source.query.filter(Source.filesystem_id == sid).all()\n if len(matching_sources) == 0:\n return codename\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n # Popping this key prevents errors when a logged in user returns to /generate.\n # TODO: is this the best experience? A logged in user will be automatically\n # logged out if they navigate to /generate by accident, which could be\n # confusing. It might be better to instead redirect them to the lookup\n # page, or inform them that they're logged in.\n session.pop('logged_in', None)\n\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n\n codename = generate_unique_codename(number_words)\n session['codename'] = codename\n return render_template('generate.html', codename=codename)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n try:\n db_session.commit()\n except IntegrityError as e: \n app.logger.error(\"Attempt to create a source with duplicate codename: %s\" % (e,))\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.endswith('-reply.gpg'):\n try:\n msg = crypto_util.decrypt(g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime)\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template('lookup.html', codename=g.codename, replies=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\ndef normalize_timestamps(sid):\n \"\"\"\n Update the timestamps on all of the source's submissions to match that of\n the latest submission. This minimizes metadata that could be useful to\n investigators. 
See #301.\n \"\"\"\n sub_paths = [ store.path(sid, submission.filename)\n for submission in g.source.submissions ]\n if len(sub_paths) > 1:\n args = [\"touch\"]\n args.extend(sub_paths[:-1])\n rc = subprocess.call(args)\n if rc != 0:\n app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n\n fnames = []\n journalist_filename = g.source.journalist_filename()\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(store.save_message_submission(g.sid, g.source.interaction_count,\n journalist_filename, msg))\n flash(\"{}. {}\".format(SUBMIT_MSG_NOTIFY_STR,\n SUBMIT_CODENAME_NOTIFY_STR), \"notification\")\n if fh:\n g.source.interaction_count += 1\n fnames.append(store.save_file_submission(g.sid, g.source.interaction_count,\n journalist_filename, fh.filename, fh.stream))\n flash(\"{} '{}'. {}\".format(SUBMIT_DOC_NOTIFY_STR,\n fh.filename or '[unnamed]',\n SUBMIT_CODENAME_NOTIFY_STR), \"notification\")\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read())\n if entropy_avail >= 2400:\n crypto_util.genkeypair(g.sid, g.codename)\n\n g.source.last_updated = datetime.utcnow()\n db_session.commit()\n normalize_timestamps(g.sid)\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n try:\n valid = valid_codename(codename)\n except crypto_util.CryptoException:\n pass\n else:\n if valid:\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup', from_login='1'))\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.SOURCE_PIDFILE, 'w') as fp:\n 
fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n\n", "path": "securedrop/source.py"}]}
3,999
280
gh_patches_debug_3882
rasdani/github-patches
git_diff
explosion__spaCy-3032
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- parameter averaging You recommend (https://spacy.io/usage/training#section-tips): > Then when we want to actually use the model, we use the averages, not the most recent value. > with nlp.use_params(optimizer.averages): > nlp.to_disk('/model') But in the classification example https://spacy.io/usage/training#section-textcat model is saved only not in nlp.use_params(optimizer.averages) scope. Is it correct? parameter averaging You recommend (https://spacy.io/usage/training#section-tips): > Then when we want to actually use the model, we use the averages, not the most recent value. > with nlp.use_params(optimizer.averages): > nlp.to_disk('/model') But in the classification example https://spacy.io/usage/training#section-textcat model is saved only not in nlp.use_params(optimizer.averages) scope. Is it correct? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/training/train_textcat.py` Content: ``` 1 #!/usr/bin/env python 2 # coding: utf8 3 """Train a convolutional neural network text classifier on the 4 IMDB dataset, using the TextCategorizer component. The dataset will be loaded 5 automatically via Thinc's built-in dataset loader. The model is added to 6 spacy.pipeline, and predictions are available via `doc.cats`. For more details, 7 see the documentation: 8 * Training: https://spacy.io/usage/training 9 10 Compatible with: spaCy v2.0.0+ 11 """ 12 from __future__ import unicode_literals, print_function 13 import plac 14 import random 15 from pathlib import Path 16 import thinc.extra.datasets 17 18 import spacy 19 from spacy.util import minibatch, compounding 20 21 22 @plac.annotations( 23 model=("Model name. 
Defaults to blank 'en' model.", "option", "m", str), 24 output_dir=("Optional output directory", "option", "o", Path), 25 n_texts=("Number of texts to train from", "option", "t", int), 26 n_iter=("Number of training iterations", "option", "n", int)) 27 def main(model=None, output_dir=None, n_iter=20, n_texts=2000): 28 if model is not None: 29 nlp = spacy.load(model) # load existing spaCy model 30 print("Loaded model '%s'" % model) 31 else: 32 nlp = spacy.blank('en') # create blank Language class 33 print("Created blank 'en' model") 34 35 # add the text classifier to the pipeline if it doesn't exist 36 # nlp.create_pipe works for built-ins that are registered with spaCy 37 if 'textcat' not in nlp.pipe_names: 38 textcat = nlp.create_pipe('textcat') 39 nlp.add_pipe(textcat, last=True) 40 # otherwise, get it, so we can add labels to it 41 else: 42 textcat = nlp.get_pipe('textcat') 43 44 # add label to text classifier 45 textcat.add_label('POSITIVE') 46 47 # load the IMDB dataset 48 print("Loading IMDB data...") 49 (train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts) 50 print("Using {} examples ({} training, {} evaluation)" 51 .format(n_texts, len(train_texts), len(dev_texts))) 52 train_data = list(zip(train_texts, 53 [{'cats': cats} for cats in train_cats])) 54 55 # get names of other pipes to disable them during training 56 other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat'] 57 with nlp.disable_pipes(*other_pipes): # only train textcat 58 optimizer = nlp.begin_training() 59 print("Training the model...") 60 print('{:^5}\t{:^5}\t{:^5}\t{:^5}'.format('LOSS', 'P', 'R', 'F')) 61 for i in range(n_iter): 62 losses = {} 63 # batch up the examples using spaCy's minibatch 64 batches = minibatch(train_data, size=compounding(4., 32., 1.001)) 65 for batch in batches: 66 texts, annotations = zip(*batch) 67 nlp.update(texts, annotations, sgd=optimizer, drop=0.2, 68 losses=losses) 69 with textcat.model.use_params(optimizer.averages): 70 # evaluate on the dev data split off in load_data() 71 scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats) 72 print('{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}' # print a simple table 73 .format(losses['textcat'], scores['textcat_p'], 74 scores['textcat_r'], scores['textcat_f'])) 75 76 # test the trained model 77 test_text = "This movie sucked" 78 doc = nlp(test_text) 79 print(test_text, doc.cats) 80 81 if output_dir is not None: 82 output_dir = Path(output_dir) 83 if not output_dir.exists(): 84 output_dir.mkdir() 85 nlp.to_disk(output_dir) 86 print("Saved model to", output_dir) 87 88 # test the saved model 89 print("Loading from", output_dir) 90 nlp2 = spacy.load(output_dir) 91 doc2 = nlp2(test_text) 92 print(test_text, doc2.cats) 93 94 95 def load_data(limit=0, split=0.8): 96 """Load data from the IMDB dataset.""" 97 # Partition off part of the train data for evaluation 98 train_data, _ = thinc.extra.datasets.imdb() 99 random.shuffle(train_data) 100 train_data = train_data[-limit:] 101 texts, labels = zip(*train_data) 102 cats = [{'POSITIVE': bool(y)} for y in labels] 103 split = int(len(train_data) * split) 104 return (texts[:split], cats[:split]), (texts[split:], cats[split:]) 105 106 107 def evaluate(tokenizer, textcat, texts, cats): 108 docs = (tokenizer(text) for text in texts) 109 tp = 0.0 # True positives 110 fp = 1e-8 # False positives 111 fn = 1e-8 # False negatives 112 tn = 0.0 # True negatives 113 for i, doc in enumerate(textcat.pipe(docs)): 114 gold = cats[i] 115 for label, score in doc.cats.items(): 116 if label not 
in gold: 117 continue 118 if score >= 0.5 and gold[label] >= 0.5: 119 tp += 1. 120 elif score >= 0.5 and gold[label] < 0.5: 121 fp += 1. 122 elif score < 0.5 and gold[label] < 0.5: 123 tn += 1 124 elif score < 0.5 and gold[label] >= 0.5: 125 fn += 1 126 precision = tp / (tp + fp) 127 recall = tp / (tp + fn) 128 f_score = 2 * (precision * recall) / (precision + recall) 129 return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score} 130 131 132 if __name__ == '__main__': 133 plac.call(main) 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/training/train_textcat.py b/examples/training/train_textcat.py --- a/examples/training/train_textcat.py +++ b/examples/training/train_textcat.py @@ -82,7 +82,8 @@ output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() - nlp.to_disk(output_dir) + with nlp.use_params(optimizer.averages): + nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model
{"golden_diff": "diff --git a/examples/training/train_textcat.py b/examples/training/train_textcat.py\n--- a/examples/training/train_textcat.py\n+++ b/examples/training/train_textcat.py\n@@ -82,7 +82,8 @@\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n- nlp.to_disk(output_dir)\n+ with nlp.use_params(optimizer.averages):\n+ nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n \n # test the saved model\n", "issue": "parameter averaging\nYou recommend (https://spacy.io/usage/training#section-tips):\r\n\r\n> Then when we want to actually use the model, we use the averages, not the most recent value. \r\n> with nlp.use_params(optimizer.averages):\r\n> nlp.to_disk('/model')\r\n\r\nBut in the classification example https://spacy.io/usage/training#section-textcat model is saved only not in nlp.use_params(optimizer.averages) scope. Is it correct?\nparameter averaging\nYou recommend (https://spacy.io/usage/training#section-tips):\r\n\r\n> Then when we want to actually use the model, we use the averages, not the most recent value. \r\n> with nlp.use_params(optimizer.averages):\r\n> nlp.to_disk('/model')\r\n\r\nBut in the classification example https://spacy.io/usage/training#section-textcat model is saved only not in nlp.use_params(optimizer.averages) scope. Is it correct?\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf8\n\"\"\"Train a convolutional neural network text classifier on the\nIMDB dataset, using the TextCategorizer component. The dataset will be loaded\nautomatically via Thinc's built-in dataset loader. The model is added to\nspacy.pipeline, and predictions are available via `doc.cats`. For more details,\nsee the documentation:\n* Training: https://spacy.io/usage/training\n\nCompatible with: spaCy v2.0.0+\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport plac\nimport random\nfrom pathlib import Path\nimport thinc.extra.datasets\n\nimport spacy\nfrom spacy.util import minibatch, compounding\n\n\[email protected](\n model=(\"Model name. 
Defaults to blank 'en' model.\", \"option\", \"m\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_texts=(\"Number of texts to train from\", \"option\", \"t\", int),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int))\ndef main(model=None, output_dir=None, n_iter=20, n_texts=2000):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the text classifier to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'textcat' not in nlp.pipe_names:\n textcat = nlp.create_pipe('textcat')\n nlp.add_pipe(textcat, last=True)\n # otherwise, get it, so we can add labels to it\n else:\n textcat = nlp.get_pipe('textcat')\n\n # add label to text classifier\n textcat.add_label('POSITIVE')\n\n # load the IMDB dataset\n print(\"Loading IMDB data...\")\n (train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)\n print(\"Using {} examples ({} training, {} evaluation)\"\n .format(n_texts, len(train_texts), len(dev_texts)))\n train_data = list(zip(train_texts,\n [{'cats': cats} for cats in train_cats]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']\n with nlp.disable_pipes(*other_pipes): # only train textcat\n optimizer = nlp.begin_training()\n print(\"Training the model...\")\n print('{:^5}\\t{:^5}\\t{:^5}\\t{:^5}'.format('LOSS', 'P', 'R', 'F'))\n for i in range(n_iter):\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(train_data, size=compounding(4., 32., 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2,\n losses=losses)\n with textcat.model.use_params(optimizer.averages):\n # evaluate on the dev data split off in load_data()\n scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)\n print('{0:.3f}\\t{1:.3f}\\t{2:.3f}\\t{3:.3f}' # print a simple table\n .format(losses['textcat'], scores['textcat_p'],\n scores['textcat_r'], scores['textcat_f']))\n\n # test the trained model\n test_text = \"This movie sucked\"\n doc = nlp(test_text)\n print(test_text, doc.cats)\n\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc2 = nlp2(test_text)\n print(test_text, doc2.cats)\n\n\ndef load_data(limit=0, split=0.8):\n \"\"\"Load data from the IMDB dataset.\"\"\"\n # Partition off part of the train data for evaluation\n train_data, _ = thinc.extra.datasets.imdb()\n random.shuffle(train_data)\n train_data = train_data[-limit:]\n texts, labels = zip(*train_data)\n cats = [{'POSITIVE': bool(y)} for y in labels]\n split = int(len(train_data) * split)\n return (texts[:split], cats[:split]), (texts[split:], cats[split:])\n\n\ndef evaluate(tokenizer, textcat, texts, cats):\n docs = (tokenizer(text) for text in texts)\n tp = 0.0 # True positives\n fp = 1e-8 # False positives\n fn = 1e-8 # False negatives\n tn = 0.0 # True negatives\n for i, doc in enumerate(textcat.pipe(docs)):\n gold = cats[i]\n for label, score in doc.cats.items():\n if label not in gold:\n continue\n if score >= 0.5 and gold[label] >= 0.5:\n 
tp += 1.\n elif score >= 0.5 and gold[label] < 0.5:\n fp += 1.\n elif score < 0.5 and gold[label] < 0.5:\n tn += 1\n elif score < 0.5 and gold[label] >= 0.5:\n fn += 1\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_score = 2 * (precision * recall) / (precision + recall)\n return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}\n\n\nif __name__ == '__main__':\n plac.call(main)\n", "path": "examples/training/train_textcat.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding: utf8\n\"\"\"Train a convolutional neural network text classifier on the\nIMDB dataset, using the TextCategorizer component. The dataset will be loaded\nautomatically via Thinc's built-in dataset loader. The model is added to\nspacy.pipeline, and predictions are available via `doc.cats`. For more details,\nsee the documentation:\n* Training: https://spacy.io/usage/training\n\nCompatible with: spaCy v2.0.0+\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport plac\nimport random\nfrom pathlib import Path\nimport thinc.extra.datasets\n\nimport spacy\nfrom spacy.util import minibatch, compounding\n\n\[email protected](\n model=(\"Model name. Defaults to blank 'en' model.\", \"option\", \"m\", str),\n output_dir=(\"Optional output directory\", \"option\", \"o\", Path),\n n_texts=(\"Number of texts to train from\", \"option\", \"t\", int),\n n_iter=(\"Number of training iterations\", \"option\", \"n\", int))\ndef main(model=None, output_dir=None, n_iter=20, n_texts=2000):\n if model is not None:\n nlp = spacy.load(model) # load existing spaCy model\n print(\"Loaded model '%s'\" % model)\n else:\n nlp = spacy.blank('en') # create blank Language class\n print(\"Created blank 'en' model\")\n\n # add the text classifier to the pipeline if it doesn't exist\n # nlp.create_pipe works for built-ins that are registered with spaCy\n if 'textcat' not in nlp.pipe_names:\n textcat = nlp.create_pipe('textcat')\n nlp.add_pipe(textcat, last=True)\n # otherwise, get it, so we can add labels to it\n else:\n textcat = nlp.get_pipe('textcat')\n\n # add label to text classifier\n textcat.add_label('POSITIVE')\n\n # load the IMDB dataset\n print(\"Loading IMDB data...\")\n (train_texts, train_cats), (dev_texts, dev_cats) = load_data(limit=n_texts)\n print(\"Using {} examples ({} training, {} evaluation)\"\n .format(n_texts, len(train_texts), len(dev_texts)))\n train_data = list(zip(train_texts,\n [{'cats': cats} for cats in train_cats]))\n\n # get names of other pipes to disable them during training\n other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'textcat']\n with nlp.disable_pipes(*other_pipes): # only train textcat\n optimizer = nlp.begin_training()\n print(\"Training the model...\")\n print('{:^5}\\t{:^5}\\t{:^5}\\t{:^5}'.format('LOSS', 'P', 'R', 'F'))\n for i in range(n_iter):\n losses = {}\n # batch up the examples using spaCy's minibatch\n batches = minibatch(train_data, size=compounding(4., 32., 1.001))\n for batch in batches:\n texts, annotations = zip(*batch)\n nlp.update(texts, annotations, sgd=optimizer, drop=0.2,\n losses=losses)\n with textcat.model.use_params(optimizer.averages):\n # evaluate on the dev data split off in load_data()\n scores = evaluate(nlp.tokenizer, textcat, dev_texts, dev_cats)\n print('{0:.3f}\\t{1:.3f}\\t{2:.3f}\\t{3:.3f}' # print a simple table\n .format(losses['textcat'], scores['textcat_p'],\n scores['textcat_r'], scores['textcat_f']))\n\n # test the trained model\n test_text = \"This movie sucked\"\n doc = nlp(test_text)\n 
print(test_text, doc.cats)\n\n if output_dir is not None:\n output_dir = Path(output_dir)\n if not output_dir.exists():\n output_dir.mkdir()\n with nlp.use_params(optimizer.averages):\n nlp.to_disk(output_dir)\n print(\"Saved model to\", output_dir)\n\n # test the saved model\n print(\"Loading from\", output_dir)\n nlp2 = spacy.load(output_dir)\n doc2 = nlp2(test_text)\n print(test_text, doc2.cats)\n\n\ndef load_data(limit=0, split=0.8):\n \"\"\"Load data from the IMDB dataset.\"\"\"\n # Partition off part of the train data for evaluation\n train_data, _ = thinc.extra.datasets.imdb()\n random.shuffle(train_data)\n train_data = train_data[-limit:]\n texts, labels = zip(*train_data)\n cats = [{'POSITIVE': bool(y)} for y in labels]\n split = int(len(train_data) * split)\n return (texts[:split], cats[:split]), (texts[split:], cats[split:])\n\n\ndef evaluate(tokenizer, textcat, texts, cats):\n docs = (tokenizer(text) for text in texts)\n tp = 1e-8 # True positives\n fp = 1e-8 # False positives\n fn = 1e-8 # False negatives\n tn = 1e-8 # True negatives\n for i, doc in enumerate(textcat.pipe(docs)):\n gold = cats[i]\n for label, score in doc.cats.items():\n if label not in gold:\n continue\n if score >= 0.5 and gold[label] >= 0.5:\n tp += 1.\n elif score >= 0.5 and gold[label] < 0.5:\n fp += 1.\n elif score < 0.5 and gold[label] < 0.5:\n tn += 1\n elif score < 0.5 and gold[label] >= 0.5:\n fn += 1\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f_score = 2 * (precision * recall) / (precision + recall)\n return {'textcat_p': precision, 'textcat_r': recall, 'textcat_f': f_score}\n\n\nif __name__ == '__main__':\n plac.call(main)\n", "path": "examples/training/train_textcat.py"}]}
2,124
120
gh_patches_debug_44141
rasdani/github-patches
git_diff
sopel-irc__sopel-1700
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- help: output should be sent to requester privately IIRC, the older version of Sopel (or Willie?) used to send the long help results privately to the user who requested the help command list results. Or at least make it optional to admin who wants gisthub or privately. Thank you in advance. :) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/help.py` Content: ``` 1 # coding=utf-8 2 """ 3 help.py - Sopel Help Module 4 Copyright 2008, Sean B. Palmer, inamidst.com 5 Copyright © 2013, Elad Alfassa, <[email protected]> 6 Copyright © 2018, Adam Erdman, pandorah.org 7 Copyright © 2019, Tomasz Kurcz, github.com/uint 8 Copyright © 2019, dgw, technobabbl.es 9 Licensed under the Eiffel Forum License 2. 10 11 https://sopel.chat 12 """ 13 from __future__ import unicode_literals, absolute_import, print_function, division 14 15 import re 16 import collections 17 import socket 18 import textwrap 19 20 import requests 21 22 from sopel.config.types import ChoiceAttribute, ValidatedAttribute, StaticSection 23 from sopel.logger import get_logger 24 from sopel.module import commands, rule, example, priority 25 from sopel.tools import SopelMemory 26 27 28 SETTING_CACHE_NAMESPACE = 'help-setting-cache' # Set top-level memory key name 29 LOGGER = get_logger(__name__) 30 31 # Settings that should require the help listing to be regenerated, or 32 # re-POSTed to paste, if they are changed during runtime. 33 # Keys are module names, and values are lists of setting names 34 # specific to that module. 35 TRACKED_SETTINGS = { 36 'help': [ 37 'output', 38 'show_server_host', 39 ] 40 } 41 42 43 class PostingException(Exception): 44 """Custom exception type for errors posting help to the chosen pastebin.""" 45 pass 46 47 48 # Pastebin handlers 49 50 51 def _requests_post_catch_errors(*args, **kwargs): 52 try: 53 response = requests.post(*args, **kwargs) 54 response.raise_for_status() 55 except ( 56 requests.exceptions.Timeout, 57 requests.exceptions.TooManyRedirects, 58 requests.exceptions.RequestException, 59 requests.exceptions.HTTPError 60 ): 61 # We re-raise all expected exception types to a generic "posting error" 62 # that's easy for callers to expect, and then we pass the original 63 # exception through to provide some debugging info 64 LOGGER.exception('Error during POST request') 65 raise PostingException('Could not communicate with remote service') 66 67 # remaining handling (e.g. errors inside the response) is left to the caller 68 return response 69 70 71 def post_to_clbin(msg): 72 try: 73 result = _requests_post_catch_errors('https://clbin.com/', data={'clbin': msg}) 74 except PostingException: 75 raise 76 77 result = result.text 78 if '://clbin.com/' in result: 79 # find/replace just in case the site tries to be sneaky and save on SSL overhead, 80 # though it will probably send us an HTTPS link without any tricks. 
81 return result.replace('http://', 'https://', 1) 82 else: 83 LOGGER.error("Invalid result %s", result) 84 raise PostingException('clbin result did not contain expected URL base.') 85 86 87 def post_to_0x0(msg): 88 try: 89 result = _requests_post_catch_errors('https://0x0.st', files={'file': msg}) 90 except PostingException: 91 raise 92 93 result = result.text 94 if '://0x0.st' in result: 95 # find/replace just in case the site tries to be sneaky and save on SSL overhead, 96 # though it will probably send us an HTTPS link without any tricks. 97 return result.replace('http://', 'https://', 1) 98 else: 99 LOGGER.error('Invalid result %s', result) 100 raise PostingException('0x0.st result did not contain expected URL base.') 101 102 103 def post_to_hastebin(msg): 104 try: 105 result = _requests_post_catch_errors('https://hastebin.com/documents', data=msg) 106 except PostingException: 107 raise 108 109 try: 110 result = result.json() 111 except ValueError: 112 LOGGER.error("Invalid Hastebin response %s", result) 113 raise PostingException('Could not parse response from Hastebin!') 114 115 if 'key' not in result: 116 LOGGER.error("Invalid result %s", result) 117 raise PostingException('Hastebin result did not contain expected URL base.') 118 return "https://hastebin.com/" + result['key'] 119 120 121 def post_to_termbin(msg): 122 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 123 sock.settimeout(10) # the bot may NOT wait forever for a response; that would be bad 124 try: 125 sock.connect(('termbin.com', 9999)) 126 sock.sendall(msg) 127 sock.shutdown(socket.SHUT_WR) 128 response = "" 129 while 1: 130 data = sock.recv(1024) 131 if data == "": 132 break 133 response += data 134 sock.close() 135 except socket.error: 136 LOGGER.exception('Error during communication with termbin') 137 raise PostingException('Error uploading to termbin') 138 139 # find/replace just in case the site tries to be sneaky and save on SSL overhead, 140 # though it will probably send us an HTTPS link without any tricks. 
141 return response.strip('\x00\n').replace('http://', 'https://', 1) 142 143 144 def post_to_ubuntu(msg): 145 data = { 146 'poster': 'sopel', 147 'syntax': 'text', 148 'expiration': '', 149 'content': msg, 150 } 151 try: 152 result = _requests_post_catch_errors('https://pastebin.ubuntu.com/', data=data) 153 except PostingException: 154 raise 155 156 if not re.match(r'https://pastebin.ubuntu.com/p/[^/]+/', result.url): 157 LOGGER.error("Invalid Ubuntu pastebin response url %s", result.url) 158 raise PostingException('Invalid response from Ubuntu pastebin: %s' % result.url) 159 160 return result.url 161 162 163 PASTEBIN_PROVIDERS = { 164 'clbin': post_to_clbin, 165 '0x0': post_to_0x0, 166 'hastebin': post_to_hastebin, 167 'termbin': post_to_termbin, 168 'ubuntu': post_to_ubuntu, 169 } 170 171 172 class HelpSection(StaticSection): 173 """Configuration section for this module.""" 174 output = ChoiceAttribute('output', 175 list(PASTEBIN_PROVIDERS), 176 default='clbin') 177 """The pastebin provider to use for help output.""" 178 show_server_host = ValidatedAttribute('show_server_host', bool, default=True) 179 """Show the IRC server's hostname/IP in the first line of the help listing?""" 180 181 182 def configure(config): 183 """ 184 | name | example | purpose | 185 | ---- | ------- | ------- | 186 | output | clbin | The pastebin provider to use for help output | 187 | show\\_server\\_host | True | Whether to show the IRC server's hostname/IP at the top of command listings | 188 """ 189 config.define_section('help', HelpSection) 190 provider_list = ', '.join(PASTEBIN_PROVIDERS) 191 config.help.configure_setting( 192 'output', 193 'Pick a pastebin provider: {}: '.format(provider_list) 194 ) 195 config.help.configure_setting( 196 'show_server_host', 197 'Should the help command show the IRC server\'s hostname/IP in the listing?' 198 ) 199 200 201 def setup(bot): 202 bot.config.define_section('help', HelpSection) 203 204 # Initialize memory 205 if SETTING_CACHE_NAMESPACE not in bot.memory: 206 bot.memory[SETTING_CACHE_NAMESPACE] = SopelMemory() 207 208 # Initialize settings cache 209 for section in TRACKED_SETTINGS: 210 if section not in bot.memory[SETTING_CACHE_NAMESPACE]: 211 bot.memory[SETTING_CACHE_NAMESPACE][section] = SopelMemory() 212 213 update_cache(bot) # Populate cache 214 215 bot.config.define_section('help', HelpSection) 216 217 218 def update_cache(bot): 219 for section, setting_names_list in TRACKED_SETTINGS.items(): 220 for setting_name in setting_names_list: 221 bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] = getattr(getattr(bot.config, section), setting_name) 222 223 224 def is_cache_valid(bot): 225 for section, setting_names_list in TRACKED_SETTINGS.items(): 226 for setting_name in setting_names_list: 227 if bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] != getattr(getattr(bot.config, section), setting_name): 228 return False 229 return True 230 231 232 @rule('$nick' r'(?i)(help|doc) +([A-Za-z]+)(?:\?+)?$') 233 @example('.help tell') 234 @commands('help', 'commands') 235 @priority('low') 236 def help(bot, trigger): 237 """Shows a command's documentation, and an example if available. 
With no arguments, lists all commands.""" 238 if trigger.group(2): 239 name = trigger.group(2) 240 name = name.lower() 241 242 # number of lines of help to show 243 threshold = 3 244 245 if name in bot.doc: 246 # count lines we're going to send 247 # lines in command docstring, plus one line for example(s) if present (they're sent all on one line) 248 if len(bot.doc[name][0]) + int(bool(bot.doc[name][1])) > threshold: 249 if trigger.nick != trigger.sender: # don't say that if asked in private 250 bot.reply('The documentation for this command is too long; ' 251 'I\'m sending it to you in a private message.') 252 253 def msgfun(l): 254 bot.say(l, trigger.nick) 255 else: 256 msgfun = bot.reply 257 258 for line in bot.doc[name][0]: 259 msgfun(line) 260 if bot.doc[name][1]: 261 # Build a nice, grammatically-correct list of examples 262 examples = ', '.join(bot.doc[name][1][:-2] + [' or '.join(bot.doc[name][1][-2:])]) 263 msgfun('e.g. ' + examples) 264 else: 265 # This'll probably catch most cases, without having to spend the time 266 # actually creating the list first. Maybe worth storing the link and a 267 # heuristic in the DB, too, so it persists across restarts. Would need a 268 # command to regenerate, too... 269 if ( 270 'command-list' in bot.memory and 271 bot.memory['command-list'][0] == len(bot.command_groups) and 272 is_cache_valid(bot) 273 ): 274 url = bot.memory['command-list'][1] 275 else: 276 bot.say("Hang on, I'm creating a list.") 277 msgs = [] 278 279 name_length = max(6, max(len(k) for k in bot.command_groups.keys())) 280 for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items(): 281 category = category.upper().ljust(name_length) 282 cmds = set(cmds) # remove duplicates 283 cmds = ' '.join(cmds) 284 msg = category + ' ' + cmds 285 indent = ' ' * (name_length + 2) 286 # Honestly not sure why this is a list here 287 msgs.append('\n'.join(textwrap.wrap(msg, subsequent_indent=indent))) 288 289 url = create_list(bot, '\n\n'.join(msgs)) 290 if not url: 291 return 292 bot.memory['command-list'] = (len(bot.command_groups), url) 293 update_cache(bot) 294 bot.say("I've posted a list of my commands at {0} - You can see " 295 "more info about any of these commands by doing {1}help " 296 "<command> (e.g. {1}help time)" 297 .format(url, bot.config.core.help_prefix)) 298 299 300 def create_list(bot, msg): 301 """Creates & uploads the command list. 302 303 Returns the URL from the chosen pastebin provider. 304 """ 305 msg = 'Command listing for {}{}\n\n{}'.format( 306 bot.nick, 307 ('@' + bot.config.core.host) if bot.config.help.show_server_host else '', 308 msg) 309 310 try: 311 result = PASTEBIN_PROVIDERS[bot.config.help.output](msg) 312 except PostingException: 313 bot.say("Sorry! Something went wrong.") 314 LOGGER.exception("Error posting commands") 315 return 316 return result 317 318 319 @rule('$nick' r'(?i)help(?:[?!]+)?$') 320 @priority('low') 321 def help2(bot, trigger): 322 response = ( 323 "Hi, I'm a bot. Say {1}commands to me in private for a list " 324 "of my commands, or see https://sopel.chat for more " 325 "general details. My owner is {0}." 326 .format(bot.config.core.owner, bot.config.core.help_prefix)) 327 bot.reply(response) 328 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/help.py b/sopel/modules/help.py --- a/sopel/modules/help.py +++ b/sopel/modules/help.py @@ -167,6 +167,11 @@ 'termbin': post_to_termbin, 'ubuntu': post_to_ubuntu, } +REPLY_METHODS = [ + 'channel', + 'query', + 'notice', +] class HelpSection(StaticSection): @@ -175,6 +180,10 @@ list(PASTEBIN_PROVIDERS), default='clbin') """The pastebin provider to use for help output.""" + reply_method = ChoiceAttribute('reply_method', + REPLY_METHODS, + default='channel') + """Where/how to reply to help commands (public/private).""" show_server_host = ValidatedAttribute('show_server_host', bool, default=True) """Show the IRC server's hostname/IP in the first line of the help listing?""" @@ -184,14 +193,20 @@ | name | example | purpose | | ---- | ------- | ------- | | output | clbin | The pastebin provider to use for help output | + | reply\\_method | channel | How/where help output should be sent | | show\\_server\\_host | True | Whether to show the IRC server's hostname/IP at the top of command listings | """ config.define_section('help', HelpSection) provider_list = ', '.join(PASTEBIN_PROVIDERS) + reply_method_list = ', '.join(REPLY_METHODS) config.help.configure_setting( 'output', 'Pick a pastebin provider: {}: '.format(provider_list) ) + config.help.configure_setting( + 'reply_method', + 'How/where should help command replies be sent: {}? '.format(reply_method_list) + ) config.help.configure_setting( 'show_server_host', 'Should the help command show the IRC server\'s hostname/IP in the listing?' @@ -235,6 +250,16 @@ @priority('low') def help(bot, trigger): """Shows a command's documentation, and an example if available. With no arguments, lists all commands.""" + if bot.config.help.reply_method == 'query': + def respond(text): + bot.say(text, trigger.nick) + elif bot.config.help.reply_method == 'notice': + def respond(text): + bot.notice(text, trigger.nick) + else: + def respond(text): + bot.say(text, trigger.sender) + if trigger.group(2): name = trigger.group(2) name = name.lower() @@ -253,7 +278,7 @@ def msgfun(l): bot.say(l, trigger.nick) else: - msgfun = bot.reply + msgfun = respond for line in bot.doc[name][0]: msgfun(line) @@ -273,7 +298,7 @@ ): url = bot.memory['command-list'][1] else: - bot.say("Hang on, I'm creating a list.") + respond("Hang on, I'm creating a list.") msgs = [] name_length = max(6, max(len(k) for k in bot.command_groups.keys())) @@ -291,7 +316,7 @@ return bot.memory['command-list'] = (len(bot.command_groups), url) update_cache(bot) - bot.say("I've posted a list of my commands at {0} - You can see " + respond("I've posted a list of my commands at {0} - You can see " "more info about any of these commands by doing {1}help " "<command> (e.g. {1}help time)" .format(url, bot.config.core.help_prefix))
{"golden_diff": "diff --git a/sopel/modules/help.py b/sopel/modules/help.py\n--- a/sopel/modules/help.py\n+++ b/sopel/modules/help.py\n@@ -167,6 +167,11 @@\n 'termbin': post_to_termbin,\n 'ubuntu': post_to_ubuntu,\n }\n+REPLY_METHODS = [\n+ 'channel',\n+ 'query',\n+ 'notice',\n+]\n \n \n class HelpSection(StaticSection):\n@@ -175,6 +180,10 @@\n list(PASTEBIN_PROVIDERS),\n default='clbin')\n \"\"\"The pastebin provider to use for help output.\"\"\"\n+ reply_method = ChoiceAttribute('reply_method',\n+ REPLY_METHODS,\n+ default='channel')\n+ \"\"\"Where/how to reply to help commands (public/private).\"\"\"\n show_server_host = ValidatedAttribute('show_server_host', bool, default=True)\n \"\"\"Show the IRC server's hostname/IP in the first line of the help listing?\"\"\"\n \n@@ -184,14 +193,20 @@\n | name | example | purpose |\n | ---- | ------- | ------- |\n | output | clbin | The pastebin provider to use for help output |\n+ | reply\\\\_method | channel | How/where help output should be sent |\n | show\\\\_server\\\\_host | True | Whether to show the IRC server's hostname/IP at the top of command listings |\n \"\"\"\n config.define_section('help', HelpSection)\n provider_list = ', '.join(PASTEBIN_PROVIDERS)\n+ reply_method_list = ', '.join(REPLY_METHODS)\n config.help.configure_setting(\n 'output',\n 'Pick a pastebin provider: {}: '.format(provider_list)\n )\n+ config.help.configure_setting(\n+ 'reply_method',\n+ 'How/where should help command replies be sent: {}? '.format(reply_method_list)\n+ )\n config.help.configure_setting(\n 'show_server_host',\n 'Should the help command show the IRC server\\'s hostname/IP in the listing?'\n@@ -235,6 +250,16 @@\n @priority('low')\n def help(bot, trigger):\n \"\"\"Shows a command's documentation, and an example if available. With no arguments, lists all commands.\"\"\"\n+ if bot.config.help.reply_method == 'query':\n+ def respond(text):\n+ bot.say(text, trigger.nick)\n+ elif bot.config.help.reply_method == 'notice':\n+ def respond(text):\n+ bot.notice(text, trigger.nick)\n+ else:\n+ def respond(text):\n+ bot.say(text, trigger.sender)\n+\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n@@ -253,7 +278,7 @@\n def msgfun(l):\n bot.say(l, trigger.nick)\n else:\n- msgfun = bot.reply\n+ msgfun = respond\n \n for line in bot.doc[name][0]:\n msgfun(line)\n@@ -273,7 +298,7 @@\n ):\n url = bot.memory['command-list'][1]\n else:\n- bot.say(\"Hang on, I'm creating a list.\")\n+ respond(\"Hang on, I'm creating a list.\")\n msgs = []\n \n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n@@ -291,7 +316,7 @@\n return\n bot.memory['command-list'] = (len(bot.command_groups), url)\n update_cache(bot)\n- bot.say(\"I've posted a list of my commands at {0} - You can see \"\n+ respond(\"I've posted a list of my commands at {0} - You can see \"\n \"more info about any of these commands by doing {1}help \"\n \"<command> (e.g. {1}help time)\"\n .format(url, bot.config.core.help_prefix))\n", "issue": "help: output should be sent to requester privately\nIIRC, the older version of Sopel (or Willie?) used to send the long help results privately to the user who requested the help command list results. Or at least make it optional to admin who wants gisthub or privately.\r\n\r\nThank you in advance. :)\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nhelp.py - Sopel Help Module\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nCopyright \u00a9 2018, Adam Erdman, pandorah.org\nCopyright \u00a9 2019, Tomasz Kurcz, github.com/uint\nCopyright \u00a9 2019, dgw, technobabbl.es\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nimport collections\nimport socket\nimport textwrap\n\nimport requests\n\nfrom sopel.config.types import ChoiceAttribute, ValidatedAttribute, StaticSection\nfrom sopel.logger import get_logger\nfrom sopel.module import commands, rule, example, priority\nfrom sopel.tools import SopelMemory\n\n\nSETTING_CACHE_NAMESPACE = 'help-setting-cache' # Set top-level memory key name\nLOGGER = get_logger(__name__)\n\n# Settings that should require the help listing to be regenerated, or\n# re-POSTed to paste, if they are changed during runtime.\n# Keys are module names, and values are lists of setting names\n# specific to that module.\nTRACKED_SETTINGS = {\n 'help': [\n 'output',\n 'show_server_host',\n ]\n}\n\n\nclass PostingException(Exception):\n \"\"\"Custom exception type for errors posting help to the chosen pastebin.\"\"\"\n pass\n\n\n# Pastebin handlers\n\n\ndef _requests_post_catch_errors(*args, **kwargs):\n try:\n response = requests.post(*args, **kwargs)\n response.raise_for_status()\n except (\n requests.exceptions.Timeout,\n requests.exceptions.TooManyRedirects,\n requests.exceptions.RequestException,\n requests.exceptions.HTTPError\n ):\n # We re-raise all expected exception types to a generic \"posting error\"\n # that's easy for callers to expect, and then we pass the original\n # exception through to provide some debugging info\n LOGGER.exception('Error during POST request')\n raise PostingException('Could not communicate with remote service')\n\n # remaining handling (e.g. 
errors inside the response) is left to the caller\n return response\n\n\ndef post_to_clbin(msg):\n try:\n result = _requests_post_catch_errors('https://clbin.com/', data={'clbin': msg})\n except PostingException:\n raise\n\n result = result.text\n if '://clbin.com/' in result:\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return result.replace('http://', 'https://', 1)\n else:\n LOGGER.error(\"Invalid result %s\", result)\n raise PostingException('clbin result did not contain expected URL base.')\n\n\ndef post_to_0x0(msg):\n try:\n result = _requests_post_catch_errors('https://0x0.st', files={'file': msg})\n except PostingException:\n raise\n\n result = result.text\n if '://0x0.st' in result:\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return result.replace('http://', 'https://', 1)\n else:\n LOGGER.error('Invalid result %s', result)\n raise PostingException('0x0.st result did not contain expected URL base.')\n\n\ndef post_to_hastebin(msg):\n try:\n result = _requests_post_catch_errors('https://hastebin.com/documents', data=msg)\n except PostingException:\n raise\n\n try:\n result = result.json()\n except ValueError:\n LOGGER.error(\"Invalid Hastebin response %s\", result)\n raise PostingException('Could not parse response from Hastebin!')\n\n if 'key' not in result:\n LOGGER.error(\"Invalid result %s\", result)\n raise PostingException('Hastebin result did not contain expected URL base.')\n return \"https://hastebin.com/\" + result['key']\n\n\ndef post_to_termbin(msg):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(10) # the bot may NOT wait forever for a response; that would be bad\n try:\n sock.connect(('termbin.com', 9999))\n sock.sendall(msg)\n sock.shutdown(socket.SHUT_WR)\n response = \"\"\n while 1:\n data = sock.recv(1024)\n if data == \"\":\n break\n response += data\n sock.close()\n except socket.error:\n LOGGER.exception('Error during communication with termbin')\n raise PostingException('Error uploading to termbin')\n\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return response.strip('\\x00\\n').replace('http://', 'https://', 1)\n\n\ndef post_to_ubuntu(msg):\n data = {\n 'poster': 'sopel',\n 'syntax': 'text',\n 'expiration': '',\n 'content': msg,\n }\n try:\n result = _requests_post_catch_errors('https://pastebin.ubuntu.com/', data=data)\n except PostingException:\n raise\n\n if not re.match(r'https://pastebin.ubuntu.com/p/[^/]+/', result.url):\n LOGGER.error(\"Invalid Ubuntu pastebin response url %s\", result.url)\n raise PostingException('Invalid response from Ubuntu pastebin: %s' % result.url)\n\n return result.url\n\n\nPASTEBIN_PROVIDERS = {\n 'clbin': post_to_clbin,\n '0x0': post_to_0x0,\n 'hastebin': post_to_hastebin,\n 'termbin': post_to_termbin,\n 'ubuntu': post_to_ubuntu,\n}\n\n\nclass HelpSection(StaticSection):\n \"\"\"Configuration section for this module.\"\"\"\n output = ChoiceAttribute('output',\n list(PASTEBIN_PROVIDERS),\n default='clbin')\n \"\"\"The pastebin provider to use for help output.\"\"\"\n show_server_host = ValidatedAttribute('show_server_host', bool, default=True)\n \"\"\"Show the IRC server's hostname/IP in the first line of the help listing?\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | 
name | example | purpose |\n | ---- | ------- | ------- |\n | output | clbin | The pastebin provider to use for help output |\n | show\\\\_server\\\\_host | True | Whether to show the IRC server's hostname/IP at the top of command listings |\n \"\"\"\n config.define_section('help', HelpSection)\n provider_list = ', '.join(PASTEBIN_PROVIDERS)\n config.help.configure_setting(\n 'output',\n 'Pick a pastebin provider: {}: '.format(provider_list)\n )\n config.help.configure_setting(\n 'show_server_host',\n 'Should the help command show the IRC server\\'s hostname/IP in the listing?'\n )\n\n\ndef setup(bot):\n bot.config.define_section('help', HelpSection)\n\n # Initialize memory\n if SETTING_CACHE_NAMESPACE not in bot.memory:\n bot.memory[SETTING_CACHE_NAMESPACE] = SopelMemory()\n\n # Initialize settings cache\n for section in TRACKED_SETTINGS:\n if section not in bot.memory[SETTING_CACHE_NAMESPACE]:\n bot.memory[SETTING_CACHE_NAMESPACE][section] = SopelMemory()\n\n update_cache(bot) # Populate cache\n\n bot.config.define_section('help', HelpSection)\n\n\ndef update_cache(bot):\n for section, setting_names_list in TRACKED_SETTINGS.items():\n for setting_name in setting_names_list:\n bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] = getattr(getattr(bot.config, section), setting_name)\n\n\ndef is_cache_valid(bot):\n for section, setting_names_list in TRACKED_SETTINGS.items():\n for setting_name in setting_names_list:\n if bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] != getattr(getattr(bot.config, section), setting_name):\n return False\n return True\n\n\n@rule('$nick' r'(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')\n@example('.help tell')\n@commands('help', 'commands')\n@priority('low')\ndef help(bot, trigger):\n \"\"\"Shows a command's documentation, and an example if available. With no arguments, lists all commands.\"\"\"\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n\n # number of lines of help to show\n threshold = 3\n\n if name in bot.doc:\n # count lines we're going to send\n # lines in command docstring, plus one line for example(s) if present (they're sent all on one line)\n if len(bot.doc[name][0]) + int(bool(bot.doc[name][1])) > threshold:\n if trigger.nick != trigger.sender: # don't say that if asked in private\n bot.reply('The documentation for this command is too long; '\n 'I\\'m sending it to you in a private message.')\n\n def msgfun(l):\n bot.say(l, trigger.nick)\n else:\n msgfun = bot.reply\n\n for line in bot.doc[name][0]:\n msgfun(line)\n if bot.doc[name][1]:\n # Build a nice, grammatically-correct list of examples\n examples = ', '.join(bot.doc[name][1][:-2] + [' or '.join(bot.doc[name][1][-2:])])\n msgfun('e.g. ' + examples)\n else:\n # This'll probably catch most cases, without having to spend the time\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in the DB, too, so it persists across restarts. 
Would need a\n # command to regenerate, too...\n if (\n 'command-list' in bot.memory and\n bot.memory['command-list'][0] == len(bot.command_groups) and\n is_cache_valid(bot)\n ):\n url = bot.memory['command-list'][1]\n else:\n bot.say(\"Hang on, I'm creating a list.\")\n msgs = []\n\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n\n url = create_list(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n bot.memory['command-list'] = (len(bot.command_groups), url)\n update_cache(bot)\n bot.say(\"I've posted a list of my commands at {0} - You can see \"\n \"more info about any of these commands by doing {1}help \"\n \"<command> (e.g. {1}help time)\"\n .format(url, bot.config.core.help_prefix))\n\n\ndef create_list(bot, msg):\n \"\"\"Creates & uploads the command list.\n\n Returns the URL from the chosen pastebin provider.\n \"\"\"\n msg = 'Command listing for {}{}\\n\\n{}'.format(\n bot.nick,\n ('@' + bot.config.core.host) if bot.config.help.show_server_host else '',\n msg)\n\n try:\n result = PASTEBIN_PROVIDERS[bot.config.help.output](msg)\n except PostingException:\n bot.say(\"Sorry! Something went wrong.\")\n LOGGER.exception(\"Error posting commands\")\n return\n return result\n\n\n@rule('$nick' r'(?i)help(?:[?!]+)?$')\n@priority('low')\ndef help2(bot, trigger):\n response = (\n \"Hi, I'm a bot. Say {1}commands to me in private for a list \"\n \"of my commands, or see https://sopel.chat for more \"\n \"general details. My owner is {0}.\"\n .format(bot.config.core.owner, bot.config.core.help_prefix))\n bot.reply(response)\n", "path": "sopel/modules/help.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nhelp.py - Sopel Help Module\nCopyright 2008, Sean B. 
Palmer, inamidst.com\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nCopyright \u00a9 2018, Adam Erdman, pandorah.org\nCopyright \u00a9 2019, Tomasz Kurcz, github.com/uint\nCopyright \u00a9 2019, dgw, technobabbl.es\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nimport collections\nimport socket\nimport textwrap\n\nimport requests\n\nfrom sopel.config.types import ChoiceAttribute, ValidatedAttribute, StaticSection\nfrom sopel.logger import get_logger\nfrom sopel.module import commands, rule, example, priority\nfrom sopel.tools import SopelMemory\n\n\nSETTING_CACHE_NAMESPACE = 'help-setting-cache' # Set top-level memory key name\nLOGGER = get_logger(__name__)\n\n# Settings that should require the help listing to be regenerated, or\n# re-POSTed to paste, if they are changed during runtime.\n# Keys are module names, and values are lists of setting names\n# specific to that module.\nTRACKED_SETTINGS = {\n 'help': [\n 'output',\n 'show_server_host',\n ]\n}\n\n\nclass PostingException(Exception):\n \"\"\"Custom exception type for errors posting help to the chosen pastebin.\"\"\"\n pass\n\n\n# Pastebin handlers\n\n\ndef _requests_post_catch_errors(*args, **kwargs):\n try:\n response = requests.post(*args, **kwargs)\n response.raise_for_status()\n except (\n requests.exceptions.Timeout,\n requests.exceptions.TooManyRedirects,\n requests.exceptions.RequestException,\n requests.exceptions.HTTPError\n ):\n # We re-raise all expected exception types to a generic \"posting error\"\n # that's easy for callers to expect, and then we pass the original\n # exception through to provide some debugging info\n LOGGER.exception('Error during POST request')\n raise PostingException('Could not communicate with remote service')\n\n # remaining handling (e.g. 
errors inside the response) is left to the caller\n return response\n\n\ndef post_to_clbin(msg):\n try:\n result = _requests_post_catch_errors('https://clbin.com/', data={'clbin': msg})\n except PostingException:\n raise\n\n result = result.text\n if '://clbin.com/' in result:\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return result.replace('http://', 'https://', 1)\n else:\n LOGGER.error(\"Invalid result %s\", result)\n raise PostingException('clbin result did not contain expected URL base.')\n\n\ndef post_to_0x0(msg):\n try:\n result = _requests_post_catch_errors('https://0x0.st', files={'file': msg})\n except PostingException:\n raise\n\n result = result.text\n if '://0x0.st' in result:\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return result.replace('http://', 'https://', 1)\n else:\n LOGGER.error('Invalid result %s', result)\n raise PostingException('0x0.st result did not contain expected URL base.')\n\n\ndef post_to_hastebin(msg):\n try:\n result = _requests_post_catch_errors('https://hastebin.com/documents', data=msg)\n except PostingException:\n raise\n\n try:\n result = result.json()\n except ValueError:\n LOGGER.error(\"Invalid Hastebin response %s\", result)\n raise PostingException('Could not parse response from Hastebin!')\n\n if 'key' not in result:\n LOGGER.error(\"Invalid result %s\", result)\n raise PostingException('Hastebin result did not contain expected URL base.')\n return \"https://hastebin.com/\" + result['key']\n\n\ndef post_to_termbin(msg):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(10) # the bot may NOT wait forever for a response; that would be bad\n try:\n sock.connect(('termbin.com', 9999))\n sock.sendall(msg)\n sock.shutdown(socket.SHUT_WR)\n response = \"\"\n while 1:\n data = sock.recv(1024)\n if data == \"\":\n break\n response += data\n sock.close()\n except socket.error:\n LOGGER.exception('Error during communication with termbin')\n raise PostingException('Error uploading to termbin')\n\n # find/replace just in case the site tries to be sneaky and save on SSL overhead,\n # though it will probably send us an HTTPS link without any tricks.\n return response.strip('\\x00\\n').replace('http://', 'https://', 1)\n\n\ndef post_to_ubuntu(msg):\n data = {\n 'poster': 'sopel',\n 'syntax': 'text',\n 'expiration': '',\n 'content': msg,\n }\n try:\n result = _requests_post_catch_errors('https://pastebin.ubuntu.com/', data=data)\n except PostingException:\n raise\n\n if not re.match(r'https://pastebin.ubuntu.com/p/[^/]+/', result.url):\n LOGGER.error(\"Invalid Ubuntu pastebin response url %s\", result.url)\n raise PostingException('Invalid response from Ubuntu pastebin: %s' % result.url)\n\n return result.url\n\n\nPASTEBIN_PROVIDERS = {\n 'clbin': post_to_clbin,\n '0x0': post_to_0x0,\n 'hastebin': post_to_hastebin,\n 'termbin': post_to_termbin,\n 'ubuntu': post_to_ubuntu,\n}\nREPLY_METHODS = [\n 'channel',\n 'query',\n 'notice',\n]\n\n\nclass HelpSection(StaticSection):\n \"\"\"Configuration section for this module.\"\"\"\n output = ChoiceAttribute('output',\n list(PASTEBIN_PROVIDERS),\n default='clbin')\n \"\"\"The pastebin provider to use for help output.\"\"\"\n reply_method = ChoiceAttribute('reply_method',\n REPLY_METHODS,\n default='channel')\n \"\"\"Where/how to reply to help commands 
(public/private).\"\"\"\n show_server_host = ValidatedAttribute('show_server_host', bool, default=True)\n \"\"\"Show the IRC server's hostname/IP in the first line of the help listing?\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | output | clbin | The pastebin provider to use for help output |\n | reply\\\\_method | channel | How/where help output should be sent |\n | show\\\\_server\\\\_host | True | Whether to show the IRC server's hostname/IP at the top of command listings |\n \"\"\"\n config.define_section('help', HelpSection)\n provider_list = ', '.join(PASTEBIN_PROVIDERS)\n reply_method_list = ', '.join(REPLY_METHODS)\n config.help.configure_setting(\n 'output',\n 'Pick a pastebin provider: {}: '.format(provider_list)\n )\n config.help.configure_setting(\n 'reply_method',\n 'How/where should help command replies be sent: {}? '.format(reply_method_list)\n )\n config.help.configure_setting(\n 'show_server_host',\n 'Should the help command show the IRC server\\'s hostname/IP in the listing?'\n )\n\n\ndef setup(bot):\n bot.config.define_section('help', HelpSection)\n\n # Initialize memory\n if SETTING_CACHE_NAMESPACE not in bot.memory:\n bot.memory[SETTING_CACHE_NAMESPACE] = SopelMemory()\n\n # Initialize settings cache\n for section in TRACKED_SETTINGS:\n if section not in bot.memory[SETTING_CACHE_NAMESPACE]:\n bot.memory[SETTING_CACHE_NAMESPACE][section] = SopelMemory()\n\n update_cache(bot) # Populate cache\n\n bot.config.define_section('help', HelpSection)\n\n\ndef update_cache(bot):\n for section, setting_names_list in TRACKED_SETTINGS.items():\n for setting_name in setting_names_list:\n bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] = getattr(getattr(bot.config, section), setting_name)\n\n\ndef is_cache_valid(bot):\n for section, setting_names_list in TRACKED_SETTINGS.items():\n for setting_name in setting_names_list:\n if bot.memory[SETTING_CACHE_NAMESPACE][section][setting_name] != getattr(getattr(bot.config, section), setting_name):\n return False\n return True\n\n\n@rule('$nick' r'(?i)(help|doc) +([A-Za-z]+)(?:\\?+)?$')\n@example('.help tell')\n@commands('help', 'commands')\n@priority('low')\ndef help(bot, trigger):\n \"\"\"Shows a command's documentation, and an example if available. With no arguments, lists all commands.\"\"\"\n if bot.config.help.reply_method == 'query':\n def respond(text):\n bot.say(text, trigger.nick)\n elif bot.config.help.reply_method == 'notice':\n def respond(text):\n bot.notice(text, trigger.nick)\n else:\n def respond(text):\n bot.say(text, trigger.sender)\n\n if trigger.group(2):\n name = trigger.group(2)\n name = name.lower()\n\n # number of lines of help to show\n threshold = 3\n\n if name in bot.doc:\n # count lines we're going to send\n # lines in command docstring, plus one line for example(s) if present (they're sent all on one line)\n if len(bot.doc[name][0]) + int(bool(bot.doc[name][1])) > threshold:\n if trigger.nick != trigger.sender: # don't say that if asked in private\n bot.reply('The documentation for this command is too long; '\n 'I\\'m sending it to you in a private message.')\n\n def msgfun(l):\n bot.say(l, trigger.nick)\n else:\n msgfun = respond\n\n for line in bot.doc[name][0]:\n msgfun(line)\n if bot.doc[name][1]:\n # Build a nice, grammatically-correct list of examples\n examples = ', '.join(bot.doc[name][1][:-2] + [' or '.join(bot.doc[name][1][-2:])])\n msgfun('e.g. 
' + examples)\n else:\n # This'll probably catch most cases, without having to spend the time\n # actually creating the list first. Maybe worth storing the link and a\n # heuristic in the DB, too, so it persists across restarts. Would need a\n # command to regenerate, too...\n if (\n 'command-list' in bot.memory and\n bot.memory['command-list'][0] == len(bot.command_groups) and\n is_cache_valid(bot)\n ):\n url = bot.memory['command-list'][1]\n else:\n respond(\"Hang on, I'm creating a list.\")\n msgs = []\n\n name_length = max(6, max(len(k) for k in bot.command_groups.keys()))\n for category, cmds in collections.OrderedDict(sorted(bot.command_groups.items())).items():\n category = category.upper().ljust(name_length)\n cmds = set(cmds) # remove duplicates\n cmds = ' '.join(cmds)\n msg = category + ' ' + cmds\n indent = ' ' * (name_length + 2)\n # Honestly not sure why this is a list here\n msgs.append('\\n'.join(textwrap.wrap(msg, subsequent_indent=indent)))\n\n url = create_list(bot, '\\n\\n'.join(msgs))\n if not url:\n return\n bot.memory['command-list'] = (len(bot.command_groups), url)\n update_cache(bot)\n respond(\"I've posted a list of my commands at {0} - You can see \"\n \"more info about any of these commands by doing {1}help \"\n \"<command> (e.g. {1}help time)\"\n .format(url, bot.config.core.help_prefix))\n\n\ndef create_list(bot, msg):\n \"\"\"Creates & uploads the command list.\n\n Returns the URL from the chosen pastebin provider.\n \"\"\"\n msg = 'Command listing for {}{}\\n\\n{}'.format(\n bot.nick,\n ('@' + bot.config.core.host) if bot.config.help.show_server_host else '',\n msg)\n\n try:\n result = PASTEBIN_PROVIDERS[bot.config.help.output](msg)\n except PostingException:\n bot.say(\"Sorry! Something went wrong.\")\n LOGGER.exception(\"Error posting commands\")\n return\n return result\n\n\n@rule('$nick' r'(?i)help(?:[?!]+)?$')\n@priority('low')\ndef help2(bot, trigger):\n response = (\n \"Hi, I'm a bot. Say {1}commands to me in private for a list \"\n \"of my commands, or see https://sopel.chat for more \"\n \"general details. My owner is {0}.\"\n .format(bot.config.core.owner, bot.config.core.help_prefix))\n bot.reply(response)\n", "path": "sopel/modules/help.py"}]}
3,926
862
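The record above patches Sopel's help module so replies can go to the channel, a private query, or a notice, selected by a new `reply_method` setting. Below is a minimal standalone sketch of that dispatch pattern; `Bot` and `Trigger` are illustrative stand-ins for Sopel's real objects and are not taken from the record.

```python
class Bot:
    """Illustrative stand-in for Sopel's bot object."""
    def say(self, text, dest):
        print("SAY to {}: {}".format(dest, text))

    def notice(self, text, dest):
        print("NOTICE to {}: {}".format(dest, text))


class Trigger:
    """Illustrative stand-in for Sopel's trigger object."""
    nick = "someuser"
    sender = "#somechannel"


def make_responder(bot, trigger, reply_method):
    # Mirrors the three branches the patched help() builds from
    # bot.config.help.reply_method: query, notice, or channel (default).
    if reply_method == "query":
        return lambda text: bot.say(text, trigger.nick)
    if reply_method == "notice":
        return lambda text: bot.notice(text, trigger.nick)
    return lambda text: bot.say(text, trigger.sender)


respond = make_responder(Bot(), Trigger(), "notice")
respond("e.g. .help tell")
```

In the actual patch the three branches live inline in `help()`, and the setting is validated by a `ChoiceAttribute` restricted to `channel`, `query`, and `notice`.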
gh_patches_debug_3948
rasdani/github-patches
git_diff
conan-io__conan-center-index-1822
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] thrift/0.13.0: build fails using --profile but works using --profile:host and --profile:build ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **thrift/0.13.0** * Operating System+version: **Linux Ubuntu 18.04** * Compiler+version: **GCC 7** * Conan version: **conan 1.25.2** * Python version: **Python 3.6.9** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` Configuration for profile linux-x64: [settings] os=Linux os_build=Linux arch=x86_64 arch_build=x86_64 compiler=gcc compiler.version=7 compiler.libcxx=libstdc++11 build_type=Debug [options] *:fPIC=True *:shared=True [build_requires] [env] ``` ### Steps to reproduce (Include if Applicable) I have a package that uses _thrift/0.13.0_ as build requirement (thrift compiler) and as normal requirement (thrift libs). When I build and create my package with ```conan create . --profile:host linux-x64 --profile:build linux-x64``` the build works perfectly, the thrift sources are compiled with the thrift compiler. When I use ```conan create . --profile linux-x64``` the build fails when using the thrift compiler. The difference is that in the first case, the binary directories of all build requirement packages are added to PATH environment during build and in the second case, nothing is added to PATH. The cmake build of my package expects the thrift compiler being in PATH. I was not sure if this is really a package bug and not a conan bug. Sorry if this was the wrong place. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `recipes/thrift/all/conanfile.py` Content: ``` 1 import os 2 from conans import tools, CMake, ConanFile 3 4 5 class ConanFileDefault(ConanFile): 6 name = "thrift" 7 description = "Thrift is an associated code generation mechanism for RPC" 8 topics = ("conan", "thrift", "serialization", "rpc") 9 url = "https://github.com/conan-io/conan-center-index" 10 homepage = "https://github.com/apache/thrift" 11 license = "Apache-2.0" 12 exports_sources = ["CMakeLists.txt", "patches/*.diff"] 13 generators = "cmake", "cmake_find_package" 14 15 settings = "os", "arch", "compiler", "build_type" 16 options = { 17 "shared": [True, False], 18 "fPIC": [True, False], 19 "with_zlib": [True, False], 20 "with_libevent": [True, False], 21 "with_openssl": [True, False], 22 "with_boost_functional": [True, False], 23 "with_boost_smart_ptr": [True, False], 24 "with_boost_static": [True, False], 25 "with_boostthreads": [True, False], 26 "with_stdthreads": [True, False], 27 "with_c_glib": [True, False], 28 "with_cpp": [True, False], 29 "with_java": [True, False], 30 "with_python": [True, False], 31 "with_haskell": [True, False], 32 "with_plugin": [True, False] 33 } 34 default_options = { 35 "shared": False, 36 "fPIC": True, 37 "with_zlib": True, 38 "with_libevent": True, 39 "with_openssl": True, 40 "with_boost_functional": False, 41 "with_boost_smart_ptr": False, 42 "with_boost_static": False, 43 "with_boostthreads": False, 44 "with_stdthreads": True, 45 "with_c_glib": False, 46 "with_cpp": True, 47 "with_java": False, 48 "with_python": False, 49 "with_haskell": False, 50 "with_plugin": False 51 } 52 _source_subfolder = "source_subfolder" 53 _build_subfolder = "build_subfolder" 54 55 _cmake = None 56 57 def config_options(self): 58 if self.settings.os == 'Windows': 59 del self.options.fPIC 60 61 def source(self): 62 tools.get(**self.conan_data["sources"][self.version]) 63 extracted_dir = "thrift-" + self.version 64 os.rename(extracted_dir, self._source_subfolder) 65 66 def build(self): 67 for p in self.conan_data["patches"][self.version]: 68 tools.patch(**p) 69 for f in ["Findflex.cmake", "Findbison.cmake"]: 70 if os.path.isfile(f): 71 os.unlink(f) 72 cmake = self._configure_cmake() 73 cmake.build() 74 75 def requirements(self): 76 self.requires("boost/1.73.0") 77 if self.settings.os == 'Windows': 78 self.requires("winflexbison/2.5.22") 79 else: 80 self.requires("flex/2.6.4") 81 self.requires("bison/3.5.3") 82 83 if self.options.with_openssl: 84 self.requires("openssl/1.1.1g") 85 if self.options.with_zlib: 86 self.requires("zlib/1.2.11") 87 if self.options.with_libevent: 88 self.requires("libevent/2.1.11") 89 90 def _configure_cmake(self): 91 if self._cmake: 92 return self._cmake 93 self._cmake = CMake(self) 94 for option, value in self.options.items(): 95 self._cmake.definitions[option.upper()] = value 96 97 # Make thrift use correct thread lib (see repo/build/cmake/config.h.in) 98 self._cmake.definitions["USE_STD_THREAD"] = self.options.with_stdthreads 99 self._cmake.definitions["USE_BOOST_THREAD"] = self.options.with_boostthreads 100 self._cmake.definitions["WITH_SHARED_LIB"] = self.options.shared 101 self._cmake.definitions["WITH_STATIC_LIB"] = not self.options.shared 102 self._cmake.definitions["BOOST_ROOT"] = self.deps_cpp_info['boost'].rootpath 103 self._cmake.definitions["BUILD_TESTING"] = False 104 self._cmake.definitions["BUILD_COMPILER"] = True 105 self._cmake.definitions["BUILD_LIBRARIES"] = True 106 self._cmake.definitions["BUILD_EXAMPLES"] = False 107 
self._cmake.definitions["BUILD_TUTORIALS"] = False 108 109 # Make optional libs "findable" 110 if self.options.with_openssl: 111 self._cmake.definitions["OPENSSL_ROOT_DIR"] = self.deps_cpp_info['openssl'].rootpath 112 if self.options.with_zlib: 113 self._cmake.definitions["ZLIB_ROOT"] = self.deps_cpp_info['zlib'].rootpath 114 if self.options.with_libevent: 115 self._cmake.definitions["LIBEVENT_ROOT"] = self.deps_cpp_info['libevent'].rootpath 116 117 self._cmake.configure(build_folder=self._build_subfolder) 118 return self._cmake 119 120 def package(self): 121 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) 122 cmake = self._configure_cmake() 123 cmake.install() 124 # Copy generated headers from build tree 125 build_source_dir = os.path.join(self._build_subfolder, self._source_subfolder) 126 self.copy(pattern="*.h", dst="include", src=build_source_dir, keep_path=True) 127 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake")) 128 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) 129 130 def package_info(self): 131 self.cpp_info.libs = tools.collect_libs(self) 132 # Make sure libs are link in correct order. Important thing is that libthrift/thrift is last 133 # (a little naive to sort, but libthrift/thrift should end up last since rest of the libs extend it with an abbrevation: 'thriftnb', 'thriftz') 134 # The library that needs symbols must be first, then the library that resolves the symbols should come after. 135 self.cpp_info.libs.sort(reverse = True) 136 137 if self.settings.os == "Windows": 138 # To avoid error C2589: '(' : illegal token on right side of '::' 139 self.cpp_info.defines.append("NOMINMAX") 140 elif self.settings.os == "Linux": 141 self.cpp_info.system_libs.extend(["m", "pthread"]) 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/thrift/all/conanfile.py b/recipes/thrift/all/conanfile.py --- a/recipes/thrift/all/conanfile.py +++ b/recipes/thrift/all/conanfile.py @@ -139,3 +139,7 @@ self.cpp_info.defines.append("NOMINMAX") elif self.settings.os == "Linux": self.cpp_info.system_libs.extend(["m", "pthread"]) + + bin_path = os.path.join(self.package_folder, "bin") + self.output.info("Appending PATH env var with : {}".format(bin_path)) + self.env_info.PATH.append(bin_path)
{"golden_diff": "diff --git a/recipes/thrift/all/conanfile.py b/recipes/thrift/all/conanfile.py\n--- a/recipes/thrift/all/conanfile.py\n+++ b/recipes/thrift/all/conanfile.py\n@@ -139,3 +139,7 @@\n self.cpp_info.defines.append(\"NOMINMAX\")\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n+\n+ bin_path = os.path.join(self.package_folder, \"bin\")\n+ self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n+ self.env_info.PATH.append(bin_path)\n", "issue": "[package] thrift/0.13.0: build fails using --profile but works using --profile:host and --profile:build\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **thrift/0.13.0**\r\n * Operating System+version: **Linux Ubuntu 18.04**\r\n * Compiler+version: **GCC 7**\r\n * Conan version: **conan 1.25.2**\r\n * Python version: **Python 3.6.9**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\nConfiguration for profile linux-x64:\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=7\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Debug\r\n[options]\r\n*:fPIC=True\r\n*:shared=True\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nI have a package that uses _thrift/0.13.0_ as build requirement (thrift compiler) and as normal requirement (thrift libs). When I build and create my package with\r\n\r\n```conan create . --profile:host linux-x64 --profile:build linux-x64```\r\n\r\nthe build works perfectly, the thrift sources are compiled with the thrift compiler. When I use\r\n\r\n```conan create . --profile linux-x64```\r\n\r\nthe build fails when using the thrift compiler. The difference is that in the first case, the binary directories of all build requirement packages are added to PATH environment during build and in the second case, nothing is added to PATH. The cmake build of my package expects the thrift compiler being in PATH.\r\n\r\nI was not sure if this is really a package bug and not a conan bug. 
Sorry if this was the wrong place.\r\n\r\n\n", "before_files": [{"content": "import os\nfrom conans import tools, CMake, ConanFile\n\n\nclass ConanFileDefault(ConanFile):\n name = \"thrift\"\n description = \"Thrift is an associated code generation mechanism for RPC\"\n topics = (\"conan\", \"thrift\", \"serialization\", \"rpc\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/apache/thrift\"\n license = \"Apache-2.0\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*.diff\"]\n generators = \"cmake\", \"cmake_find_package\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_zlib\": [True, False],\n \"with_libevent\": [True, False],\n \"with_openssl\": [True, False],\n \"with_boost_functional\": [True, False],\n \"with_boost_smart_ptr\": [True, False],\n \"with_boost_static\": [True, False],\n \"with_boostthreads\": [True, False],\n \"with_stdthreads\": [True, False],\n \"with_c_glib\": [True, False],\n \"with_cpp\": [True, False],\n \"with_java\": [True, False],\n \"with_python\": [True, False],\n \"with_haskell\": [True, False],\n \"with_plugin\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_zlib\": True,\n \"with_libevent\": True,\n \"with_openssl\": True,\n \"with_boost_functional\": False,\n \"with_boost_smart_ptr\": False,\n \"with_boost_static\": False,\n \"with_boostthreads\": False,\n \"with_stdthreads\": True,\n \"with_c_glib\": False,\n \"with_cpp\": True,\n \"with_java\": False,\n \"with_python\": False,\n \"with_haskell\": False,\n \"with_plugin\": False\n }\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n _cmake = None\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"thrift-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def build(self):\n for p in self.conan_data[\"patches\"][self.version]:\n tools.patch(**p)\n for f in [\"Findflex.cmake\", \"Findbison.cmake\"]:\n if os.path.isfile(f):\n os.unlink(f)\n cmake = self._configure_cmake()\n cmake.build()\n\n def requirements(self):\n self.requires(\"boost/1.73.0\")\n if self.settings.os == 'Windows':\n self.requires(\"winflexbison/2.5.22\")\n else:\n self.requires(\"flex/2.6.4\")\n self.requires(\"bison/3.5.3\")\n\n if self.options.with_openssl:\n self.requires(\"openssl/1.1.1g\")\n if self.options.with_zlib:\n self.requires(\"zlib/1.2.11\")\n if self.options.with_libevent:\n self.requires(\"libevent/2.1.11\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n for option, value in self.options.items():\n self._cmake.definitions[option.upper()] = value\n\n # Make thrift use correct thread lib (see repo/build/cmake/config.h.in)\n self._cmake.definitions[\"USE_STD_THREAD\"] = self.options.with_stdthreads\n self._cmake.definitions[\"USE_BOOST_THREAD\"] = self.options.with_boostthreads\n self._cmake.definitions[\"WITH_SHARED_LIB\"] = self.options.shared\n self._cmake.definitions[\"WITH_STATIC_LIB\"] = not self.options.shared\n self._cmake.definitions[\"BOOST_ROOT\"] = self.deps_cpp_info['boost'].rootpath\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"BUILD_COMPILER\"] = True\n self._cmake.definitions[\"BUILD_LIBRARIES\"] = True\n self._cmake.definitions[\"BUILD_EXAMPLES\"] = 
False\n self._cmake.definitions[\"BUILD_TUTORIALS\"] = False\n\n # Make optional libs \"findable\"\n if self.options.with_openssl:\n self._cmake.definitions[\"OPENSSL_ROOT_DIR\"] = self.deps_cpp_info['openssl'].rootpath\n if self.options.with_zlib:\n self._cmake.definitions[\"ZLIB_ROOT\"] = self.deps_cpp_info['zlib'].rootpath\n if self.options.with_libevent:\n self._cmake.definitions[\"LIBEVENT_ROOT\"] = self.deps_cpp_info['libevent'].rootpath\n\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n # Copy generated headers from build tree\n build_source_dir = os.path.join(self._build_subfolder, self._source_subfolder)\n self.copy(pattern=\"*.h\", dst=\"include\", src=build_source_dir, keep_path=True)\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n # Make sure libs are link in correct order. Important thing is that libthrift/thrift is last\n # (a little naive to sort, but libthrift/thrift should end up last since rest of the libs extend it with an abbrevation: 'thriftnb', 'thriftz')\n # The library that needs symbols must be first, then the library that resolves the symbols should come after.\n self.cpp_info.libs.sort(reverse = True)\n\n if self.settings.os == \"Windows\":\n # To avoid error C2589: '(' : illegal token on right side of '::'\n self.cpp_info.defines.append(\"NOMINMAX\")\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n", "path": "recipes/thrift/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conans import tools, CMake, ConanFile\n\n\nclass ConanFileDefault(ConanFile):\n name = \"thrift\"\n description = \"Thrift is an associated code generation mechanism for RPC\"\n topics = (\"conan\", \"thrift\", \"serialization\", \"rpc\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/apache/thrift\"\n license = \"Apache-2.0\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*.diff\"]\n generators = \"cmake\", \"cmake_find_package\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_zlib\": [True, False],\n \"with_libevent\": [True, False],\n \"with_openssl\": [True, False],\n \"with_boost_functional\": [True, False],\n \"with_boost_smart_ptr\": [True, False],\n \"with_boost_static\": [True, False],\n \"with_boostthreads\": [True, False],\n \"with_stdthreads\": [True, False],\n \"with_c_glib\": [True, False],\n \"with_cpp\": [True, False],\n \"with_java\": [True, False],\n \"with_python\": [True, False],\n \"with_haskell\": [True, False],\n \"with_plugin\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_zlib\": True,\n \"with_libevent\": True,\n \"with_openssl\": True,\n \"with_boost_functional\": False,\n \"with_boost_smart_ptr\": False,\n \"with_boost_static\": False,\n \"with_boostthreads\": False,\n \"with_stdthreads\": True,\n \"with_c_glib\": False,\n \"with_cpp\": True,\n \"with_java\": False,\n \"with_python\": False,\n \"with_haskell\": False,\n \"with_plugin\": False\n }\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n _cmake = None\n\n def 
config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"thrift-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def build(self):\n for p in self.conan_data[\"patches\"][self.version]:\n tools.patch(**p)\n for f in [\"Findflex.cmake\", \"Findbison.cmake\"]:\n if os.path.isfile(f):\n os.unlink(f)\n cmake = self._configure_cmake()\n cmake.build()\n\n def requirements(self):\n self.requires(\"boost/1.73.0\")\n if self.settings.os == 'Windows':\n self.requires(\"winflexbison/2.5.22\")\n else:\n self.requires(\"flex/2.6.4\")\n self.requires(\"bison/3.5.3\")\n\n if self.options.with_openssl:\n self.requires(\"openssl/1.1.1g\")\n if self.options.with_zlib:\n self.requires(\"zlib/1.2.11\")\n if self.options.with_libevent:\n self.requires(\"libevent/2.1.11\")\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n for option, value in self.options.items():\n self._cmake.definitions[option.upper()] = value\n\n # Make thrift use correct thread lib (see repo/build/cmake/config.h.in)\n self._cmake.definitions[\"USE_STD_THREAD\"] = self.options.with_stdthreads\n self._cmake.definitions[\"USE_BOOST_THREAD\"] = self.options.with_boostthreads\n self._cmake.definitions[\"WITH_SHARED_LIB\"] = self.options.shared\n self._cmake.definitions[\"WITH_STATIC_LIB\"] = not self.options.shared\n self._cmake.definitions[\"BOOST_ROOT\"] = self.deps_cpp_info['boost'].rootpath\n self._cmake.definitions[\"BUILD_TESTING\"] = False\n self._cmake.definitions[\"BUILD_COMPILER\"] = True\n self._cmake.definitions[\"BUILD_LIBRARIES\"] = True\n self._cmake.definitions[\"BUILD_EXAMPLES\"] = False\n self._cmake.definitions[\"BUILD_TUTORIALS\"] = False\n\n # Make optional libs \"findable\"\n if self.options.with_openssl:\n self._cmake.definitions[\"OPENSSL_ROOT_DIR\"] = self.deps_cpp_info['openssl'].rootpath\n if self.options.with_zlib:\n self._cmake.definitions[\"ZLIB_ROOT\"] = self.deps_cpp_info['zlib'].rootpath\n if self.options.with_libevent:\n self._cmake.definitions[\"LIBEVENT_ROOT\"] = self.deps_cpp_info['libevent'].rootpath\n\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n # Copy generated headers from build tree\n build_source_dir = os.path.join(self._build_subfolder, self._source_subfolder)\n self.copy(pattern=\"*.h\", dst=\"include\", src=build_source_dir, keep_path=True)\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n # Make sure libs are link in correct order. 
Important thing is that libthrift/thrift is last\n # (a little naive to sort, but libthrift/thrift should end up last since rest of the libs extend it with an abbrevation: 'thriftnb', 'thriftz')\n # The library that needs symbols must be first, then the library that resolves the symbols should come after.\n self.cpp_info.libs.sort(reverse = True)\n\n if self.settings.os == \"Windows\":\n # To avoid error C2589: '(' : illegal token on right side of '::'\n self.cpp_info.defines.append(\"NOMINMAX\")\n elif self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/thrift/all/conanfile.py"}]}
2,460
142
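The golden diff in the record above resolves the PATH problem by having the thrift recipe append its own `bin` directory to `env_info.PATH` in `package_info()`. As a hedged sketch only: a Conan 1.x consumer that uses thrift both as a library requirement and as a build requirement can then rely on the compiler being on PATH during its build, even with a single `--profile`. The package name `demo` and the `thrift --version` call below are illustrative assumptions, not taken from the issue.

```python
from conans import ConanFile


class DemoConan(ConanFile):
    # Hypothetical consumer recipe, for illustration only.
    name = "demo"
    version = "0.1"
    settings = "os", "arch", "compiler", "build_type"
    requires = "thrift/0.13.0"        # thrift libraries to link against
    build_requires = "thrift/0.13.0"  # thrift compiler needed at build time

    def build(self):
        # Because the patched recipe appends its bin folder to env_info.PATH,
        # and Conan 1.x injects env_info from build requirements into the
        # consumer's build environment, the compiler resolves here.
        self.run("thrift --version")
```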
gh_patches_debug_17604
rasdani/github-patches
git_diff
DataDog__dd-trace-py-4250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot call sqlite3.backup(db) on a TracedSQLite object Thanks for taking the time for reporting an issue! Before reporting an issue on dd-trace-py, please be sure to provide all necessary information. If you're hitting a bug, make sure that you're using the latest version of this library. ### Which version of dd-trace-py are you using? 1.5.0 ### Which version of pip are you using? 21.1.1 _ddtrace requires pip>=18 to install one of our pre-built wheels_ ### Which version of the libraries are you using? You can copy/paste the output of `pip freeze` here. ``` ddtrace==1.5.0 ``` ### How can we reproduce your problem? ``` from ddtrace import config, patch_all import sqlite3 config.env = "test" # the environment the application is in config.service = "app" # name of your application config.version = "v1" # version of your application patch_all() src = sqlite3.connect("1.db") dst = sqlite3.connect("2.db") with dst: src.backup(dst, pages=1) dst.close() src.close() ``` ### What is the result that you get? The following TypeError ``` TypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite ``` ### What is the result that you expected? The function should succeed without error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/sqlite3/patch.py` Content: ``` 1 import os 2 import sqlite3 3 import sqlite3.dbapi2 4 5 from ddtrace import config 6 from ddtrace.vendor import wrapt 7 8 from ...contrib.dbapi import FetchTracedCursor 9 from ...contrib.dbapi import TracedConnection 10 from ...contrib.dbapi import TracedCursor 11 from ...internal.utils.formats import asbool 12 from ...pin import Pin 13 14 15 # Original connect method 16 _connect = sqlite3.connect 17 18 config._add( 19 "sqlite", 20 dict( 21 _default_service="sqlite", 22 _dbapi_span_name_prefix="sqlite", 23 trace_fetch_methods=asbool(os.getenv("DD_SQLITE_TRACE_FETCH_METHODS", default=False)), 24 ), 25 ) 26 27 28 def patch(): 29 wrapped = wrapt.FunctionWrapper(_connect, traced_connect) 30 31 setattr(sqlite3, "connect", wrapped) 32 setattr(sqlite3.dbapi2, "connect", wrapped) 33 34 35 def unpatch(): 36 sqlite3.connect = _connect 37 sqlite3.dbapi2.connect = _connect 38 39 40 def traced_connect(func, _, args, kwargs): 41 conn = func(*args, **kwargs) 42 return patch_conn(conn) 43 44 45 def patch_conn(conn): 46 wrapped = TracedSQLite(conn) 47 Pin().onto(wrapped) 48 return wrapped 49 50 51 class TracedSQLiteCursor(TracedCursor): 52 def executemany(self, *args, **kwargs): 53 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 54 super(TracedSQLiteCursor, self).executemany(*args, **kwargs) 55 return self 56 57 def execute(self, *args, **kwargs): 58 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 59 super(TracedSQLiteCursor, self).execute(*args, **kwargs) 60 return self 61 62 63 class TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor): 64 pass 65 66 67 class TracedSQLite(TracedConnection): 68 def __init__(self, conn, pin=None, cursor_cls=None): 69 if not cursor_cls: 70 # Do not trace `fetch*` methods by default 71 cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor 72 73 super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls) 74 75 def execute(self, *args, **kwargs): 76 # sqlite has a 
few extra sugar functions 77 return self.cursor().execute(*args, **kwargs) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py --- a/ddtrace/contrib/sqlite3/patch.py +++ b/ddtrace/contrib/sqlite3/patch.py @@ -1,6 +1,7 @@ import os import sqlite3 import sqlite3.dbapi2 +import sys from ddtrace import config from ddtrace.vendor import wrapt @@ -75,3 +76,13 @@ def execute(self, *args, **kwargs): # sqlite has a few extra sugar functions return self.cursor().execute(*args, **kwargs) + + # backup was added in Python 3.7 + if sys.version_info >= (3, 7, 0): + + def backup(self, target, *args, **kwargs): + # sqlite3 checks the type of `target`, it cannot be a wrapped connection + # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899 + if isinstance(target, TracedConnection): + target = target.__wrapped__ + return self.__wrapped__.backup(target, *args, **kwargs)
{"golden_diff": "diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py\n--- a/ddtrace/contrib/sqlite3/patch.py\n+++ b/ddtrace/contrib/sqlite3/patch.py\n@@ -1,6 +1,7 @@\n import os\n import sqlite3\n import sqlite3.dbapi2\n+import sys\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -75,3 +76,13 @@\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n+\n+ # backup was added in Python 3.7\n+ if sys.version_info >= (3, 7, 0):\n+\n+ def backup(self, target, *args, **kwargs):\n+ # sqlite3 checks the type of `target`, it cannot be a wrapped connection\n+ # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899\n+ if isinstance(target, TracedConnection):\n+ target = target.__wrapped__\n+ return self.__wrapped__.backup(target, *args, **kwargs)\n", "issue": "Cannot call sqlite3.backup(db) on a TracedSQLite object\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n\r\n### Which version of dd-trace-py are you using?\r\n1.5.0\r\n### Which version of pip are you using?\r\n21.1.1\r\n_ddtrace requires pip>=18 to install one of our pre-built wheels_\r\n\r\n### Which version of the libraries are you using?\r\n\r\nYou can copy/paste the output of `pip freeze` here.\r\n\r\n```\r\nddtrace==1.5.0\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n```\r\nfrom ddtrace import config, patch_all\r\nimport sqlite3\r\n\r\nconfig.env = \"test\" # the environment the application is in\r\nconfig.service = \"app\" # name of your application\r\nconfig.version = \"v1\" # version of your application\r\npatch_all()\r\n\r\nsrc = sqlite3.connect(\"1.db\")\r\ndst = sqlite3.connect(\"2.db\")\r\nwith dst:\r\n src.backup(dst, pages=1)\r\ndst.close()\r\nsrc.close()\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe following TypeError\r\n```\r\nTypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nThe function should succeed without error.\r\n\n", "before_files": [{"content": "import os\nimport sqlite3\nimport sqlite3.dbapi2\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...contrib.dbapi import FetchTracedCursor\nfrom ...contrib.dbapi import TracedConnection\nfrom ...contrib.dbapi import TracedCursor\nfrom ...internal.utils.formats import asbool\nfrom ...pin import Pin\n\n\n# Original connect method\n_connect = sqlite3.connect\n\nconfig._add(\n \"sqlite\",\n dict(\n _default_service=\"sqlite\",\n _dbapi_span_name_prefix=\"sqlite\",\n trace_fetch_methods=asbool(os.getenv(\"DD_SQLITE_TRACE_FETCH_METHODS\", default=False)),\n ),\n)\n\n\ndef patch():\n wrapped = wrapt.FunctionWrapper(_connect, traced_connect)\n\n setattr(sqlite3, \"connect\", wrapped)\n setattr(sqlite3.dbapi2, \"connect\", wrapped)\n\n\ndef unpatch():\n sqlite3.connect = _connect\n sqlite3.dbapi2.connect = _connect\n\n\ndef traced_connect(func, _, args, kwargs):\n conn = func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef patch_conn(conn):\n wrapped = TracedSQLite(conn)\n Pin().onto(wrapped)\n return wrapped\n\n\nclass TracedSQLiteCursor(TracedCursor):\n def executemany(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back 
the cursor instance\n super(TracedSQLiteCursor, self).executemany(*args, **kwargs)\n return self\n\n def execute(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).execute(*args, **kwargs)\n return self\n\n\nclass TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):\n pass\n\n\nclass TracedSQLite(TracedConnection):\n def __init__(self, conn, pin=None, cursor_cls=None):\n if not cursor_cls:\n # Do not trace `fetch*` methods by default\n cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor\n\n super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls)\n\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n", "path": "ddtrace/contrib/sqlite3/patch.py"}], "after_files": [{"content": "import os\nimport sqlite3\nimport sqlite3.dbapi2\nimport sys\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...contrib.dbapi import FetchTracedCursor\nfrom ...contrib.dbapi import TracedConnection\nfrom ...contrib.dbapi import TracedCursor\nfrom ...internal.utils.formats import asbool\nfrom ...pin import Pin\n\n\n# Original connect method\n_connect = sqlite3.connect\n\nconfig._add(\n \"sqlite\",\n dict(\n _default_service=\"sqlite\",\n _dbapi_span_name_prefix=\"sqlite\",\n trace_fetch_methods=asbool(os.getenv(\"DD_SQLITE_TRACE_FETCH_METHODS\", default=False)),\n ),\n)\n\n\ndef patch():\n wrapped = wrapt.FunctionWrapper(_connect, traced_connect)\n\n setattr(sqlite3, \"connect\", wrapped)\n setattr(sqlite3.dbapi2, \"connect\", wrapped)\n\n\ndef unpatch():\n sqlite3.connect = _connect\n sqlite3.dbapi2.connect = _connect\n\n\ndef traced_connect(func, _, args, kwargs):\n conn = func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef patch_conn(conn):\n wrapped = TracedSQLite(conn)\n Pin().onto(wrapped)\n return wrapped\n\n\nclass TracedSQLiteCursor(TracedCursor):\n def executemany(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).executemany(*args, **kwargs)\n return self\n\n def execute(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).execute(*args, **kwargs)\n return self\n\n\nclass TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):\n pass\n\n\nclass TracedSQLite(TracedConnection):\n def __init__(self, conn, pin=None, cursor_cls=None):\n if not cursor_cls:\n # Do not trace `fetch*` methods by default\n cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor\n\n super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls)\n\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n\n # backup was added in Python 3.7\n if sys.version_info >= (3, 7, 0):\n\n def backup(self, target, *args, **kwargs):\n # sqlite3 checks the type of `target`, it cannot be a wrapped connection\n # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899\n if isinstance(target, TracedConnection):\n target = target.__wrapped__\n return self.__wrapped__.backup(target, *args, **kwargs)\n", "path": "ddtrace/contrib/sqlite3/patch.py"}]}
1,259
307
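The patch in the record above adds a `backup()` override to `TracedSQLite` that unwraps a traced target before delegating, because `sqlite3.Connection.backup()` (Python 3.7+) type-checks its first argument. The snippet below is a self-contained sketch of that unwrapping pattern against plain `sqlite3`; `ConnectionWrapper` is an illustrative class, not ddtrace's wrapper.

```python
import sqlite3


class ConnectionWrapper:
    """Illustrative wrapper standing in for a traced connection."""

    def __init__(self, conn):
        self.__wrapped__ = conn

    def __getattr__(self, name):
        # Delegate everything else to the underlying sqlite3.Connection.
        return getattr(self.__wrapped__, name)

    def backup(self, target, *args, **kwargs):
        # sqlite3 rejects wrapper types, so hand it the real connection.
        if isinstance(target, ConnectionWrapper):
            target = target.__wrapped__
        return self.__wrapped__.backup(target, *args, **kwargs)


src = ConnectionWrapper(sqlite3.connect(":memory:"))
dst = ConnectionWrapper(sqlite3.connect(":memory:"))
src.execute("CREATE TABLE t (x INTEGER)")
src.backup(dst)  # succeeds: the real Connection reaches sqlite3
dst.close()
src.close()
```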
gh_patches_debug_25631
rasdani/github-patches
git_diff
pytorch__vision-1149
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- A small bug in ImageNet Dataset Implementation Currently, all values (i.e., index) in the `class_to_idx` and `wnid_to_idx` of `ImageNet` are 0. The current code is as follows where `idcs` is wrong since it will fetch the indices of images instead of classes. ```python idcs = [idx for _, idx in self.imgs] self.wnids = self.classes self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)} self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] self.class_to_idx = {cls: idx for clss, idx in zip(self.classes, idcs) for cls in clss} ``` A simple fix will be, ```python self.wnids = self.classes self.wnid_to_idx = self.class_to_idx # ImageFolder handles it well self.classes, self.class_to_idx = [], {} for wnid, idx in self.wnid_to_idx.items(): classes = wnid_to_classes[wnid] self.classes.append(classes) for c in classes: self.class_to_idx[c] = idx ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/datasets/imagenet.py` Content: ``` 1 from __future__ import print_function 2 import os 3 import shutil 4 import tempfile 5 import torch 6 from .folder import ImageFolder 7 from .utils import check_integrity, download_and_extract_archive, extract_archive 8 9 ARCHIVE_DICT = { 10 'train': { 11 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar', 12 'md5': '1d675b47d978889d74fa0da5fadfb00e', 13 }, 14 'val': { 15 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar', 16 'md5': '29b22e2961454d5413ddabcf34fc5622', 17 }, 18 'devkit': { 19 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz', 20 'md5': 'fa75699e90414af021442c21a62c3abf', 21 } 22 } 23 24 25 class ImageNet(ImageFolder): 26 """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset. 27 28 Args: 29 root (string): Root directory of the ImageNet Dataset. 30 split (string, optional): The dataset split, supports ``train``, or ``val``. 31 download (bool, optional): If true, downloads the dataset from the internet and 32 puts it in root directory. If dataset is already downloaded, it is not 33 downloaded again. 34 transform (callable, optional): A function/transform that takes in an PIL image 35 and returns a transformed version. E.g, ``transforms.RandomCrop`` 36 target_transform (callable, optional): A function/transform that takes in the 37 target and transforms it. 38 loader (callable, optional): A function to load an image given its path. 39 40 Attributes: 41 classes (list): List of the class names. 42 class_to_idx (dict): Dict with items (class_name, class_index). 43 wnids (list): List of the WordNet IDs. 44 wnid_to_idx (dict): Dict with items (wordnet_id, class_index). 
45 imgs (list): List of (image path, class_index) tuples 46 targets (list): The class_index value for each image in the dataset 47 """ 48 49 def __init__(self, root, split='train', download=False, **kwargs): 50 root = self.root = os.path.expanduser(root) 51 self.split = self._verify_split(split) 52 53 if download: 54 self.download() 55 wnid_to_classes = self._load_meta_file()[0] 56 57 super(ImageNet, self).__init__(self.split_folder, **kwargs) 58 self.root = root 59 60 idcs = [idx for _, idx in self.imgs] 61 self.wnids = self.classes 62 self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)} 63 self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] 64 self.class_to_idx = {cls: idx 65 for clss, idx in zip(self.classes, idcs) 66 for cls in clss} 67 68 def download(self): 69 if not check_integrity(self.meta_file): 70 tmp_dir = tempfile.mkdtemp() 71 72 archive_dict = ARCHIVE_DICT['devkit'] 73 download_and_extract_archive(archive_dict['url'], self.root, 74 extract_root=tmp_dir, 75 md5=archive_dict['md5']) 76 devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0] 77 meta = parse_devkit(os.path.join(tmp_dir, devkit_folder)) 78 self._save_meta_file(*meta) 79 80 shutil.rmtree(tmp_dir) 81 82 if not os.path.isdir(self.split_folder): 83 archive_dict = ARCHIVE_DICT[self.split] 84 download_and_extract_archive(archive_dict['url'], self.root, 85 extract_root=self.split_folder, 86 md5=archive_dict['md5']) 87 88 if self.split == 'train': 89 prepare_train_folder(self.split_folder) 90 elif self.split == 'val': 91 val_wnids = self._load_meta_file()[1] 92 prepare_val_folder(self.split_folder, val_wnids) 93 else: 94 msg = ("You set download=True, but a folder '{}' already exist in " 95 "the root directory. If you want to re-download or re-extract the " 96 "archive, delete the folder.") 97 print(msg.format(self.split)) 98 99 @property 100 def meta_file(self): 101 return os.path.join(self.root, 'meta.bin') 102 103 def _load_meta_file(self): 104 if check_integrity(self.meta_file): 105 return torch.load(self.meta_file) 106 else: 107 raise RuntimeError("Meta file not found or corrupted.", 108 "You can use download=True to create it.") 109 110 def _save_meta_file(self, wnid_to_class, val_wnids): 111 torch.save((wnid_to_class, val_wnids), self.meta_file) 112 113 def _verify_split(self, split): 114 if split not in self.valid_splits: 115 msg = "Unknown split {} .".format(split) 116 msg += "Valid splits are {{}}.".format(", ".join(self.valid_splits)) 117 raise ValueError(msg) 118 return split 119 120 @property 121 def valid_splits(self): 122 return 'train', 'val' 123 124 @property 125 def split_folder(self): 126 return os.path.join(self.root, self.split) 127 128 def extra_repr(self): 129 return "Split: {split}".format(**self.__dict__) 130 131 132 def parse_devkit(root): 133 idx_to_wnid, wnid_to_classes = parse_meta(root) 134 val_idcs = parse_val_groundtruth(root) 135 val_wnids = [idx_to_wnid[idx] for idx in val_idcs] 136 return wnid_to_classes, val_wnids 137 138 139 def parse_meta(devkit_root, path='data', filename='meta.mat'): 140 import scipy.io as sio 141 142 metafile = os.path.join(devkit_root, path, filename) 143 meta = sio.loadmat(metafile, squeeze_me=True)['synsets'] 144 nums_children = list(zip(*meta))[4] 145 meta = [meta[idx] for idx, num_children in enumerate(nums_children) 146 if num_children == 0] 147 idcs, wnids, classes = list(zip(*meta))[:3] 148 classes = [tuple(clss.split(', ')) for clss in classes] 149 idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)} 150 
wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)} 151 return idx_to_wnid, wnid_to_classes 152 153 154 def parse_val_groundtruth(devkit_root, path='data', 155 filename='ILSVRC2012_validation_ground_truth.txt'): 156 with open(os.path.join(devkit_root, path, filename), 'r') as txtfh: 157 val_idcs = txtfh.readlines() 158 return [int(val_idx) for val_idx in val_idcs] 159 160 161 def prepare_train_folder(folder): 162 for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]: 163 extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True) 164 165 166 def prepare_val_folder(folder, wnids): 167 img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)]) 168 169 for wnid in set(wnids): 170 os.mkdir(os.path.join(folder, wnid)) 171 172 for wnid, img_file in zip(wnids, img_files): 173 shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file))) 174 175 176 def _splitexts(root): 177 exts = [] 178 ext = '.' 179 while ext: 180 root, ext = os.path.splitext(root) 181 exts.append(ext) 182 return root, ''.join(reversed(exts)) 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py --- a/torchvision/datasets/imagenet.py +++ b/torchvision/datasets/imagenet.py @@ -38,7 +38,7 @@ loader (callable, optional): A function to load an image given its path. Attributes: - classes (list): List of the class names. + classes (list): List of the class name tuples. class_to_idx (dict): Dict with items (class_name, class_index). wnids (list): List of the WordNet IDs. wnid_to_idx (dict): Dict with items (wordnet_id, class_index). @@ -57,12 +57,11 @@ super(ImageNet, self).__init__(self.split_folder, **kwargs) self.root = root - idcs = [idx for _, idx in self.imgs] self.wnids = self.classes - self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)} + self.wnid_to_idx = self.class_to_idx self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] self.class_to_idx = {cls: idx - for clss, idx in zip(self.classes, idcs) + for idx, clss in enumerate(self.classes) for cls in clss} def download(self):
{"golden_diff": "diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py\n--- a/torchvision/datasets/imagenet.py\n+++ b/torchvision/datasets/imagenet.py\n@@ -38,7 +38,7 @@\n loader (callable, optional): A function to load an image given its path.\n \n Attributes:\n- classes (list): List of the class names.\n+ classes (list): List of the class name tuples.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n@@ -57,12 +57,11 @@\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n \n- idcs = [idx for _, idx in self.imgs]\n self.wnids = self.classes\n- self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n+ self.wnid_to_idx = self.class_to_idx\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n- for clss, idx in zip(self.classes, idcs)\n+ for idx, clss in enumerate(self.classes)\n for cls in clss}\n \n def download(self):\n", "issue": "A small bug in ImageNet Dataset Implementation\nCurrently, all values (i.e., index) in the `class_to_idx` and `wnid_to_idx` of `ImageNet` are 0.\r\nThe current code is as follows where `idcs` is wrong since it will fetch the indices of images instead of classes.\r\n```python\r\n idcs = [idx for _, idx in self.imgs]\r\n self.wnids = self.classes\r\n self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\r\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\r\n self.class_to_idx = {cls: idx\r\n for clss, idx in zip(self.classes, idcs)\r\n for cls in clss}\r\n```\r\nA simple fix will be,\r\n```python\r\n self.wnids = self.classes\r\n self.wnid_to_idx = self.class_to_idx # ImageFolder handles it well\r\n self.classes, self.class_to_idx = [], {}\r\n for wnid, idx in self.wnid_to_idx.items():\r\n classes = wnid_to_classes[wnid]\r\n self.classes.append(classes)\r\n for c in classes:\r\n self.class_to_idx[c] = idx\r\n```\n", "before_files": [{"content": "from __future__ import print_function\nimport os\nimport shutil\nimport tempfile\nimport torch\nfrom .folder import ImageFolder\nfrom .utils import check_integrity, download_and_extract_archive, extract_archive\n\nARCHIVE_DICT = {\n 'train': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n 'md5': '1d675b47d978889d74fa0da5fadfb00e',\n },\n 'val': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n 'md5': '29b22e2961454d5413ddabcf34fc5622',\n },\n 'devkit': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n 'md5': 'fa75699e90414af021442c21a62c3abf',\n }\n}\n\n\nclass ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n Args:\n root (string): Root directory of the ImageNet Dataset.\n split (string, optional): The dataset split, supports ``train``, or ``val``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, split='train', download=False, **kwargs):\n root = self.root = os.path.expanduser(root)\n self.split = self._verify_split(split)\n\n if download:\n self.download()\n wnid_to_classes = self._load_meta_file()[0]\n\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n\n idcs = [idx for _, idx in self.imgs]\n self.wnids = self.classes\n self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n for clss, idx in zip(self.classes, idcs)\n for cls in clss}\n\n def download(self):\n if not check_integrity(self.meta_file):\n tmp_dir = tempfile.mkdtemp()\n\n archive_dict = ARCHIVE_DICT['devkit']\n download_and_extract_archive(archive_dict['url'], self.root,\n extract_root=tmp_dir,\n md5=archive_dict['md5'])\n devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))\n self._save_meta_file(*meta)\n\n shutil.rmtree(tmp_dir)\n\n if not os.path.isdir(self.split_folder):\n archive_dict = ARCHIVE_DICT[self.split]\n download_and_extract_archive(archive_dict['url'], self.root,\n extract_root=self.split_folder,\n md5=archive_dict['md5'])\n\n if self.split == 'train':\n prepare_train_folder(self.split_folder)\n elif self.split == 'val':\n val_wnids = self._load_meta_file()[1]\n prepare_val_folder(self.split_folder, val_wnids)\n else:\n msg = (\"You set download=True, but a folder '{}' already exist in \"\n \"the root directory. 
If you want to re-download or re-extract the \"\n \"archive, delete the folder.\")\n print(msg.format(self.split))\n\n @property\n def meta_file(self):\n return os.path.join(self.root, 'meta.bin')\n\n def _load_meta_file(self):\n if check_integrity(self.meta_file):\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n \"You can use download=True to create it.\")\n\n def _save_meta_file(self, wnid_to_class, val_wnids):\n torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n def _verify_split(self, split):\n if split not in self.valid_splits:\n msg = \"Unknown split {} .\".format(split)\n msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n raise ValueError(msg)\n return split\n\n @property\n def valid_splits(self):\n return 'train', 'val'\n\n @property\n def split_folder(self):\n return os.path.join(self.root, self.split)\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n\ndef parse_devkit(root):\n idx_to_wnid, wnid_to_classes = parse_meta(root)\n val_idcs = parse_val_groundtruth(root)\n val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n import scipy.io as sio\n\n metafile = os.path.join(devkit_root, path, filename)\n meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n nums_children = list(zip(*meta))[4]\n meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n if num_children == 0]\n idcs, wnids, classes = list(zip(*meta))[:3]\n classes = [tuple(clss.split(', ')) for clss in classes]\n idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n filename='ILSVRC2012_validation_ground_truth.txt'):\n with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n val_idcs = txtfh.readlines()\n return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)\n\n\ndef prepare_val_folder(folder, wnids):\n img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n for wnid in set(wnids):\n os.mkdir(os.path.join(folder, wnid))\n\n for wnid, img_file in zip(wnids, img_files):\n shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n exts = []\n ext = '.'\n while ext:\n root, ext = os.path.splitext(root)\n exts.append(ext)\n return root, ''.join(reversed(exts))\n", "path": "torchvision/datasets/imagenet.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport shutil\nimport tempfile\nimport torch\nfrom .folder import ImageFolder\nfrom .utils import check_integrity, download_and_extract_archive, extract_archive\n\nARCHIVE_DICT = {\n 'train': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n 'md5': '1d675b47d978889d74fa0da5fadfb00e',\n },\n 'val': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n 'md5': '29b22e2961454d5413ddabcf34fc5622',\n },\n 'devkit': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n 'md5': 'fa75699e90414af021442c21a62c3abf',\n }\n}\n\n\nclass 
ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n Args:\n root (string): Root directory of the ImageNet Dataset.\n split (string, optional): The dataset split, supports ``train``, or ``val``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class name tuples.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, split='train', download=False, **kwargs):\n root = self.root = os.path.expanduser(root)\n self.split = self._verify_split(split)\n\n if download:\n self.download()\n wnid_to_classes = self._load_meta_file()[0]\n\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n\n self.wnids = self.classes\n self.wnid_to_idx = self.class_to_idx\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n for idx, clss in enumerate(self.classes)\n for cls in clss}\n\n def download(self):\n if not check_integrity(self.meta_file):\n tmp_dir = tempfile.mkdtemp()\n\n archive_dict = ARCHIVE_DICT['devkit']\n download_and_extract_archive(archive_dict['url'], self.root,\n extract_root=tmp_dir,\n md5=archive_dict['md5'])\n devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))\n self._save_meta_file(*meta)\n\n shutil.rmtree(tmp_dir)\n\n if not os.path.isdir(self.split_folder):\n archive_dict = ARCHIVE_DICT[self.split]\n download_and_extract_archive(archive_dict['url'], self.root,\n extract_root=self.split_folder,\n md5=archive_dict['md5'])\n\n if self.split == 'train':\n prepare_train_folder(self.split_folder)\n elif self.split == 'val':\n val_wnids = self._load_meta_file()[1]\n prepare_val_folder(self.split_folder, val_wnids)\n else:\n msg = (\"You set download=True, but a folder '{}' already exist in \"\n \"the root directory. 
If you want to re-download or re-extract the \"\n \"archive, delete the folder.\")\n print(msg.format(self.split))\n\n @property\n def meta_file(self):\n return os.path.join(self.root, 'meta.bin')\n\n def _load_meta_file(self):\n if check_integrity(self.meta_file):\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n \"You can use download=True to create it.\")\n\n def _save_meta_file(self, wnid_to_class, val_wnids):\n torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n def _verify_split(self, split):\n if split not in self.valid_splits:\n msg = \"Unknown split {} .\".format(split)\n msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n raise ValueError(msg)\n return split\n\n @property\n def valid_splits(self):\n return 'train', 'val'\n\n @property\n def split_folder(self):\n return os.path.join(self.root, self.split)\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n\ndef parse_devkit(root):\n idx_to_wnid, wnid_to_classes = parse_meta(root)\n val_idcs = parse_val_groundtruth(root)\n val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n import scipy.io as sio\n\n metafile = os.path.join(devkit_root, path, filename)\n meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n nums_children = list(zip(*meta))[4]\n meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n if num_children == 0]\n idcs, wnids, classes = list(zip(*meta))[:3]\n classes = [tuple(clss.split(', ')) for clss in classes]\n idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n filename='ILSVRC2012_validation_ground_truth.txt'):\n with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n val_idcs = txtfh.readlines()\n return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)\n\n\ndef prepare_val_folder(folder, wnids):\n img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n for wnid in set(wnids):\n os.mkdir(os.path.join(folder, wnid))\n\n for wnid, img_file in zip(wnids, img_files):\n shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n exts = []\n ext = '.'\n while ext:\n root, ext = os.path.splitext(root)\n exts.append(ext)\n return root, ''.join(reversed(exts))\n", "path": "torchvision/datasets/imagenet.py"}]}
2,772
329
gh_patches_debug_7048
rasdani/github-patches
git_diff
airctic__icevision-821
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adding Non Unique Names To ClassMap Must Be Illegal Currently, `add_name` doesn't check if the added `name` is a duplicate or not. This should be illegal, as classes must be unique https://github.com/airctic/icevision/blob/82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4/icevision/core/class_map.py#L47-L51 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `icevision/core/class_map.py` Content: ``` 1 __all__ = ["ClassMap", "BACKGROUND"] 2 3 from icevision.imports import * 4 5 BACKGROUND = "background" 6 7 8 class ClassMap: 9 """Utility class for mapping between class name and id.""" 10 11 def __init__( 12 self, 13 classes: Optional[Sequence[str]] = None, 14 background: Optional[str] = BACKGROUND, 15 ): 16 self._lock = True 17 18 self._id2class = copy(list(classes)) if classes else [] 19 # insert background if required 20 self._background = background 21 if self._background is not None: 22 try: 23 self._id2class.remove(self._background) 24 except ValueError: 25 pass 26 # background is always index zero 27 self._id2class.insert(0, self._background) 28 29 self._class2id = {name: i for i, name in enumerate(self._id2class)} 30 31 @property 32 def num_classes(self): 33 return len(self) 34 35 def get_by_id(self, id: int) -> str: 36 return self._id2class[id] 37 38 def get_by_name(self, name: str) -> int: 39 try: 40 return self._class2id[name] 41 except KeyError as e: 42 if not self._lock: 43 return self.add_name(name) 44 else: 45 raise e 46 47 def add_name(self, name) -> int: 48 self._id2class.append(name) 49 id = len(self._class2id) 50 self._class2id[name] = id 51 return id 52 53 def lock(self): 54 self._lock = True 55 return self 56 57 def unlock(self): 58 self._lock = False 59 return self 60 61 def __eq__(self, other) -> bool: 62 if isinstance(other, ClassMap): 63 return self.__dict__ == other.__dict__ 64 return False 65 66 def __len__(self): 67 return len(self._id2class) 68 69 def __repr__(self): 70 return f"<ClassMap: {self._class2id.__repr__()}>" 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py --- a/icevision/core/class_map.py +++ b/icevision/core/class_map.py @@ -44,7 +44,13 @@ else: raise e - def add_name(self, name) -> int: + def add_name(self, name: str) -> int: + # Raise error if trying to add duplicate value + if name in self._id2class: + raise ValueError( + f"'{name}' already exists in the ClassMap. You can only add new labels that are unique" + ) + self._id2class.append(name) id = len(self._class2id) self._class2id[name] = id
{"golden_diff": "diff --git a/icevision/core/class_map.py b/icevision/core/class_map.py\n--- a/icevision/core/class_map.py\n+++ b/icevision/core/class_map.py\n@@ -44,7 +44,13 @@\n else:\n raise e\n \n- def add_name(self, name) -> int:\n+ def add_name(self, name: str) -> int:\n+ # Raise error if trying to add duplicate value\n+ if name in self._id2class:\n+ raise ValueError(\n+ f\"'{name}' already exists in the ClassMap. You can only add new labels that are unique\"\n+ )\n+\n self._id2class.append(name)\n id = len(self._class2id)\n self._class2id[name] = id\n", "issue": "Adding Non Unique Names To ClassMap Must Be Illegal\nCurrently, `add_name` doesn't check if the added `name` is a duplicate or not. This should be illegal, as classes must be unique\r\n\r\nhttps://github.com/airctic/icevision/blob/82f3c7322d5adbc41d6a1b7b9e4327eb32ad23c4/icevision/core/class_map.py#L47-L51\n", "before_files": [{"content": "__all__ = [\"ClassMap\", \"BACKGROUND\"]\n\nfrom icevision.imports import *\n\nBACKGROUND = \"background\"\n\n\nclass ClassMap:\n \"\"\"Utility class for mapping between class name and id.\"\"\"\n\n def __init__(\n self,\n classes: Optional[Sequence[str]] = None,\n background: Optional[str] = BACKGROUND,\n ):\n self._lock = True\n\n self._id2class = copy(list(classes)) if classes else []\n # insert background if required\n self._background = background\n if self._background is not None:\n try:\n self._id2class.remove(self._background)\n except ValueError:\n pass\n # background is always index zero\n self._id2class.insert(0, self._background)\n\n self._class2id = {name: i for i, name in enumerate(self._id2class)}\n\n @property\n def num_classes(self):\n return len(self)\n\n def get_by_id(self, id: int) -> str:\n return self._id2class[id]\n\n def get_by_name(self, name: str) -> int:\n try:\n return self._class2id[name]\n except KeyError as e:\n if not self._lock:\n return self.add_name(name)\n else:\n raise e\n\n def add_name(self, name) -> int:\n self._id2class.append(name)\n id = len(self._class2id)\n self._class2id[name] = id\n return id\n\n def lock(self):\n self._lock = True\n return self\n\n def unlock(self):\n self._lock = False\n return self\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ClassMap):\n return self.__dict__ == other.__dict__\n return False\n\n def __len__(self):\n return len(self._id2class)\n\n def __repr__(self):\n return f\"<ClassMap: {self._class2id.__repr__()}>\"\n", "path": "icevision/core/class_map.py"}], "after_files": [{"content": "__all__ = [\"ClassMap\", \"BACKGROUND\"]\n\nfrom icevision.imports import *\n\nBACKGROUND = \"background\"\n\n\nclass ClassMap:\n \"\"\"Utility class for mapping between class name and id.\"\"\"\n\n def __init__(\n self,\n classes: Optional[Sequence[str]] = None,\n background: Optional[str] = BACKGROUND,\n ):\n self._lock = True\n\n self._id2class = copy(list(classes)) if classes else []\n # insert background if required\n self._background = background\n if self._background is not None:\n try:\n self._id2class.remove(self._background)\n except ValueError:\n pass\n # background is always index zero\n self._id2class.insert(0, self._background)\n\n self._class2id = {name: i for i, name in enumerate(self._id2class)}\n\n @property\n def num_classes(self):\n return len(self)\n\n def get_by_id(self, id: int) -> str:\n return self._id2class[id]\n\n def get_by_name(self, name: str) -> int:\n try:\n return self._class2id[name]\n except KeyError as e:\n if not self._lock:\n return self.add_name(name)\n else:\n raise e\n\n def 
add_name(self, name: str) -> int:\n # Raise error if trying to add duplicate value\n if name in self._id2class:\n raise ValueError(\n f\"'{name}' already exists in the ClassMap. You can only add new labels that are unique\"\n )\n\n self._id2class.append(name)\n id = len(self._class2id)\n self._class2id[name] = id\n return id\n\n def lock(self):\n self._lock = True\n return self\n\n def unlock(self):\n self._lock = False\n return self\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ClassMap):\n return self.__dict__ == other.__dict__\n return False\n\n def __len__(self):\n return len(self._id2class)\n\n def __repr__(self):\n return f\"<ClassMap: {self._class2id.__repr__()}>\"\n", "path": "icevision/core/class_map.py"}]}
939
172
gh_patches_debug_9985
rasdani/github-patches
git_diff
cal-itp__benefits-1030
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: Error page bug - Log out button too low A clear and concise description of what the bug is. ## To Reproduce Steps to reproduce the behavior: 1. Go through the Login.gov IAL2 flow with a user who is under age 65 2. See error ## Expected behavior A clear and concise description of what you expected to happen. ## Screenshots Tablet/Mobile widths are off: <img width="485" alt="image" src="https://user-images.githubusercontent.com/3673236/190286767-dc5e2f26-3a48-44e7-a480-9751726938ea.png"> <img width="457" alt="image" src="https://user-images.githubusercontent.com/3673236/190287117-4c1072cb-271d-4b1a-b0c8-00f7dd958302.png"> Desktop is good: <img width="1440" alt="image" src="https://user-images.githubusercontent.com/3673236/190287052-4c8a778a-7ccf-42dd-8042-c59a0b350308.png"> ## Desktop (please complete the following information) - OS: [e.g. iOS] ## Smartphone (please complete the following information) - Device: [e.g. iPhone6] ## Additional context Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `benefits/enrollment/views.py` Content: ``` 1 """ 2 The enrollment application: view definitions for the benefits enrollment flow. 3 """ 4 import logging 5 6 from django.http import JsonResponse 7 from django.template.response import TemplateResponse 8 from django.urls import reverse 9 from django.utils.html import format_html 10 from django.utils.decorators import decorator_from_middleware 11 from django.utils.translation import pgettext, gettext as _ 12 13 from benefits.core import models, session, viewmodels 14 from benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator 15 from benefits.core.views import ROUTE_HELP, ROUTE_LOGGED_OUT 16 from . 
import analytics, api, forms 17 18 19 ROUTE_INDEX = "enrollment:index" 20 ROUTE_RETRY = "enrollment:retry" 21 ROUTE_SUCCESS = "enrollment:success" 22 ROUTE_TOKEN = "enrollment:token" 23 24 TEMPLATE_INDEX = "enrollment/index.html" 25 TEMPLATE_RETRY = "enrollment/retry.html" 26 TEMPLATE_SUCCESS = "enrollment/success.html" 27 28 29 logger = logging.getLogger(__name__) 30 31 32 @decorator_from_middleware(EligibleSessionRequired) 33 def token(request): 34 """View handler for the enrollment auth token.""" 35 if not session.enrollment_token_valid(request): 36 agency = session.agency(request) 37 response = api.Client(agency).access_token() 38 session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry) 39 40 data = {"token": session.enrollment_token(request)} 41 42 return JsonResponse(data) 43 44 45 @decorator_from_middleware(EligibleSessionRequired) 46 def index(request): 47 """View handler for the enrollment landing page.""" 48 session.update(request, origin=reverse(ROUTE_INDEX)) 49 50 agency = session.agency(request) 51 verifier = session.verifier(request) 52 53 # POST back after payment processor form, process card token 54 if request.method == "POST": 55 form = forms.CardTokenizeSuccessForm(request.POST) 56 if not form.is_valid(): 57 raise Exception("Invalid card token form") 58 59 eligibility = session.eligibility(request) 60 logger.debug(f"Session contains an {models.EligibilityType.__name__}") 61 62 logger.debug("Read tokenized card") 63 card_token = form.cleaned_data.get("card_token") 64 65 response = api.Client(agency).enroll(card_token, eligibility.group_id) 66 if response.success: 67 analytics.returned_success(request, eligibility.group_id) 68 return success(request) 69 else: 70 analytics.returned_error(request, response.message) 71 raise Exception(response.message) 72 73 # GET enrollment index, with button to initiate payment processor connection 74 else: 75 tokenize_button = "tokenize_card" 76 tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY) 77 tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix="") 78 79 confirmed_eligibility_item = viewmodels.MediaItem( 80 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")), 81 heading=_(verifier.eligibility_confirmed_item_heading), 82 details=_(verifier.eligibility_confirmed_item_details), 83 ) 84 85 help_link = reverse(ROUTE_HELP) 86 link_card_item = viewmodels.MediaItem( 87 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")), 88 heading=_("enrollment.pages.index.link_card_item.heading"), 89 details=[ 90 format_html(_("enrollment.pages.index.link_card_item.p[0]%(link)s") % {"link": f"{help_link}#littlepay"}), 91 _("enrollment.pages.index.link_card_item.p[1]"), 92 ], 93 ) 94 95 media = [confirmed_eligibility_item, link_card_item] 96 97 page = viewmodels.Page( 98 title=_("enrollment.pages.index.title"), 99 headline=_("enrollment.pages.index.headline"), 100 forms=[tokenize_retry_form, tokenize_success_form], 101 buttons=[ 102 viewmodels.Button.primary( 103 text=_("enrollment.buttons.payment_partner"), id=tokenize_button, url=f"#{tokenize_button}" 104 ), 105 ], 106 ) 107 context = {"media": media} 108 context.update(page.context_dict()) 109 110 # add agency details 111 agency_vm = viewmodels.TransitAgency(agency) 112 context.update(agency_vm.context_dict()) 113 114 # and payment processor details 115 processor_vm = viewmodels.PaymentProcessor( 116 model=agency.payment_processor, 117 
access_token_url=reverse(ROUTE_TOKEN), 118 element_id=f"#{tokenize_button}", 119 color="#046b99", 120 name=f"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}", 121 ) 122 context.update(processor_vm.context_dict()) 123 logger.warning(f"card_tokenize_url: {context['payment_processor'].card_tokenize_url}") 124 125 # the tokenize form URLs are injected to page-generated Javascript 126 context["forms"] = { 127 "tokenize_retry": reverse(tokenize_retry_form.action_url), 128 "tokenize_success": reverse(tokenize_success_form.action_url), 129 } 130 131 return TemplateResponse(request, TEMPLATE_INDEX, context) 132 133 134 @decorator_from_middleware(EligibleSessionRequired) 135 def retry(request): 136 """View handler for a recoverable failure condition.""" 137 if request.method == "POST": 138 analytics.returned_retry(request) 139 form = forms.CardTokenizeFailForm(request.POST) 140 if form.is_valid(): 141 agency = session.agency(request) 142 page = viewmodels.Page( 143 classes="no-image-mobile", 144 title=_("enrollment.pages.retry.title"), 145 icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")), 146 headline=_("enrollment.pages.retry.title"), 147 paragraphs=[_("enrollment.pages.retry.p[0]")], 148 buttons=viewmodels.Button.agency_contact_links(agency), 149 ) 150 page.buttons.append(viewmodels.Button.primary(text=_("core.buttons.retry"), url=session.origin(request))) 151 return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict()) 152 else: 153 analytics.returned_error(request, "Invalid retry submission.") 154 raise Exception("Invalid retry submission.") 155 else: 156 analytics.returned_error(request, "This view method only supports POST.") 157 raise Exception("This view method only supports POST.") 158 159 160 @pageview_decorator 161 @decorator_from_middleware(VerifierSessionRequired) 162 def success(request): 163 """View handler for the final success page.""" 164 request.path = "/enrollment/success" 165 session.update(request, origin=reverse(ROUTE_SUCCESS)) 166 167 verifier = session.verifier(request) 168 169 page = viewmodels.Page(title=_("enrollment.pages.success.title"), headline=_("enrollment.pages.success.headline")) 170 171 if verifier.is_auth_required and session.logged_in(request): 172 # overwrite origin for a logged in user 173 # if they click the logout button, they are taken to the new route 174 session.update(request, origin=reverse(ROUTE_LOGGED_OUT)) 175 page.buttons = [viewmodels.Button.logout()] 176 177 success_item = viewmodels.MediaItem( 178 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")), 179 details=[ 180 _(verifier.enrollment_success_confirm_item_details), 181 format_html(_("enrollment.pages.success.helplink%(link)s") % {"link": f"{reverse(ROUTE_HELP)}"}), 182 ], 183 ) 184 media = [success_item] 185 186 if verifier.enrollment_success_expiry_item_heading or verifier.enrollment_success_expiry_item_details: 187 heading = ( 188 _(verifier.enrollment_success_expiry_item_heading) if verifier.enrollment_success_expiry_item_heading else None 189 ) 190 details = ( 191 _(verifier.enrollment_success_expiry_item_details) if verifier.enrollment_success_expiry_item_details else None 192 ) 193 expiry_item = viewmodels.MediaItem( 194 icon=viewmodels.Icon("calendarcheck", pgettext("image alt text", "core.icons.calendarcheck")), 195 heading=heading, 196 details=details, 197 ) 198 media.insert(0, expiry_item) 199 200 context = {"media": media} 201 context.update(page.context_dict()) 202 
203 return TemplateResponse(request, TEMPLATE_SUCCESS, context) 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py --- a/benefits/enrollment/views.py +++ b/benefits/enrollment/views.py @@ -140,7 +140,6 @@ if form.is_valid(): agency = session.agency(request) page = viewmodels.Page( - classes="no-image-mobile", title=_("enrollment.pages.retry.title"), icon=viewmodels.Icon("bankcardquestion", pgettext("image alt text", "core.icons.bankcardquestion")), headline=_("enrollment.pages.retry.title"),
{"golden_diff": "diff --git a/benefits/enrollment/views.py b/benefits/enrollment/views.py\n--- a/benefits/enrollment/views.py\n+++ b/benefits/enrollment/views.py\n@@ -140,7 +140,6 @@\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n- classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n headline=_(\"enrollment.pages.retry.title\"),\n", "issue": "Bug: Error page bug - Log out button too low\nA clear and concise description of what the bug is.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Go through the Login.gov IAL2 flow with a user who is under age 65\r\n2. See error\r\n\r\n## Expected behavior\r\n\r\nA clear and concise description of what you expected to happen.\r\n\r\n## Screenshots\r\n\r\nTablet/Mobile widths are off:\r\n<img width=\"485\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/190286767-dc5e2f26-3a48-44e7-a480-9751726938ea.png\">\r\n<img width=\"457\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/190287117-4c1072cb-271d-4b1a-b0c8-00f7dd958302.png\">\r\n\r\nDesktop is good:\r\n<img width=\"1440\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/190287052-4c8a778a-7ccf-42dd-8042-c59a0b350308.png\">\r\n\r\n\r\n## Desktop (please complete the following information)\r\n\r\n- OS: [e.g. iOS]\r\n\r\n## Smartphone (please complete the following information)\r\n\r\n- Device: [e.g. iPhone6]\r\n\r\n## Additional context\r\n\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import models, session, viewmodels\nfrom benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator\nfrom benefits.core.views import ROUTE_HELP, ROUTE_LOGGED_OUT\nfrom . 
import analytics, api, forms\n\n\nROUTE_INDEX = \"enrollment:index\"\nROUTE_RETRY = \"enrollment:retry\"\nROUTE_SUCCESS = \"enrollment:success\"\nROUTE_TOKEN = \"enrollment:token\"\n\nTEMPLATE_INDEX = \"enrollment/index.html\"\nTEMPLATE_RETRY = \"enrollment/retry.html\"\nTEMPLATE_SUCCESS = \"enrollment/success.html\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef token(request):\n \"\"\"View handler for the enrollment auth token.\"\"\"\n if not session.enrollment_token_valid(request):\n agency = session.agency(request)\n response = api.Client(agency).access_token()\n session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry)\n\n data = {\"token\": session.enrollment_token(request)}\n\n return JsonResponse(data)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n\n # POST back after payment processor form, process card token\n if request.method == \"POST\":\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n\n eligibility = session.eligibility(request)\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n\n logger.debug(\"Read tokenized card\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n if response.success:\n analytics.returned_success(request, eligibility.group_id)\n return success(request)\n else:\n analytics.returned_error(request, response.message)\n raise Exception(response.message)\n\n # GET enrollment index, with button to initiate payment processor connection\n else:\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY)\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n confirmed_eligibility_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n heading=_(verifier.eligibility_confirmed_item_heading),\n details=_(verifier.eligibility_confirmed_item_details),\n )\n\n help_link = reverse(ROUTE_HELP)\n link_card_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"enrollment.pages.index.link_card_item.heading\"),\n details=[\n format_html(_(\"enrollment.pages.index.link_card_item.p[0]%(link)s\") % {\"link\": f\"{help_link}#littlepay\"}),\n _(\"enrollment.pages.index.link_card_item.p[1]\"),\n ],\n )\n\n media = [confirmed_eligibility_item, link_card_item]\n\n page = viewmodels.Page(\n title=_(\"enrollment.pages.index.title\"),\n headline=_(\"enrollment.pages.index.headline\"),\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.payment_partner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n ],\n )\n context = {\"media\": media}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token_url=reverse(ROUTE_TOKEN),\n 
element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warning(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, TEMPLATE_INDEX, context)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n analytics.returned_retry(request)\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n classes=\"no-image-mobile\",\n title=_(\"enrollment.pages.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n headline=_(\"enrollment.pages.retry.title\"),\n paragraphs=[_(\"enrollment.pages.retry.p[0]\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"core.buttons.retry\"), url=session.origin(request)))\n return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict())\n else:\n analytics.returned_error(request, \"Invalid retry submission.\")\n raise Exception(\"Invalid retry submission.\")\n else:\n analytics.returned_error(request, \"This view method only supports POST.\")\n raise Exception(\"This view method only supports POST.\")\n\n\n@pageview_decorator\n@decorator_from_middleware(VerifierSessionRequired)\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n session.update(request, origin=reverse(ROUTE_SUCCESS))\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(title=_(\"enrollment.pages.success.title\"), headline=_(\"enrollment.pages.success.headline\"))\n\n if verifier.is_auth_required and session.logged_in(request):\n # overwrite origin for a logged in user\n # if they click the logout button, they are taken to the new route\n session.update(request, origin=reverse(ROUTE_LOGGED_OUT))\n page.buttons = [viewmodels.Button.logout()]\n\n success_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n details=[\n _(verifier.enrollment_success_confirm_item_details),\n format_html(_(\"enrollment.pages.success.helplink%(link)s\") % {\"link\": f\"{reverse(ROUTE_HELP)}\"}),\n ],\n )\n media = [success_item]\n\n if verifier.enrollment_success_expiry_item_heading or verifier.enrollment_success_expiry_item_details:\n heading = (\n _(verifier.enrollment_success_expiry_item_heading) if verifier.enrollment_success_expiry_item_heading else None\n )\n details = (\n _(verifier.enrollment_success_expiry_item_details) if verifier.enrollment_success_expiry_item_details else None\n )\n expiry_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"calendarcheck\", pgettext(\"image alt text\", \"core.icons.calendarcheck\")),\n heading=heading,\n details=details,\n )\n media.insert(0, expiry_item)\n\n context = {\"media\": media}\n context.update(page.context_dict())\n\n return TemplateResponse(request, TEMPLATE_SUCCESS, context)\n", "path": "benefits/enrollment/views.py"}], "after_files": [{"content": 
"\"\"\"\nThe enrollment application: view definitions for the benefits enrollment flow.\n\"\"\"\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import models, session, viewmodels\nfrom benefits.core.middleware import EligibleSessionRequired, VerifierSessionRequired, pageview_decorator\nfrom benefits.core.views import ROUTE_HELP, ROUTE_LOGGED_OUT\nfrom . import analytics, api, forms\n\n\nROUTE_INDEX = \"enrollment:index\"\nROUTE_RETRY = \"enrollment:retry\"\nROUTE_SUCCESS = \"enrollment:success\"\nROUTE_TOKEN = \"enrollment:token\"\n\nTEMPLATE_INDEX = \"enrollment/index.html\"\nTEMPLATE_RETRY = \"enrollment/retry.html\"\nTEMPLATE_SUCCESS = \"enrollment/success.html\"\n\n\nlogger = logging.getLogger(__name__)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef token(request):\n \"\"\"View handler for the enrollment auth token.\"\"\"\n if not session.enrollment_token_valid(request):\n agency = session.agency(request)\n response = api.Client(agency).access_token()\n session.update(request, enrollment_token=response.access_token, enrollment_token_exp=response.expiry)\n\n data = {\"token\": session.enrollment_token(request)}\n\n return JsonResponse(data)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef index(request):\n \"\"\"View handler for the enrollment landing page.\"\"\"\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n\n # POST back after payment processor form, process card token\n if request.method == \"POST\":\n form = forms.CardTokenizeSuccessForm(request.POST)\n if not form.is_valid():\n raise Exception(\"Invalid card token form\")\n\n eligibility = session.eligibility(request)\n logger.debug(f\"Session contains an {models.EligibilityType.__name__}\")\n\n logger.debug(\"Read tokenized card\")\n card_token = form.cleaned_data.get(\"card_token\")\n\n response = api.Client(agency).enroll(card_token, eligibility.group_id)\n if response.success:\n analytics.returned_success(request, eligibility.group_id)\n return success(request)\n else:\n analytics.returned_error(request, response.message)\n raise Exception(response.message)\n\n # GET enrollment index, with button to initiate payment processor connection\n else:\n tokenize_button = \"tokenize_card\"\n tokenize_retry_form = forms.CardTokenizeFailForm(ROUTE_RETRY)\n tokenize_success_form = forms.CardTokenizeSuccessForm(auto_id=True, label_suffix=\"\")\n\n confirmed_eligibility_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n heading=_(verifier.eligibility_confirmed_item_heading),\n details=_(verifier.eligibility_confirmed_item_details),\n )\n\n help_link = reverse(ROUTE_HELP)\n link_card_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"enrollment.pages.index.link_card_item.heading\"),\n details=[\n format_html(_(\"enrollment.pages.index.link_card_item.p[0]%(link)s\") % {\"link\": f\"{help_link}#littlepay\"}),\n _(\"enrollment.pages.index.link_card_item.p[1]\"),\n ],\n )\n\n media = [confirmed_eligibility_item, link_card_item]\n\n page = viewmodels.Page(\n 
title=_(\"enrollment.pages.index.title\"),\n headline=_(\"enrollment.pages.index.headline\"),\n forms=[tokenize_retry_form, tokenize_success_form],\n buttons=[\n viewmodels.Button.primary(\n text=_(\"enrollment.buttons.payment_partner\"), id=tokenize_button, url=f\"#{tokenize_button}\"\n ),\n ],\n )\n context = {\"media\": media}\n context.update(page.context_dict())\n\n # add agency details\n agency_vm = viewmodels.TransitAgency(agency)\n context.update(agency_vm.context_dict())\n\n # and payment processor details\n processor_vm = viewmodels.PaymentProcessor(\n model=agency.payment_processor,\n access_token_url=reverse(ROUTE_TOKEN),\n element_id=f\"#{tokenize_button}\",\n color=\"#046b99\",\n name=f\"{agency.long_name} {_('partnered with')} {agency.payment_processor.name}\",\n )\n context.update(processor_vm.context_dict())\n logger.warning(f\"card_tokenize_url: {context['payment_processor'].card_tokenize_url}\")\n\n # the tokenize form URLs are injected to page-generated Javascript\n context[\"forms\"] = {\n \"tokenize_retry\": reverse(tokenize_retry_form.action_url),\n \"tokenize_success\": reverse(tokenize_success_form.action_url),\n }\n\n return TemplateResponse(request, TEMPLATE_INDEX, context)\n\n\n@decorator_from_middleware(EligibleSessionRequired)\ndef retry(request):\n \"\"\"View handler for a recoverable failure condition.\"\"\"\n if request.method == \"POST\":\n analytics.returned_retry(request)\n form = forms.CardTokenizeFailForm(request.POST)\n if form.is_valid():\n agency = session.agency(request)\n page = viewmodels.Page(\n title=_(\"enrollment.pages.retry.title\"),\n icon=viewmodels.Icon(\"bankcardquestion\", pgettext(\"image alt text\", \"core.icons.bankcardquestion\")),\n headline=_(\"enrollment.pages.retry.title\"),\n paragraphs=[_(\"enrollment.pages.retry.p[0]\")],\n buttons=viewmodels.Button.agency_contact_links(agency),\n )\n page.buttons.append(viewmodels.Button.primary(text=_(\"core.buttons.retry\"), url=session.origin(request)))\n return TemplateResponse(request, TEMPLATE_RETRY, page.context_dict())\n else:\n analytics.returned_error(request, \"Invalid retry submission.\")\n raise Exception(\"Invalid retry submission.\")\n else:\n analytics.returned_error(request, \"This view method only supports POST.\")\n raise Exception(\"This view method only supports POST.\")\n\n\n@pageview_decorator\n@decorator_from_middleware(VerifierSessionRequired)\ndef success(request):\n \"\"\"View handler for the final success page.\"\"\"\n request.path = \"/enrollment/success\"\n session.update(request, origin=reverse(ROUTE_SUCCESS))\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(title=_(\"enrollment.pages.success.title\"), headline=_(\"enrollment.pages.success.headline\"))\n\n if verifier.is_auth_required and session.logged_in(request):\n # overwrite origin for a logged in user\n # if they click the logout button, they are taken to the new route\n session.update(request, origin=reverse(ROUTE_LOGGED_OUT))\n page.buttons = [viewmodels.Button.logout()]\n\n success_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n details=[\n _(verifier.enrollment_success_confirm_item_details),\n format_html(_(\"enrollment.pages.success.helplink%(link)s\") % {\"link\": f\"{reverse(ROUTE_HELP)}\"}),\n ],\n )\n media = [success_item]\n\n if verifier.enrollment_success_expiry_item_heading or verifier.enrollment_success_expiry_item_details:\n heading = (\n _(verifier.enrollment_success_expiry_item_heading) if 
verifier.enrollment_success_expiry_item_heading else None\n )\n details = (\n _(verifier.enrollment_success_expiry_item_details) if verifier.enrollment_success_expiry_item_details else None\n )\n expiry_item = viewmodels.MediaItem(\n icon=viewmodels.Icon(\"calendarcheck\", pgettext(\"image alt text\", \"core.icons.calendarcheck\")),\n heading=heading,\n details=details,\n )\n media.insert(0, expiry_item)\n\n context = {\"media\": media}\n context.update(page.context_dict())\n\n return TemplateResponse(request, TEMPLATE_SUCCESS, context)\n", "path": "benefits/enrollment/views.py"}]}
2,816
125
gh_patches_debug_54726
rasdani/github-patches
git_diff
DDMAL__CantusDB-845
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "my sources"" page side panel http://206.12.88.113/my-sources/ has a "created by" side panel. This is 1. not paginated 3. includes all the sources and not just the created ones (so it's both extra- long and also redundant). Staging: <img width="1106" alt="image" src="https://github.com/DDMAL/CantusDB/assets/67451875/3d11789e-6027-4358-8595-328e95e89d7b"> on production this only has the sources created on production, so it only has one source (I'm assuming the others will show up once we sort out the "Created by" info from OldCantus? <img width="1160" alt="image" src="https://github.com/DDMAL/CantusDB/assets/67451875/f6e98d78-0f66-421c-aad9-2ede47400d88"> On OldCantus it looks like this: <img width="981" alt="image" src="https://github.com/DDMAL/CantusDB/assets/67451875/15f4b995-d930-4645-9ca4-3befce6a868d"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/views/user.py` Content: ``` 1 from django.urls import reverse 2 from django.db.models.aggregates import Count 3 from django.views.generic import DetailView 4 from django.contrib.auth import get_user_model, login as auth_login 5 from main_app.models import Source 6 from django.views.generic import ListView 7 from django.contrib.auth.mixins import LoginRequiredMixin 8 from django.db.models import Q 9 from django.core.paginator import Paginator 10 from django.contrib.auth.views import LogoutView, LoginView 11 from django.contrib import messages 12 from extra_views import SearchableListMixin 13 from django.http import HttpResponseRedirect 14 from django.core.exceptions import PermissionDenied 15 16 17 class UserDetailView(DetailView): 18 """Detail view for User model 19 20 Accessed by /users/<pk> 21 """ 22 23 model = get_user_model() 24 context_object_name = "user" 25 template_name = "user_detail.html" 26 27 def get_context_data(self, **kwargs): 28 user = self.get_object() 29 # to begin, if the person viewing the site is not logged in, 30 # they should only be able to view the detail pages of indexers, 31 # and not the detail pages of run-of-the-mill users 32 viewing_user = self.request.user 33 if not (viewing_user.is_authenticated or user.is_indexer): 34 raise PermissionDenied() 35 36 context = super().get_context_data(**kwargs) 37 display_unpublished = viewing_user.is_authenticated 38 sort_by_siglum = lambda source: source.siglum 39 if display_unpublished: 40 context["inventoried_sources"] = sorted( 41 user.inventoried_sources.all(), key=sort_by_siglum 42 ) 43 context["full_text_sources"] = sorted( 44 user.entered_full_text_for_sources.all(), key=sort_by_siglum 45 ) 46 context["melody_sources"] = sorted( 47 user.entered_melody_for_sources.all(), key=sort_by_siglum 48 ) 49 context["proofread_sources"] = sorted( 50 user.proofread_sources.all(), key=sort_by_siglum 51 ) 52 context["edited_sources"] = sorted( 53 user.edited_sources.all(), key=sort_by_siglum 54 ) 55 else: 56 context["inventoried_sources"] = sorted( 57 user.inventoried_sources.all().filter(published=True), 58 key=sort_by_siglum, 59 ) 60 context["full_text_sources"] = sorted( 61 user.entered_full_text_for_sources.all().filter(published=True), 62 key=sort_by_siglum, 63 ) 64 context["melody_sources"] = sorted( 65 user.entered_melody_for_sources.all().filter(published=True), 66 key=sort_by_siglum, 67 ) 68 context["proofread_sources"] = sorted( 69 
user.proofread_sources.all().filter(published=True), key=sort_by_siglum 70 ) 71 context["edited_sources"] = sorted( 72 user.edited_sources.all().filter(published=True), key=sort_by_siglum 73 ) 74 75 return context 76 77 78 class UserSourceListView(LoginRequiredMixin, ListView): 79 model = Source 80 context_object_name = "sources" 81 template_name = "user_source_list.html" 82 paginate_by = 100 83 84 def get_queryset(self): 85 return ( 86 Source.objects.filter( 87 Q(current_editors=self.request.user) 88 | Q(created_by=self.request.user) 89 # | Q(inventoried_by=self.request.user) 90 # | Q(full_text_entered_by=self.request.user) 91 # | Q(melodies_entered_by=self.request.user) 92 # | Q(proofreaders=self.request.user) 93 # | Q(other_editors=self.request.user) 94 ) 95 .order_by("-date_created") 96 .distinct() 97 ) 98 99 def get_context_data(self, **kwargs): 100 context = super().get_context_data(**kwargs) 101 102 user_created_sources = ( 103 Source.objects.filter(created_by=self.request.user) 104 .order_by("-date_created") 105 .distinct() 106 ) 107 paginator = Paginator(user_created_sources, 10) 108 page_number = self.request.GET.get("page2") 109 page_obj = paginator.get_page(page_number) 110 111 context["user_created_sources_page_obj"] = page_obj 112 return context 113 114 115 class CustomLogoutView(LogoutView): 116 def get_next_page(self): 117 next_page = super().get_next_page() 118 messages.success(self.request, "You have successfully logged out!") 119 return next_page 120 121 122 class UserListView(LoginRequiredMixin, SearchableListMixin, ListView): 123 """A list of all User objects 124 125 This view is equivalent to the user list view on the old Cantus. 126 This includes all User objects on the old Cantus. 127 When passed a `?q=<query>` argument in the GET request, it will filter users 128 based on the fields defined in `search_fields` with the `icontains` lookup. 129 130 Accessed by /users/ 131 """ 132 133 model = get_user_model() 134 ordering = "full_name" 135 search_fields = ["full_name", "institution", "city", "country"] 136 paginate_by = 100 137 template_name = "user_list.html" 138 context_object_name = "users" 139 140 141 class IndexerListView(SearchableListMixin, ListView): 142 """A list of User objects shown to the public 143 144 This view replaces the indexer list view on the old Cantus. 145 The indexers are considered a subset of all User objects, the subset shown to the public. 146 This includes the User objects corresponding to Indexer objects on the old Cantus. 147 When passed a `?q=<query>` argument in the GET request, it will filter users 148 based on the fields defined in `search_fields` with the `icontains` lookup. 
149 150 Accessed by /indexers/ 151 """ 152 153 model = get_user_model() 154 ordering = "full_name" 155 search_fields = ["full_name", "institution", "city", "country"] 156 paginate_by = 100 157 template_name = "indexer_list.html" 158 context_object_name = "indexers" 159 160 def get_queryset(self): 161 all_users = super().get_queryset() 162 indexers = all_users.filter(is_indexer=True) 163 display_unpublished = self.request.user.is_authenticated 164 if display_unpublished: 165 indexers = indexers.annotate(source_count=Count("inventoried_sources")) 166 # display those who have at least one source 167 return indexers.filter(source_count__gte=1) 168 else: 169 indexers = indexers.annotate( 170 source_count=Count( 171 "inventoried_sources", filter=Q(inventoried_sources__published=True) 172 ) 173 ) 174 # display those who have at least one published source 175 return indexers.filter(source_count__gte=1) 176 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py --- a/django/cantusdb_project/main_app/views/user.py +++ b/django/cantusdb_project/main_app/views/user.py @@ -104,7 +104,7 @@ .order_by("-date_created") .distinct() ) - paginator = Paginator(user_created_sources, 10) + paginator = Paginator(user_created_sources, 6) page_number = self.request.GET.get("page2") page_obj = paginator.get_page(page_number)
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py\n--- a/django/cantusdb_project/main_app/views/user.py\n+++ b/django/cantusdb_project/main_app/views/user.py\n@@ -104,7 +104,7 @@\n .order_by(\"-date_created\")\n .distinct()\n )\n- paginator = Paginator(user_created_sources, 10)\n+ paginator = Paginator(user_created_sources, 6)\n page_number = self.request.GET.get(\"page2\")\n page_obj = paginator.get_page(page_number)\n", "issue": "\"my sources\"\" page side panel\nhttp://206.12.88.113/my-sources/ has a \"created by\" side panel. This is\r\n1. not paginated\r\n3. includes all the sources and not just the created ones (so it's both extra- long and also redundant).\r\nStaging:\r\n<img width=\"1106\" alt=\"image\" src=\"https://github.com/DDMAL/CantusDB/assets/67451875/3d11789e-6027-4358-8595-328e95e89d7b\">\r\non production this only has the sources created on production, so it only has one source (I'm assuming the others will show up once we sort out the \"Created by\" info from OldCantus?\r\n<img width=\"1160\" alt=\"image\" src=\"https://github.com/DDMAL/CantusDB/assets/67451875/f6e98d78-0f66-421c-aad9-2ede47400d88\">\r\nOn OldCantus it looks like this:\r\n\r\n<img width=\"981\" alt=\"image\" src=\"https://github.com/DDMAL/CantusDB/assets/67451875/15f4b995-d930-4645-9ca4-3befce6a868d\">\r\n\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model, login as auth_login\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom django.contrib import messages\nfrom extra_views import SearchableListMixin\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import PermissionDenied\n\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<pk>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n\n def get_context_data(self, **kwargs):\n user = self.get_object()\n # to begin, if the person viewing the site is not logged in,\n # they should only be able to view the detail pages of indexers,\n # and not the detail pages of run-of-the-mill users\n viewing_user = self.request.user\n if not (viewing_user.is_authenticated or user.is_indexer):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n display_unpublished = viewing_user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all(), key=sort_by_siglum\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all(), key=sort_by_siglum\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all(), key=sort_by_siglum\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all(), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all(), key=sort_by_siglum\n )\n else:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"full_text_sources\"] = sorted(\n 
user.entered_full_text_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all().filter(published=True), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all().filter(published=True), key=sort_by_siglum\n )\n\n return context\n\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user)\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_created_sources = (\n Source.objects.filter(created_by=self.request.user)\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = self.request.GET.get(\"page2\")\n page_obj = paginator.get_page(page_number)\n\n context[\"user_created_sources_page_obj\"] = page_obj\n return context\n\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(self.request, \"You have successfully logged out!\")\n return next_page\n\n\nclass UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n \"\"\"A list of all User objects\n\n This view is equivalent to the user list view on the old Cantus.\n This includes all User objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /users/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"user_list.html\"\n context_object_name = \"users\"\n\n\nclass IndexerListView(SearchableListMixin, ListView):\n \"\"\"A list of User objects shown to the public\n\n This view replaces the indexer list view on the old Cantus.\n The indexers are considered a subset of all User objects, the subset shown to the public.\n This includes the User objects corresponding to Indexer objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /indexers/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"indexer_list.html\"\n context_object_name = \"indexers\"\n\n def get_queryset(self):\n all_users = super().get_queryset()\n indexers = all_users.filter(is_indexer=True)\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n indexers = indexers.annotate(source_count=Count(\"inventoried_sources\"))\n # display those who have at least one source\n return indexers.filter(source_count__gte=1)\n else:\n indexers = indexers.annotate(\n source_count=Count(\n 
\"inventoried_sources\", filter=Q(inventoried_sources__published=True)\n )\n )\n # display those who have at least one published source\n return indexers.filter(source_count__gte=1)\n", "path": "django/cantusdb_project/main_app/views/user.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model, login as auth_login\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom django.contrib import messages\nfrom extra_views import SearchableListMixin\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import PermissionDenied\n\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<pk>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n\n def get_context_data(self, **kwargs):\n user = self.get_object()\n # to begin, if the person viewing the site is not logged in,\n # they should only be able to view the detail pages of indexers,\n # and not the detail pages of run-of-the-mill users\n viewing_user = self.request.user\n if not (viewing_user.is_authenticated or user.is_indexer):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n display_unpublished = viewing_user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all(), key=sort_by_siglum\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all(), key=sort_by_siglum\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all(), key=sort_by_siglum\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all(), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all(), key=sort_by_siglum\n )\n else:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all().filter(published=True), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all().filter(published=True), key=sort_by_siglum\n )\n\n return context\n\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user)\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_created_sources 
= (\n Source.objects.filter(created_by=self.request.user)\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 6)\n page_number = self.request.GET.get(\"page2\")\n page_obj = paginator.get_page(page_number)\n\n context[\"user_created_sources_page_obj\"] = page_obj\n return context\n\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(self.request, \"You have successfully logged out!\")\n return next_page\n\n\nclass UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n \"\"\"A list of all User objects\n\n This view is equivalent to the user list view on the old Cantus.\n This includes all User objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /users/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"user_list.html\"\n context_object_name = \"users\"\n\n\nclass IndexerListView(SearchableListMixin, ListView):\n \"\"\"A list of User objects shown to the public\n\n This view replaces the indexer list view on the old Cantus.\n The indexers are considered a subset of all User objects, the subset shown to the public.\n This includes the User objects corresponding to Indexer objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /indexers/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"indexer_list.html\"\n context_object_name = \"indexers\"\n\n def get_queryset(self):\n all_users = super().get_queryset()\n indexers = all_users.filter(is_indexer=True)\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n indexers = indexers.annotate(source_count=Count(\"inventoried_sources\"))\n # display those who have at least one source\n return indexers.filter(source_count__gte=1)\n else:\n indexers = indexers.annotate(\n source_count=Count(\n \"inventoried_sources\", filter=Q(inventoried_sources__published=True)\n )\n )\n # display those who have at least one published source\n return indexers.filter(source_count__gte=1)\n", "path": "django/cantusdb_project/main_app/views/user.py"}]}
num_tokens: 2,451
num_tokens_diff: 135

problem_id: gh_patches_debug_29810
source: rasdani/github-patches
task_type: git_diff
in_source_id: acl-org__acl-anthology-285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing author data on early CL journals Authors are part of the titles, rather than having author fields. See for example: https://aclweb.org/anthology/events/cl-1979/ J79-J83 should be "American Journal of Computational Linguistics" and changed its name only in 1984. The name "Computational Linguistics" is hard-coded, so I'm not sure if this is an easy change. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bin/anthology/papers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2019 Marcel Bollmann <[email protected]> 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 import logging as log 18 from .people import PersonName 19 from .utils import ( 20 infer_attachment_url, 21 remove_extra_whitespace, 22 is_journal, 23 is_volume_id, 24 to_volume_id, 25 ) 26 from . import data 27 28 # For BibTeX export 29 from .formatter import bibtex_encode, bibtex_make_entry 30 31 # Names of XML elements that may appear multiple times 32 _LIST_ELEMENTS = ("attachment", "author", "editor", "video", "revision", "erratum") 33 34 35 class Paper: 36 def __init__(self, paper_id, top_level_id, formatter): 37 self.formatter = formatter 38 self.parent_volume = None 39 self.paper_id = paper_id 40 self.top_level_id = top_level_id 41 self.attrib = {} 42 self._bibkey = False 43 44 def from_xml(xml_element, *args): 45 paper = Paper(xml_element.get("id"), *args) 46 paper._parse_element(xml_element) 47 paper.attrib["title"] = paper.get_title("plain") 48 if "booktitle" in paper.attrib: 49 paper.attrib["booktitle"] = paper.get_booktitle("plain") 50 if "editor" in paper.attrib: 51 if paper.is_volume: 52 if "author" in paper.attrib: 53 log.warn( 54 "Paper {} has both <editor> and <author>; ignoring <author>".format( 55 paper.full_id 56 ) 57 ) 58 # Proceedings editors are considered authors for their front matter 59 paper.attrib["author"] = paper.attrib["editor"] 60 del paper.attrib["editor"] 61 else: 62 log.warn( 63 "Paper {} has <editor> but is not a proceedings volume; ignoring <editor>".format( 64 paper.full_id 65 ) 66 ) 67 if "year" not in paper.attrib: 68 paper._infer_year() 69 if "pages" in paper.attrib: 70 if paper.attrib["pages"] is not None: 71 paper._interpret_pages() 72 else: 73 del paper.attrib["pages"] 74 return paper 75 76 def _parse_element(self, paper_element): 77 # read & store values 78 if "href" in paper_element.attrib: 79 self.attrib["attrib_href"] = paper_element.get("href") 80 self.attrib["url"] = paper_element.get("href") 81 elif not (self.is_volume and is_journal(self.full_id)): 82 # Generate a URL, except for top-level journal entries 83 self.attrib["url"] = data.ANTHOLOGY_URL.format(self.full_id) 84 for element in paper_element: 85 # parse value 86 tag = element.tag.lower() 87 if tag in ("abstract", "title", "booktitle"): 88 tag = "xml_{}".format(tag) 89 value = 
element 90 elif tag == "attachment": 91 value = { 92 "filename": element.text, 93 "type": element.get("type", "attachment"), 94 "url": infer_attachment_url(element.text), 95 } 96 elif tag in ("author", "editor"): 97 value = PersonName.from_element(element) 98 elif tag in ("erratum", "revision"): 99 if tag == "revision" and "revision" not in self.attrib: 100 # Explicitly construct URL of original version of the paper 101 # -- this is a bit hacky, but it's not given in the XML 102 # explicitly 103 self.attrib["revision"] = [ 104 { 105 "value": "{}v1".format(self.full_id), 106 "id": "1", 107 "url": data.ANTHOLOGY_URL.format( 108 "{}v1".format(self.full_id) 109 ), 110 } 111 ] 112 value = { 113 "value": element.text, 114 "id": element.get("id"), 115 "url": data.ANTHOLOGY_URL.format(element.text), 116 } 117 elif tag == "mrf": 118 value = {"filename": element.text, "src": element.get("src")} 119 elif tag == "video": 120 # Treat videos the same way as other attachments 121 tag = "attachment" 122 value = { 123 "filename": element.get("href"), 124 "type": element.get("tag", "video"), 125 "url": infer_attachment_url(element.get("href")), 126 } 127 elif tag in ("dataset", "software"): 128 value = { 129 "filename": element.text, 130 "type": tag, 131 "url": infer_attachment_url(element.text), 132 } 133 tag = "attachment" 134 else: 135 value = element.text 136 # store value 137 if tag == "url": 138 continue # We basically have to ignore this for now 139 if tag in _LIST_ELEMENTS: 140 try: 141 self.attrib[tag].append(value) 142 except KeyError: 143 self.attrib[tag] = [value] 144 else: 145 if tag in self.attrib: 146 log.warning( 147 "{}: Unexpected multiple occurrence of '{}' element".format( 148 self.full_id, tag 149 ) 150 ) 151 self.attrib[tag] = value 152 153 def _infer_year(self): 154 """Infer the year from the volume ID. 155 156 Many paper entries do not explicitly contain their year. This function assumes 157 that the paper's volume identifier follows the format 'xyy', where x is 158 some letter and yy are the last two digits of the year of publication. 159 """ 160 assert ( 161 len(self.top_level_id) == 3 162 ), "Couldn't infer year: unknown volume ID format" 163 digits = self.top_level_id[1:] 164 if int(digits) >= 60: 165 year = "19{}".format(digits) 166 else: 167 year = "20{}".format(digits) 168 self.attrib["year"] = year 169 170 def _interpret_pages(self): 171 """Splits up 'pages' field into first and last page, if possible. 172 173 This is used for metadata in the generated HTML.""" 174 for s in ("--", "-", "–"): 175 if self.attrib["pages"].count(s) == 1: 176 self.attrib["page_first"], self.attrib["page_last"] = self.attrib[ 177 "pages" 178 ].split(s) 179 self.attrib["pages"] = self.attrib["pages"].replace(s, "–") 180 return 181 182 @property 183 def is_volume(self): 184 """Determines if this paper is a regular paper or a proceedings volume. 185 186 By default, each paper ID of format 'x000' will be treated as (the front 187 matter of) a proceedings volume, unless the XML is of type workshop, 188 where each paper ID of format 'xx00' is treated as one volume. 
189 """ 190 return is_volume_id(self.full_id) 191 192 @property 193 def full_id(self): 194 return "{}-{}".format(self.top_level_id, self.paper_id) 195 196 @property 197 def bibkey(self): 198 if not self._bibkey: 199 self._bibkey = self.full_id # fallback 200 return self._bibkey 201 202 @bibkey.setter 203 def bibkey(self, value): 204 self._bibkey = value 205 206 @property 207 def bibtype(self): 208 if is_journal(self.full_id): 209 return "article" 210 elif self.is_volume: 211 return "proceedings" 212 else: 213 return "inproceedings" 214 215 @property 216 def parent_volume_id(self): 217 if self.parent_volume is not None: 218 return self.parent_volume.full_id 219 return None 220 221 def get(self, name, default=None): 222 try: 223 return self.attrib[name] 224 except KeyError: 225 return default 226 227 def get_title(self, form="xml"): 228 """Returns the paper title, optionally formatting it. 229 230 Accepted formats: 231 - xml: Include any contained XML tags unchanged 232 - plain: Strip all XML tags, returning only plain text 233 - html: Convert XML tags into valid HTML tags 234 - latex: Convert XML tags into LaTeX commands 235 """ 236 return self.formatter(self.get("xml_title"), form) 237 238 def get_abstract(self, form="xml"): 239 """Returns the abstract, optionally formatting it. 240 241 See `get_title()` for details. 242 """ 243 return self.formatter(self.get("xml_abstract"), form, allow_url=True) 244 245 def get_booktitle(self, form="xml"): 246 """Returns the booktitle, optionally formatting it. 247 248 See `get_title()` for details. 249 """ 250 return self.formatter(self.get("xml_booktitle"), form) 251 252 def as_bibtex(self): 253 """Return the BibTeX entry for this paper.""" 254 # Build BibTeX entry 255 bibkey = self.bibkey 256 bibtype = self.bibtype 257 entries = [("title", self.get_title(form="latex"))] 258 for people in ("author", "editor"): 259 if people in self.attrib: 260 entries.append( 261 (people, " and ".join(p.as_bibtex() for p in self.get(people))) 262 ) 263 if is_journal(self.full_id): 264 entries.append( 265 ("journal", bibtex_encode(self.parent_volume.get("meta_journal_title"))) 266 ) 267 journal_volume = self.parent_volume.get( 268 "meta_volume", self.parent_volume.get("volume") 269 ) 270 if journal_volume: 271 entries.append(("volume", journal_volume)) 272 journal_issue = self.parent_volume.get( 273 "meta_issue", self.parent_volume.get("issue") 274 ) 275 if journal_issue: 276 entries.append(("number", journal_issue)) 277 else: 278 # not is_journal(self.full_id) 279 if "xml_booktitle" in self.attrib: 280 entries.append(("booktitle", self.get_booktitle(form="latex"))) 281 elif bibtype != "proceedings": 282 entries.append( 283 ("booktitle", self.parent_volume.get_title(form="latex")) 284 ) 285 for entry in ("month", "year", "address", "publisher"): 286 if entry in self.attrib: 287 entries.append((entry, bibtex_encode(self.get(entry)))) 288 for entry in ("url", "doi"): 289 if entry in self.attrib: 290 # don't want latex escapes such as 291 # doi = "10.1162/coli{\_}a{\_}00008", 292 entries.append((entry, self.get(entry))) 293 if "pages" in self.attrib: 294 entries.append(("pages", self.get("pages").replace("–", "--"))) 295 if "xml_abstract" in self.attrib: 296 entries.append(("abstract", self.get_abstract(form="latex"))) 297 298 # Serialize it 299 return bibtex_make_entry(bibkey, bibtype, entries) 300 301 def as_dict(self): 302 value = self.attrib 303 value["paper_id"] = self.paper_id 304 value["parent_volume_id"] = self.parent_volume_id 305 value["bibkey"] = self.bibkey 306 
value["bibtype"] = self.bibtype 307 return value 308 309 def items(self): 310 return self.attrib.items() 311 ``` Path: `bin/anthology/data.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2019 Marcel Bollmann <[email protected]> 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 ################################################################################ 18 # This file contains all constants and functions that have hardcoded data (such 19 # as URLs or journal titles) which does not come from the XML. This is to 20 # provide a single file where such hardcoded data can be looked up and/or 21 # changed. 22 ################################################################################ 23 24 ANTHOLOGY_URL = "https://www.aclweb.org/anthology/{}" 25 ATTACHMENT_URL = "https://www.aclweb.org/anthology/attachments/{}" 26 27 28 def get_journal_title(top_level_id, volume_title): 29 if top_level_id[0] == "J": 30 return "Computational Linguistics" 31 elif top_level_id[0] == "Q": 32 return "Transactions of the Association for Computational Linguistics" 33 else: 34 return volume_title 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bin/anthology/data.py b/bin/anthology/data.py --- a/bin/anthology/data.py +++ b/bin/anthology/data.py @@ -27,7 +27,10 @@ def get_journal_title(top_level_id, volume_title): if top_level_id[0] == "J": - return "Computational Linguistics" + if int(top_level_id[1:3]) <= 83: + return "American Journal of Computational Linguistics" + else: + return "Computational Linguistics" elif top_level_id[0] == "Q": return "Transactions of the Association for Computational Linguistics" else: diff --git a/bin/anthology/papers.py b/bin/anthology/papers.py --- a/bin/anthology/papers.py +++ b/bin/anthology/papers.py @@ -282,7 +282,7 @@ entries.append( ("booktitle", self.parent_volume.get_title(form="latex")) ) - for entry in ("month", "year", "address", "publisher"): + for entry in ("month", "year", "address", "publisher", "note"): if entry in self.attrib: entries.append((entry, bibtex_encode(self.get(entry)))) for entry in ("url", "doi"): @@ -292,6 +292,8 @@ entries.append((entry, self.get(entry))) if "pages" in self.attrib: entries.append(("pages", self.get("pages").replace("–", "--"))) + if "note" in self.attrib: + entries.append(("note", self.get("note"))) if "xml_abstract" in self.attrib: entries.append(("abstract", self.get_abstract(form="latex")))
{"golden_diff": "diff --git a/bin/anthology/data.py b/bin/anthology/data.py\n--- a/bin/anthology/data.py\n+++ b/bin/anthology/data.py\n@@ -27,7 +27,10 @@\n \n def get_journal_title(top_level_id, volume_title):\n if top_level_id[0] == \"J\":\n- return \"Computational Linguistics\"\n+ if int(top_level_id[1:3]) <= 83:\n+ return \"American Journal of Computational Linguistics\"\n+ else:\n+ return \"Computational Linguistics\"\n elif top_level_id[0] == \"Q\":\n return \"Transactions of the Association for Computational Linguistics\"\n else:\ndiff --git a/bin/anthology/papers.py b/bin/anthology/papers.py\n--- a/bin/anthology/papers.py\n+++ b/bin/anthology/papers.py\n@@ -282,7 +282,7 @@\n entries.append(\n (\"booktitle\", self.parent_volume.get_title(form=\"latex\"))\n )\n- for entry in (\"month\", \"year\", \"address\", \"publisher\"):\n+ for entry in (\"month\", \"year\", \"address\", \"publisher\", \"note\"):\n if entry in self.attrib:\n entries.append((entry, bibtex_encode(self.get(entry))))\n for entry in (\"url\", \"doi\"):\n@@ -292,6 +292,8 @@\n entries.append((entry, self.get(entry)))\n if \"pages\" in self.attrib:\n entries.append((\"pages\", self.get(\"pages\").replace(\"\u2013\", \"--\")))\n+ if \"note\" in self.attrib:\n+ entries.append((\"note\", self.get(\"note\")))\n if \"xml_abstract\" in self.attrib:\n entries.append((\"abstract\", self.get_abstract(form=\"latex\")))\n", "issue": "Missing author data on early CL journals\nAuthors are part of the titles, rather than having author fields. See for example: https://aclweb.org/anthology/events/cl-1979/\nJ79-J83 should be \"American Journal of Computational Linguistics\"\nand changed its name only in 1984. The name \"Computational Linguistics\" is hard-coded, so I'm not sure if this is an easy change.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nfrom .people import PersonName\nfrom .utils import (\n infer_attachment_url,\n remove_extra_whitespace,\n is_journal,\n is_volume_id,\n to_volume_id,\n)\nfrom . 
import data\n\n# For BibTeX export\nfrom .formatter import bibtex_encode, bibtex_make_entry\n\n# Names of XML elements that may appear multiple times\n_LIST_ELEMENTS = (\"attachment\", \"author\", \"editor\", \"video\", \"revision\", \"erratum\")\n\n\nclass Paper:\n def __init__(self, paper_id, top_level_id, formatter):\n self.formatter = formatter\n self.parent_volume = None\n self.paper_id = paper_id\n self.top_level_id = top_level_id\n self.attrib = {}\n self._bibkey = False\n\n def from_xml(xml_element, *args):\n paper = Paper(xml_element.get(\"id\"), *args)\n paper._parse_element(xml_element)\n paper.attrib[\"title\"] = paper.get_title(\"plain\")\n if \"booktitle\" in paper.attrib:\n paper.attrib[\"booktitle\"] = paper.get_booktitle(\"plain\")\n if \"editor\" in paper.attrib:\n if paper.is_volume:\n if \"author\" in paper.attrib:\n log.warn(\n \"Paper {} has both <editor> and <author>; ignoring <author>\".format(\n paper.full_id\n )\n )\n # Proceedings editors are considered authors for their front matter\n paper.attrib[\"author\"] = paper.attrib[\"editor\"]\n del paper.attrib[\"editor\"]\n else:\n log.warn(\n \"Paper {} has <editor> but is not a proceedings volume; ignoring <editor>\".format(\n paper.full_id\n )\n )\n if \"year\" not in paper.attrib:\n paper._infer_year()\n if \"pages\" in paper.attrib:\n if paper.attrib[\"pages\"] is not None:\n paper._interpret_pages()\n else:\n del paper.attrib[\"pages\"]\n return paper\n\n def _parse_element(self, paper_element):\n # read & store values\n if \"href\" in paper_element.attrib:\n self.attrib[\"attrib_href\"] = paper_element.get(\"href\")\n self.attrib[\"url\"] = paper_element.get(\"href\")\n elif not (self.is_volume and is_journal(self.full_id)):\n # Generate a URL, except for top-level journal entries\n self.attrib[\"url\"] = data.ANTHOLOGY_URL.format(self.full_id)\n for element in paper_element:\n # parse value\n tag = element.tag.lower()\n if tag in (\"abstract\", \"title\", \"booktitle\"):\n tag = \"xml_{}\".format(tag)\n value = element\n elif tag == \"attachment\":\n value = {\n \"filename\": element.text,\n \"type\": element.get(\"type\", \"attachment\"),\n \"url\": infer_attachment_url(element.text),\n }\n elif tag in (\"author\", \"editor\"):\n value = PersonName.from_element(element)\n elif tag in (\"erratum\", \"revision\"):\n if tag == \"revision\" and \"revision\" not in self.attrib:\n # Explicitly construct URL of original version of the paper\n # -- this is a bit hacky, but it's not given in the XML\n # explicitly\n self.attrib[\"revision\"] = [\n {\n \"value\": \"{}v1\".format(self.full_id),\n \"id\": \"1\",\n \"url\": data.ANTHOLOGY_URL.format(\n \"{}v1\".format(self.full_id)\n ),\n }\n ]\n value = {\n \"value\": element.text,\n \"id\": element.get(\"id\"),\n \"url\": data.ANTHOLOGY_URL.format(element.text),\n }\n elif tag == \"mrf\":\n value = {\"filename\": element.text, \"src\": element.get(\"src\")}\n elif tag == \"video\":\n # Treat videos the same way as other attachments\n tag = \"attachment\"\n value = {\n \"filename\": element.get(\"href\"),\n \"type\": element.get(\"tag\", \"video\"),\n \"url\": infer_attachment_url(element.get(\"href\")),\n }\n elif tag in (\"dataset\", \"software\"):\n value = {\n \"filename\": element.text,\n \"type\": tag,\n \"url\": infer_attachment_url(element.text),\n }\n tag = \"attachment\"\n else:\n value = element.text\n # store value\n if tag == \"url\":\n continue # We basically have to ignore this for now\n if tag in _LIST_ELEMENTS:\n try:\n self.attrib[tag].append(value)\n 
except KeyError:\n self.attrib[tag] = [value]\n else:\n if tag in self.attrib:\n log.warning(\n \"{}: Unexpected multiple occurrence of '{}' element\".format(\n self.full_id, tag\n )\n )\n self.attrib[tag] = value\n\n def _infer_year(self):\n \"\"\"Infer the year from the volume ID.\n\n Many paper entries do not explicitly contain their year. This function assumes\n that the paper's volume identifier follows the format 'xyy', where x is\n some letter and yy are the last two digits of the year of publication.\n \"\"\"\n assert (\n len(self.top_level_id) == 3\n ), \"Couldn't infer year: unknown volume ID format\"\n digits = self.top_level_id[1:]\n if int(digits) >= 60:\n year = \"19{}\".format(digits)\n else:\n year = \"20{}\".format(digits)\n self.attrib[\"year\"] = year\n\n def _interpret_pages(self):\n \"\"\"Splits up 'pages' field into first and last page, if possible.\n\n This is used for metadata in the generated HTML.\"\"\"\n for s in (\"--\", \"-\", \"\u2013\"):\n if self.attrib[\"pages\"].count(s) == 1:\n self.attrib[\"page_first\"], self.attrib[\"page_last\"] = self.attrib[\n \"pages\"\n ].split(s)\n self.attrib[\"pages\"] = self.attrib[\"pages\"].replace(s, \"\u2013\")\n return\n\n @property\n def is_volume(self):\n \"\"\"Determines if this paper is a regular paper or a proceedings volume.\n\n By default, each paper ID of format 'x000' will be treated as (the front\n matter of) a proceedings volume, unless the XML is of type workshop,\n where each paper ID of format 'xx00' is treated as one volume.\n \"\"\"\n return is_volume_id(self.full_id)\n\n @property\n def full_id(self):\n return \"{}-{}\".format(self.top_level_id, self.paper_id)\n\n @property\n def bibkey(self):\n if not self._bibkey:\n self._bibkey = self.full_id # fallback\n return self._bibkey\n\n @bibkey.setter\n def bibkey(self, value):\n self._bibkey = value\n\n @property\n def bibtype(self):\n if is_journal(self.full_id):\n return \"article\"\n elif self.is_volume:\n return \"proceedings\"\n else:\n return \"inproceedings\"\n\n @property\n def parent_volume_id(self):\n if self.parent_volume is not None:\n return self.parent_volume.full_id\n return None\n\n def get(self, name, default=None):\n try:\n return self.attrib[name]\n except KeyError:\n return default\n\n def get_title(self, form=\"xml\"):\n \"\"\"Returns the paper title, optionally formatting it.\n\n Accepted formats:\n - xml: Include any contained XML tags unchanged\n - plain: Strip all XML tags, returning only plain text\n - html: Convert XML tags into valid HTML tags\n - latex: Convert XML tags into LaTeX commands\n \"\"\"\n return self.formatter(self.get(\"xml_title\"), form)\n\n def get_abstract(self, form=\"xml\"):\n \"\"\"Returns the abstract, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_abstract\"), form, allow_url=True)\n\n def get_booktitle(self, form=\"xml\"):\n \"\"\"Returns the booktitle, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_booktitle\"), form)\n\n def as_bibtex(self):\n \"\"\"Return the BibTeX entry for this paper.\"\"\"\n # Build BibTeX entry\n bibkey = self.bibkey\n bibtype = self.bibtype\n entries = [(\"title\", self.get_title(form=\"latex\"))]\n for people in (\"author\", \"editor\"):\n if people in self.attrib:\n entries.append(\n (people, \" and \".join(p.as_bibtex() for p in self.get(people)))\n )\n if is_journal(self.full_id):\n entries.append(\n (\"journal\", 
bibtex_encode(self.parent_volume.get(\"meta_journal_title\")))\n )\n journal_volume = self.parent_volume.get(\n \"meta_volume\", self.parent_volume.get(\"volume\")\n )\n if journal_volume:\n entries.append((\"volume\", journal_volume))\n journal_issue = self.parent_volume.get(\n \"meta_issue\", self.parent_volume.get(\"issue\")\n )\n if journal_issue:\n entries.append((\"number\", journal_issue))\n else:\n # not is_journal(self.full_id)\n if \"xml_booktitle\" in self.attrib:\n entries.append((\"booktitle\", self.get_booktitle(form=\"latex\")))\n elif bibtype != \"proceedings\":\n entries.append(\n (\"booktitle\", self.parent_volume.get_title(form=\"latex\"))\n )\n for entry in (\"month\", \"year\", \"address\", \"publisher\"):\n if entry in self.attrib:\n entries.append((entry, bibtex_encode(self.get(entry))))\n for entry in (\"url\", \"doi\"):\n if entry in self.attrib:\n # don't want latex escapes such as\n # doi = \"10.1162/coli{\\_}a{\\_}00008\",\n entries.append((entry, self.get(entry)))\n if \"pages\" in self.attrib:\n entries.append((\"pages\", self.get(\"pages\").replace(\"\u2013\", \"--\")))\n if \"xml_abstract\" in self.attrib:\n entries.append((\"abstract\", self.get_abstract(form=\"latex\")))\n\n # Serialize it\n return bibtex_make_entry(bibkey, bibtype, entries)\n\n def as_dict(self):\n value = self.attrib\n value[\"paper_id\"] = self.paper_id\n value[\"parent_volume_id\"] = self.parent_volume_id\n value[\"bibkey\"] = self.bibkey\n value[\"bibtype\"] = self.bibtype\n return value\n\n def items(self):\n return self.attrib.items()\n", "path": "bin/anthology/papers.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################################################################################\n# This file contains all constants and functions that have hardcoded data (such\n# as URLs or journal titles) which does not come from the XML. 
This is to\n# provide a single file where such hardcoded data can be looked up and/or\n# changed.\n################################################################################\n\nANTHOLOGY_URL = \"https://www.aclweb.org/anthology/{}\"\nATTACHMENT_URL = \"https://www.aclweb.org/anthology/attachments/{}\"\n\n\ndef get_journal_title(top_level_id, volume_title):\n if top_level_id[0] == \"J\":\n return \"Computational Linguistics\"\n elif top_level_id[0] == \"Q\":\n return \"Transactions of the Association for Computational Linguistics\"\n else:\n return volume_title\n", "path": "bin/anthology/data.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging as log\nfrom .people import PersonName\nfrom .utils import (\n infer_attachment_url,\n remove_extra_whitespace,\n is_journal,\n is_volume_id,\n to_volume_id,\n)\nfrom . import data\n\n# For BibTeX export\nfrom .formatter import bibtex_encode, bibtex_make_entry\n\n# Names of XML elements that may appear multiple times\n_LIST_ELEMENTS = (\"attachment\", \"author\", \"editor\", \"video\", \"revision\", \"erratum\")\n\n\nclass Paper:\n def __init__(self, paper_id, top_level_id, formatter):\n self.formatter = formatter\n self.parent_volume = None\n self.paper_id = paper_id\n self.top_level_id = top_level_id\n self.attrib = {}\n self._bibkey = False\n\n def from_xml(xml_element, *args):\n paper = Paper(xml_element.get(\"id\"), *args)\n paper._parse_element(xml_element)\n paper.attrib[\"title\"] = paper.get_title(\"plain\")\n if \"booktitle\" in paper.attrib:\n paper.attrib[\"booktitle\"] = paper.get_booktitle(\"plain\")\n if \"editor\" in paper.attrib:\n if paper.is_volume:\n if \"author\" in paper.attrib:\n log.warn(\n \"Paper {} has both <editor> and <author>; ignoring <author>\".format(\n paper.full_id\n )\n )\n # Proceedings editors are considered authors for their front matter\n paper.attrib[\"author\"] = paper.attrib[\"editor\"]\n del paper.attrib[\"editor\"]\n else:\n log.warn(\n \"Paper {} has <editor> but is not a proceedings volume; ignoring <editor>\".format(\n paper.full_id\n )\n )\n if \"year\" not in paper.attrib:\n paper._infer_year()\n if \"pages\" in paper.attrib:\n if paper.attrib[\"pages\"] is not None:\n paper._interpret_pages()\n else:\n del paper.attrib[\"pages\"]\n return paper\n\n def _parse_element(self, paper_element):\n # read & store values\n if \"href\" in paper_element.attrib:\n self.attrib[\"attrib_href\"] = paper_element.get(\"href\")\n self.attrib[\"url\"] = paper_element.get(\"href\")\n elif not (self.is_volume and is_journal(self.full_id)):\n # Generate a URL, except for top-level journal entries\n self.attrib[\"url\"] = data.ANTHOLOGY_URL.format(self.full_id)\n for element in paper_element:\n # parse value\n tag = element.tag.lower()\n if tag in (\"abstract\", \"title\", \"booktitle\"):\n tag = \"xml_{}\".format(tag)\n value = element\n elif tag == \"attachment\":\n value = {\n 
\"filename\": element.text,\n \"type\": element.get(\"type\", \"attachment\"),\n \"url\": infer_attachment_url(element.text),\n }\n elif tag in (\"author\", \"editor\"):\n value = PersonName.from_element(element)\n elif tag in (\"erratum\", \"revision\"):\n if tag == \"revision\" and \"revision\" not in self.attrib:\n # Explicitly construct URL of original version of the paper\n # -- this is a bit hacky, but it's not given in the XML\n # explicitly\n self.attrib[\"revision\"] = [\n {\n \"value\": \"{}v1\".format(self.full_id),\n \"id\": \"1\",\n \"url\": data.ANTHOLOGY_URL.format(\n \"{}v1\".format(self.full_id)\n ),\n }\n ]\n value = {\n \"value\": element.text,\n \"id\": element.get(\"id\"),\n \"url\": data.ANTHOLOGY_URL.format(element.text),\n }\n elif tag == \"mrf\":\n value = {\"filename\": element.text, \"src\": element.get(\"src\")}\n elif tag == \"video\":\n # Treat videos the same way as other attachments\n tag = \"attachment\"\n value = {\n \"filename\": element.get(\"href\"),\n \"type\": element.get(\"tag\", \"video\"),\n \"url\": infer_attachment_url(element.get(\"href\")),\n }\n elif tag in (\"dataset\", \"software\"):\n value = {\n \"filename\": element.text,\n \"type\": tag,\n \"url\": infer_attachment_url(element.text),\n }\n tag = \"attachment\"\n else:\n value = element.text\n # store value\n if tag == \"url\":\n continue # We basically have to ignore this for now\n if tag in _LIST_ELEMENTS:\n try:\n self.attrib[tag].append(value)\n except KeyError:\n self.attrib[tag] = [value]\n else:\n if tag in self.attrib:\n log.warning(\n \"{}: Unexpected multiple occurrence of '{}' element\".format(\n self.full_id, tag\n )\n )\n self.attrib[tag] = value\n\n def _infer_year(self):\n \"\"\"Infer the year from the volume ID.\n\n Many paper entries do not explicitly contain their year. 
This function assumes\n that the paper's volume identifier follows the format 'xyy', where x is\n some letter and yy are the last two digits of the year of publication.\n \"\"\"\n assert (\n len(self.top_level_id) == 3\n ), \"Couldn't infer year: unknown volume ID format\"\n digits = self.top_level_id[1:]\n if int(digits) >= 60:\n year = \"19{}\".format(digits)\n else:\n year = \"20{}\".format(digits)\n self.attrib[\"year\"] = year\n\n def _interpret_pages(self):\n \"\"\"Splits up 'pages' field into first and last page, if possible.\n\n This is used for metadata in the generated HTML.\"\"\"\n for s in (\"--\", \"-\", \"\u2013\"):\n if self.attrib[\"pages\"].count(s) == 1:\n self.attrib[\"page_first\"], self.attrib[\"page_last\"] = self.attrib[\n \"pages\"\n ].split(s)\n self.attrib[\"pages\"] = self.attrib[\"pages\"].replace(s, \"\u2013\")\n return\n\n @property\n def is_volume(self):\n \"\"\"Determines if this paper is a regular paper or a proceedings volume.\n\n By default, each paper ID of format 'x000' will be treated as (the front\n matter of) a proceedings volume, unless the XML is of type workshop,\n where each paper ID of format 'xx00' is treated as one volume.\n \"\"\"\n return is_volume_id(self.full_id)\n\n @property\n def full_id(self):\n return \"{}-{}\".format(self.top_level_id, self.paper_id)\n\n @property\n def bibkey(self):\n if not self._bibkey:\n self._bibkey = self.full_id # fallback\n return self._bibkey\n\n @bibkey.setter\n def bibkey(self, value):\n self._bibkey = value\n\n @property\n def bibtype(self):\n if is_journal(self.full_id):\n return \"article\"\n elif self.is_volume:\n return \"proceedings\"\n else:\n return \"inproceedings\"\n\n @property\n def parent_volume_id(self):\n if self.parent_volume is not None:\n return self.parent_volume.full_id\n return None\n\n def get(self, name, default=None):\n try:\n return self.attrib[name]\n except KeyError:\n return default\n\n def get_title(self, form=\"xml\"):\n \"\"\"Returns the paper title, optionally formatting it.\n\n Accepted formats:\n - xml: Include any contained XML tags unchanged\n - plain: Strip all XML tags, returning only plain text\n - html: Convert XML tags into valid HTML tags\n - latex: Convert XML tags into LaTeX commands\n \"\"\"\n return self.formatter(self.get(\"xml_title\"), form)\n\n def get_abstract(self, form=\"xml\"):\n \"\"\"Returns the abstract, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_abstract\"), form, allow_url=True)\n\n def get_booktitle(self, form=\"xml\"):\n \"\"\"Returns the booktitle, optionally formatting it.\n\n See `get_title()` for details.\n \"\"\"\n return self.formatter(self.get(\"xml_booktitle\"), form)\n\n def as_bibtex(self):\n \"\"\"Return the BibTeX entry for this paper.\"\"\"\n # Build BibTeX entry\n bibkey = self.bibkey\n bibtype = self.bibtype\n entries = [(\"title\", self.get_title(form=\"latex\"))]\n for people in (\"author\", \"editor\"):\n if people in self.attrib:\n entries.append(\n (people, \" and \".join(p.as_bibtex() for p in self.get(people)))\n )\n if is_journal(self.full_id):\n entries.append(\n (\"journal\", bibtex_encode(self.parent_volume.get(\"meta_journal_title\")))\n )\n journal_volume = self.parent_volume.get(\n \"meta_volume\", self.parent_volume.get(\"volume\")\n )\n if journal_volume:\n entries.append((\"volume\", journal_volume))\n journal_issue = self.parent_volume.get(\n \"meta_issue\", self.parent_volume.get(\"issue\")\n )\n if journal_issue:\n entries.append((\"number\", 
journal_issue))\n else:\n # not is_journal(self.full_id)\n if \"xml_booktitle\" in self.attrib:\n entries.append((\"booktitle\", self.get_booktitle(form=\"latex\")))\n elif bibtype != \"proceedings\":\n entries.append(\n (\"booktitle\", self.parent_volume.get_title(form=\"latex\"))\n )\n for entry in (\"month\", \"year\", \"address\", \"publisher\", \"note\"):\n if entry in self.attrib:\n entries.append((entry, bibtex_encode(self.get(entry))))\n for entry in (\"url\", \"doi\"):\n if entry in self.attrib:\n # don't want latex escapes such as\n # doi = \"10.1162/coli{\\_}a{\\_}00008\",\n entries.append((entry, self.get(entry)))\n if \"pages\" in self.attrib:\n entries.append((\"pages\", self.get(\"pages\").replace(\"\u2013\", \"--\")))\n if \"note\" in self.attrib:\n entries.append((\"note\", self.get(\"note\")))\n if \"xml_abstract\" in self.attrib:\n entries.append((\"abstract\", self.get_abstract(form=\"latex\")))\n\n # Serialize it\n return bibtex_make_entry(bibkey, bibtype, entries)\n\n def as_dict(self):\n value = self.attrib\n value[\"paper_id\"] = self.paper_id\n value[\"parent_volume_id\"] = self.parent_volume_id\n value[\"bibkey\"] = self.bibkey\n value[\"bibtype\"] = self.bibtype\n return value\n\n def items(self):\n return self.attrib.items()\n", "path": "bin/anthology/papers.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n################################################################################\n# This file contains all constants and functions that have hardcoded data (such\n# as URLs or journal titles) which does not come from the XML. This is to\n# provide a single file where such hardcoded data can be looked up and/or\n# changed.\n################################################################################\n\nANTHOLOGY_URL = \"https://www.aclweb.org/anthology/{}\"\nATTACHMENT_URL = \"https://www.aclweb.org/anthology/attachments/{}\"\n\n\ndef get_journal_title(top_level_id, volume_title):\n if top_level_id[0] == \"J\":\n if int(top_level_id[1:3]) <= 83:\n return \"American Journal of Computational Linguistics\"\n else:\n return \"Computational Linguistics\"\n elif top_level_id[0] == \"Q\":\n return \"Transactions of the Association for Computational Linguistics\"\n else:\n return volume_title\n", "path": "bin/anthology/data.py"}]}
num_tokens: 4,030
num_tokens_diff: 387

problem_id: gh_patches_debug_41815
source: rasdani/github-patches
task_type: git_diff
in_source_id: mars-project__mars-2953
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] `dask.persist` cannot work on dask-on-mars <!-- Thank you for your contribution! Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue. --> **Describe the bug** `dask.persist` cannot work on dask-on-mars. **To Reproduce** To help us reproducing this bug, please provide information below: 1. Your Python version 2. The version of Mars you use 3. Versions of crucial packages, such as numpy, scipy and pandas 4. Full stack of the error. 5. Minimized code to reproduce the error. ``` In [1]: import mars In [2]: mars.new_session() Web service started at http://0.0.0.0:64573 Out[2]: <mars.deploy.oscar.session.SyncSession at 0x7fa0f8366d90> In [3]: import dask ...: from mars.contrib.dask import mars_scheduler In [4]: def inc(x): ...: return x + 1 ...: In [5]: dask_task = dask.delayed(inc)(1) In [6]: dask_task.persist(scheduler=mars_scheduler) --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-6-095d61a2acc0> in <module> ----> 1 dask_task.persist(scheduler=mars_scheduler) ~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/base.py in persist(self, **kwargs) 259 dask.base.persist 260 """ --> 261 (result,) = persist(self, traverse=False, **kwargs) 262 return result 263 ~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/base.py in persist(traverse, optimize_graph, scheduler, *args, **kwargs) 831 postpersists.append((rebuild, a_keys, state)) 832 --> 833 results = schedule(dsk, keys, **kwargs) 834 d = dict(zip(keys, results)) 835 results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists] ~/Workspace/mars/mars/contrib/dask/scheduler.py in mars_scheduler(dsk, keys) 39 Computed values corresponding to the provided keys. 40 """ ---> 41 res = reduce(mars_dask_get(dsk, keys)).execute().fetch() 42 if not isinstance(res, List): 43 return [[res]] ~/Workspace/mars/mars/contrib/dask/scheduler.py in mars_dask_get(dsk, keys) 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:])) 87 ---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys] ~/Workspace/mars/mars/contrib/dask/scheduler.py in <listcomp>(.0) 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:])) 87 ---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys] ~/Workspace/mars/mars/contrib/dask/scheduler.py in <listcomp>(.0) 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:])) 87 ---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys] ~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/highlevelgraph.py in __getitem__(self, key) 737 pass 738 --> 739 raise KeyError(key) 740 741 def __len__(self) -> int: KeyError: 'i' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mars/contrib/dask/scheduler.py` Content: ``` 1 # Copyright 1999-2021 Alibaba Group Holding Ltd. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from dask.core import istask, ishashable 16 17 from typing import List, Tuple 18 from .utils import reduce 19 from ...remote import spawn 20 21 22 def mars_scheduler(dsk: dict, keys: List[List[str]]): 23 """ 24 A Dask-Mars scheduler 25 26 This scheduler is intended to be compatible with existing 27 dask user interface, no callbacks are implemented. 28 29 Parameters 30 ---------- 31 dsk: Dict 32 Dask graph, represented as a task DAG dictionary. 33 keys: List[List[str]] 34 2d-list of Dask graph keys whose values we wish to compute and return. 35 36 Returns 37 ------- 38 Object 39 Computed values corresponding to the provided keys. 40 """ 41 res = reduce(mars_dask_get(dsk, keys)).execute().fetch() 42 if not isinstance(res, List): 43 return [[res]] 44 else: 45 return res 46 47 48 def mars_dask_get(dsk: dict, keys: List[List]): 49 """ 50 A Dask-Mars convert function. This function will send the dask graph layers 51 to Mars Remote API, generating mars objects correspond to the provided keys. 52 53 Parameters 54 ---------- 55 dsk: Dict 56 Dask graph, represented as a task DAG dictionary. 57 keys: List[List[str]] 58 2d-list of Dask graph keys whose values we wish to compute and return. 59 60 Returns 61 ------- 62 Object 63 Spawned mars objects corresponding to the provided keys. 64 """ 65 66 def _get_arg(a): 67 # if arg contains layer index or callable objs, handle it 68 if ishashable(a) and a in dsk.keys(): 69 while ishashable(a) and a in dsk.keys(): 70 a = dsk[a] 71 return _execute_task(a) 72 elif not isinstance(a, str) and hasattr(a, "__getitem__"): 73 if istask( 74 a 75 ): # TODO:Handle `SubgraphCallable`, which may contains dsk in it 76 return spawn(a[0], args=tuple(_get_arg(i) for i in a[1:])) 77 elif isinstance(a, dict): 78 return {k: _get_arg(v) for k, v in a.items()} 79 elif isinstance(a, List) or isinstance(a, Tuple): 80 return type(a)(_get_arg(i) for i in a) 81 return a 82 83 def _execute_task(task: tuple): 84 if not istask(task): 85 return _get_arg(task) 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:])) 87 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys] 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mars/contrib/dask/scheduler.py b/mars/contrib/dask/scheduler.py --- a/mars/contrib/dask/scheduler.py +++ b/mars/contrib/dask/scheduler.py @@ -14,12 +14,13 @@ from dask.core import istask, ishashable -from typing import List, Tuple +from typing import List, Tuple, Union from .utils import reduce from ...remote import spawn +from ...deploy.oscar.session import execute -def mars_scheduler(dsk: dict, keys: List[List[str]]): +def mars_scheduler(dsk: dict, keys: Union[List[List[str]], List[str]]): """ A Dask-Mars scheduler @@ -30,22 +31,29 @@ ---------- dsk: Dict Dask graph, represented as a task DAG dictionary. - keys: List[List[str]] - 2d-list of Dask graph keys whose values we wish to compute and return. + keys: Union[List[List[str]], List[str]] + 1d or 2d list of Dask graph keys whose values we wish to compute and return. Returns ------- Object - Computed values corresponding to the provided keys. + Computed values corresponding to the provided keys with same dimension. """ - res = reduce(mars_dask_get(dsk, keys)).execute().fetch() - if not isinstance(res, List): - return [[res]] - else: - return res + if isinstance(keys, List) and not isinstance(keys[0], List): # 1d keys + task = execute(mars_dask_get(dsk, keys)) + if not isinstance(task, List): + task = [task] + return map(lambda x: x.fetch(), task) + else: # 2d keys + res = execute(reduce(mars_dask_get(dsk, keys))).fetch() + if not isinstance(res, List): + return [[res]] + else: + return res -def mars_dask_get(dsk: dict, keys: List[List]): + +def mars_dask_get(dsk: dict, keys: Union[List[List[str]], List[str]]): """ A Dask-Mars convert function. This function will send the dask graph layers to Mars Remote API, generating mars objects correspond to the provided keys. @@ -54,13 +62,13 @@ ---------- dsk: Dict Dask graph, represented as a task DAG dictionary. - keys: List[List[str]] - 2d-list of Dask graph keys whose values we wish to compute and return. + keys: Union[List[List[str]], List[str]] + 1d or 2d list of Dask graph keys whose values we wish to compute and return. Returns ------- Object - Spawned mars objects corresponding to the provided keys. + Spawned mars objects corresponding to the provided keys with same dimension. """ def _get_arg(a): @@ -68,7 +76,7 @@ if ishashable(a) and a in dsk.keys(): while ishashable(a) and a in dsk.keys(): a = dsk[a] - return _execute_task(a) + return _spawn_task(a) elif not isinstance(a, str) and hasattr(a, "__getitem__"): if istask( a @@ -80,9 +88,14 @@ return type(a)(_get_arg(i) for i in a) return a - def _execute_task(task: tuple): + def _spawn_task(task: tuple): if not istask(task): return _get_arg(task) return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:])) - return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys] + return [ + [_spawn_task(dsk[k]) for k in keys_d] + if isinstance(keys_d, List) + else _spawn_task(dsk[keys_d]) + for keys_d in keys + ]
{"golden_diff": "diff --git a/mars/contrib/dask/scheduler.py b/mars/contrib/dask/scheduler.py\n--- a/mars/contrib/dask/scheduler.py\n+++ b/mars/contrib/dask/scheduler.py\n@@ -14,12 +14,13 @@\n \n from dask.core import istask, ishashable\n \n-from typing import List, Tuple\n+from typing import List, Tuple, Union\n from .utils import reduce\n from ...remote import spawn\n+from ...deploy.oscar.session import execute\n \n \n-def mars_scheduler(dsk: dict, keys: List[List[str]]):\n+def mars_scheduler(dsk: dict, keys: Union[List[List[str]], List[str]]):\n \"\"\"\n A Dask-Mars scheduler\n \n@@ -30,22 +31,29 @@\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n- keys: List[List[str]]\n- 2d-list of Dask graph keys whose values we wish to compute and return.\n+ keys: Union[List[List[str]], List[str]]\n+ 1d or 2d list of Dask graph keys whose values we wish to compute and return.\n \n Returns\n -------\n Object\n- Computed values corresponding to the provided keys.\n+ Computed values corresponding to the provided keys with same dimension.\n \"\"\"\n- res = reduce(mars_dask_get(dsk, keys)).execute().fetch()\n- if not isinstance(res, List):\n- return [[res]]\n- else:\n- return res\n \n+ if isinstance(keys, List) and not isinstance(keys[0], List): # 1d keys\n+ task = execute(mars_dask_get(dsk, keys))\n+ if not isinstance(task, List):\n+ task = [task]\n+ return map(lambda x: x.fetch(), task)\n+ else: # 2d keys\n+ res = execute(reduce(mars_dask_get(dsk, keys))).fetch()\n+ if not isinstance(res, List):\n+ return [[res]]\n+ else:\n+ return res\n \n-def mars_dask_get(dsk: dict, keys: List[List]):\n+\n+def mars_dask_get(dsk: dict, keys: Union[List[List[str]], List[str]]):\n \"\"\"\n A Dask-Mars convert function. This function will send the dask graph layers\n to Mars Remote API, generating mars objects correspond to the provided keys.\n@@ -54,13 +62,13 @@\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n- keys: List[List[str]]\n- 2d-list of Dask graph keys whose values we wish to compute and return.\n+ keys: Union[List[List[str]], List[str]]\n+ 1d or 2d list of Dask graph keys whose values we wish to compute and return.\n \n Returns\n -------\n Object\n- Spawned mars objects corresponding to the provided keys.\n+ Spawned mars objects corresponding to the provided keys with same dimension.\n \"\"\"\n \n def _get_arg(a):\n@@ -68,7 +76,7 @@\n if ishashable(a) and a in dsk.keys():\n while ishashable(a) and a in dsk.keys():\n a = dsk[a]\n- return _execute_task(a)\n+ return _spawn_task(a)\n elif not isinstance(a, str) and hasattr(a, \"__getitem__\"):\n if istask(\n a\n@@ -80,9 +88,14 @@\n return type(a)(_get_arg(i) for i in a)\n return a\n \n- def _execute_task(task: tuple):\n+ def _spawn_task(task: tuple):\n if not istask(task):\n return _get_arg(task)\n return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\n \n- return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys]\n+ return [\n+ [_spawn_task(dsk[k]) for k in keys_d]\n+ if isinstance(keys_d, List)\n+ else _spawn_task(dsk[keys_d])\n+ for keys_d in keys\n+ ]\n", "issue": "[BUG] `dask.persist` cannot work on dask-on-mars\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\n\r\n`dask.persist` cannot work on dask-on-mars.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version\r\n2. 
The version of Mars you use\r\n3. Versions of crucial packages, such as numpy, scipy and pandas\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n```\r\nIn [1]: import mars\r\n\r\nIn [2]: mars.new_session()\r\nWeb service started at http://0.0.0.0:64573\r\nOut[2]: <mars.deploy.oscar.session.SyncSession at 0x7fa0f8366d90>\r\n\r\nIn [3]: import dask\r\n ...: from mars.contrib.dask import mars_scheduler\r\n\r\nIn [4]: def inc(x):\r\n ...: return x + 1\r\n ...: \r\n\r\nIn [5]: dask_task = dask.delayed(inc)(1)\r\n\r\nIn [6]: dask_task.persist(scheduler=mars_scheduler)\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-6-095d61a2acc0> in <module>\r\n----> 1 dask_task.persist(scheduler=mars_scheduler)\r\n\r\n~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/base.py in persist(self, **kwargs)\r\n 259 dask.base.persist\r\n 260 \"\"\"\r\n--> 261 (result,) = persist(self, traverse=False, **kwargs)\r\n 262 return result\r\n 263 \r\n\r\n~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/base.py in persist(traverse, optimize_graph, scheduler, *args, **kwargs)\r\n 831 postpersists.append((rebuild, a_keys, state))\r\n 832 \r\n--> 833 results = schedule(dsk, keys, **kwargs)\r\n 834 d = dict(zip(keys, results))\r\n 835 results2 = [r({k: d[k] for k in ks}, *s) for r, ks, s in postpersists]\r\n\r\n~/Workspace/mars/mars/contrib/dask/scheduler.py in mars_scheduler(dsk, keys)\r\n 39 Computed values corresponding to the provided keys.\r\n 40 \"\"\"\r\n---> 41 res = reduce(mars_dask_get(dsk, keys)).execute().fetch()\r\n 42 if not isinstance(res, List):\r\n 43 return [[res]]\r\n\r\n~/Workspace/mars/mars/contrib/dask/scheduler.py in mars_dask_get(dsk, keys)\r\n 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\r\n 87 \r\n---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys]\r\n\r\n~/Workspace/mars/mars/contrib/dask/scheduler.py in <listcomp>(.0)\r\n 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\r\n 87 \r\n---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys]\r\n\r\n~/Workspace/mars/mars/contrib/dask/scheduler.py in <listcomp>(.0)\r\n 86 return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\r\n 87 \r\n---> 88 return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys]\r\n\r\n~/miniconda3/envs/mars3.8/lib/python3.8/site-packages/dask/highlevelgraph.py in __getitem__(self, key)\r\n 737 pass\r\n 738 \r\n--> 739 raise KeyError(key)\r\n 740 \r\n 741 def __len__(self) -> int:\r\n\r\nKeyError: 'i'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dask.core import istask, ishashable\n\nfrom typing import List, Tuple\nfrom .utils import reduce\nfrom ...remote import spawn\n\n\ndef mars_scheduler(dsk: dict, keys: List[List[str]]):\n \"\"\"\n A Dask-Mars scheduler\n\n This 
scheduler is intended to be compatible with existing\n dask user interface, no callbacks are implemented.\n\n Parameters\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n keys: List[List[str]]\n 2d-list of Dask graph keys whose values we wish to compute and return.\n\n Returns\n -------\n Object\n Computed values corresponding to the provided keys.\n \"\"\"\n res = reduce(mars_dask_get(dsk, keys)).execute().fetch()\n if not isinstance(res, List):\n return [[res]]\n else:\n return res\n\n\ndef mars_dask_get(dsk: dict, keys: List[List]):\n \"\"\"\n A Dask-Mars convert function. This function will send the dask graph layers\n to Mars Remote API, generating mars objects correspond to the provided keys.\n\n Parameters\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n keys: List[List[str]]\n 2d-list of Dask graph keys whose values we wish to compute and return.\n\n Returns\n -------\n Object\n Spawned mars objects corresponding to the provided keys.\n \"\"\"\n\n def _get_arg(a):\n # if arg contains layer index or callable objs, handle it\n if ishashable(a) and a in dsk.keys():\n while ishashable(a) and a in dsk.keys():\n a = dsk[a]\n return _execute_task(a)\n elif not isinstance(a, str) and hasattr(a, \"__getitem__\"):\n if istask(\n a\n ): # TODO:Handle `SubgraphCallable`, which may contains dsk in it\n return spawn(a[0], args=tuple(_get_arg(i) for i in a[1:]))\n elif isinstance(a, dict):\n return {k: _get_arg(v) for k, v in a.items()}\n elif isinstance(a, List) or isinstance(a, Tuple):\n return type(a)(_get_arg(i) for i in a)\n return a\n\n def _execute_task(task: tuple):\n if not istask(task):\n return _get_arg(task)\n return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\n\n return [[_execute_task(dsk[k]) for k in keys_d] for keys_d in keys]\n", "path": "mars/contrib/dask/scheduler.py"}], "after_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dask.core import istask, ishashable\n\nfrom typing import List, Tuple, Union\nfrom .utils import reduce\nfrom ...remote import spawn\nfrom ...deploy.oscar.session import execute\n\n\ndef mars_scheduler(dsk: dict, keys: Union[List[List[str]], List[str]]):\n \"\"\"\n A Dask-Mars scheduler\n\n This scheduler is intended to be compatible with existing\n dask user interface, no callbacks are implemented.\n\n Parameters\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n keys: Union[List[List[str]], List[str]]\n 1d or 2d list of Dask graph keys whose values we wish to compute and return.\n\n Returns\n -------\n Object\n Computed values corresponding to the provided keys with same dimension.\n \"\"\"\n\n if isinstance(keys, List) and not isinstance(keys[0], List): # 1d keys\n task = execute(mars_dask_get(dsk, keys))\n if not isinstance(task, List):\n task = [task]\n return map(lambda x: x.fetch(), task)\n else: # 2d keys\n res = execute(reduce(mars_dask_get(dsk, keys))).fetch()\n if not 
isinstance(res, List):\n return [[res]]\n else:\n return res\n\n\ndef mars_dask_get(dsk: dict, keys: Union[List[List[str]], List[str]]):\n \"\"\"\n A Dask-Mars convert function. This function will send the dask graph layers\n to Mars Remote API, generating mars objects correspond to the provided keys.\n\n Parameters\n ----------\n dsk: Dict\n Dask graph, represented as a task DAG dictionary.\n keys: Union[List[List[str]], List[str]]\n 1d or 2d list of Dask graph keys whose values we wish to compute and return.\n\n Returns\n -------\n Object\n Spawned mars objects corresponding to the provided keys with same dimension.\n \"\"\"\n\n def _get_arg(a):\n # if arg contains layer index or callable objs, handle it\n if ishashable(a) and a in dsk.keys():\n while ishashable(a) and a in dsk.keys():\n a = dsk[a]\n return _spawn_task(a)\n elif not isinstance(a, str) and hasattr(a, \"__getitem__\"):\n if istask(\n a\n ): # TODO:Handle `SubgraphCallable`, which may contains dsk in it\n return spawn(a[0], args=tuple(_get_arg(i) for i in a[1:]))\n elif isinstance(a, dict):\n return {k: _get_arg(v) for k, v in a.items()}\n elif isinstance(a, List) or isinstance(a, Tuple):\n return type(a)(_get_arg(i) for i in a)\n return a\n\n def _spawn_task(task: tuple):\n if not istask(task):\n return _get_arg(task)\n return spawn(task[0], args=tuple(_get_arg(a) for a in task[1:]))\n\n return [\n [_spawn_task(dsk[k]) for k in keys_d]\n if isinstance(keys_d, List)\n else _spawn_task(dsk[keys_d])\n for keys_d in keys\n ]\n", "path": "mars/contrib/dask/scheduler.py"}]}
2,097
925
gh_patches_debug_42527
rasdani/github-patches
git_diff
conan-io__conan-center-index-2077
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [request] cpp-taskflow/2.5.0 ### Package Details * Package Name/Version: **cpp-taskflow/2.5.0** * Changelog: **https://taskflow.github.io/taskflow/release-2-5-0.html** Please note that `cpp-taskflow` is renamed to `taskflow`. So new recipe shall be created. P.S. Can you tell me please, why recipes is not updated automatically, when new library version is released? Lask of automation? Will it be fixed in future conan center pipeline? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/taskflow/all/conanfile.py` Content: ``` 1 from conans import ConanFile, tools 2 from conans.errors import ConanInvalidConfiguration 3 from conans.model.version import Version 4 import os 5 6 7 class TaskflowConan(ConanFile): 8 name = "taskflow" 9 description = "A fast C++ header-only library to help you quickly write parallel programs with complex task dependencies." 10 topics = ("conan", "taskflow", "tasking", "parallelism") 11 url = "https://github.com/conan-io/conan-center-index" 12 homepage = "https://github.com/taskflow/taskflow" 13 license = "MIT" 14 15 no_copy_source = True 16 17 settings = "os", "compiler" 18 19 _source_subfolder = "source_subfolder" 20 21 def configure(self): 22 compiler = str(self.settings.compiler) 23 compiler_version = tools.Version(self.settings.compiler.version) 24 min_req_cppstd = "17" if tools.Version(self.version) <= "2.2.0" else "14" 25 26 if self.settings.compiler.cppstd: 27 tools.check_min_cppstd(self, min_req_cppstd) 28 else: 29 self.output.warn("%s recipe lacks information about the %s compiler" 30 " standard version support" % (self.name, compiler)) 31 32 minimal_version = { 33 "17" : { 34 "Visual Studio": "16", 35 "gcc": "7.3", 36 "clang": "6.0", 37 "apple-clang": "10.0" 38 }, 39 "14" : { 40 "Visual Studio": "15", 41 "gcc": "5", 42 "clang": "4.0", 43 "apple-clang": "8.0" 44 } 45 } 46 47 if compiler not in minimal_version[min_req_cppstd]: 48 self.output.info("%s requires a compiler that supports at least C++%s" % (self.name, min_req_cppstd)) 49 return 50 51 # Exclude compilers not supported by taskflow 52 if compiler_version < minimal_version[min_req_cppstd][compiler]: 53 raise ConanInvalidConfiguration("%s requires a compiler that supports" 54 " at least C++%s. %s %s is not" 55 " supported." % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value))) 56 57 def source(self): 58 tools.get(**self.conan_data["sources"][self.version]) 59 extracted_dir = self.name + "-" + self.version 60 os.rename(extracted_dir, self._source_subfolder) 61 62 def package(self): 63 self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) 64 self.copy(pattern="*", dst="include/taskflow", src=os.path.join(self._source_subfolder, "taskflow")) 65 66 def package_id(self): 67 self.info.header_only() 68 69 def package_info(self): 70 if self.settings.os == "Linux": 71 self.cpp_info.system_libs.append("pthread") 72 if self.settings.compiler == "Visual Studio": 73 self.cpp_info.defines.append("_ENABLE_EXTENDED_ALIGNED_STORAGE") 74 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/taskflow/all/conanfile.py b/recipes/taskflow/all/conanfile.py --- a/recipes/taskflow/all/conanfile.py +++ b/recipes/taskflow/all/conanfile.py @@ -1,8 +1,7 @@ -from conans import ConanFile, tools -from conans.errors import ConanInvalidConfiguration -from conans.model.version import Version import os +from conans import ConanFile, tools +from conans.errors import ConanInvalidConfiguration class TaskflowConan(ConanFile): name = "taskflow" @@ -16,43 +15,41 @@ settings = "os", "compiler" - _source_subfolder = "source_subfolder" + @property + def _source_subfolder(self): + return "source_subfolder" def configure(self): - compiler = str(self.settings.compiler) - compiler_version = tools.Version(self.settings.compiler.version) - min_req_cppstd = "17" if tools.Version(self.version) <= "2.2.0" else "14" + minimal_cpp_standard = "17" if tools.Version(self.version) <= "2.2.0" else "14" if self.settings.compiler.cppstd: - tools.check_min_cppstd(self, min_req_cppstd) - else: - self.output.warn("%s recipe lacks information about the %s compiler" - " standard version support" % (self.name, compiler)) + tools.check_min_cppstd(self, minimal_cpp_standard) minimal_version = { - "17" : { + "17": { "Visual Studio": "16", "gcc": "7.3", "clang": "6.0", "apple-clang": "10.0" }, - "14" : { + "14": { "Visual Studio": "15", "gcc": "5", "clang": "4.0", "apple-clang": "8.0" } - } + }[minimal_cpp_standard] - if compiler not in minimal_version[min_req_cppstd]: - self.output.info("%s requires a compiler that supports at least C++%s" % (self.name, min_req_cppstd)) + compiler = str(self.settings.compiler) + if compiler not in minimal_version: + self.output.warn( + "%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler)) + self.output.warn( + "%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard)) return - - # Exclude compilers not supported by taskflow - if compiler_version < minimal_version[min_req_cppstd][compiler]: - raise ConanInvalidConfiguration("%s requires a compiler that supports" - " at least C++%s. %s %s is not" - " supported." % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value))) + version = tools.Version(self.settings.compiler.version) + if version < minimal_version[compiler]: + raise ConanInvalidConfiguration("%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard)) def source(self): tools.get(**self.conan_data["sources"][self.version]) @@ -61,7 +58,7 @@ def package(self): self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder) - self.copy(pattern="*", dst="include/taskflow", src=os.path.join(self._source_subfolder, "taskflow")) + self.copy(pattern="*", dst=os.path.join("include", "taskflow"), src=os.path.join(self._source_subfolder, "taskflow")) def package_id(self): self.info.header_only() @@ -71,4 +68,5 @@ self.cpp_info.system_libs.append("pthread") if self.settings.compiler == "Visual Studio": self.cpp_info.defines.append("_ENABLE_EXTENDED_ALIGNED_STORAGE") - + self.cpp_info.names["cmake_find_package"] = "Taskflow" + self.cpp_info.names["cmake_find_package_multi"] = "Taskflow"
{"golden_diff": "diff --git a/recipes/taskflow/all/conanfile.py b/recipes/taskflow/all/conanfile.py\n--- a/recipes/taskflow/all/conanfile.py\n+++ b/recipes/taskflow/all/conanfile.py\n@@ -1,8 +1,7 @@\n-from conans import ConanFile, tools\n-from conans.errors import ConanInvalidConfiguration\n-from conans.model.version import Version\n import os\n \n+from conans import ConanFile, tools\n+from conans.errors import ConanInvalidConfiguration\n \n class TaskflowConan(ConanFile):\n name = \"taskflow\"\n@@ -16,43 +15,41 @@\n \n settings = \"os\", \"compiler\"\n \n- _source_subfolder = \"source_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def configure(self):\n- compiler = str(self.settings.compiler)\n- compiler_version = tools.Version(self.settings.compiler.version)\n- min_req_cppstd = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n+ minimal_cpp_standard = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n \n if self.settings.compiler.cppstd:\n- tools.check_min_cppstd(self, min_req_cppstd)\n- else:\n- self.output.warn(\"%s recipe lacks information about the %s compiler\"\n- \" standard version support\" % (self.name, compiler))\n+ tools.check_min_cppstd(self, minimal_cpp_standard)\n \n minimal_version = {\n- \"17\" : {\n+ \"17\": {\n \"Visual Studio\": \"16\",\n \"gcc\": \"7.3\",\n \"clang\": \"6.0\",\n \"apple-clang\": \"10.0\"\n },\n- \"14\" : {\n+ \"14\": {\n \"Visual Studio\": \"15\",\n \"gcc\": \"5\",\n \"clang\": \"4.0\",\n \"apple-clang\": \"8.0\"\n }\n- }\n+ }[minimal_cpp_standard]\n \n- if compiler not in minimal_version[min_req_cppstd]:\n- self.output.info(\"%s requires a compiler that supports at least C++%s\" % (self.name, min_req_cppstd))\n+ compiler = str(self.settings.compiler)\n+ if compiler not in minimal_version:\n+ self.output.warn(\n+ \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n+ self.output.warn(\n+ \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n-\n- # Exclude compilers not supported by taskflow\n- if compiler_version < minimal_version[min_req_cppstd][compiler]:\n- raise ConanInvalidConfiguration(\"%s requires a compiler that supports\"\n- \" at least C++%s. 
%s %s is not\"\n- \" supported.\" % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value)))\n+ version = tools.Version(self.settings.compiler.version)\n+ if version < minimal_version[compiler]:\n+ raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n@@ -61,7 +58,7 @@\n \n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n- self.copy(pattern=\"*\", dst=\"include/taskflow\", src=os.path.join(self._source_subfolder, \"taskflow\"))\n+ self.copy(pattern=\"*\", dst=os.path.join(\"include\", \"taskflow\"), src=os.path.join(self._source_subfolder, \"taskflow\"))\n \n def package_id(self):\n self.info.header_only()\n@@ -71,4 +68,5 @@\n self.cpp_info.system_libs.append(\"pthread\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n-\n+ self.cpp_info.names[\"cmake_find_package\"] = \"Taskflow\"\n+ self.cpp_info.names[\"cmake_find_package_multi\"] = \"Taskflow\"\n", "issue": "[request] cpp-taskflow/2.5.0\n### Package Details\r\n * Package Name/Version: **cpp-taskflow/2.5.0**\r\n * Changelog: **https://taskflow.github.io/taskflow/release-2-5-0.html**\r\n\r\n\r\nPlease note that `cpp-taskflow` is renamed to `taskflow`. So new recipe shall be created.\r\n\r\nP.S. Can you tell me please, why recipes is not updated automatically, when new library version is released? Lask of automation? Will it be fixed in future conan center pipeline?\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\nfrom conans.model.version import Version\nimport os\n\n\nclass TaskflowConan(ConanFile):\n name = \"taskflow\"\n description = \"A fast C++ header-only library to help you quickly write parallel programs with complex task dependencies.\"\n topics = (\"conan\", \"taskflow\", \"tasking\", \"parallelism\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/taskflow/taskflow\"\n license = \"MIT\"\n\n no_copy_source = True\n\n settings = \"os\", \"compiler\"\n\n _source_subfolder = \"source_subfolder\"\n\n def configure(self):\n compiler = str(self.settings.compiler)\n compiler_version = tools.Version(self.settings.compiler.version)\n min_req_cppstd = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, min_req_cppstd)\n else:\n self.output.warn(\"%s recipe lacks information about the %s compiler\"\n \" standard version support\" % (self.name, compiler))\n\n minimal_version = {\n \"17\" : {\n \"Visual Studio\": \"16\",\n \"gcc\": \"7.3\",\n \"clang\": \"6.0\",\n \"apple-clang\": \"10.0\"\n },\n \"14\" : {\n \"Visual Studio\": \"15\",\n \"gcc\": \"5\",\n \"clang\": \"4.0\",\n \"apple-clang\": \"8.0\"\n }\n }\n\n if compiler not in minimal_version[min_req_cppstd]:\n self.output.info(\"%s requires a compiler that supports at least C++%s\" % (self.name, min_req_cppstd))\n return\n\n # Exclude compilers not supported by taskflow\n if compiler_version < minimal_version[min_req_cppstd][compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports\"\n \" at least C++%s. 
%s %s is not\"\n \" supported.\" % (self.name, min_req_cppstd, compiler, Version(self.settings.compiler.version.value)))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"*\", dst=\"include/taskflow\", src=os.path.join(self._source_subfolder, \"taskflow\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n\n", "path": "recipes/taskflow/all/conanfile.py"}], "after_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nclass TaskflowConan(ConanFile):\n name = \"taskflow\"\n description = \"A fast C++ header-only library to help you quickly write parallel programs with complex task dependencies.\"\n topics = (\"conan\", \"taskflow\", \"tasking\", \"parallelism\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/taskflow/taskflow\"\n license = \"MIT\"\n\n no_copy_source = True\n\n settings = \"os\", \"compiler\"\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"17\" if tools.Version(self.version) <= \"2.2.0\" else \"14\"\n\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n\n minimal_version = {\n \"17\": {\n \"Visual Studio\": \"16\",\n \"gcc\": \"7.3\",\n \"clang\": \"6.0\",\n \"apple-clang\": \"10.0\"\n },\n \"14\": {\n \"Visual Studio\": \"15\",\n \"gcc\": \"5\",\n \"clang\": \"4.0\",\n \"apple-clang\": \"8.0\"\n }\n }[minimal_cpp_standard]\n\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"*\", dst=os.path.join(\"include\", \"taskflow\"), src=os.path.join(self._source_subfolder, \"taskflow\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n if self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines.append(\"_ENABLE_EXTENDED_ALIGNED_STORAGE\")\n self.cpp_info.names[\"cmake_find_package\"] = \"Taskflow\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Taskflow\"\n", "path": "recipes/taskflow/all/conanfile.py"}]}
1,169
932
gh_patches_debug_3808
rasdani/github-patches
git_diff
ManimCommunity__manim-2197
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create a test for resolution configuration ## Enhancement proposal A test for the `--resolution` configuration option. It should be placed [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/test_cli_flags.py) and [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/opengl/test_cli_flags_opengl.py) (the OpenGL test can be marked as `xfail` if setting resolution does not work properly on OpenGL). Test different resolutions as well as different ways of separating the resolutions (width and height separated by `;`, `,`, and `-`). ## Additional comments <!-- Add further context that you think might be relevant. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `manim/utils/commands.py` Content: ``` 1 import json 2 import os 3 from subprocess import run 4 from typing import Any, Dict, List 5 6 __all__ = [ 7 "capture", 8 "get_video_metadata", 9 "get_dir_layout", 10 ] 11 12 13 def capture(command, cwd=None, command_input=None): 14 p = run(command, cwd=cwd, input=command_input, capture_output=True, text=True) 15 out, err = p.stdout, p.stderr 16 return out, err, p.returncode 17 18 19 def get_video_metadata(path_to_video: str) -> Dict[str, Any]: 20 command = [ 21 "ffprobe", 22 "-v", 23 "error", 24 "-select_streams", 25 "v:0", 26 "-show_entries", 27 "stream=width,height,nb_frames,duration,avg_frame_rate,codec_name", 28 "-print_format", 29 "json", 30 path_to_video, 31 ] 32 config, err, exitcode = capture(command) 33 assert exitcode == 0, f"FFprobe error: {err}" 34 return json.loads(config)["streams"][0] 35 36 37 def get_dir_layout(dirpath: str) -> List[str]: 38 """Get list of paths relative to dirpath of all files in dir and subdirs recursively.""" 39 index_files: List[str] = [] 40 for root, dirs, files in os.walk(dirpath): 41 for file in files: 42 index_files.append(f"{os.path.relpath(os.path.join(root, file), dirpath)}") 43 return index_files 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/manim/utils/commands.py b/manim/utils/commands.py --- a/manim/utils/commands.py +++ b/manim/utils/commands.py @@ -27,7 +27,7 @@ "stream=width,height,nb_frames,duration,avg_frame_rate,codec_name", "-print_format", "json", - path_to_video, + str(path_to_video), ] config, err, exitcode = capture(command) assert exitcode == 0, f"FFprobe error: {err}"
{"golden_diff": "diff --git a/manim/utils/commands.py b/manim/utils/commands.py\n--- a/manim/utils/commands.py\n+++ b/manim/utils/commands.py\n@@ -27,7 +27,7 @@\n \"stream=width,height,nb_frames,duration,avg_frame_rate,codec_name\",\n \"-print_format\",\n \"json\",\n- path_to_video,\n+ str(path_to_video),\n ]\n config, err, exitcode = capture(command)\n assert exitcode == 0, f\"FFprobe error: {err}\"\n", "issue": "Create a test for resolution configuration\n## Enhancement proposal\r\nA test for the `--resolution` configuration option. It should be placed [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/test_cli_flags.py) and [here](https://github.com/ManimCommunity/manim/blob/main/tests/test_scene_rendering/opengl/test_cli_flags_opengl.py) (the OpenGL test can be marked as `xfail` if setting resolution does not work properly on OpenGL). Test different resolutions as well as different ways of separating the resolutions (width and height separated by `;`, `,`, and `-`).\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\n\n", "before_files": [{"content": "import json\nimport os\nfrom subprocess import run\nfrom typing import Any, Dict, List\n\n__all__ = [\n \"capture\",\n \"get_video_metadata\",\n \"get_dir_layout\",\n]\n\n\ndef capture(command, cwd=None, command_input=None):\n p = run(command, cwd=cwd, input=command_input, capture_output=True, text=True)\n out, err = p.stdout, p.stderr\n return out, err, p.returncode\n\n\ndef get_video_metadata(path_to_video: str) -> Dict[str, Any]:\n command = [\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-select_streams\",\n \"v:0\",\n \"-show_entries\",\n \"stream=width,height,nb_frames,duration,avg_frame_rate,codec_name\",\n \"-print_format\",\n \"json\",\n path_to_video,\n ]\n config, err, exitcode = capture(command)\n assert exitcode == 0, f\"FFprobe error: {err}\"\n return json.loads(config)[\"streams\"][0]\n\n\ndef get_dir_layout(dirpath: str) -> List[str]:\n \"\"\"Get list of paths relative to dirpath of all files in dir and subdirs recursively.\"\"\"\n index_files: List[str] = []\n for root, dirs, files in os.walk(dirpath):\n for file in files:\n index_files.append(f\"{os.path.relpath(os.path.join(root, file), dirpath)}\")\n return index_files\n", "path": "manim/utils/commands.py"}], "after_files": [{"content": "import json\nimport os\nfrom subprocess import run\nfrom typing import Any, Dict, List\n\n__all__ = [\n \"capture\",\n \"get_video_metadata\",\n \"get_dir_layout\",\n]\n\n\ndef capture(command, cwd=None, command_input=None):\n p = run(command, cwd=cwd, input=command_input, capture_output=True, text=True)\n out, err = p.stdout, p.stderr\n return out, err, p.returncode\n\n\ndef get_video_metadata(path_to_video: str) -> Dict[str, Any]:\n command = [\n \"ffprobe\",\n \"-v\",\n \"error\",\n \"-select_streams\",\n \"v:0\",\n \"-show_entries\",\n \"stream=width,height,nb_frames,duration,avg_frame_rate,codec_name\",\n \"-print_format\",\n \"json\",\n str(path_to_video),\n ]\n config, err, exitcode = capture(command)\n assert exitcode == 0, f\"FFprobe error: {err}\"\n return json.loads(config)[\"streams\"][0]\n\n\ndef get_dir_layout(dirpath: str) -> List[str]:\n \"\"\"Get list of paths relative to dirpath of all files in dir and subdirs recursively.\"\"\"\n index_files: List[str] = []\n for root, dirs, files in os.walk(dirpath):\n for file in files:\n index_files.append(f\"{os.path.relpath(os.path.join(root, file), dirpath)}\")\n return index_files\n", "path": 
"manim/utils/commands.py"}]}
799
119
gh_patches_debug_13600
rasdani/github-patches
git_diff
scikit-image__scikit-image-2722
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs build fails with numpydoc 0.7.0 ## Description If you just pip install numpydoc today and run 'make html' in doc you get: ``` reading sources... [ 2%] api/skimage.data Exception occurred: File "/opt/opensource/deb/py36/venv3/lib/python3.5/site-packages/numpydoc/docscrape.py", line 343, in _parse raise ValueError(msg) ValueError: The section Other Parameters appears twice in the docstring of None in None. ``` Installing numpydoc 0.6.0 gets past this problem. I have no idea if this is a numpydoc or scikit-image bug really... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/io/_io.py` Content: ``` 1 from io import BytesIO 2 3 import numpy as np 4 import six 5 6 from ..io.manage_plugins import call_plugin 7 from ..color import rgb2grey 8 from .util import file_or_url_context 9 from ..exposure import is_low_contrast 10 from .._shared.utils import all_warnings, warn 11 12 13 __all__ = ['imread', 'imsave', 'imshow', 'show', 14 'imread_collection', 'imshow_collection'] 15 16 17 def imread(fname, as_grey=False, plugin=None, flatten=None, 18 **plugin_args): 19 """Load an image from file. 20 21 Parameters 22 ---------- 23 fname : string 24 Image file name, e.g. ``test.jpg`` or URL. 25 as_grey : bool 26 If True, convert color images to grey-scale (64-bit floats). 27 Images that are already in grey-scale format are not converted. 28 plugin : str 29 Name of plugin to use. By default, the different plugins are 30 tried (starting with the Python Imaging Library) until a suitable 31 candidate is found. If not given and fname is a tiff file, the 32 tifffile plugin will be used. 33 34 Other Parameters 35 ---------------- 36 flatten : bool 37 Backward compatible keyword, superseded by `as_grey`. 38 39 Returns 40 ------- 41 img_array : ndarray 42 The different color bands/channels are stored in the 43 third dimension, such that a grey-image is MxN, an 44 RGB-image MxNx3 and an RGBA-image MxNx4. 45 46 Other parameters 47 ---------------- 48 plugin_args : keywords 49 Passed to the given plugin. 50 51 """ 52 # Backward compatibility 53 if flatten is not None: 54 as_grey = flatten 55 56 if plugin is None and hasattr(fname, 'lower'): 57 if fname.lower().endswith(('.tiff', '.tif')): 58 plugin = 'tifffile' 59 60 with file_or_url_context(fname) as fname: 61 img = call_plugin('imread', fname, plugin=plugin, **plugin_args) 62 63 if not hasattr(img, 'ndim'): 64 return img 65 66 if img.ndim > 2: 67 if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4): 68 img = np.swapaxes(img, -1, -3) 69 img = np.swapaxes(img, -2, -3) 70 71 if as_grey: 72 img = rgb2grey(img) 73 74 return img 75 76 77 def imread_collection(load_pattern, conserve_memory=True, 78 plugin=None, **plugin_args): 79 """ 80 Load a collection of images. 81 82 Parameters 83 ---------- 84 load_pattern : str or list 85 List of objects to load. These are usually filenames, but may 86 vary depending on the currently active plugin. See the docstring 87 for ``ImageCollection`` for the default behaviour of this parameter. 88 conserve_memory : bool, optional 89 If True, never keep more than one in memory at a specific 90 time. Otherwise, images will be cached once they are loaded. 91 92 Returns 93 ------- 94 ic : ImageCollection 95 Collection of images. 
96 97 Other parameters 98 ---------------- 99 plugin_args : keywords 100 Passed to the given plugin. 101 102 """ 103 return call_plugin('imread_collection', load_pattern, conserve_memory, 104 plugin=plugin, **plugin_args) 105 106 107 def imsave(fname, arr, plugin=None, **plugin_args): 108 """Save an image to file. 109 110 Parameters 111 ---------- 112 fname : str 113 Target filename. 114 arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4) 115 Image data. 116 plugin : str 117 Name of plugin to use. By default, the different plugins are 118 tried (starting with the Python Imaging Library) until a suitable 119 candidate is found. If not given and fname is a tiff file, the 120 tifffile plugin will be used. 121 122 Other parameters 123 ---------------- 124 plugin_args : keywords 125 Passed to the given plugin. 126 127 Notes 128 ----- 129 When saving a JPEG, the compression ratio may be controlled using the 130 ``quality`` keyword argument which is an integer with values in [1, 100] 131 where 1 is worst quality and smallest file size, and 100 is best quality and 132 largest file size (default 75). This is only available when using the PIL 133 and imageio plugins. 134 """ 135 if plugin is None and hasattr(fname, 'lower'): 136 if fname.lower().endswith(('.tiff', '.tif')): 137 plugin = 'tifffile' 138 if is_low_contrast(arr): 139 warn('%s is a low contrast image' % fname) 140 return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args) 141 142 143 def imshow(arr, plugin=None, **plugin_args): 144 """Display an image. 145 146 Parameters 147 ---------- 148 arr : ndarray or str 149 Image data or name of image file. 150 plugin : str 151 Name of plugin to use. By default, the different plugins are 152 tried (starting with the Python Imaging Library) until a suitable 153 candidate is found. 154 155 Other parameters 156 ---------------- 157 plugin_args : keywords 158 Passed to the given plugin. 159 160 """ 161 if isinstance(arr, six.string_types): 162 arr = call_plugin('imread', arr, plugin=plugin) 163 return call_plugin('imshow', arr, plugin=plugin, **plugin_args) 164 165 166 def imshow_collection(ic, plugin=None, **plugin_args): 167 """Display a collection of images. 168 169 Parameters 170 ---------- 171 ic : ImageCollection 172 Collection to display. 173 plugin : str 174 Name of plugin to use. By default, the different plugins are 175 tried until a suitable candidate is found. 176 177 Other parameters 178 ---------------- 179 plugin_args : keywords 180 Passed to the given plugin. 181 182 """ 183 return call_plugin('imshow_collection', ic, plugin=plugin, **plugin_args) 184 185 186 def show(): 187 '''Display pending images. 188 189 Launch the event loop of the current gui plugin, and display all 190 pending images, queued via `imshow`. This is required when using 191 `imshow` from non-interactive scripts. 192 193 A call to `show` will block execution of code until all windows 194 have been closed. 195 196 Examples 197 -------- 198 >>> import skimage.io as io 199 200 >>> for i in range(4): 201 ... ax_im = io.imshow(np.random.rand(50, 50)) 202 >>> io.show() # doctest: +SKIP 203 204 ''' 205 return call_plugin('_app_show') 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/io/_io.py b/skimage/io/_io.py --- a/skimage/io/_io.py +++ b/skimage/io/_io.py @@ -36,6 +36,9 @@ flatten : bool Backward compatible keyword, superseded by `as_grey`. + plugin_args : keywords + Passed to the given plugin. + Returns ------- img_array : ndarray @@ -43,11 +46,6 @@ third dimension, such that a grey-image is MxN, an RGB-image MxNx3 and an RGBA-image MxNx4. - Other parameters - ---------------- - plugin_args : keywords - Passed to the given plugin. - """ # Backward compatibility if flatten is not None:
{"golden_diff": "diff --git a/skimage/io/_io.py b/skimage/io/_io.py\n--- a/skimage/io/_io.py\n+++ b/skimage/io/_io.py\n@@ -36,6 +36,9 @@\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n \n+ plugin_args : keywords\n+ Passed to the given plugin.\n+\n Returns\n -------\n img_array : ndarray\n@@ -43,11 +46,6 @@\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n \n- Other parameters\n- ----------------\n- plugin_args : keywords\n- Passed to the given plugin.\n-\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n", "issue": "docs build fails with numpydoc 0.7.0\n## Description\r\n\r\nIf you just pip install numpydoc today and run 'make html' in doc you get:\r\n\r\n```\r\nreading sources... [ 2%] api/skimage.data \r\nException occurred:\r\n File \"/opt/opensource/deb/py36/venv3/lib/python3.5/site-packages/numpydoc/docscrape.py\", line 343, in _parse\r\n raise ValueError(msg)\r\nValueError: The section Other Parameters appears twice in the docstring of None in None.\r\n```\r\n\r\nInstalling numpydoc 0.6.0 gets past this problem.\r\n\r\nI have no idea if this is a numpydoc or scikit-image bug really...\r\n\n", "before_files": [{"content": "from io import BytesIO\n\nimport numpy as np\nimport six\n\nfrom ..io.manage_plugins import call_plugin\nfrom ..color import rgb2grey\nfrom .util import file_or_url_context\nfrom ..exposure import is_low_contrast\nfrom .._shared.utils import all_warnings, warn\n\n\n__all__ = ['imread', 'imsave', 'imshow', 'show',\n 'imread_collection', 'imshow_collection']\n\n\ndef imread(fname, as_grey=False, plugin=None, flatten=None,\n **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_grey : bool\n If True, convert color images to grey-scale (64-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other Parameters\n ----------------\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n\n Returns\n -------\n img_array : ndarray\n The different color bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n as_grey = flatten\n\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if not hasattr(img, 'ndim'):\n return img\n\n if img.ndim > 2:\n if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n img = np.swapaxes(img, -1, -3)\n img = np.swapaxes(img, -2, -3)\n\n if as_grey:\n img = rgb2grey(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. 
See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n Notes\n -----\n When saving a JPEG, the compression ratio may be controlled using the\n ``quality`` keyword argument which is an integer with values in [1, 100]\n where 1 is worst quality and smallest file size, and 100 is best quality and\n largest file size (default 75). This is only available when using the PIL\n and imageio plugins.\n \"\"\"\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n if is_low_contrast(arr):\n warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, six.string_types):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef imshow_collection(ic, plugin=None, **plugin_args):\n \"\"\"Display a collection of images.\n\n Parameters\n ----------\n ic : ImageCollection\n Collection to display.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried until a suitable candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imshow_collection', ic, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> for i in range(4):\n ... 
ax_im = io.imshow(np.random.rand(50, 50))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}], "after_files": [{"content": "from io import BytesIO\n\nimport numpy as np\nimport six\n\nfrom ..io.manage_plugins import call_plugin\nfrom ..color import rgb2grey\nfrom .util import file_or_url_context\nfrom ..exposure import is_low_contrast\nfrom .._shared.utils import all_warnings, warn\n\n\n__all__ = ['imread', 'imsave', 'imshow', 'show',\n 'imread_collection', 'imshow_collection']\n\n\ndef imread(fname, as_grey=False, plugin=None, flatten=None,\n **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_grey : bool\n If True, convert color images to grey-scale (64-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other Parameters\n ----------------\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n\n plugin_args : keywords\n Passed to the given plugin.\n\n Returns\n -------\n img_array : ndarray\n The different color bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n as_grey = flatten\n\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if not hasattr(img, 'ndim'):\n return img\n\n if img.ndim > 2:\n if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n img = np.swapaxes(img, -1, -3)\n img = np.swapaxes(img, -2, -3)\n\n if as_grey:\n img = rgb2grey(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. 
If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n Notes\n -----\n When saving a JPEG, the compression ratio may be controlled using the\n ``quality`` keyword argument which is an integer with values in [1, 100]\n where 1 is worst quality and smallest file size, and 100 is best quality and\n largest file size (default 75). This is only available when using the PIL\n and imageio plugins.\n \"\"\"\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n if is_low_contrast(arr):\n warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, six.string_types):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef imshow_collection(ic, plugin=None, **plugin_args):\n \"\"\"Display a collection of images.\n\n Parameters\n ----------\n ic : ImageCollection\n Collection to display.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried until a suitable candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imshow_collection', ic, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> for i in range(4):\n ... ax_im = io.imshow(np.random.rand(50, 50))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}]}
2,375
185
gh_patches_debug_21419
rasdani/github-patches
git_diff
Mailu__Mailu-1088
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MAIL FROM: does-not-exist@[X.X.X.X] results in an admin exeption stacktrace If a client (read: malicious attacker) connects to Postfix and uses an IP address within brackets instead of a domain name for the sender, e.g.: ``` $ telnet mail.example.com 25 Connected to example.com Escape character is '^]'. 220 mail.example.com ESMTP ready EHLO dummy.example.com 250-mail.example.com 250 STARTTLS MAIL FROM: does-not-exist@[116.203.165.200] 250 2.0.0 OK RCPT TO: [email protected] 451 4.3.0 <does-not-exist@[116.203.165.200]>: Temporary lookup failure ``` we see the following stracktrace in the admin container: ``` [2019-07-27 14:26:27,015] ERROR in app: Exception on /internal/postfix/domain/[116.203.165.200] [GET] Traceback (most recent call last): File "/usr/lib/python3.7/site-packages/idna/core.py", line 271, in alabel ulabel(label) File "/usr/lib/python3.7/site-packages/idna/core.py", line 307, in ulabel check_label(label) File "/usr/lib/python3.7/site-packages/idna/core.py", line 261, in check_label raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label))) idna.core.InvalidCodepoint: Codepoint U+005B at position 1 of '[116' not allowed During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_context context = constructor(dialect, self, conn, *args) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 669, in _init_compiled param.append(processors[key](compiled_params[key])) File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1182, in process return process_param(value, dialect) File "/app/mailu/models.py", line 30, in process_bind_param return idna.encode(value).decode("ascii").lower() File "/usr/lib/python3.7/site-packages/idna/core.py", line 361, in encode s = alabel(label) File "/usr/lib/python3.7/site-packages/idna/core.py", line 273, in alabel raise IDNAError('The label {0} is not a valid A-label'.format(label)) idna.core.IDNAError: The label b'[116' is not a valid A-label The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/usr/lib/python3.7/site-packages/flask/app.py", line 2292, in wsgi_app response = self.full_dispatch_request() File "/usr/lib/python3.7/site-packages/flask/app.py", line 1815, in full_dispatch_request rv = self.handle_user_exception(e) File "/usr/lib/python3.7/site-packages/flask/app.py", line 1718, in handle_user_exception reraise(exc_type, exc_value, tb) File "/usr/lib/python3.7/site-packages/flask/_compat.py", line 35, in reraise raise value File "/usr/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request rv = self.dispatch_request() File "/usr/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/app/mailu/internal/views/postfix.py", line 9, in postfix_mailbox_domain domain = models.Domain.query.get(domain_name) or \ File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 924, in get ident, loading.load_on_pk_identity) File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 1007, in _get_impl return db_load_fn(self, primary_key_identity) File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 250, in 
load_on_pk_identity return q.one() File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2954, in one ret = self.one_or_none() File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2924, in one_or_none ret = list(self) File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2995, in __iter__ return self._execute_and_instances(context) File "/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3018, in _execute_and_instances result = conn.execute(querycontext.statement, self._params) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 948, in execute return meth(self, multiparams, params) File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 269, in _execute_on_connection return connection._execute_clauseelement(self, multiparams, params) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1060, in _execute_clauseelement compiled_sql, distilled_params File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1132, in _execute_context None, None) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1413, in _handle_dbapi_exception exc_info File "/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 265, in raise_from_cause reraise(type(exception), exception, tb=exc_tb, cause=cause) File "/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 248, in reraise raise value.with_traceback(tb) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1127, in _execute_context context = constructor(dialect, self, conn, *args) File "/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 669, in _init_compiled param.append(processors[key](compiled_params[key])) File "/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py", line 1182, in process return process_param(value, dialect) File "/app/mailu/models.py", line 30, in process_bind_param return idna.encode(value).decode("ascii").lower() File "/usr/lib/python3.7/site-packages/idna/core.py", line 361, in encode s = alabel(label) File "/usr/lib/python3.7/site-packages/idna/core.py", line 273, in alabel raise IDNAError('The label {0} is not a valid A-label'.format(label)) sqlalchemy.exc.StatementError: (idna.core.IDNAError) The label b'[116' is not a valid A-label [SQL: 'SELECT domain.created_at AS domain_created_at, domain.updated_at AS domain_updated_at, domain.comment AS domain_comment, domain.name AS domain_name, domain.max_users AS domain_max_users, domain.max_aliases AS domain_max_aliases, domain.max_quota_bytes AS domain_max_quota_bytes, domain.signup_enabled AS domain_signup_enabled \nFROM domain \nWHERE domain.name = ?'] [parameters: [{'%(140312238936248 param)s': '[116.203.165.200]'}]] 10.36.0.0 - - [27/Jul/2019:14:26:27 +0000] "GET /internal/postfix/domain/%5B116.203.165.200%5D HTTP/1.1" 500 291 "-" "Python/3.7 aiohttp/3.5.4" 10.44.0.0 - - [27/Jul/2019:14:26:31 +0000] "GET /internal/auth/email HTTP/1.0" 200 0 "-" "-" ``` NOTE: the lookup is cached I think and you need to use another IP address to reproduce the error multiple times. I think we're missing some validation rules, because trying a similar (invalid) MAIL FROM with an IP (but without brackets) is handled properly: ``` telnet mail.example.com 25 Trying ... Connected to example.com. Escape character is '^]'. 
220 mail.example.com ESMTP ready EHLO dummy.example.com 250-mail.example.com 250 STARTTLS MAIL FROM: [email protected] 250 2.0.0 OK RCPT TO: [email protected] 501 5.1.7 Bad sender address syntax Connection closed by foreign host. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/admin/mailu/internal/views/postfix.py` Content: ``` 1 from mailu import models 2 from mailu.internal import internal 3 4 import flask 5 6 7 @internal.route("/postfix/domain/<domain_name>") 8 def postfix_mailbox_domain(domain_name): 9 domain = models.Domain.query.get(domain_name) or \ 10 models.Alternative.query.get(domain_name) or \ 11 flask.abort(404) 12 return flask.jsonify(domain.name) 13 14 15 @internal.route("/postfix/mailbox/<path:email>") 16 def postfix_mailbox_map(email): 17 user = models.User.query.get(email) or flask.abort(404) 18 return flask.jsonify(user.email) 19 20 21 @internal.route("/postfix/alias/<path:alias>") 22 def postfix_alias_map(alias): 23 localpart, domain_name = models.Email.resolve_domain(alias) 24 if localpart is None: 25 return flask.jsonify(domain_name) 26 destination = models.Email.resolve_destination(localpart, domain_name) 27 return flask.jsonify(",".join(destination)) if destination else flask.abort(404) 28 29 30 @internal.route("/postfix/transport/<path:email>") 31 def postfix_transport(email): 32 if email == '*': 33 return flask.abort(404) 34 localpart, domain_name = models.Email.resolve_domain(email) 35 relay = models.Relay.query.get(domain_name) or flask.abort(404) 36 return flask.jsonify("smtp:[{}]".format(relay.smtp)) 37 38 39 @internal.route("/postfix/sender/login/<path:sender>") 40 def postfix_sender_login(sender): 41 localpart, domain_name = models.Email.resolve_domain(sender) 42 if localpart is None: 43 return flask.abort(404) 44 destination = models.Email.resolve_destination(localpart, domain_name, True) 45 return flask.jsonify(",".join(destination)) if destination else flask.abort(404) 46 47 48 @internal.route("/postfix/sender/access/<path:sender>") 49 def postfix_sender_access(sender): 50 """ Simply reject any sender that pretends to be from a local domain 51 """ 52 if not is_void_address(sender): 53 localpart, domain_name = models.Email.resolve_domain(sender) 54 return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404) 55 else: 56 return flask.abort(404) 57 58 59 def is_void_address(email): 60 '''True if the email is void (null) email address. 61 ''' 62 if email.startswith('<') and email.endswith('>'): 63 email = email[1:-1] 64 # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's 65 # consider void any such thing. 66 return '@' not in email 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py --- a/core/admin/mailu/internal/views/postfix.py +++ b/core/admin/mailu/internal/views/postfix.py @@ -2,10 +2,13 @@ from mailu.internal import internal import flask +import re @internal.route("/postfix/domain/<domain_name>") def postfix_mailbox_domain(domain_name): + if re.match("^\[.*\]$", domain_name): + return flask.abort(404) domain = models.Domain.query.get(domain_name) or \ models.Alternative.query.get(domain_name) or \ flask.abort(404) @@ -29,7 +32,7 @@ @internal.route("/postfix/transport/<path:email>") def postfix_transport(email): - if email == '*': + if email == '*' or re.match("(^|.*@)\[.*\]$", email): return flask.abort(404) localpart, domain_name = models.Email.resolve_domain(email) relay = models.Relay.query.get(domain_name) or flask.abort(404)
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -2,10 +2,13 @@\n from mailu.internal import internal\n \n import flask\n+import re\n \n \n @internal.route(\"/postfix/domain/<domain_name>\")\n def postfix_mailbox_domain(domain_name):\n+ if re.match(\"^\\[.*\\]$\", domain_name):\n+ return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n@@ -29,7 +32,7 @@\n \n @internal.route(\"/postfix/transport/<path:email>\")\n def postfix_transport(email):\n- if email == '*':\n+ if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n", "issue": "MAIL FROM: does-not-exist@[X.X.X.X] results in an admin exeption stacktrace\nIf a client (read: malicious attacker) connects to Postfix and uses an IP address within brackets instead of a domain name for the sender, e.g.:\r\n\r\n```\r\n$ telnet mail.example.com 25\r\n\r\nConnected to example.com\r\nEscape character is '^]'.\r\n220 mail.example.com ESMTP ready\r\nEHLO dummy.example.com\r\n250-mail.example.com\r\n250 STARTTLS\r\nMAIL FROM: does-not-exist@[116.203.165.200]\r\n250 2.0.0 OK\r\nRCPT TO: [email protected]\r\n451 4.3.0 <does-not-exist@[116.203.165.200]>: Temporary lookup failure\r\n```\r\n\r\nwe see the following stracktrace in the admin container:\r\n\r\n```\r\n[2019-07-27 14:26:27,015] ERROR in app: Exception on /internal/postfix/domain/[116.203.165.200] [GET]\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 271, in alabel\r\n ulabel(label)\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 307, in ulabel\r\n check_label(label)\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 261, in check_label\r\n raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))\r\nidna.core.InvalidCodepoint: Codepoint U+005B at position 1 of '[116' not allowed\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 1127, in _execute_context\r\n context = constructor(dialect, self, conn, *args)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py\", line 669, in _init_compiled\r\n param.append(processors[key](compiled_params[key]))\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py\", line 1182, in process\r\n return process_param(value, dialect)\r\n File \"/app/mailu/models.py\", line 30, in process_bind_param\r\n return idna.encode(value).decode(\"ascii\").lower()\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 361, in encode\r\n s = alabel(label)\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 273, in alabel\r\n raise IDNAError('The label {0} is not a valid A-label'.format(label))\r\nidna.core.IDNAError: The label b'[116' is not a valid A-label\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File 
\"/usr/lib/python3.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1718, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/usr/lib/python3.7/site-packages/flask/_compat.py\", line 35, in reraise\r\n raise value\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/lib/python3.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/app/mailu/internal/views/postfix.py\", line 9, in postfix_mailbox_domain\r\n domain = models.Domain.query.get(domain_name) or \\\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 924, in get\r\n ident, loading.load_on_pk_identity)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 1007, in _get_impl\r\n return db_load_fn(self, primary_key_identity)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/loading.py\", line 250, in load_on_pk_identity\r\n return q.one()\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 2954, in one\r\n ret = self.one_or_none()\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 2924, in one_or_none\r\n ret = list(self)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 2995, in __iter__\r\n return self._execute_and_instances(context)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/orm/query.py\", line 3018, in _execute_and_instances\r\n result = conn.execute(querycontext.statement, self._params)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 948, in execute\r\n return meth(self, multiparams, params)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/sql/elements.py\", line 269, in _execute_on_connection\r\n return connection._execute_clauseelement(self, multiparams, params)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 1060, in _execute_clauseelement\r\n compiled_sql, distilled_params\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 1132, in _execute_context\r\n None, None)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 1413, in _handle_dbapi_exception\r\n exc_info\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py\", line 265, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/util/compat.py\", line 248, in reraise\r\n raise value.with_traceback(tb)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/base.py\", line 1127, in _execute_context\r\n context = constructor(dialect, self, conn, *args)\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/engine/default.py\", line 669, in _init_compiled\r\n param.append(processors[key](compiled_params[key]))\r\n File \"/usr/lib/python3.7/site-packages/sqlalchemy/sql/type_api.py\", line 1182, in process\r\n return process_param(value, dialect)\r\n File \"/app/mailu/models.py\", line 30, in process_bind_param\r\n return idna.encode(value).decode(\"ascii\").lower()\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 361, in encode\r\n s = alabel(label)\r\n File \"/usr/lib/python3.7/site-packages/idna/core.py\", line 273, in alabel\r\n raise IDNAError('The label {0} is not a valid 
A-label'.format(label))\r\nsqlalchemy.exc.StatementError: (idna.core.IDNAError) The label b'[116' is not a valid A-label [SQL: 'SELECT domain.created_at AS domain_created_at, domain.updated_at AS domain_updated_at, domain.comment AS domain_comment, domain.name AS domain_name, domain.max_users AS domain_max_users, domain.max_aliases AS domain_max_aliases, domain.max_quota_bytes AS domain_max_quota_bytes, domain.signup_enabled AS domain_signup_enabled \\nFROM domain \\nWHERE domain.name = ?'] [parameters: [{'%(140312238936248 param)s': '[116.203.165.200]'}]]\r\n10.36.0.0 - - [27/Jul/2019:14:26:27 +0000] \"GET /internal/postfix/domain/%5B116.203.165.200%5D HTTP/1.1\" 500 291 \"-\" \"Python/3.7 aiohttp/3.5.4\"\r\n10.44.0.0 - - [27/Jul/2019:14:26:31 +0000] \"GET /internal/auth/email HTTP/1.0\" 200 0 \"-\" \"-\"\r\n```\r\n\r\nNOTE: the lookup is cached I think and you need to use another IP address to reproduce the error multiple times.\r\n\r\nI think we're missing some validation rules, because trying a similar (invalid) MAIL FROM with an IP (but without brackets) is handled properly:\r\n\r\n```\r\ntelnet mail.example.com 25\r\nTrying ...\r\nConnected to example.com.\r\nEscape character is '^]'.\r\n220 mail.example.com ESMTP ready\r\nEHLO dummy.example.com\r\n250-mail.example.com\r\n250 STARTTLS\r\nMAIL FROM: [email protected]\r\n250 2.0.0 OK\r\nRCPT TO: [email protected]\r\n501 5.1.7 Bad sender address syntax\r\nConnection closed by foreign host.\r\n```\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*':\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email 
is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}]}
3,148
252
gh_patches_debug_28244
rasdani/github-patches
git_diff
scrapy__scrapy-3383
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Contracts for FormRequest At the moment there's no way to create a contract for a callback that is passed a response from `FormRequest`. How about something simple like this (`ContractsManager.from_method`): ```python request = FormRequest(**kwargs) if 'formdata' in kwargs else Request(**kwargs) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/contracts/__init__.py` Content: ``` 1 import sys 2 import re 3 from functools import wraps 4 from inspect import getmembers 5 from unittest import TestCase 6 7 from scrapy.http import Request 8 from scrapy.utils.spider import iterate_spider_output 9 from scrapy.utils.python import get_spec 10 11 12 class ContractsManager(object): 13 contracts = {} 14 15 def __init__(self, contracts): 16 for contract in contracts: 17 self.contracts[contract.name] = contract 18 19 def tested_methods_from_spidercls(self, spidercls): 20 methods = [] 21 for key, value in getmembers(spidercls): 22 if (callable(value) and value.__doc__ and 23 re.search(r'^\s*@', value.__doc__, re.MULTILINE)): 24 methods.append(key) 25 26 return methods 27 28 def extract_contracts(self, method): 29 contracts = [] 30 for line in method.__doc__.split('\n'): 31 line = line.strip() 32 33 if line.startswith('@'): 34 name, args = re.match(r'@(\w+)\s*(.*)', line).groups() 35 args = re.split(r'\s+', args) 36 37 contracts.append(self.contracts[name](method, *args)) 38 39 return contracts 40 41 def from_spider(self, spider, results): 42 requests = [] 43 for method in self.tested_methods_from_spidercls(type(spider)): 44 bound_method = spider.__getattribute__(method) 45 requests.append(self.from_method(bound_method, results)) 46 47 return requests 48 49 def from_method(self, method, results): 50 contracts = self.extract_contracts(method) 51 if contracts: 52 # calculate request args 53 args, kwargs = get_spec(Request.__init__) 54 kwargs['callback'] = method 55 for contract in contracts: 56 kwargs = contract.adjust_request_args(kwargs) 57 58 # create and prepare request 59 args.remove('self') 60 if set(args).issubset(set(kwargs)): 61 request = Request(**kwargs) 62 63 # execute pre and post hooks in order 64 for contract in reversed(contracts): 65 request = contract.add_pre_hook(request, results) 66 for contract in contracts: 67 request = contract.add_post_hook(request, results) 68 69 self._clean_req(request, method, results) 70 return request 71 72 def _clean_req(self, request, method, results): 73 """ stop the request from returning objects and records any errors """ 74 75 cb = request.callback 76 77 @wraps(cb) 78 def cb_wrapper(response): 79 try: 80 output = cb(response) 81 output = list(iterate_spider_output(output)) 82 except: 83 case = _create_testcase(method, 'callback') 84 results.addError(case, sys.exc_info()) 85 86 def eb_wrapper(failure): 87 case = _create_testcase(method, 'errback') 88 exc_info = failure.type, failure.value, failure.getTracebackObject() 89 results.addError(case, exc_info) 90 91 request.callback = cb_wrapper 92 request.errback = eb_wrapper 93 94 95 class Contract(object): 96 """ Abstract class for contracts """ 97 98 def __init__(self, method, *args): 99 self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name) 100 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name) 101 self.args = args 102 103 def add_pre_hook(self, 
request, results): 104 if hasattr(self, 'pre_process'): 105 cb = request.callback 106 107 @wraps(cb) 108 def wrapper(response): 109 try: 110 results.startTest(self.testcase_pre) 111 self.pre_process(response) 112 results.stopTest(self.testcase_pre) 113 except AssertionError: 114 results.addFailure(self.testcase_pre, sys.exc_info()) 115 except Exception: 116 results.addError(self.testcase_pre, sys.exc_info()) 117 else: 118 results.addSuccess(self.testcase_pre) 119 finally: 120 return list(iterate_spider_output(cb(response))) 121 122 request.callback = wrapper 123 124 return request 125 126 def add_post_hook(self, request, results): 127 if hasattr(self, 'post_process'): 128 cb = request.callback 129 130 @wraps(cb) 131 def wrapper(response): 132 output = list(iterate_spider_output(cb(response))) 133 try: 134 results.startTest(self.testcase_post) 135 self.post_process(output) 136 results.stopTest(self.testcase_post) 137 except AssertionError: 138 results.addFailure(self.testcase_post, sys.exc_info()) 139 except Exception: 140 results.addError(self.testcase_post, sys.exc_info()) 141 else: 142 results.addSuccess(self.testcase_post) 143 finally: 144 return output 145 146 request.callback = wrapper 147 148 return request 149 150 def adjust_request_args(self, args): 151 return args 152 153 154 def _create_testcase(method, desc): 155 spider = method.__self__.name 156 157 class ContractTestCase(TestCase): 158 def __str__(_self): 159 return "[%s] %s (%s)" % (spider, method.__name__, desc) 160 161 name = '%s_%s' % (spider, method.__name__) 162 setattr(ContractTestCase, name, lambda x: x) 163 return ContractTestCase(name) 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py --- a/scrapy/contracts/__init__.py +++ b/scrapy/contracts/__init__.py @@ -49,16 +49,22 @@ def from_method(self, method, results): contracts = self.extract_contracts(method) if contracts: + request_cls = Request + for contract in contracts: + if contract.request_cls is not None: + request_cls = contract.request_cls + # calculate request args - args, kwargs = get_spec(Request.__init__) + args, kwargs = get_spec(request_cls.__init__) kwargs['callback'] = method for contract in contracts: kwargs = contract.adjust_request_args(kwargs) - # create and prepare request args.remove('self') + + # check if all positional arguments are defined in kwargs if set(args).issubset(set(kwargs)): - request = Request(**kwargs) + request = request_cls(**kwargs) # execute pre and post hooks in order for contract in reversed(contracts): @@ -94,6 +100,7 @@ class Contract(object): """ Abstract class for contracts """ + request_cls = None def __init__(self, method, *args): self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)
{"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -49,16 +49,22 @@\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n+ request_cls = Request\n+ for contract in contracts:\n+ if contract.request_cls is not None:\n+ request_cls = contract.request_cls\n+\n # calculate request args\n- args, kwargs = get_spec(Request.__init__)\n+ args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n \n- # create and prepare request\n args.remove('self')\n+\n+ # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n- request = Request(**kwargs)\n+ request = request_cls(**kwargs)\n \n # execute pre and post hooks in order\n for contract in reversed(contracts):\n@@ -94,6 +100,7 @@\n \n class Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n+ request_cls = None\n \n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n", "issue": "Contracts for FormRequest\nAt the moment there's no way to create a contract for a callback that is passed a response from `FormRequest`.\r\n\r\nHow about something simple like this (`ContractsManager.from_method`):\r\n```python\r\nrequest = FormRequest(**kwargs) if 'formdata' in kwargs else Request(**kwargs)\r\n```\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n # calculate request args\n args, kwargs = get_spec(Request.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n # create and prepare request\n args.remove('self')\n if set(args).issubset(set(kwargs)):\n request = Request(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request 
from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}], "after_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom inspect import getmembers\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in 
self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n request_cls = Request\n for contract in contracts:\n if contract.request_cls is not None:\n request_cls = contract.request_cls\n\n # calculate request args\n args, kwargs = get_spec(request_cls.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n args.remove('self')\n\n # check if all positional arguments are defined in kwargs\n if set(args).issubset(set(kwargs)):\n request = request_cls(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n request_cls = None\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]}
1,796
311
gh_patches_debug_18067
rasdani/github-patches
git_diff
ivy-llc__ivy-13637
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- beta --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/jax/random.py` Content: ``` 1 # local 2 import ivy 3 from ivy.functional.frontends.jax.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 handle_jax_dtype, 6 ) 7 8 9 @to_ivy_arrays_and_back 10 def PRNGKey(seed): 11 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64) 12 13 14 @handle_jax_dtype 15 @to_ivy_arrays_and_back 16 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0): 17 return ivy.random_uniform( 18 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]) 19 ) 20 21 22 @handle_jax_dtype 23 @to_ivy_arrays_and_back 24 def normal(key, shape=(), dtype=None): 25 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) 26 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py --- a/ivy/functional/frontends/jax/random.py +++ b/ivy/functional/frontends/jax/random.py @@ -1,5 +1,6 @@ # local import ivy +from ivy.func_wrapper import with_unsupported_dtypes from ivy.functional.frontends.jax.func_wrapper import ( to_ivy_arrays_and_back, handle_jax_dtype, @@ -23,3 +24,24 @@ @to_ivy_arrays_and_back def normal(key, shape=(), dtype=None): return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) + + +def _get_seed(key): + key1, key2 = int(key[0]), int(key[1]) + return ivy.to_scalar(int("".join(map(str, [key1, key2])))) + + +@handle_jax_dtype +@to_ivy_arrays_and_back +@with_unsupported_dtypes( + { + "0.3.14 and below": ( + "float16", + "bfloat16", + ) + }, + "jax", +) +def beta(key, a, b, shape=None, dtype=None): + seed = _get_seed(key) + return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -1,5 +1,6 @@\n # local\n import ivy\n+from ivy.func_wrapper import with_unsupported_dtypes\n from ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n@@ -23,3 +24,24 @@\n @to_ivy_arrays_and_back\n def normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n+\n+\n+def _get_seed(key):\n+ key1, key2 = int(key[0]), int(key[1])\n+ return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n+\n+\n+@handle_jax_dtype\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes(\n+ {\n+ \"0.3.14 and below\": (\n+ \"float16\",\n+ \"bfloat16\",\n+ )\n+ },\n+ \"jax\",\n+)\n+def beta(key, a, b, shape=None, dtype=None):\n+ seed = _get_seed(key)\n+ return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n", "issue": "beta\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n", "path": "ivy/functional/frontends/jax/random.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/jax/random.py"}]}
526
327
gh_patches_debug_28361
rasdani/github-patches
git_diff
getsentry__sentry-41334
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip-compile and CUSTOM_COMPILE_COMMAND ### Environment SaaS (https://sentry.io/) ### Version _No response_ ### Steps to Reproduce JFYI https://github.com/getsentry/sentry/blob/2e36d4d3a3926e9efe06287e59d27be1c40300fa/tools/freeze_requirements.py#L13 There is `CUSTOM_COMPILE_COMMAND` env to [customize](https://github.com/jazzband/pip-tools#configuration) the header: ``` $ CUSTOM_COMPILE_COMMAND="make freeze-requirements" pip-compile # # This file is autogenerated by pip-compile with python 3.11 # To update, run: # # make freeze-requirements # .... ``` ### Expected Result N/A ### Actual Result N/A --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/freeze_requirements.py` Content: ``` 1 from __future__ import annotations 2 3 import argparse 4 from concurrent.futures import Future, ThreadPoolExecutor 5 from os.path import abspath 6 from subprocess import CalledProcessError, run 7 from typing import Sequence 8 9 from tools.lib import gitroot 10 11 12 def worker(args: tuple[str, ...]) -> None: 13 # pip-compile doesn't let you customize the header, so we write 14 # one ourselves. However, pip-compile needs -o DEST otherwise 15 # it will bump >= pins even if they're satisfied. So, we need to 16 # unfortunately rewrite the whole file. 17 dest = args[-1] 18 try: 19 run(args, check=True, capture_output=True) 20 except CalledProcessError as e: 21 raise e 22 23 with open(dest, "rb+") as f: 24 content = f.read() 25 f.seek(0, 0) 26 f.write( 27 b"""# DO NOT MODIFY. This file was generated with `make freeze-requirements`. 28 29 """ 30 + content 31 ) 32 33 34 def check_futures(futures: list[Future[None]]) -> int: 35 rc = 0 36 for future in futures: 37 try: 38 future.result() 39 except CalledProcessError as e: 40 rc = 1 41 print( 42 f"""`{e.cmd}` returned code {e.returncode} 43 44 stdout: 45 {e.stdout.decode()} 46 47 stderr: 48 {e.stderr.decode()} 49 """ 50 ) 51 return rc 52 53 54 def main(argv: Sequence[str] | None = None) -> int: 55 parser = argparse.ArgumentParser() 56 parser.parse_args(argv) 57 58 base_path = abspath(gitroot()) 59 60 base_cmd = ( 61 "pip-compile", 62 "--allow-unsafe", 63 "--no-annotate", 64 "--no-header", 65 "--quiet", 66 "--strip-extras", 67 "--index-url=https://pypi.devinfra.sentry.io/simple", 68 ) 69 70 executor = ThreadPoolExecutor(max_workers=2) 71 futures = [ 72 executor.submit( 73 worker, 74 ( 75 *base_cmd, 76 f"{base_path}/requirements-base.txt", 77 f"{base_path}/requirements-getsentry.txt", 78 "-o", 79 f"{base_path}/requirements-frozen.txt", 80 ), 81 ), 82 executor.submit( 83 worker, 84 ( 85 *base_cmd, 86 f"{base_path}/requirements-base.txt", 87 f"{base_path}/requirements-getsentry.txt", 88 f"{base_path}/requirements-dev.txt", 89 "-o", 90 f"{base_path}/requirements-dev-frozen.txt", 91 ), 92 ), 93 ] 94 95 rc = check_futures(futures) 96 executor.shutdown() 97 return rc 98 99 100 if __name__ == "__main__": 101 raise SystemExit(main()) 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/freeze_requirements.py b/tools/freeze_requirements.py --- a/tools/freeze_requirements.py +++ b/tools/freeze_requirements.py @@ -1,6 +1,7 @@ from __future__ import annotations import argparse +import os from concurrent.futures import Future, ThreadPoolExecutor from os.path import abspath from subprocess import CalledProcessError, run @@ -10,25 +11,10 @@ def worker(args: tuple[str, ...]) -> None: - # pip-compile doesn't let you customize the header, so we write - # one ourselves. However, pip-compile needs -o DEST otherwise - # it will bump >= pins even if they're satisfied. So, we need to - # unfortunately rewrite the whole file. - dest = args[-1] - try: - run(args, check=True, capture_output=True) - except CalledProcessError as e: - raise e - - with open(dest, "rb+") as f: - content = f.read() - f.seek(0, 0) - f.write( - b"""# DO NOT MODIFY. This file was generated with `make freeze-requirements`. + env = os.environ.copy() + env["CUSTOM_COMPILE_COMMAND"] = "make freeze-requirements" -""" - + content - ) + run(args, check=True, capture_output=True, env=env) def check_futures(futures: list[Future[None]]) -> int: @@ -61,7 +47,6 @@ "pip-compile", "--allow-unsafe", "--no-annotate", - "--no-header", "--quiet", "--strip-extras", "--index-url=https://pypi.devinfra.sentry.io/simple",
{"golden_diff": "diff --git a/tools/freeze_requirements.py b/tools/freeze_requirements.py\n--- a/tools/freeze_requirements.py\n+++ b/tools/freeze_requirements.py\n@@ -1,6 +1,7 @@\n from __future__ import annotations\n \n import argparse\n+import os\n from concurrent.futures import Future, ThreadPoolExecutor\n from os.path import abspath\n from subprocess import CalledProcessError, run\n@@ -10,25 +11,10 @@\n \n \n def worker(args: tuple[str, ...]) -> None:\n- # pip-compile doesn't let you customize the header, so we write\n- # one ourselves. However, pip-compile needs -o DEST otherwise\n- # it will bump >= pins even if they're satisfied. So, we need to\n- # unfortunately rewrite the whole file.\n- dest = args[-1]\n- try:\n- run(args, check=True, capture_output=True)\n- except CalledProcessError as e:\n- raise e\n-\n- with open(dest, \"rb+\") as f:\n- content = f.read()\n- f.seek(0, 0)\n- f.write(\n- b\"\"\"# DO NOT MODIFY. This file was generated with `make freeze-requirements`.\n+ env = os.environ.copy()\n+ env[\"CUSTOM_COMPILE_COMMAND\"] = \"make freeze-requirements\"\n \n-\"\"\"\n- + content\n- )\n+ run(args, check=True, capture_output=True, env=env)\n \n \n def check_futures(futures: list[Future[None]]) -> int:\n@@ -61,7 +47,6 @@\n \"pip-compile\",\n \"--allow-unsafe\",\n \"--no-annotate\",\n- \"--no-header\",\n \"--quiet\",\n \"--strip-extras\",\n \"--index-url=https://pypi.devinfra.sentry.io/simple\",\n", "issue": "pip-compile and CUSTOM_COMPILE_COMMAND\n### Environment\r\n\r\nSaaS (https://sentry.io/)\r\n\r\n### Version\r\n\r\n_No response_\r\n\r\n### Steps to Reproduce\r\n\r\nJFYI\r\n\r\nhttps://github.com/getsentry/sentry/blob/2e36d4d3a3926e9efe06287e59d27be1c40300fa/tools/freeze_requirements.py#L13\r\n\r\nThere is `CUSTOM_COMPILE_COMMAND` env to [customize](https://github.com/jazzband/pip-tools#configuration) the header:\r\n\r\n\r\n```\r\n$ CUSTOM_COMPILE_COMMAND=\"make freeze-requirements\" pip-compile\r\n#\r\n# This file is autogenerated by pip-compile with python 3.11\r\n# To update, run:\r\n#\r\n# make freeze-requirements\r\n#\r\n....\r\n```\r\n\r\n### Expected Result\r\n\r\nN/A\r\n\r\n### Actual Result\r\n\r\nN/A\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom os.path import abspath\nfrom subprocess import CalledProcessError, run\nfrom typing import Sequence\n\nfrom tools.lib import gitroot\n\n\ndef worker(args: tuple[str, ...]) -> None:\n # pip-compile doesn't let you customize the header, so we write\n # one ourselves. However, pip-compile needs -o DEST otherwise\n # it will bump >= pins even if they're satisfied. So, we need to\n # unfortunately rewrite the whole file.\n dest = args[-1]\n try:\n run(args, check=True, capture_output=True)\n except CalledProcessError as e:\n raise e\n\n with open(dest, \"rb+\") as f:\n content = f.read()\n f.seek(0, 0)\n f.write(\n b\"\"\"# DO NOT MODIFY. 
This file was generated with `make freeze-requirements`.\n\n\"\"\"\n + content\n )\n\n\ndef check_futures(futures: list[Future[None]]) -> int:\n rc = 0\n for future in futures:\n try:\n future.result()\n except CalledProcessError as e:\n rc = 1\n print(\n f\"\"\"`{e.cmd}` returned code {e.returncode}\n\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}\n\"\"\"\n )\n return rc\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser()\n parser.parse_args(argv)\n\n base_path = abspath(gitroot())\n\n base_cmd = (\n \"pip-compile\",\n \"--allow-unsafe\",\n \"--no-annotate\",\n \"--no-header\",\n \"--quiet\",\n \"--strip-extras\",\n \"--index-url=https://pypi.devinfra.sentry.io/simple\",\n )\n\n executor = ThreadPoolExecutor(max_workers=2)\n futures = [\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n \"-o\",\n f\"{base_path}/requirements-frozen.txt\",\n ),\n ),\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n f\"{base_path}/requirements-dev.txt\",\n \"-o\",\n f\"{base_path}/requirements-dev-frozen.txt\",\n ),\n ),\n ]\n\n rc = check_futures(futures)\n executor.shutdown()\n return rc\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "tools/freeze_requirements.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport os\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom os.path import abspath\nfrom subprocess import CalledProcessError, run\nfrom typing import Sequence\n\nfrom tools.lib import gitroot\n\n\ndef worker(args: tuple[str, ...]) -> None:\n env = os.environ.copy()\n env[\"CUSTOM_COMPILE_COMMAND\"] = \"make freeze-requirements\"\n\n run(args, check=True, capture_output=True, env=env)\n\n\ndef check_futures(futures: list[Future[None]]) -> int:\n rc = 0\n for future in futures:\n try:\n future.result()\n except CalledProcessError as e:\n rc = 1\n print(\n f\"\"\"`{e.cmd}` returned code {e.returncode}\n\nstdout:\n{e.stdout.decode()}\n\nstderr:\n{e.stderr.decode()}\n\"\"\"\n )\n return rc\n\n\ndef main(argv: Sequence[str] | None = None) -> int:\n parser = argparse.ArgumentParser()\n parser.parse_args(argv)\n\n base_path = abspath(gitroot())\n\n base_cmd = (\n \"pip-compile\",\n \"--allow-unsafe\",\n \"--no-annotate\",\n \"--quiet\",\n \"--strip-extras\",\n \"--index-url=https://pypi.devinfra.sentry.io/simple\",\n )\n\n executor = ThreadPoolExecutor(max_workers=2)\n futures = [\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n \"-o\",\n f\"{base_path}/requirements-frozen.txt\",\n ),\n ),\n executor.submit(\n worker,\n (\n *base_cmd,\n f\"{base_path}/requirements-base.txt\",\n f\"{base_path}/requirements-getsentry.txt\",\n f\"{base_path}/requirements-dev.txt\",\n \"-o\",\n f\"{base_path}/requirements-dev-frozen.txt\",\n ),\n ),\n ]\n\n rc = check_futures(futures)\n executor.shutdown()\n return rc\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n", "path": "tools/freeze_requirements.py"}]}
num_tokens: 1,223
num_tokens_diff: 395
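For context on the freeze-requirements fix above: pip-tools reads a `CUSTOM_COMPILE_COMMAND` environment variable and echoes it in the header of the generated file, which is what makes the hand-written header (and the dropped `--no-header` flag) unnecessary. A minimal sketch, assuming pip-tools is installed; the requirements file names here are placeholders rather than Sentry's actual layout:

```python
# Sketch: pip-tools reads CUSTOM_COMPILE_COMMAND from the environment and prints
# it in the generated header, so no manual header rewriting is needed.
import os
import subprocess

env = os.environ.copy()
env["CUSTOM_COMPILE_COMMAND"] = "make freeze-requirements"  # text shown in the header

subprocess.run(
    [
        "pip-compile",
        "--allow-unsafe",
        "--no-annotate",
        "--quiet",
        "-o", "requirements-frozen.txt",   # placeholder output path
        "requirements-base.txt",           # placeholder input path
    ],
    check=True,
    env=env,
)
# requirements-frozen.txt now begins with a header along the lines of:
#   # This file is autogenerated by pip-compile ...
#   # To update, run:
#   #    make freeze-requirements
```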
problem_id: gh_patches_debug_9494
source: rasdani/github-patches
task_type: git_diff
in_source_id: quantopian__zipline-382
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make ta-lib optional `ta-lib` is causing us many installation problems. We should thus make it optional. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # Copyright 2013 Quantopian, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 import sys 17 18 from setuptools import setup, find_packages 19 20 LONG_DESCRIPTION = None 21 README_MARKDOWN = None 22 23 with open('README.md') as markdown_source: 24 README_MARKDOWN = markdown_source.read() 25 26 if 'upload' in sys.argv: 27 # Converts the README.md file to ReST, since PyPI uses ReST for formatting, 28 # This allows to have one canonical README file, being the README.md 29 # The conversion only needs to be done on upload. 30 # Otherwise, the pandoc import and errors that are thrown when 31 # pandoc are both overhead and a source of confusion for general 32 # usage/installation. 33 import pandoc 34 pandoc.core.PANDOC_PATH = 'pandoc' 35 doc = pandoc.Document() 36 doc.markdown = README_MARKDOWN 37 LONG_DESCRIPTION = doc.rst 38 else: 39 # If pandoc isn't installed, e.g. when downloading from pip, 40 # just use the regular README. 41 LONG_DESCRIPTION = README_MARKDOWN 42 43 setup( 44 name='zipline', 45 version='0.7.0', 46 description='A backtester for financial algorithms.', 47 author='Quantopian Inc.', 48 author_email='[email protected]', 49 packages=find_packages(), 50 scripts=['scripts/run_algo.py'], 51 long_description=LONG_DESCRIPTION, 52 license='Apache 2.0', 53 classifiers=[ 54 'Development Status :: 4 - Beta', 55 'License :: OSI Approved :: Apache Software License', 56 'Natural Language :: English', 57 'Programming Language :: Python', 58 'Programming Language :: Python :: 2.7', 59 'Programming Language :: Python :: 3.3', 60 'Operating System :: OS Independent', 61 'Intended Audience :: Science/Research', 62 'Topic :: Office/Business :: Financial', 63 'Topic :: Scientific/Engineering :: Information Analysis', 64 'Topic :: System :: Distributed Computing', 65 ], 66 install_requires=[ 67 'Logbook', 68 'pytz', 69 'requests', 70 'numpy', 71 'pandas', 72 'six' 73 ], 74 url="https://github.com/quantopian/zipline" 75 ) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Copyright 2013 Quantopian, Inc. +# Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -71,5 +71,8 @@ 'pandas', 'six' ], + extras_require = { + 'talib': ["talib"], + }, url="https://github.com/quantopian/zipline" )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,6 @@\n #!/usr/bin/env python\n #\n-# Copyright 2013 Quantopian, Inc.\n+# Copyright 2014 Quantopian, Inc.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -71,5 +71,8 @@\n 'pandas',\n 'six'\n ],\n+ extras_require = {\n+ 'talib': [\"talib\"],\n+ },\n url=\"https://github.com/quantopian/zipline\"\n )\n", "issue": "Make ta-lib optional\n`ta-lib` is causing us many installation problems. We should thus make it optional.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2013 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nfrom setuptools import setup, find_packages\n\nLONG_DESCRIPTION = None\nREADME_MARKDOWN = None\n\nwith open('README.md') as markdown_source:\n README_MARKDOWN = markdown_source.read()\n\nif 'upload' in sys.argv:\n # Converts the README.md file to ReST, since PyPI uses ReST for formatting,\n # This allows to have one canonical README file, being the README.md\n # The conversion only needs to be done on upload.\n # Otherwise, the pandoc import and errors that are thrown when\n # pandoc are both overhead and a source of confusion for general\n # usage/installation.\n import pandoc\n pandoc.core.PANDOC_PATH = 'pandoc'\n doc = pandoc.Document()\n doc.markdown = README_MARKDOWN\n LONG_DESCRIPTION = doc.rst\nelse:\n # If pandoc isn't installed, e.g. 
when downloading from pip,\n # just use the regular README.\n LONG_DESCRIPTION = README_MARKDOWN\n\nsetup(\n name='zipline',\n version='0.7.0',\n description='A backtester for financial algorithms.',\n author='Quantopian Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n scripts=['scripts/run_algo.py'],\n long_description=LONG_DESCRIPTION,\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: System :: Distributed Computing',\n ],\n install_requires=[\n 'Logbook',\n 'pytz',\n 'requests',\n 'numpy',\n 'pandas',\n 'six'\n ],\n url=\"https://github.com/quantopian/zipline\"\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\n\nfrom setuptools import setup, find_packages\n\nLONG_DESCRIPTION = None\nREADME_MARKDOWN = None\n\nwith open('README.md') as markdown_source:\n README_MARKDOWN = markdown_source.read()\n\nif 'upload' in sys.argv:\n # Converts the README.md file to ReST, since PyPI uses ReST for formatting,\n # This allows to have one canonical README file, being the README.md\n # The conversion only needs to be done on upload.\n # Otherwise, the pandoc import and errors that are thrown when\n # pandoc are both overhead and a source of confusion for general\n # usage/installation.\n import pandoc\n pandoc.core.PANDOC_PATH = 'pandoc'\n doc = pandoc.Document()\n doc.markdown = README_MARKDOWN\n LONG_DESCRIPTION = doc.rst\nelse:\n # If pandoc isn't installed, e.g. 
when downloading from pip,\n # just use the regular README.\n LONG_DESCRIPTION = README_MARKDOWN\n\nsetup(\n name='zipline',\n version='0.7.0',\n description='A backtester for financial algorithms.',\n author='Quantopian Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n scripts=['scripts/run_algo.py'],\n long_description=LONG_DESCRIPTION,\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: System :: Distributed Computing',\n ],\n install_requires=[\n 'Logbook',\n 'pytz',\n 'requests',\n 'numpy',\n 'pandas',\n 'six'\n ],\n extras_require = {\n 'talib': [\"talib\"],\n },\n url=\"https://github.com/quantopian/zipline\"\n)\n", "path": "setup.py"}]}
num_tokens: 1,011
num_tokens_diff: 153
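The zipline fix above only declares the extra; how the optional dependency is consumed is left to callers. A minimal sketch of the usual pattern — a guarded import paired with `pip install zipline[talib]` — assuming the `talib` wrapper package; this is an illustration, not zipline's actual code:

```python
# Sketch of consuming the optional extra: import ta-lib only when it is installed
# (e.g. after `pip install zipline[talib]`) and degrade gracefully otherwise.
try:
    import talib  # provided by the extras_require["talib"] extra
except ImportError:
    talib = None


def sma(values, window=30):
    """Simple moving average via ta-lib, if the optional extra is installed."""
    if talib is None:
        raise RuntimeError("ta-lib not installed; run `pip install zipline[talib]`")
    return talib.SMA(values, timeperiod=window)
```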
problem_id: gh_patches_debug_14618
source: rasdani/github-patches
task_type: git_diff
in_source_id: Kinto__kinto-1312
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PostgreSQL Cache: Deadlock ``` (psycopg2.extensions.TransactionRollbackError) deadlock detected DETAIL: Process 12750 waits for ShareLock on transaction 632693475; blocked by process 10080. Process 10080 waits for ShareLock on transaction 632693479; blocked by process 12750. HINT: See server log for query details. CONTEXT: while deleting tuple (426,1) in relation "cache" [SQL: 'DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;'] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/cache/postgresql/__init__.py` Content: ``` 1 import logging 2 import os 3 4 from kinto.core.cache import CacheBase 5 from kinto.core.storage.postgresql.client import create_from_config 6 from kinto.core.utils import json 7 8 9 logger = logging.getLogger(__name__) 10 11 12 class Cache(CacheBase): 13 """Cache backend using PostgreSQL. 14 15 Enable in configuration:: 16 17 kinto.cache_backend = kinto.core.cache.postgresql 18 19 Database location URI can be customized:: 20 21 kinto.cache_url = postgres://user:[email protected]:5432/dbname 22 23 Alternatively, username and password could also rely on system user ident 24 or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*). 25 26 .. note:: 27 28 Some tables and indices are created when ``kinto migrate`` is run. 29 This requires some privileges on the database, or some error will 30 be raised. 31 32 **Alternatively**, the schema can be initialized outside the 33 python application, using the SQL file located in 34 :file:`kinto/core/cache/postgresql/schema.sql`. This allows to 35 distinguish schema manipulation privileges from schema usage. 36 37 38 A connection pool is enabled by default:: 39 40 kinto.cache_pool_size = 10 41 kinto.cache_maxoverflow = 10 42 kinto.cache_max_backlog = -1 43 kinto.cache_pool_recycle = -1 44 kinto.cache_pool_timeout = 30 45 kinto.cache_poolclass = 46 kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog 47 48 The ``max_backlog`` limits the number of threads that can be in the queue 49 waiting for a connection. Once this limit has been reached, any further 50 attempts to acquire a connection will be rejected immediately, instead of 51 locking up all threads by keeping them waiting in the queue. 52 53 See `dedicated section in SQLAlchemy documentation 54 <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_ 55 for default values and behaviour. 56 57 .. note:: 58 59 Using a `dedicated connection pool <http://pgpool.net>`_ is still 60 recommended to allow load balancing, replication or limit the number 61 of connections used in a multi-process deployment. 62 63 :noindex: 64 """ # NOQA 65 def __init__(self, client, *args, **kwargs): 66 super().__init__(*args, **kwargs) 67 self.client = client 68 69 def initialize_schema(self, dry_run=False): 70 # Check if cache table exists. 
71 query = """ 72 SELECT 1 73 FROM information_schema.tables 74 WHERE table_name = 'cache'; 75 """ 76 with self.client.connect(readonly=True) as conn: 77 result = conn.execute(query) 78 if result.rowcount > 0: 79 logger.info("PostgreSQL cache schema is up-to-date.") 80 return 81 82 # Create schema 83 here = os.path.abspath(os.path.dirname(__file__)) 84 sql_file = os.path.join(here, 'schema.sql') 85 86 if dry_run: 87 logger.info("Create cache schema from '{}'".format(sql_file)) 88 return 89 90 # Since called outside request, force commit. 91 with open(sql_file) as f: 92 schema = f.read() 93 with self.client.connect(force_commit=True) as conn: 94 conn.execute(schema) 95 logger.info('Created PostgreSQL cache tables') 96 97 def flush(self): 98 query = """ 99 DELETE FROM cache; 100 """ 101 # Since called outside request (e.g. tests), force commit. 102 with self.client.connect(force_commit=True) as conn: 103 conn.execute(query) 104 logger.debug('Flushed PostgreSQL cache tables') 105 106 def ttl(self, key): 107 query = """ 108 SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl 109 FROM cache 110 WHERE key = :key 111 AND ttl IS NOT NULL; 112 """ 113 with self.client.connect(readonly=True) as conn: 114 result = conn.execute(query, dict(key=self.prefix + key)) 115 if result.rowcount > 0: 116 return result.fetchone()['ttl'] 117 return -1 118 119 def expire(self, key, ttl): 120 query = """ 121 UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key; 122 """ 123 with self.client.connect() as conn: 124 conn.execute(query, dict(ttl=ttl, key=self.prefix + key)) 125 126 def set(self, key, value, ttl): 127 if isinstance(value, bytes): 128 raise TypeError("a string-like object is required, not 'bytes'") 129 130 query = """ 131 INSERT INTO cache (key, value, ttl) 132 VALUES (:key, :value, sec2ttl(:ttl)) 133 ON CONFLICT (key) DO UPDATE 134 SET value = :value, 135 ttl = sec2ttl(:ttl); 136 """ 137 value = json.dumps(value) 138 with self.client.connect() as conn: 139 conn.execute(query, dict(key=self.prefix + key, 140 value=value, ttl=ttl)) 141 142 def get(self, key): 143 purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;" 144 query = "SELECT value FROM cache WHERE key = :key;" 145 with self.client.connect() as conn: 146 conn.execute(purge) 147 result = conn.execute(query, dict(key=self.prefix + key)) 148 if result.rowcount > 0: 149 value = result.fetchone()['value'] 150 return json.loads(value) 151 152 def delete(self, key): 153 query = "DELETE FROM cache WHERE key = :key RETURNING value;" 154 with self.client.connect() as conn: 155 result = conn.execute(query, dict(key=self.prefix + key)) 156 if result.rowcount > 0: 157 value = result.fetchone()['value'] 158 return json.loads(value) 159 return None 160 161 162 def load_from_config(config): 163 settings = config.get_settings() 164 client = create_from_config(config, prefix='cache_', with_transaction=False) 165 return Cache(client=client, cache_prefix=settings['cache_prefix']) 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py --- a/kinto/core/cache/postgresql/__init__.py +++ b/kinto/core/cache/postgresql/__init__.py @@ -140,8 +140,17 @@ value=value, ttl=ttl)) def get(self, key): - purge = "DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;" - query = "SELECT value FROM cache WHERE key = :key;" + purge = """ + DELETE FROM cache c + USING ( + SELECT key + FROM cache + WHERE ttl IS NOT NULL AND now() > ttl + ORDER BY key ASC + FOR UPDATE + ) del + WHERE del.key = c.key;""" + query = "SELECT value FROM cache WHERE key = :key AND now() < ttl;" with self.client.connect() as conn: conn.execute(purge) result = conn.execute(query, dict(key=self.prefix + key))
{"golden_diff": "diff --git a/kinto/core/cache/postgresql/__init__.py b/kinto/core/cache/postgresql/__init__.py\n--- a/kinto/core/cache/postgresql/__init__.py\n+++ b/kinto/core/cache/postgresql/__init__.py\n@@ -140,8 +140,17 @@\n value=value, ttl=ttl))\n \n def get(self, key):\n- purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n- query = \"SELECT value FROM cache WHERE key = :key;\"\n+ purge = \"\"\"\n+ DELETE FROM cache c\n+ USING (\n+ SELECT key\n+ FROM cache\n+ WHERE ttl IS NOT NULL AND now() > ttl\n+ ORDER BY key ASC\n+ FOR UPDATE\n+ ) del\n+ WHERE del.key = c.key;\"\"\"\n+ query = \"SELECT value FROM cache WHERE key = :key AND now() < ttl;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n", "issue": "PostgreSQL Cache: Deadlock\n```\r\n(psycopg2.extensions.TransactionRollbackError) deadlock detected\r\nDETAIL: Process 12750 waits for ShareLock on transaction 632693475; blocked by process 10080.\r\nProcess 10080 waits for ShareLock on transaction 632693479; blocked by process 12750.\r\nHINT: See server log for query details.\r\nCONTEXT: while deleting tuple (426,1) in relation \"cache\"\r\n [SQL: 'DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;']\r\n```\n", "before_files": [{"content": "import logging\nimport os\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. 
note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from '{}'\".format(sql_file))\n return\n\n # Since called outside request, force commit.\n with open(sql_file) as f:\n schema = f.read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n\n query = \"\"\"\n INSERT INTO cache (key, value, ttl)\n VALUES (:key, :value, sec2ttl(:ttl))\n ON CONFLICT (key) DO UPDATE\n SET value = :value,\n ttl = sec2ttl(:ttl);\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"DELETE FROM cache WHERE ttl IS NOT NULL AND now() > ttl;\"\n query = \"SELECT value FROM cache WHERE key = :key;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key RETURNING value;\"\n with self.client.connect() as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n return None\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}], "after_files": [{"content": "import logging\nimport os\n\nfrom kinto.core.cache import CacheBase\nfrom kinto.core.storage.postgresql.client import create_from_config\nfrom kinto.core.utils import json\n\n\nlogger = 
logging.getLogger(__name__)\n\n\nclass Cache(CacheBase):\n \"\"\"Cache backend using PostgreSQL.\n\n Enable in configuration::\n\n kinto.cache_backend = kinto.core.cache.postgresql\n\n Database location URI can be customized::\n\n kinto.cache_url = postgres://user:[email protected]:5432/dbname\n\n Alternatively, username and password could also rely on system user ident\n or even specified in :file:`~/.pgpass` (*see PostgreSQL documentation*).\n\n .. note::\n\n Some tables and indices are created when ``kinto migrate`` is run.\n This requires some privileges on the database, or some error will\n be raised.\n\n **Alternatively**, the schema can be initialized outside the\n python application, using the SQL file located in\n :file:`kinto/core/cache/postgresql/schema.sql`. This allows to\n distinguish schema manipulation privileges from schema usage.\n\n\n A connection pool is enabled by default::\n\n kinto.cache_pool_size = 10\n kinto.cache_maxoverflow = 10\n kinto.cache_max_backlog = -1\n kinto.cache_pool_recycle = -1\n kinto.cache_pool_timeout = 30\n kinto.cache_poolclass =\n kinto.core.storage.postgresql.pool.QueuePoolWithMaxBacklog\n\n The ``max_backlog`` limits the number of threads that can be in the queue\n waiting for a connection. Once this limit has been reached, any further\n attempts to acquire a connection will be rejected immediately, instead of\n locking up all threads by keeping them waiting in the queue.\n\n See `dedicated section in SQLAlchemy documentation\n <http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html>`_\n for default values and behaviour.\n\n .. note::\n\n Using a `dedicated connection pool <http://pgpool.net>`_ is still\n recommended to allow load balancing, replication or limit the number\n of connections used in a multi-process deployment.\n\n :noindex:\n \"\"\" # NOQA\n def __init__(self, client, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.client = client\n\n def initialize_schema(self, dry_run=False):\n # Check if cache table exists.\n query = \"\"\"\n SELECT 1\n FROM information_schema.tables\n WHERE table_name = 'cache';\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query)\n if result.rowcount > 0:\n logger.info(\"PostgreSQL cache schema is up-to-date.\")\n return\n\n # Create schema\n here = os.path.abspath(os.path.dirname(__file__))\n sql_file = os.path.join(here, 'schema.sql')\n\n if dry_run:\n logger.info(\"Create cache schema from '{}'\".format(sql_file))\n return\n\n # Since called outside request, force commit.\n with open(sql_file) as f:\n schema = f.read()\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)\n logger.info('Created PostgreSQL cache tables')\n\n def flush(self):\n query = \"\"\"\n DELETE FROM cache;\n \"\"\"\n # Since called outside request (e.g. 
tests), force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(query)\n logger.debug('Flushed PostgreSQL cache tables')\n\n def ttl(self, key):\n query = \"\"\"\n SELECT EXTRACT(SECOND FROM (ttl - now())) AS ttl\n FROM cache\n WHERE key = :key\n AND ttl IS NOT NULL;\n \"\"\"\n with self.client.connect(readonly=True) as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n return result.fetchone()['ttl']\n return -1\n\n def expire(self, key, ttl):\n query = \"\"\"\n UPDATE cache SET ttl = sec2ttl(:ttl) WHERE key = :key;\n \"\"\"\n with self.client.connect() as conn:\n conn.execute(query, dict(ttl=ttl, key=self.prefix + key))\n\n def set(self, key, value, ttl):\n if isinstance(value, bytes):\n raise TypeError(\"a string-like object is required, not 'bytes'\")\n\n query = \"\"\"\n INSERT INTO cache (key, value, ttl)\n VALUES (:key, :value, sec2ttl(:ttl))\n ON CONFLICT (key) DO UPDATE\n SET value = :value,\n ttl = sec2ttl(:ttl);\n \"\"\"\n value = json.dumps(value)\n with self.client.connect() as conn:\n conn.execute(query, dict(key=self.prefix + key,\n value=value, ttl=ttl))\n\n def get(self, key):\n purge = \"\"\"\n DELETE FROM cache c\n USING (\n SELECT key\n FROM cache\n WHERE ttl IS NOT NULL AND now() > ttl\n ORDER BY key ASC\n FOR UPDATE\n ) del\n WHERE del.key = c.key;\"\"\"\n query = \"SELECT value FROM cache WHERE key = :key AND now() < ttl;\"\n with self.client.connect() as conn:\n conn.execute(purge)\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n\n def delete(self, key):\n query = \"DELETE FROM cache WHERE key = :key RETURNING value;\"\n with self.client.connect() as conn:\n result = conn.execute(query, dict(key=self.prefix + key))\n if result.rowcount > 0:\n value = result.fetchone()['value']\n return json.loads(value)\n return None\n\n\ndef load_from_config(config):\n settings = config.get_settings()\n client = create_from_config(config, prefix='cache_', with_transaction=False)\n return Cache(client=client, cache_prefix=settings['cache_prefix'])\n", "path": "kinto/core/cache/postgresql/__init__.py"}]}
num_tokens: 2,067
num_tokens_diff: 232
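The deadlock fix above works because both concurrent purge transactions now lock the expired rows in the same order (`ORDER BY key ASC ... FOR UPDATE`), so neither can end up waiting on a lock the other already holds. A minimal sketch of the same pattern outside Kinto, assuming a psycopg2-style connection and the same `cache(key, ttl)` table:

```python
# Sketch of the ordered-purge pattern: every purger locks expired rows in the
# same key order, so two concurrent purges cannot deadlock on each other.
PURGE_EXPIRED = """
DELETE FROM cache c
USING (
    SELECT key
    FROM cache
    WHERE ttl IS NOT NULL AND now() > ttl
    ORDER BY key ASC
    FOR UPDATE
) expired
WHERE expired.key = c.key;
"""


def purge_expired(conn):
    """Run the purge on a psycopg2-style connection, inside the caller's transaction."""
    with conn.cursor() as cur:
        cur.execute(PURGE_EXPIRED)
        return cur.rowcount  # number of expired entries removed
```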
problem_id: gh_patches_debug_50307
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-3345
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider konagrill is broken During the global build at 2021-06-16-14-42-20, spider **konagrill** failed with **0 features** and **24 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/konagrill.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/konagrill.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/konagrill.geojson)) Spider konagrill is broken During the global build at 2021-10-13-14-42-23, spider **konagrill** failed with **0 features** and **24 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/konagrill.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/konagrill.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/konagrill.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/kona_grill.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import json 3 import scrapy 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 STATES = [ 9 "AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", 10 "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", 11 "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", 12 "NJ", "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", 13 "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", 14 "WY", 15 ] 16 17 WEEKDAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"] 18 19 20 class KonaGrillSpider(scrapy.Spider): 21 download_delay = 0.2 22 name = "konagrill" 23 item_attributes = {"brand": "Kona Grill", "brand_wikidata": "Q6428706"} 24 allowed_domains = ["konagrill.com"] 25 26 def start_requests(self): 27 url_by_state = "https://www.konagrill.com/ajax/getlocationsbystate" 28 headers = {"content-type": "application/x-www-form-urlencoded"} 29 30 # Get store id per state 31 for state in STATES: 32 yield scrapy.http.Request( 33 url_by_state, 34 method="POST", 35 body="state={}".format(state), 36 callback=self.parse, 37 headers=headers, 38 ) 39 40 def parse(self, response): 41 store_data = json.loads(response.text) 42 url_location_details = "https://www.konagrill.com/ajax/getlocationdetails" 43 headers = {"content-type": "application/x-www-form-urlencoded"} 44 store_ids = [] 45 46 if not store_data.get("data"): 47 return 48 49 store_ids += [s.get("id") for _, s in store_data.get("data").items()] 50 51 # Get store details 52 for i in store_ids: 53 yield scrapy.http.Request( 54 url_location_details, 55 method="POST", 56 body="id={}".format(i), 57 callback=self.parse_store, 58 headers=headers, 59 ) 60 61 def parse_store(self, response): 62 response_data = json.loads(response.text) 63 if not response_data.get("data"): 64 return 65 66 store = response_data.get("data") 67 dh = store.get("dininghours") 68 # Data is inconsistent some keys were found with a trailing space 69 opening_hours = self.parse_hours(dh.get("dining hours") or dh.get("dining hours ")) 70 properties = { 71 "addr_full": store.get("address"), 72 "city": store.get("city"), 73 "extras": {"email": store.get("email"),}, 74 "lat": store.get("latitude"), 75 "lon": store.get("longitude"), 76 "name": store.get("title"), 77 "opening_hours": 
opening_hours, 78 "phone": store.get("phone_number"), 79 "postcode": store.get("zip"), 80 "ref": store.get("id"), 81 "state": store.get("state"), 82 "website": store.get("order_online_url"), 83 } 84 85 yield GeojsonPointItem(**properties) 86 87 def parse_hours(self, hours): 88 oh = OpeningHours() 89 90 for t in hours: 91 days = self.parse_days(t.get("days")) 92 open_time, close_time = t.get("hours").split("-") 93 ot = open_time.strip() 94 ct = close_time.strip() 95 for day in days: 96 oh.add_range(day=day, open_time=ot, close_time=ct, time_format="%I%p") 97 98 return oh.as_opening_hours() 99 100 def parse_days(self, days): 101 """Parse day ranges and returns a list of days it represent 102 The following formats are considered: 103 - Single day, e.g. "Mon", "Monday" 104 - Range, e.g. "Mon-Fri", "Tue-Sund", "Sat-Sunday" 105 - Two days, e.g. "Sat & Sun", "Friday & Su" 106 107 Returns a list with the weekdays 108 """ 109 parsed_days = [] 110 111 # Range 112 # Produce a list of weekdays between two days e.g. su-sa, mo-th, etc. 113 if "-" in days: 114 d = days.split("-") 115 r = [i.strip()[:2] for i in d] 116 s = WEEKDAYS.index(r[0].title()) 117 e = WEEKDAYS.index(r[1].title()) 118 if s <= e: 119 return WEEKDAYS[s : e + 1] 120 else: 121 return WEEKDAYS[s:] + WEEKDAYS[: e + 1] 122 # Two days 123 if "&" in days: 124 d = days.split("&") 125 return [i.strip()[:2].title() for i in d] 126 # Single days 127 else: 128 return [days.strip()[:2].title()] 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/kona_grill.py b/locations/spiders/kona_grill.py --- a/locations/spiders/kona_grill.py +++ b/locations/spiders/kona_grill.py @@ -88,6 +88,10 @@ oh = OpeningHours() for t in hours: + # Some day entries contain invalid week data, e.g. "Brunch" + # "Brunch" is a special dining hour that is contained in regular hours, ignore it + if "Brunch" in t.get("days"): + continue days = self.parse_days(t.get("days")) open_time, close_time = t.get("hours").split("-") ot = open_time.strip()
{"golden_diff": "diff --git a/locations/spiders/kona_grill.py b/locations/spiders/kona_grill.py\n--- a/locations/spiders/kona_grill.py\n+++ b/locations/spiders/kona_grill.py\n@@ -88,6 +88,10 @@\n oh = OpeningHours()\n \n for t in hours:\n+ # Some day entries contain invalid week data, e.g. \"Brunch\"\n+ # \"Brunch\" is a special dining hour that is contained in regular hours, ignore it\n+ if \"Brunch\" in t.get(\"days\"):\n+ continue\n days = self.parse_days(t.get(\"days\"))\n open_time, close_time = t.get(\"hours\").split(\"-\")\n ot = open_time.strip()\n", "issue": "Spider konagrill is broken\nDuring the global build at 2021-06-16-14-42-20, spider **konagrill** failed with **0 features** and **24 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/konagrill.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/konagrill.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/konagrill.geojson))\nSpider konagrill is broken\nDuring the global build at 2021-10-13-14-42-23, spider **konagrill** failed with **0 features** and **24 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/konagrill.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/konagrill.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/konagrill.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nSTATES = [\n \"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\",\n \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\",\n \"MD\", \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\",\n \"NJ\", \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\",\n \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\",\n \"WY\",\n]\n\nWEEKDAYS = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n\n\nclass KonaGrillSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"konagrill\"\n item_attributes = {\"brand\": \"Kona Grill\", \"brand_wikidata\": \"Q6428706\"}\n allowed_domains = [\"konagrill.com\"]\n\n def start_requests(self):\n url_by_state = \"https://www.konagrill.com/ajax/getlocationsbystate\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n\n # Get store id per state\n for state in STATES:\n yield scrapy.http.Request(\n url_by_state,\n method=\"POST\",\n body=\"state={}\".format(state),\n callback=self.parse,\n headers=headers,\n )\n\n def parse(self, response):\n store_data = json.loads(response.text)\n url_location_details = \"https://www.konagrill.com/ajax/getlocationdetails\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n store_ids = []\n\n if not store_data.get(\"data\"):\n return\n\n store_ids += [s.get(\"id\") for _, s in store_data.get(\"data\").items()]\n\n # Get store details\n for i in store_ids:\n yield scrapy.http.Request(\n url_location_details,\n method=\"POST\",\n body=\"id={}\".format(i),\n callback=self.parse_store,\n headers=headers,\n )\n\n def parse_store(self, response):\n response_data = json.loads(response.text)\n if not response_data.get(\"data\"):\n return\n\n store = response_data.get(\"data\")\n dh = store.get(\"dininghours\")\n # 
Data is inconsistent some keys were found with a trailing space\n opening_hours = self.parse_hours(dh.get(\"dining hours\") or dh.get(\"dining hours \"))\n properties = {\n \"addr_full\": store.get(\"address\"),\n \"city\": store.get(\"city\"),\n \"extras\": {\"email\": store.get(\"email\"),},\n \"lat\": store.get(\"latitude\"),\n \"lon\": store.get(\"longitude\"),\n \"name\": store.get(\"title\"),\n \"opening_hours\": opening_hours,\n \"phone\": store.get(\"phone_number\"),\n \"postcode\": store.get(\"zip\"),\n \"ref\": store.get(\"id\"),\n \"state\": store.get(\"state\"),\n \"website\": store.get(\"order_online_url\"),\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, hours):\n oh = OpeningHours()\n\n for t in hours:\n days = self.parse_days(t.get(\"days\"))\n open_time, close_time = t.get(\"hours\").split(\"-\")\n ot = open_time.strip()\n ct = close_time.strip()\n for day in days:\n oh.add_range(day=day, open_time=ot, close_time=ct, time_format=\"%I%p\")\n\n return oh.as_opening_hours()\n\n def parse_days(self, days):\n \"\"\"Parse day ranges and returns a list of days it represent\n The following formats are considered:\n - Single day, e.g. \"Mon\", \"Monday\"\n - Range, e.g. \"Mon-Fri\", \"Tue-Sund\", \"Sat-Sunday\"\n - Two days, e.g. \"Sat & Sun\", \"Friday & Su\"\n\n Returns a list with the weekdays\n \"\"\"\n parsed_days = []\n\n # Range\n # Produce a list of weekdays between two days e.g. su-sa, mo-th, etc.\n if \"-\" in days:\n d = days.split(\"-\")\n r = [i.strip()[:2] for i in d]\n s = WEEKDAYS.index(r[0].title())\n e = WEEKDAYS.index(r[1].title())\n if s <= e:\n return WEEKDAYS[s : e + 1]\n else:\n return WEEKDAYS[s:] + WEEKDAYS[: e + 1]\n # Two days\n if \"&\" in days:\n d = days.split(\"&\")\n return [i.strip()[:2].title() for i in d]\n # Single days\n else:\n return [days.strip()[:2].title()]\n", "path": "locations/spiders/kona_grill.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nSTATES = [\n \"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\",\n \"GA\", \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\",\n \"MD\", \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\",\n \"NJ\", \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\",\n \"SC\", \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\",\n \"WY\",\n]\n\nWEEKDAYS = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n\n\nclass KonaGrillSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"konagrill\"\n item_attributes = {\"brand\": \"Kona Grill\", \"brand_wikidata\": \"Q6428706\"}\n allowed_domains = [\"konagrill.com\"]\n\n def start_requests(self):\n url_by_state = \"https://www.konagrill.com/ajax/getlocationsbystate\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n\n # Get store id per state\n for state in STATES:\n yield scrapy.http.Request(\n url_by_state,\n method=\"POST\",\n body=\"state={}\".format(state),\n callback=self.parse,\n headers=headers,\n )\n\n def parse(self, response):\n store_data = json.loads(response.text)\n url_location_details = \"https://www.konagrill.com/ajax/getlocationdetails\"\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n store_ids = []\n\n if not store_data.get(\"data\"):\n return\n\n store_ids += [s.get(\"id\") for _, s in store_data.get(\"data\").items()]\n\n # Get store 
details\n for i in store_ids:\n yield scrapy.http.Request(\n url_location_details,\n method=\"POST\",\n body=\"id={}\".format(i),\n callback=self.parse_store,\n headers=headers,\n )\n\n def parse_store(self, response):\n response_data = json.loads(response.text)\n if not response_data.get(\"data\"):\n return\n\n store = response_data.get(\"data\")\n dh = store.get(\"dininghours\")\n # Data is inconsistent some keys were found with a trailing space\n opening_hours = self.parse_hours(dh.get(\"dining hours\") or dh.get(\"dining hours \"))\n properties = {\n \"addr_full\": store.get(\"address\"),\n \"city\": store.get(\"city\"),\n \"extras\": {\"email\": store.get(\"email\"),},\n \"lat\": store.get(\"latitude\"),\n \"lon\": store.get(\"longitude\"),\n \"name\": store.get(\"title\"),\n \"opening_hours\": opening_hours,\n \"phone\": store.get(\"phone_number\"),\n \"postcode\": store.get(\"zip\"),\n \"ref\": store.get(\"id\"),\n \"state\": store.get(\"state\"),\n \"website\": store.get(\"order_online_url\"),\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, hours):\n oh = OpeningHours()\n\n for t in hours:\n # Some day entries contain invalid week data, e.g. \"Brunch\"\n # \"Brunch\" is a special dining hour that is contained in regular hours, ignore it\n if \"Brunch\" in t.get(\"days\"):\n continue\n days = self.parse_days(t.get(\"days\"))\n open_time, close_time = t.get(\"hours\").split(\"-\")\n ot = open_time.strip()\n ct = close_time.strip()\n for day in days:\n oh.add_range(day=day, open_time=ot, close_time=ct, time_format=\"%I%p\")\n\n return oh.as_opening_hours()\n\n def parse_days(self, days):\n \"\"\"Parse day ranges and returns a list of days it represent\n The following formats are considered:\n - Single day, e.g. \"Mon\", \"Monday\"\n - Range, e.g. \"Mon-Fri\", \"Tue-Sund\", \"Sat-Sunday\"\n - Two days, e.g. \"Sat & Sun\", \"Friday & Su\"\n\n Returns a list with the weekdays\n \"\"\"\n parsed_days = []\n\n # Range\n # Produce a list of weekdays between two days e.g. su-sa, mo-th, etc.\n if \"-\" in days:\n d = days.split(\"-\")\n r = [i.strip()[:2] for i in d]\n s = WEEKDAYS.index(r[0].title())\n e = WEEKDAYS.index(r[1].title())\n if s <= e:\n return WEEKDAYS[s : e + 1]\n else:\n return WEEKDAYS[s:] + WEEKDAYS[: e + 1]\n # Two days\n if \"&\" in days:\n d = days.split(\"&\")\n return [i.strip()[:2].title() for i in d]\n # Single days\n else:\n return [days.strip()[:2].title()]\n", "path": "locations/spiders/kona_grill.py"}]}
num_tokens: 2,036
num_tokens_diff: 164
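The konagrill fix above special-cases the literal string "Brunch". A slightly more general sketch of the same idea — dropping any dining-hours entry whose `days` field does not start with a recognisable weekday before it reaches the day-range parser — note this generalisation is an assumption for illustration, not the spider's actual check:

```python
# Sketch: filter out dining-hours entries whose "days" value is not a weekday
# spec (e.g. "Brunch") before they reach the day-range parser.
WEEKDAYS = ("Mo", "Tu", "We", "Th", "Fr", "Sa", "Su")


def looks_like_weekdays(days: str) -> bool:
    return days.strip()[:2].title() in WEEKDAYS


hours = [
    {"days": "Mon-Thu", "hours": "11am-10pm"},
    {"days": "Brunch", "hours": "10am-2pm"},   # not a weekday range; the real fix skips it
]
parseable = [entry for entry in hours if looks_like_weekdays(entry["days"])]
assert [entry["days"] for entry in parseable] == ["Mon-Thu"]
```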
problem_id: gh_patches_debug_3571
source: rasdani/github-patches
task_type: git_diff
in_source_id: nerfstudio-project__nerfstudio-2076
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Doc Description Wrong Hello, I find python doc in [get_depth_image_from_path](https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/utils/data_utils.py) is wrong about the return tensor shape, it should be [height, width, 1] not [width, height, 1]. ![图片](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/e6acb5ee-4f66-44ad-b21e-1161b1df61b3) a simple verification: ![图片](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/dfd65f57-7f8b-44ab-897b-d06793c03375) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nerfstudio/data/utils/data_utils.py` Content: ``` 1 # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Utility functions to allow easy re-use of common operations across dataloaders""" 16 from pathlib import Path 17 from typing import List, Tuple, Union 18 19 import cv2 20 import numpy as np 21 import torch 22 from PIL import Image 23 24 25 def get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor: 26 """ 27 Utility function to read a mask image from the given path and return a boolean tensor 28 """ 29 pil_mask = Image.open(filepath) 30 if scale_factor != 1.0: 31 width, height = pil_mask.size 32 newsize = (int(width * scale_factor), int(height * scale_factor)) 33 pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST) 34 mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool() 35 if len(mask_tensor.shape) != 3: 36 raise ValueError("The mask image should have 1 channel") 37 return mask_tensor 38 39 40 def get_semantics_and_mask_tensors_from_path( 41 filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0 42 ) -> Tuple[torch.Tensor, torch.Tensor]: 43 """ 44 Utility function to read segmentation from the given filepath 45 If no mask is required - use mask_indices = [] 46 """ 47 if isinstance(mask_indices, List): 48 mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1) 49 pil_image = Image.open(filepath) 50 if scale_factor != 1.0: 51 width, height = pil_image.size 52 newsize = (int(width * scale_factor), int(height * scale_factor)) 53 pil_image = pil_image.resize(newsize, resample=Image.NEAREST) 54 semantics = torch.from_numpy(np.array(pil_image, dtype="int64"))[..., None] 55 mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0 56 return semantics, mask 57 58 59 def get_depth_image_from_path( 60 filepath: Path, 61 height: int, 62 width: int, 63 scale_factor: float, 64 interpolation: int = cv2.INTER_NEAREST, 65 ) -> torch.Tensor: 66 """Loads, rescales and resizes depth images. 67 Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`. 
68 69 Args: 70 filepath: Path to depth image. 71 height: Target depth image height. 72 width: Target depth image width. 73 scale_factor: Factor by which to scale depth image. 74 interpolation: Depth value interpolation for resizing. 75 76 Returns: 77 Depth image torch tensor with shape [width, height, 1]. 78 """ 79 if filepath.suffix == ".npy": 80 image = np.load(filepath) * scale_factor 81 image = cv2.resize(image, (width, height), interpolation=interpolation) 82 else: 83 image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH) 84 image = image.astype(np.float64) * scale_factor 85 image = cv2.resize(image, (width, height), interpolation=interpolation) 86 return torch.from_numpy(image[:, :, np.newaxis]) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nerfstudio/data/utils/data_utils.py b/nerfstudio/data/utils/data_utils.py --- a/nerfstudio/data/utils/data_utils.py +++ b/nerfstudio/data/utils/data_utils.py @@ -74,7 +74,7 @@ interpolation: Depth value interpolation for resizing. Returns: - Depth image torch tensor with shape [width, height, 1]. + Depth image torch tensor with shape [height, width, 1]. """ if filepath.suffix == ".npy": image = np.load(filepath) * scale_factor
{"golden_diff": "diff --git a/nerfstudio/data/utils/data_utils.py b/nerfstudio/data/utils/data_utils.py\n--- a/nerfstudio/data/utils/data_utils.py\n+++ b/nerfstudio/data/utils/data_utils.py\n@@ -74,7 +74,7 @@\n interpolation: Depth value interpolation for resizing.\n \n Returns:\n- Depth image torch tensor with shape [width, height, 1].\n+ Depth image torch tensor with shape [height, width, 1].\n \"\"\"\n if filepath.suffix == \".npy\":\n image = np.load(filepath) * scale_factor\n", "issue": "Doc Description Wrong\nHello, \r\n\r\nI find python doc in [get_depth_image_from_path](https://github.com/nerfstudio-project/nerfstudio/blob/main/nerfstudio/data/utils/data_utils.py) is wrong about the return tensor shape, \r\n\r\nit should be [height, width, 1] not [width, height, 1].\r\n\r\n![\u56fe\u7247](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/e6acb5ee-4f66-44ad-b21e-1161b1df61b3)\r\n\r\na simple verification:\r\n\r\n![\u56fe\u7247](https://github.com/nerfstudio-project/nerfstudio/assets/20349525/dfd65f57-7f8b-44ab-897b-d06793c03375)\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions to allow easy re-use of common operations across dataloaders\"\"\"\nfrom pathlib import Path\nfrom typing import List, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\ndef get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor:\n \"\"\"\n Utility function to read a mask image from the given path and return a boolean tensor\n \"\"\"\n pil_mask = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_mask.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST)\n mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool()\n if len(mask_tensor.shape) != 3:\n raise ValueError(\"The mask image should have 1 channel\")\n return mask_tensor\n\n\ndef get_semantics_and_mask_tensors_from_path(\n filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Utility function to read segmentation from the given filepath\n If no mask is required - use mask_indices = []\n \"\"\"\n if isinstance(mask_indices, List):\n mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1)\n pil_image = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.NEAREST)\n semantics = torch.from_numpy(np.array(pil_image, dtype=\"int64\"))[..., None]\n mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0\n return semantics, mask\n\n\ndef get_depth_image_from_path(\n filepath: Path,\n height: int,\n width: int,\n 
scale_factor: float,\n interpolation: int = cv2.INTER_NEAREST,\n) -> torch.Tensor:\n \"\"\"Loads, rescales and resizes depth images.\n Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`.\n\n Args:\n filepath: Path to depth image.\n height: Target depth image height.\n width: Target depth image width.\n scale_factor: Factor by which to scale depth image.\n interpolation: Depth value interpolation for resizing.\n\n Returns:\n Depth image torch tensor with shape [width, height, 1].\n \"\"\"\n if filepath.suffix == \".npy\":\n image = np.load(filepath) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n else:\n image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH)\n image = image.astype(np.float64) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n return torch.from_numpy(image[:, :, np.newaxis])\n", "path": "nerfstudio/data/utils/data_utils.py"}], "after_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions to allow easy re-use of common operations across dataloaders\"\"\"\nfrom pathlib import Path\nfrom typing import List, Tuple, Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\ndef get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor:\n \"\"\"\n Utility function to read a mask image from the given path and return a boolean tensor\n \"\"\"\n pil_mask = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_mask.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_mask = pil_mask.resize(newsize, resample=Image.NEAREST)\n mask_tensor = torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool()\n if len(mask_tensor.shape) != 3:\n raise ValueError(\"The mask image should have 1 channel\")\n return mask_tensor\n\n\ndef get_semantics_and_mask_tensors_from_path(\n filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Utility function to read segmentation from the given filepath\n If no mask is required - use mask_indices = []\n \"\"\"\n if isinstance(mask_indices, List):\n mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1)\n pil_image = Image.open(filepath)\n if scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * scale_factor), int(height * scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.NEAREST)\n semantics = torch.from_numpy(np.array(pil_image, dtype=\"int64\"))[..., None]\n mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0\n return semantics, mask\n\n\ndef get_depth_image_from_path(\n filepath: Path,\n height: int,\n width: int,\n scale_factor: float,\n interpolation: int = cv2.INTER_NEAREST,\n) -> torch.Tensor:\n \"\"\"Loads, rescales and 
resizes depth images.\n Filepath points to a 16-bit or 32-bit depth image, or a numpy array `*.npy`.\n\n Args:\n filepath: Path to depth image.\n height: Target depth image height.\n width: Target depth image width.\n scale_factor: Factor by which to scale depth image.\n interpolation: Depth value interpolation for resizing.\n\n Returns:\n Depth image torch tensor with shape [height, width, 1].\n \"\"\"\n if filepath.suffix == \".npy\":\n image = np.load(filepath) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n else:\n image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH)\n image = image.astype(np.float64) * scale_factor\n image = cv2.resize(image, (width, height), interpolation=interpolation)\n return torch.from_numpy(image[:, :, np.newaxis])\n", "path": "nerfstudio/data/utils/data_utils.py"}]}
1,452
128
gh_patches_debug_30107
rasdani/github-patches
git_diff
getsentry__sentry-python-432
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enforce functools.wraps on all monkeypatches I'm using [`celery-once`](https://github.com/cameronmaske/celery-once) to make sure some tasks are only run one at a time. The lock is acquired in `apply_async` and released in `after_return`. When I use the `CeleryIntegration` I'm not seeing any of the locks being released after the task has finished running. I'm not very familiar with the inner workings of celery, but any ideas what's going on? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/integrations/celery.py` Content: ``` 1 from __future__ import absolute_import 2 3 import sys 4 5 from celery.exceptions import ( # type: ignore 6 SoftTimeLimitExceeded, 7 Retry, 8 Ignore, 9 Reject, 10 ) 11 12 from sentry_sdk.hub import Hub 13 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception 14 from sentry_sdk.tracing import Span 15 from sentry_sdk._compat import reraise 16 from sentry_sdk.integrations import Integration 17 from sentry_sdk.integrations.logging import ignore_logger 18 from sentry_sdk._types import MYPY 19 20 if MYPY: 21 from typing import Any 22 23 24 CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject) 25 26 27 class CeleryIntegration(Integration): 28 identifier = "celery" 29 30 def __init__(self, propagate_traces=True): 31 # type: (bool) -> None 32 self.propagate_traces = propagate_traces 33 34 @staticmethod 35 def setup_once(): 36 # type: () -> None 37 import celery.app.trace as trace # type: ignore 38 39 old_build_tracer = trace.build_tracer 40 41 def sentry_build_tracer(name, task, *args, **kwargs): 42 if not getattr(task, "_sentry_is_patched", False): 43 # Need to patch both methods because older celery sometimes 44 # short-circuits to task.run if it thinks it's safe. 45 task.__call__ = _wrap_task_call(task, task.__call__) 46 task.run = _wrap_task_call(task, task.run) 47 task.apply_async = _wrap_apply_async(task, task.apply_async) 48 49 # `build_tracer` is apparently called for every task 50 # invocation. Can't wrap every celery task for every invocation 51 # or we will get infinitely nested wrapper functions. 52 task._sentry_is_patched = True 53 54 return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs)) 55 56 trace.build_tracer = sentry_build_tracer 57 58 _patch_worker_exit() 59 60 # This logger logs every status of every task that ran on the worker. 61 # Meaning that every task's breadcrumbs are full of stuff like "Task 62 # <foo> raised unexpected <bar>". 63 ignore_logger("celery.worker.job") 64 65 66 def _wrap_apply_async(task, f): 67 def apply_async(*args, **kwargs): 68 hub = Hub.current 69 integration = hub.get_integration(CeleryIntegration) 70 if integration is not None and integration.propagate_traces: 71 headers = None 72 for key, value in hub.iter_trace_propagation_headers(): 73 if headers is None: 74 headers = dict(kwargs.get("headers") or {}) 75 headers[key] = value 76 if headers is not None: 77 kwargs["headers"] = headers 78 return f(*args, **kwargs) 79 80 return apply_async 81 82 83 def _wrap_tracer(task, f): 84 # Need to wrap tracer for pushing the scope before prerun is sent, and 85 # popping it after postrun is sent. 86 # 87 # This is the reason we don't use signals for hooking in the first place. 88 # Also because in Celery 3, signal dispatch returns early if one handler 89 # crashes. 
90 def _inner(*args, **kwargs): 91 hub = Hub.current 92 if hub.get_integration(CeleryIntegration) is None: 93 return f(*args, **kwargs) 94 95 with hub.push_scope() as scope: 96 scope._name = "celery" 97 scope.clear_breadcrumbs() 98 scope.add_event_processor(_make_event_processor(task, *args, **kwargs)) 99 100 span = Span.continue_from_headers(args[3].get("headers") or {}) 101 span.transaction = "unknown celery task" 102 103 with capture_internal_exceptions(): 104 # Celery task objects are not a thing to be trusted. Even 105 # something such as attribute access can fail. 106 span.transaction = task.name 107 108 with hub.span(span): 109 return f(*args, **kwargs) 110 111 return _inner 112 113 114 def _wrap_task_call(task, f): 115 # Need to wrap task call because the exception is caught before we get to 116 # see it. Also celery's reported stacktrace is untrustworthy. 117 def _inner(*args, **kwargs): 118 try: 119 return f(*args, **kwargs) 120 except Exception: 121 exc_info = sys.exc_info() 122 with capture_internal_exceptions(): 123 _capture_exception(task, exc_info) 124 reraise(*exc_info) 125 126 return _inner 127 128 129 def _make_event_processor(task, uuid, args, kwargs, request=None): 130 def event_processor(event, hint): 131 with capture_internal_exceptions(): 132 extra = event.setdefault("extra", {}) 133 extra["celery-job"] = { 134 "task_name": task.name, 135 "args": args, 136 "kwargs": kwargs, 137 } 138 139 if "exc_info" in hint: 140 with capture_internal_exceptions(): 141 if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded): 142 event["fingerprint"] = [ 143 "celery", 144 "SoftTimeLimitExceeded", 145 getattr(task, "name", task), 146 ] 147 148 return event 149 150 return event_processor 151 152 153 def _capture_exception(task, exc_info): 154 hub = Hub.current 155 156 if hub.get_integration(CeleryIntegration) is None: 157 return 158 if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS): 159 return 160 if hasattr(task, "throws") and isinstance(exc_info[1], task.throws): 161 return 162 163 # If an integration is there, a client has to be there. 164 client = hub.client # type: Any 165 166 event, hint = event_from_exception( 167 exc_info, 168 client_options=client.options, 169 mechanism={"type": "celery", "handled": False}, 170 ) 171 172 hub.capture_event(event, hint=hint) 173 174 175 def _patch_worker_exit(): 176 # Need to flush queue before worker shutdown because a crashing worker will 177 # call os._exit 178 from billiard.pool import Worker # type: ignore 179 180 old_workloop = Worker.workloop 181 182 def sentry_workloop(*args, **kwargs): 183 try: 184 return old_workloop(*args, **kwargs) 185 finally: 186 with capture_internal_exceptions(): 187 hub = Hub.current 188 if hub.get_integration(CeleryIntegration) is not None: 189 hub.flush() 190 191 Worker.workloop = sentry_workloop 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py --- a/sentry_sdk/integrations/celery.py +++ b/sentry_sdk/integrations/celery.py @@ -1,5 +1,6 @@ from __future__ import absolute_import +import functools import sys from celery.exceptions import ( # type: ignore @@ -64,6 +65,7 @@ def _wrap_apply_async(task, f): + @functools.wraps(f) def apply_async(*args, **kwargs): hub = Hub.current integration = hub.get_integration(CeleryIntegration) @@ -87,6 +89,7 @@ # This is the reason we don't use signals for hooking in the first place. # Also because in Celery 3, signal dispatch returns early if one handler # crashes. + @functools.wraps(f) def _inner(*args, **kwargs): hub = Hub.current if hub.get_integration(CeleryIntegration) is None: @@ -114,6 +117,11 @@ def _wrap_task_call(task, f): # Need to wrap task call because the exception is caught before we get to # see it. Also celery's reported stacktrace is untrustworthy. + + # functools.wraps is important here because celery-once looks at this + # method's name. + # https://github.com/getsentry/sentry-python/issues/421 + @functools.wraps(f) def _inner(*args, **kwargs): try: return f(*args, **kwargs)
{"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -1,5 +1,6 @@\n from __future__ import absolute_import\n \n+import functools\n import sys\n \n from celery.exceptions import ( # type: ignore\n@@ -64,6 +65,7 @@\n \n \n def _wrap_apply_async(task, f):\n+ @functools.wraps(f)\n def apply_async(*args, **kwargs):\n hub = Hub.current\n integration = hub.get_integration(CeleryIntegration)\n@@ -87,6 +89,7 @@\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n+ @functools.wraps(f)\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n@@ -114,6 +117,11 @@\n def _wrap_task_call(task, f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. Also celery's reported stacktrace is untrustworthy.\n+\n+ # functools.wraps is important here because celery-once looks at this\n+ # method's name.\n+ # https://github.com/getsentry/sentry-python/issues/421\n+ @functools.wraps(f)\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n", "issue": "Enforce functools.wraps on all monkeypatches\nI'm using [`celery-once`](https://github.com/cameronmaske/celery-once) to make sure some tasks are only run one at a time. The lock is acquired in `apply_async` and released in `after_return`. \r\n\r\nWhen I use the `CeleryIntegration` I'm not seeing any of the locks being released after the task has finished running. I'm not very familiar with the inner workings of celery, but any ideas what's going on?\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.exceptions import ( # type: ignore\n SoftTimeLimitExceeded,\n Retry,\n Ignore,\n Reject,\n)\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.tracing import Span\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n\n\nCELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n def __init__(self, propagate_traces=True):\n # type: (bool) -> None\n self.propagate_traces = propagate_traces\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n if not getattr(task, \"_sentry_is_patched\", False):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n task.apply_async = _wrap_apply_async(task, task.apply_async)\n\n # `build_tracer` is apparently called for every task\n # invocation. 
Can't wrap every celery task for every invocation\n # or we will get infinitely nested wrapper functions.\n task._sentry_is_patched = True\n\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_apply_async(task, f):\n def apply_async(*args, **kwargs):\n hub = Hub.current\n integration = hub.get_integration(CeleryIntegration)\n if integration is not None and integration.propagate_traces:\n headers = None\n for key, value in hub.iter_trace_propagation_headers():\n if headers is None:\n headers = dict(kwargs.get(\"headers\") or {})\n headers[key] = value\n if headers is not None:\n kwargs[\"headers\"] = headers\n return f(*args, **kwargs)\n\n return apply_async\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n span = Span.continue_from_headers(args[3].get(\"headers\") or {})\n span.transaction = \"unknown celery task\"\n\n with capture_internal_exceptions():\n # Celery task objects are not a thing to be trusted. Even\n # something such as attribute access can fail.\n span.transaction = task.name\n\n with hub.span(span):\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. 
Also celery's reported stacktrace is untrustworthy.\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport functools\nimport sys\n\nfrom celery.exceptions import ( # type: ignore\n SoftTimeLimitExceeded,\n Retry,\n Ignore,\n Reject,\n)\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.tracing import Span\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n\n\nCELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n def __init__(self, propagate_traces=True):\n # type: (bool) -> None\n self.propagate_traces = propagate_traces\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import celery.app.trace as trace # type: ignore\n\n old_build_tracer = trace.build_tracer\n\n def sentry_build_tracer(name, task, *args, **kwargs):\n if not getattr(task, \"_sentry_is_patched\", False):\n # Need to patch both methods because older celery sometimes\n # short-circuits to task.run if it thinks it's safe.\n task.__call__ = _wrap_task_call(task, task.__call__)\n task.run = _wrap_task_call(task, task.run)\n task.apply_async = _wrap_apply_async(task, task.apply_async)\n\n # `build_tracer` is apparently called for every task\n # invocation. 
Can't wrap every celery task for every invocation\n # or we will get infinitely nested wrapper functions.\n task._sentry_is_patched = True\n\n return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))\n\n trace.build_tracer = sentry_build_tracer\n\n _patch_worker_exit()\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _wrap_apply_async(task, f):\n @functools.wraps(f)\n def apply_async(*args, **kwargs):\n hub = Hub.current\n integration = hub.get_integration(CeleryIntegration)\n if integration is not None and integration.propagate_traces:\n headers = None\n for key, value in hub.iter_trace_propagation_headers():\n if headers is None:\n headers = dict(kwargs.get(\"headers\") or {})\n headers[key] = value\n if headers is not None:\n kwargs[\"headers\"] = headers\n return f(*args, **kwargs)\n\n return apply_async\n\n\ndef _wrap_tracer(task, f):\n # Need to wrap tracer for pushing the scope before prerun is sent, and\n # popping it after postrun is sent.\n #\n # This is the reason we don't use signals for hooking in the first place.\n # Also because in Celery 3, signal dispatch returns early if one handler\n # crashes.\n @functools.wraps(f)\n def _inner(*args, **kwargs):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is None:\n return f(*args, **kwargs)\n\n with hub.push_scope() as scope:\n scope._name = \"celery\"\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(task, *args, **kwargs))\n\n span = Span.continue_from_headers(args[3].get(\"headers\") or {})\n span.transaction = \"unknown celery task\"\n\n with capture_internal_exceptions():\n # Celery task objects are not a thing to be trusted. Even\n # something such as attribute access can fail.\n span.transaction = task.name\n\n with hub.span(span):\n return f(*args, **kwargs)\n\n return _inner\n\n\ndef _wrap_task_call(task, f):\n # Need to wrap task call because the exception is caught before we get to\n # see it. 
Also celery's reported stacktrace is untrustworthy.\n\n # functools.wraps is important here because celery-once looks at this\n # method's name.\n # https://github.com/getsentry/sentry-python/issues/421\n @functools.wraps(f)\n def _inner(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception:\n exc_info = sys.exc_info()\n with capture_internal_exceptions():\n _capture_exception(task, exc_info)\n reraise(*exc_info)\n\n return _inner\n\n\ndef _make_event_processor(task, uuid, args, kwargs, request=None):\n def event_processor(event, hint):\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"celery-job\"] = {\n \"task_name\": task.name,\n \"args\": args,\n \"kwargs\": kwargs,\n }\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n event[\"fingerprint\"] = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(task, \"name\", task),\n ]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(task, exc_info):\n hub = Hub.current\n\n if hub.get_integration(CeleryIntegration) is None:\n return\n if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):\n return\n if hasattr(task, \"throws\") and isinstance(exc_info[1], task.throws):\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _patch_worker_exit():\n # Need to flush queue before worker shutdown because a crashing worker will\n # call os._exit\n from billiard.pool import Worker # type: ignore\n\n old_workloop = Worker.workloop\n\n def sentry_workloop(*args, **kwargs):\n try:\n return old_workloop(*args, **kwargs)\n finally:\n with capture_internal_exceptions():\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.flush()\n\n Worker.workloop = sentry_workloop\n", "path": "sentry_sdk/integrations/celery.py"}]}
2,239
372
gh_patches_debug_10752
rasdani/github-patches
git_diff
pypa__pip-3398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip 8.0 returns pre-release files for pip list -o without --pre When running `pip list -o` pip 8.0 is listing pre-release files even though previously it did not. I suspect that this might be because of some of the refactors done by @xavfernandez. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/commands/list.py` Content: ``` 1 from __future__ import absolute_import 2 3 import logging 4 import warnings 5 6 from pip.basecommand import Command 7 from pip.exceptions import CommandError 8 from pip.index import PackageFinder 9 from pip.utils import ( 10 get_installed_distributions, dist_is_editable) 11 from pip.utils.deprecation import RemovedInPip10Warning 12 from pip.cmdoptions import make_option_group, index_group 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class ListCommand(Command): 19 """ 20 List installed packages, including editables. 21 22 Packages are listed in a case-insensitive sorted order. 23 """ 24 name = 'list' 25 usage = """ 26 %prog [options]""" 27 summary = 'List installed packages.' 28 29 def __init__(self, *args, **kw): 30 super(ListCommand, self).__init__(*args, **kw) 31 32 cmd_opts = self.cmd_opts 33 34 cmd_opts.add_option( 35 '-o', '--outdated', 36 action='store_true', 37 default=False, 38 help='List outdated packages') 39 cmd_opts.add_option( 40 '-u', '--uptodate', 41 action='store_true', 42 default=False, 43 help='List uptodate packages') 44 cmd_opts.add_option( 45 '-e', '--editable', 46 action='store_true', 47 default=False, 48 help='List editable projects.') 49 cmd_opts.add_option( 50 '-l', '--local', 51 action='store_true', 52 default=False, 53 help=('If in a virtualenv that has global access, do not list ' 54 'globally-installed packages.'), 55 ) 56 self.cmd_opts.add_option( 57 '--user', 58 dest='user', 59 action='store_true', 60 default=False, 61 help='Only output packages installed in user-site.') 62 63 cmd_opts.add_option( 64 '--pre', 65 action='store_true', 66 default=False, 67 help=("Include pre-release and development versions. By default, " 68 "pip only finds stable versions."), 69 ) 70 71 index_opts = make_option_group(index_group, self.parser) 72 73 self.parser.insert_option_group(0, index_opts) 74 self.parser.insert_option_group(0, cmd_opts) 75 76 def _build_package_finder(self, options, index_urls, session): 77 """ 78 Create a package finder appropriate to this list command. 79 """ 80 return PackageFinder( 81 find_links=options.find_links, 82 index_urls=index_urls, 83 allow_all_prereleases=options.pre, 84 trusted_hosts=options.trusted_hosts, 85 process_dependency_links=options.process_dependency_links, 86 session=session, 87 ) 88 89 def run(self, options, args): 90 if options.allow_external: 91 warnings.warn( 92 "--allow-external has been deprecated and will be removed in " 93 "the future. Due to changes in the repository protocol, it no " 94 "longer has any effect.", 95 RemovedInPip10Warning, 96 ) 97 98 if options.allow_all_external: 99 warnings.warn( 100 "--allow-all-external has been deprecated and will be removed " 101 "in the future. Due to changes in the repository protocol, it " 102 "no longer has any effect.", 103 RemovedInPip10Warning, 104 ) 105 106 if options.allow_unverified: 107 warnings.warn( 108 "--allow-unverified has been deprecated and will be removed " 109 "in the future. 
Due to changes in the repository protocol, it " 110 "no longer has any effect.", 111 RemovedInPip10Warning, 112 ) 113 if options.outdated and options.uptodate: 114 raise CommandError( 115 "Options --outdated and --uptodate cannot be combined.") 116 117 if options.outdated: 118 self.run_outdated(options) 119 elif options.uptodate: 120 self.run_uptodate(options) 121 else: 122 self.run_listing(options) 123 124 def run_outdated(self, options): 125 for dist, latest_version, typ in sorted( 126 self.find_packages_latest_versions(options), 127 key=lambda p: p[0].project_name.lower()): 128 if latest_version > dist.parsed_version: 129 logger.info( 130 '%s - Latest: %s [%s]', 131 self.output_package(dist), latest_version, typ, 132 ) 133 134 def find_packages_latest_versions(self, options): 135 index_urls = [options.index_url] + options.extra_index_urls 136 if options.no_index: 137 logger.info('Ignoring indexes: %s', ','.join(index_urls)) 138 index_urls = [] 139 140 dependency_links = [] 141 for dist in get_installed_distributions( 142 local_only=options.local, 143 user_only=options.user, 144 editables_only=options.editable): 145 if dist.has_metadata('dependency_links.txt'): 146 dependency_links.extend( 147 dist.get_metadata_lines('dependency_links.txt'), 148 ) 149 150 with self._build_session(options) as session: 151 finder = self._build_package_finder(options, index_urls, session) 152 finder.add_dependency_links(dependency_links) 153 154 installed_packages = get_installed_distributions( 155 local_only=options.local, 156 user_only=options.user, 157 editables_only=options.editable, 158 ) 159 for dist in installed_packages: 160 typ = 'unknown' 161 all_candidates = finder.find_all_candidates(dist.key) 162 if not all_candidates: 163 continue 164 best_candidate = max(all_candidates, 165 key=finder._candidate_sort_key) 166 remote_version = best_candidate.version 167 if best_candidate.location.is_wheel: 168 typ = 'wheel' 169 else: 170 typ = 'sdist' 171 yield dist, remote_version, typ 172 173 def run_listing(self, options): 174 installed_packages = get_installed_distributions( 175 local_only=options.local, 176 user_only=options.user, 177 editables_only=options.editable, 178 ) 179 self.output_package_listing(installed_packages) 180 181 def output_package(self, dist): 182 if dist_is_editable(dist): 183 return '%s (%s, %s)' % ( 184 dist.project_name, 185 dist.version, 186 dist.location, 187 ) 188 else: 189 return '%s (%s)' % (dist.project_name, dist.version) 190 191 def output_package_listing(self, installed_packages): 192 installed_packages = sorted( 193 installed_packages, 194 key=lambda dist: dist.project_name.lower(), 195 ) 196 for dist in installed_packages: 197 logger.info(self.output_package(dist)) 198 199 def run_uptodate(self, options): 200 uptodate = [] 201 for dist, version, typ in self.find_packages_latest_versions(options): 202 if dist.parsed_version == version: 203 uptodate.append(dist) 204 self.output_package_listing(uptodate) 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/commands/list.py b/pip/commands/list.py --- a/pip/commands/list.py +++ b/pip/commands/list.py @@ -159,6 +159,11 @@ for dist in installed_packages: typ = 'unknown' all_candidates = finder.find_all_candidates(dist.key) + if not options.pre: + # Remove prereleases + all_candidates = [candidate for candidate in all_candidates + if not candidate.version.is_prerelease] + if not all_candidates: continue best_candidate = max(all_candidates,
{"golden_diff": "diff --git a/pip/commands/list.py b/pip/commands/list.py\n--- a/pip/commands/list.py\n+++ b/pip/commands/list.py\n@@ -159,6 +159,11 @@\n for dist in installed_packages:\n typ = 'unknown'\n all_candidates = finder.find_all_candidates(dist.key)\n+ if not options.pre:\n+ # Remove prereleases\n+ all_candidates = [candidate for candidate in all_candidates\n+ if not candidate.version.is_prerelease]\n+\n if not all_candidates:\n continue\n best_candidate = max(all_candidates,\n", "issue": "pip 8.0 returns pre-release files for pip list -o without --pre\nWhen running `pip list -o` pip 8.0 is listing pre-release files even though previously it did not. I suspect that this might be because of some of the refactors done by @xavfernandez.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import CommandError\nfrom pip.index import PackageFinder\nfrom pip.utils import (\n get_installed_distributions, dist_is_editable)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n self.cmd_opts.add_option(\n '--user',\n dest='user',\n action='store_true',\n default=False,\n help='Only output packages installed in user-site.')\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_all_prereleases=options.pre,\n trusted_hosts=options.trusted_hosts,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. 
Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n if options.outdated and options.uptodate:\n raise CommandError(\n \"Options --outdated and --uptodate cannot be combined.\")\n\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, latest_version, typ in sorted(\n self.find_packages_latest_versions(options),\n key=lambda p: p[0].project_name.lower()):\n if latest_version > dist.parsed_version:\n logger.info(\n '%s - Latest: %s [%s]',\n self.output_package(dist), latest_version, typ,\n )\n\n def find_packages_latest_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n dependency_links = []\n for dist in get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n for dist in installed_packages:\n typ = 'unknown'\n all_candidates = finder.find_all_candidates(dist.key)\n if not all_candidates:\n continue\n best_candidate = max(all_candidates,\n key=finder._candidate_sort_key)\n remote_version = best_candidate.version\n if best_candidate.location.is_wheel:\n typ = 'wheel'\n else:\n typ = 'sdist'\n yield dist, remote_version, typ\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n self.output_package_listing(installed_packages)\n\n def output_package(self, dist):\n if dist_is_editable(dist):\n return '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n return '%s (%s)' % (dist.project_name, dist.version)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n logger.info(self.output_package(dist))\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, version, typ in self.find_packages_latest_versions(options):\n if dist.parsed_version == version:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport warnings\n\nfrom pip.basecommand import Command\nfrom pip.exceptions import CommandError\nfrom pip.index import PackageFinder\nfrom pip.utils import (\n get_installed_distributions, dist_is_editable)\nfrom pip.utils.deprecation import RemovedInPip10Warning\nfrom pip.cmdoptions import make_option_group, index_group\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
ListCommand(Command):\n \"\"\"\n List installed packages, including editables.\n\n Packages are listed in a case-insensitive sorted order.\n \"\"\"\n name = 'list'\n usage = \"\"\"\n %prog [options]\"\"\"\n summary = 'List installed packages.'\n\n def __init__(self, *args, **kw):\n super(ListCommand, self).__init__(*args, **kw)\n\n cmd_opts = self.cmd_opts\n\n cmd_opts.add_option(\n '-o', '--outdated',\n action='store_true',\n default=False,\n help='List outdated packages')\n cmd_opts.add_option(\n '-u', '--uptodate',\n action='store_true',\n default=False,\n help='List uptodate packages')\n cmd_opts.add_option(\n '-e', '--editable',\n action='store_true',\n default=False,\n help='List editable projects.')\n cmd_opts.add_option(\n '-l', '--local',\n action='store_true',\n default=False,\n help=('If in a virtualenv that has global access, do not list '\n 'globally-installed packages.'),\n )\n self.cmd_opts.add_option(\n '--user',\n dest='user',\n action='store_true',\n default=False,\n help='Only output packages installed in user-site.')\n\n cmd_opts.add_option(\n '--pre',\n action='store_true',\n default=False,\n help=(\"Include pre-release and development versions. By default, \"\n \"pip only finds stable versions.\"),\n )\n\n index_opts = make_option_group(index_group, self.parser)\n\n self.parser.insert_option_group(0, index_opts)\n self.parser.insert_option_group(0, cmd_opts)\n\n def _build_package_finder(self, options, index_urls, session):\n \"\"\"\n Create a package finder appropriate to this list command.\n \"\"\"\n return PackageFinder(\n find_links=options.find_links,\n index_urls=index_urls,\n allow_all_prereleases=options.pre,\n trusted_hosts=options.trusted_hosts,\n process_dependency_links=options.process_dependency_links,\n session=session,\n )\n\n def run(self, options, args):\n if options.allow_external:\n warnings.warn(\n \"--allow-external has been deprecated and will be removed in \"\n \"the future. Due to changes in the repository protocol, it no \"\n \"longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_all_external:\n warnings.warn(\n \"--allow-all-external has been deprecated and will be removed \"\n \"in the future. Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n\n if options.allow_unverified:\n warnings.warn(\n \"--allow-unverified has been deprecated and will be removed \"\n \"in the future. 
Due to changes in the repository protocol, it \"\n \"no longer has any effect.\",\n RemovedInPip10Warning,\n )\n if options.outdated and options.uptodate:\n raise CommandError(\n \"Options --outdated and --uptodate cannot be combined.\")\n\n if options.outdated:\n self.run_outdated(options)\n elif options.uptodate:\n self.run_uptodate(options)\n else:\n self.run_listing(options)\n\n def run_outdated(self, options):\n for dist, latest_version, typ in sorted(\n self.find_packages_latest_versions(options),\n key=lambda p: p[0].project_name.lower()):\n if latest_version > dist.parsed_version:\n logger.info(\n '%s - Latest: %s [%s]',\n self.output_package(dist), latest_version, typ,\n )\n\n def find_packages_latest_versions(self, options):\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.info('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n dependency_links = []\n for dist in get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable):\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt'),\n )\n\n with self._build_session(options) as session:\n finder = self._build_package_finder(options, index_urls, session)\n finder.add_dependency_links(dependency_links)\n\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n for dist in installed_packages:\n typ = 'unknown'\n all_candidates = finder.find_all_candidates(dist.key)\n if not options.pre:\n # Remove prereleases\n all_candidates = [candidate for candidate in all_candidates\n if not candidate.version.is_prerelease]\n\n if not all_candidates:\n continue\n best_candidate = max(all_candidates,\n key=finder._candidate_sort_key)\n remote_version = best_candidate.version\n if best_candidate.location.is_wheel:\n typ = 'wheel'\n else:\n typ = 'sdist'\n yield dist, remote_version, typ\n\n def run_listing(self, options):\n installed_packages = get_installed_distributions(\n local_only=options.local,\n user_only=options.user,\n editables_only=options.editable,\n )\n self.output_package_listing(installed_packages)\n\n def output_package(self, dist):\n if dist_is_editable(dist):\n return '%s (%s, %s)' % (\n dist.project_name,\n dist.version,\n dist.location,\n )\n else:\n return '%s (%s)' % (dist.project_name, dist.version)\n\n def output_package_listing(self, installed_packages):\n installed_packages = sorted(\n installed_packages,\n key=lambda dist: dist.project_name.lower(),\n )\n for dist in installed_packages:\n logger.info(self.output_package(dist))\n\n def run_uptodate(self, options):\n uptodate = []\n for dist, version, typ in self.find_packages_latest_versions(options):\n if dist.parsed_version == version:\n uptodate.append(dist)\n self.output_package_listing(uptodate)\n", "path": "pip/commands/list.py"}]}
2,185
131
gh_patches_debug_29607
rasdani/github-patches
git_diff
nf-core__tools-1244
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show ignore modules in `nf-core modules bump-versions` Modules ignore via the `.nf-core.yml` are currently not shown at all when running the `nf-core modules bump-versions` command. ## Describe the solution you'd like Add a new `Module ignored` section to `nf-core modules bump-versions` table with modules ignored via the `.nf-core.yml`. ## Describe alternatives you've considered Issue a log messages about which modules have been ignored. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nf_core/modules/bump_versions.py` Content: ``` 1 """ 2 Bump versions for all modules on nf-core/modules 3 or for a single module 4 """ 5 6 7 from __future__ import print_function 8 import logging 9 import questionary 10 import os 11 import re 12 import rich 13 from rich.console import Console 14 from rich.table import Table 15 from rich.markdown import Markdown 16 import rich 17 from nf_core.utils import rich_force_colors 18 19 import nf_core.utils 20 import nf_core.modules.module_utils 21 from nf_core.modules.nfcore_module import NFCoreModule 22 from .modules_command import ModuleCommand 23 24 log = logging.getLogger(__name__) 25 26 27 class ModuleVersionBumper(ModuleCommand): 28 def __init__(self, pipeline_dir): 29 super().__init__(pipeline_dir) 30 31 self.up_to_date = None 32 self.updated = None 33 self.failed = None 34 self.show_up_to_date = None 35 self.tools_config = {} 36 37 def bump_versions(self, module=None, all_modules=False, show_uptodate=False): 38 """ 39 Bump the container and conda version of single module or all modules 40 41 Looks for a bioconda tool version in the `main.nf` file of the module and checks whether 42 are more recent version is available. If yes, then tries to get docker/singularity 43 container links and replace the bioconda version and the container links in the main.nf file 44 of the respective module. 45 46 Args: 47 module: a specific module to update 48 all_modules: whether to bump versions for all modules 49 """ 50 self.up_to_date = [] 51 self.updated = [] 52 self.failed = [] 53 self.show_up_to_date = show_uptodate 54 55 # Verify that this is not a pipeline 56 self.dir, repo_type = nf_core.modules.module_utils.get_repo_type(self.dir) 57 if not repo_type == "modules": 58 raise nf_core.modules.module_utils.ModuleException( 59 "This command only works on the nf-core/modules repository, not on pipelines!" 
60 ) 61 62 # Get list of all modules 63 _, nfcore_modules = nf_core.modules.module_utils.get_installed_modules(self.dir) 64 65 # Load the .nf-core.yml config 66 self.tools_config = nf_core.utils.load_tools_config(self.dir) 67 68 # Prompt for module or all 69 if module is None and not all_modules: 70 question = { 71 "type": "list", 72 "name": "all_modules", 73 "message": "Bump versions for all modules or a single named module?", 74 "choices": ["All modules", "Named module"], 75 } 76 answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style) 77 if answer["all_modules"] == "All modules": 78 all_modules = True 79 else: 80 module = questionary.autocomplete( 81 "Tool name:", 82 choices=[m.module_name for m in nfcore_modules], 83 style=nf_core.utils.nfcore_question_style, 84 ).unsafe_ask() 85 86 if module: 87 self.show_up_to_date = True 88 if all_modules: 89 raise nf_core.modules.module_utils.ModuleException( 90 "You cannot specify a tool and request all tools to be bumped." 91 ) 92 nfcore_modules = [m for m in nfcore_modules if m.module_name == module] 93 if len(nfcore_modules) == 0: 94 raise nf_core.modules.module_utils.ModuleException(f"Could not find the specified module: '{module}'") 95 96 progress_bar = rich.progress.Progress( 97 "[bold blue]{task.description}", 98 rich.progress.BarColumn(bar_width=None), 99 "[magenta]{task.completed} of {task.total}[reset] » [bold yellow]{task.fields[test_name]}", 100 transient=True, 101 ) 102 with progress_bar: 103 bump_progress = progress_bar.add_task( 104 "Bumping nf-core modules versions", total=len(nfcore_modules), test_name=nfcore_modules[0].module_name 105 ) 106 for mod in nfcore_modules: 107 progress_bar.update(bump_progress, advance=1, test_name=mod.module_name) 108 self.bump_module_version(mod) 109 110 self._print_results() 111 112 def bump_module_version(self, module: NFCoreModule): 113 """ 114 Bump the bioconda and container version of a single NFCoreModule 115 116 Args: 117 module: NFCoreModule 118 """ 119 config_version = None 120 # Extract bioconda version from `main.nf` 121 bioconda_packages = self.get_bioconda_version(module) 122 123 # If multiple versions - don't update! 
(can't update mulled containers) 124 if not bioconda_packages or len(bioconda_packages) > 1: 125 self.failed.append((f"Ignoring mulled container", module.module_name)) 126 return False 127 128 # Don't update if blocked in blacklist 129 self.bump_versions_config = self.tools_config.get("bump-versions", {}) 130 if module.module_name in self.bump_versions_config: 131 config_version = self.bump_versions_config[module.module_name] 132 if not config_version: 133 self.up_to_date.append((f"Omitting module due to config: {module.module_name}", module.module_name)) 134 return False 135 136 # check for correct version and newer versions 137 bioconda_tool_name = bioconda_packages[0].split("=")[0].replace("bioconda::", "").strip("'").strip('"') 138 bp = bioconda_packages[0] 139 bp = bp.strip("'").strip('"') 140 bioconda_version = bp.split("=")[1] 141 142 if not config_version: 143 try: 144 response = nf_core.utils.anaconda_package(bp) 145 except (LookupError, ValueError) as e: 146 self.failed.append((f"Conda version not specified correctly: {module.main_nf}", module.module_name)) 147 return False 148 149 # Check that required version is available at all 150 if bioconda_version not in response.get("versions"): 151 self.failed.append((f"Conda package had unknown version: `{module.main_nf}`", module.module_name)) 152 return False 153 154 # Check version is latest available 155 last_ver = response.get("latest_version") 156 else: 157 last_ver = config_version 158 159 if last_ver is not None and last_ver != bioconda_version: 160 log.debug(f"Updating version for {module.module_name}") 161 # Get docker and singularity container links 162 try: 163 docker_img, singularity_img = nf_core.utils.get_biocontainer_tag(bioconda_tool_name, last_ver) 164 except LookupError as e: 165 self.failed.append((f"Could not download container tags: {e}", module.module_name)) 166 return False 167 168 patterns = [ 169 (bioconda_packages[0], f"'bioconda::{bioconda_tool_name}={last_ver}'"), 170 (r"quay.io/biocontainers/{}:[^'\"\s]+".format(bioconda_tool_name), docker_img), 171 ( 172 r"https://depot.galaxyproject.org/singularity/{}:[^'\"\s]+".format(bioconda_tool_name), 173 singularity_img, 174 ), 175 ] 176 177 with open(module.main_nf, "r") as fh: 178 content = fh.read() 179 180 # Go over file content of main.nf and find replacements 181 for pattern in patterns: 182 found_match = False 183 newcontent = [] 184 for line in content.splitlines(): 185 186 # Match the pattern 187 matches_pattern = re.findall("^.*{}.*$".format(pattern[0]), line) 188 if matches_pattern: 189 found_match = True 190 191 # Replace the match 192 newline = re.sub(pattern[0], pattern[1], line) 193 newcontent.append(newline) 194 # No match, keep line as it is 195 else: 196 newcontent.append(line) 197 198 if found_match: 199 content = "\n".join(newcontent) + "\n" 200 else: 201 self.failed.append( 202 (f"Did not find pattern {pattern[0]} in module {module.module_name}", module.module_name) 203 ) 204 return False 205 206 # Write new content to the file 207 with open(module.main_nf, "w") as fh: 208 fh.write(content) 209 210 self.updated.append( 211 ( 212 f"Module updated: {bioconda_version} --> {last_ver}", 213 module.module_name, 214 ) 215 ) 216 return True 217 218 else: 219 self.up_to_date.append((f"Module version up to date: {module.module_name}", module.module_name)) 220 return True 221 222 def get_bioconda_version(self, module): 223 """ 224 Extract the bioconda version from a module 225 """ 226 # Check whether file exists and load it 227 bioconda_packages = None 228 
try: 229 with open(module.main_nf, "r") as fh: 230 lines = fh.readlines() 231 except FileNotFoundError as e: 232 log.error(f"Could not read `main.nf` of {module.module_name} module.") 233 return False 234 235 for l in lines: 236 if re.search("bioconda::", l): 237 bioconda_packages = [b for b in l.split() if "bioconda::" in b] 238 if re.search("org/singularity", l): 239 singularity_tag = l.split("/")[-1].replace('"', "").replace("'", "").split("--")[-1].strip() 240 if re.search("biocontainers", l): 241 docker_tag = l.split("/")[-1].replace('"', "").replace("'", "").split("--")[-1].strip() 242 243 if bioconda_packages: 244 return bioconda_packages 245 else: 246 return False 247 248 def _print_results(self): 249 """ 250 Print the results for the bump_versions command 251 Uses the ``rich`` library to print a set of formatted tables to the command line 252 summarising the linting results. 253 """ 254 255 log.debug("Printing bump_versions results") 256 257 console = Console(force_terminal=rich_force_colors()) 258 # Find maximum module name length 259 max_mod_name_len = 40 260 for m in [self.up_to_date, self.updated, self.failed]: 261 try: 262 max_mod_name_len = max(len(m[2]), max_mod_name_len) 263 except: 264 pass 265 266 def _s(some_list): 267 if len(some_list) > 1: 268 return "s" 269 return "" 270 271 def format_result(module_updates, table): 272 """ 273 Create rows for module updates 274 """ 275 # TODO: Row styles don't work current as table-level style overrides. 276 # I'd like to make an issue about this on the rich repo so leaving here in case there is a future fix 277 last_modname = False 278 row_style = None 279 for module_update in module_updates: 280 if last_modname and module_update[1] != last_modname: 281 if row_style: 282 row_style = None 283 else: 284 row_style = "magenta" 285 last_modname = module_update[1] 286 table.add_row( 287 Markdown(f"{module_update[1]}"), 288 Markdown(f"{module_update[0]}"), 289 style=row_style, 290 ) 291 return table 292 293 # Table of up to date modules 294 if len(self.up_to_date) > 0 and self.show_up_to_date: 295 console.print( 296 rich.panel.Panel( 297 r"[!] {} Module{} version{} up to date.".format( 298 len(self.up_to_date), _s(self.up_to_date), _s(self.up_to_date) 299 ), 300 style="bold green", 301 ) 302 ) 303 table = Table(style="green", box=rich.box.ROUNDED) 304 table.add_column("Module name", width=max_mod_name_len) 305 table.add_column("Update Message") 306 table = format_result(self.up_to_date, table) 307 console.print(table) 308 309 # Table of updated modules 310 if len(self.updated) > 0: 311 console.print( 312 rich.panel.Panel( 313 r"[!] {} Module{} updated".format(len(self.updated), _s(self.updated)), style="bold yellow" 314 ) 315 ) 316 table = Table(style="yellow", box=rich.box.ROUNDED) 317 table.add_column("Module name", width=max_mod_name_len) 318 table.add_column("Update message") 319 table = format_result(self.updated, table) 320 console.print(table) 321 322 # Table of modules that couldn't be updated 323 if len(self.failed) > 0: 324 console.print( 325 rich.panel.Panel( 326 r"[!] 
{} Module update{} failed".format(len(self.failed), _s(self.failed)), style="bold red" 327 ) 328 ) 329 table = Table(style="red", box=rich.box.ROUNDED) 330 table.add_column("Module name", width=max_mod_name_len) 331 table.add_column("Update message") 332 table = format_result(self.failed, table) 333 console.print(table) 334 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nf_core/modules/bump_versions.py b/nf_core/modules/bump_versions.py --- a/nf_core/modules/bump_versions.py +++ b/nf_core/modules/bump_versions.py @@ -50,6 +50,7 @@ self.up_to_date = [] self.updated = [] self.failed = [] + self.ignored = [] self.show_up_to_date = show_uptodate # Verify that this is not a pipeline @@ -130,7 +131,7 @@ if module.module_name in self.bump_versions_config: config_version = self.bump_versions_config[module.module_name] if not config_version: - self.up_to_date.append((f"Omitting module due to config: {module.module_name}", module.module_name)) + self.ignored.append((f"Omitting module due to config.", module.module_name)) return False # check for correct version and newer versions @@ -331,3 +332,16 @@ table.add_column("Update message") table = format_result(self.failed, table) console.print(table) + + # Table of modules ignored due to `.nf-core.yml` + if len(self.ignored) > 0: + console.print( + rich.panel.Panel( + r"[!] {} Module update{} ignored".format(len(self.ignored), _s(self.ignored)), style="grey58" + ) + ) + table = Table(style="grey58", box=rich.box.ROUNDED) + table.add_column("Module name", width=max_mod_name_len) + table.add_column("Update message") + table = format_result(self.ignored, table) + console.print(table)
{"golden_diff": "diff --git a/nf_core/modules/bump_versions.py b/nf_core/modules/bump_versions.py\n--- a/nf_core/modules/bump_versions.py\n+++ b/nf_core/modules/bump_versions.py\n@@ -50,6 +50,7 @@\n self.up_to_date = []\n self.updated = []\n self.failed = []\n+ self.ignored = []\n self.show_up_to_date = show_uptodate\n \n # Verify that this is not a pipeline\n@@ -130,7 +131,7 @@\n if module.module_name in self.bump_versions_config:\n config_version = self.bump_versions_config[module.module_name]\n if not config_version:\n- self.up_to_date.append((f\"Omitting module due to config: {module.module_name}\", module.module_name))\n+ self.ignored.append((f\"Omitting module due to config.\", module.module_name))\n return False\n \n # check for correct version and newer versions\n@@ -331,3 +332,16 @@\n table.add_column(\"Update message\")\n table = format_result(self.failed, table)\n console.print(table)\n+\n+ # Table of modules ignored due to `.nf-core.yml`\n+ if len(self.ignored) > 0:\n+ console.print(\n+ rich.panel.Panel(\n+ r\"[!] {} Module update{} ignored\".format(len(self.ignored), _s(self.ignored)), style=\"grey58\"\n+ )\n+ )\n+ table = Table(style=\"grey58\", box=rich.box.ROUNDED)\n+ table.add_column(\"Module name\", width=max_mod_name_len)\n+ table.add_column(\"Update message\")\n+ table = format_result(self.ignored, table)\n+ console.print(table)\n", "issue": "Show ignore modules in `nf-core modules bump-versions`\nModules ignore via the `.nf-core.yml` are currently not shown at all when running the `nf-core modules bump-versions` command. \r\n\r\n## Describe the solution you'd like\r\n\r\nAdd a new `Module ignored` section to `nf-core modules bump-versions` table with modules ignored via the `.nf-core.yml`. \r\n\r\n## Describe alternatives you've considered\r\n\r\nIssue a log messages about which modules have been ignored. \r\n\n", "before_files": [{"content": "\"\"\"\nBump versions for all modules on nf-core/modules\nor for a single module\n\"\"\"\n\n\nfrom __future__ import print_function\nimport logging\nimport questionary\nimport os\nimport re\nimport rich\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich.markdown import Markdown\nimport rich\nfrom nf_core.utils import rich_force_colors\n\nimport nf_core.utils\nimport nf_core.modules.module_utils\nfrom nf_core.modules.nfcore_module import NFCoreModule\nfrom .modules_command import ModuleCommand\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleVersionBumper(ModuleCommand):\n def __init__(self, pipeline_dir):\n super().__init__(pipeline_dir)\n\n self.up_to_date = None\n self.updated = None\n self.failed = None\n self.show_up_to_date = None\n self.tools_config = {}\n\n def bump_versions(self, module=None, all_modules=False, show_uptodate=False):\n \"\"\"\n Bump the container and conda version of single module or all modules\n\n Looks for a bioconda tool version in the `main.nf` file of the module and checks whether\n are more recent version is available. 
If yes, then tries to get docker/singularity\n container links and replace the bioconda version and the container links in the main.nf file\n of the respective module.\n\n Args:\n module: a specific module to update\n all_modules: whether to bump versions for all modules\n \"\"\"\n self.up_to_date = []\n self.updated = []\n self.failed = []\n self.show_up_to_date = show_uptodate\n\n # Verify that this is not a pipeline\n self.dir, repo_type = nf_core.modules.module_utils.get_repo_type(self.dir)\n if not repo_type == \"modules\":\n raise nf_core.modules.module_utils.ModuleException(\n \"This command only works on the nf-core/modules repository, not on pipelines!\"\n )\n\n # Get list of all modules\n _, nfcore_modules = nf_core.modules.module_utils.get_installed_modules(self.dir)\n\n # Load the .nf-core.yml config\n self.tools_config = nf_core.utils.load_tools_config(self.dir)\n\n # Prompt for module or all\n if module is None and not all_modules:\n question = {\n \"type\": \"list\",\n \"name\": \"all_modules\",\n \"message\": \"Bump versions for all modules or a single named module?\",\n \"choices\": [\"All modules\", \"Named module\"],\n }\n answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)\n if answer[\"all_modules\"] == \"All modules\":\n all_modules = True\n else:\n module = questionary.autocomplete(\n \"Tool name:\",\n choices=[m.module_name for m in nfcore_modules],\n style=nf_core.utils.nfcore_question_style,\n ).unsafe_ask()\n\n if module:\n self.show_up_to_date = True\n if all_modules:\n raise nf_core.modules.module_utils.ModuleException(\n \"You cannot specify a tool and request all tools to be bumped.\"\n )\n nfcore_modules = [m for m in nfcore_modules if m.module_name == module]\n if len(nfcore_modules) == 0:\n raise nf_core.modules.module_utils.ModuleException(f\"Could not find the specified module: '{module}'\")\n\n progress_bar = rich.progress.Progress(\n \"[bold blue]{task.description}\",\n rich.progress.BarColumn(bar_width=None),\n \"[magenta]{task.completed} of {task.total}[reset] \u00bb [bold yellow]{task.fields[test_name]}\",\n transient=True,\n )\n with progress_bar:\n bump_progress = progress_bar.add_task(\n \"Bumping nf-core modules versions\", total=len(nfcore_modules), test_name=nfcore_modules[0].module_name\n )\n for mod in nfcore_modules:\n progress_bar.update(bump_progress, advance=1, test_name=mod.module_name)\n self.bump_module_version(mod)\n\n self._print_results()\n\n def bump_module_version(self, module: NFCoreModule):\n \"\"\"\n Bump the bioconda and container version of a single NFCoreModule\n\n Args:\n module: NFCoreModule\n \"\"\"\n config_version = None\n # Extract bioconda version from `main.nf`\n bioconda_packages = self.get_bioconda_version(module)\n\n # If multiple versions - don't update! 
(can't update mulled containers)\n if not bioconda_packages or len(bioconda_packages) > 1:\n self.failed.append((f\"Ignoring mulled container\", module.module_name))\n return False\n\n # Don't update if blocked in blacklist\n self.bump_versions_config = self.tools_config.get(\"bump-versions\", {})\n if module.module_name in self.bump_versions_config:\n config_version = self.bump_versions_config[module.module_name]\n if not config_version:\n self.up_to_date.append((f\"Omitting module due to config: {module.module_name}\", module.module_name))\n return False\n\n # check for correct version and newer versions\n bioconda_tool_name = bioconda_packages[0].split(\"=\")[0].replace(\"bioconda::\", \"\").strip(\"'\").strip('\"')\n bp = bioconda_packages[0]\n bp = bp.strip(\"'\").strip('\"')\n bioconda_version = bp.split(\"=\")[1]\n\n if not config_version:\n try:\n response = nf_core.utils.anaconda_package(bp)\n except (LookupError, ValueError) as e:\n self.failed.append((f\"Conda version not specified correctly: {module.main_nf}\", module.module_name))\n return False\n\n # Check that required version is available at all\n if bioconda_version not in response.get(\"versions\"):\n self.failed.append((f\"Conda package had unknown version: `{module.main_nf}`\", module.module_name))\n return False\n\n # Check version is latest available\n last_ver = response.get(\"latest_version\")\n else:\n last_ver = config_version\n\n if last_ver is not None and last_ver != bioconda_version:\n log.debug(f\"Updating version for {module.module_name}\")\n # Get docker and singularity container links\n try:\n docker_img, singularity_img = nf_core.utils.get_biocontainer_tag(bioconda_tool_name, last_ver)\n except LookupError as e:\n self.failed.append((f\"Could not download container tags: {e}\", module.module_name))\n return False\n\n patterns = [\n (bioconda_packages[0], f\"'bioconda::{bioconda_tool_name}={last_ver}'\"),\n (r\"quay.io/biocontainers/{}:[^'\\\"\\s]+\".format(bioconda_tool_name), docker_img),\n (\n r\"https://depot.galaxyproject.org/singularity/{}:[^'\\\"\\s]+\".format(bioconda_tool_name),\n singularity_img,\n ),\n ]\n\n with open(module.main_nf, \"r\") as fh:\n content = fh.read()\n\n # Go over file content of main.nf and find replacements\n for pattern in patterns:\n found_match = False\n newcontent = []\n for line in content.splitlines():\n\n # Match the pattern\n matches_pattern = re.findall(\"^.*{}.*$\".format(pattern[0]), line)\n if matches_pattern:\n found_match = True\n\n # Replace the match\n newline = re.sub(pattern[0], pattern[1], line)\n newcontent.append(newline)\n # No match, keep line as it is\n else:\n newcontent.append(line)\n\n if found_match:\n content = \"\\n\".join(newcontent) + \"\\n\"\n else:\n self.failed.append(\n (f\"Did not find pattern {pattern[0]} in module {module.module_name}\", module.module_name)\n )\n return False\n\n # Write new content to the file\n with open(module.main_nf, \"w\") as fh:\n fh.write(content)\n\n self.updated.append(\n (\n f\"Module updated: {bioconda_version} --> {last_ver}\",\n module.module_name,\n )\n )\n return True\n\n else:\n self.up_to_date.append((f\"Module version up to date: {module.module_name}\", module.module_name))\n return True\n\n def get_bioconda_version(self, module):\n \"\"\"\n Extract the bioconda version from a module\n \"\"\"\n # Check whether file exists and load it\n bioconda_packages = None\n try:\n with open(module.main_nf, \"r\") as fh:\n lines = fh.readlines()\n except FileNotFoundError as e:\n log.error(f\"Could not read 
`main.nf` of {module.module_name} module.\")\n return False\n\n for l in lines:\n if re.search(\"bioconda::\", l):\n bioconda_packages = [b for b in l.split() if \"bioconda::\" in b]\n if re.search(\"org/singularity\", l):\n singularity_tag = l.split(\"/\")[-1].replace('\"', \"\").replace(\"'\", \"\").split(\"--\")[-1].strip()\n if re.search(\"biocontainers\", l):\n docker_tag = l.split(\"/\")[-1].replace('\"', \"\").replace(\"'\", \"\").split(\"--\")[-1].strip()\n\n if bioconda_packages:\n return bioconda_packages\n else:\n return False\n\n def _print_results(self):\n \"\"\"\n Print the results for the bump_versions command\n Uses the ``rich`` library to print a set of formatted tables to the command line\n summarising the linting results.\n \"\"\"\n\n log.debug(\"Printing bump_versions results\")\n\n console = Console(force_terminal=rich_force_colors())\n # Find maximum module name length\n max_mod_name_len = 40\n for m in [self.up_to_date, self.updated, self.failed]:\n try:\n max_mod_name_len = max(len(m[2]), max_mod_name_len)\n except:\n pass\n\n def _s(some_list):\n if len(some_list) > 1:\n return \"s\"\n return \"\"\n\n def format_result(module_updates, table):\n \"\"\"\n Create rows for module updates\n \"\"\"\n # TODO: Row styles don't work current as table-level style overrides.\n # I'd like to make an issue about this on the rich repo so leaving here in case there is a future fix\n last_modname = False\n row_style = None\n for module_update in module_updates:\n if last_modname and module_update[1] != last_modname:\n if row_style:\n row_style = None\n else:\n row_style = \"magenta\"\n last_modname = module_update[1]\n table.add_row(\n Markdown(f\"{module_update[1]}\"),\n Markdown(f\"{module_update[0]}\"),\n style=row_style,\n )\n return table\n\n # Table of up to date modules\n if len(self.up_to_date) > 0 and self.show_up_to_date:\n console.print(\n rich.panel.Panel(\n r\"[!] {} Module{} version{} up to date.\".format(\n len(self.up_to_date), _s(self.up_to_date), _s(self.up_to_date)\n ),\n style=\"bold green\",\n )\n )\n table = Table(style=\"green\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update Message\")\n table = format_result(self.up_to_date, table)\n console.print(table)\n\n # Table of updated modules\n if len(self.updated) > 0:\n console.print(\n rich.panel.Panel(\n r\"[!] {} Module{} updated\".format(len(self.updated), _s(self.updated)), style=\"bold yellow\"\n )\n )\n table = Table(style=\"yellow\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update message\")\n table = format_result(self.updated, table)\n console.print(table)\n\n # Table of modules that couldn't be updated\n if len(self.failed) > 0:\n console.print(\n rich.panel.Panel(\n r\"[!] 
{} Module update{} failed\".format(len(self.failed), _s(self.failed)), style=\"bold red\"\n )\n )\n table = Table(style=\"red\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update message\")\n table = format_result(self.failed, table)\n console.print(table)\n", "path": "nf_core/modules/bump_versions.py"}], "after_files": [{"content": "\"\"\"\nBump versions for all modules on nf-core/modules\nor for a single module\n\"\"\"\n\n\nfrom __future__ import print_function\nimport logging\nimport questionary\nimport os\nimport re\nimport rich\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich.markdown import Markdown\nimport rich\nfrom nf_core.utils import rich_force_colors\n\nimport nf_core.utils\nimport nf_core.modules.module_utils\nfrom nf_core.modules.nfcore_module import NFCoreModule\nfrom .modules_command import ModuleCommand\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleVersionBumper(ModuleCommand):\n def __init__(self, pipeline_dir):\n super().__init__(pipeline_dir)\n\n self.up_to_date = None\n self.updated = None\n self.failed = None\n self.show_up_to_date = None\n self.tools_config = {}\n\n def bump_versions(self, module=None, all_modules=False, show_uptodate=False):\n \"\"\"\n Bump the container and conda version of single module or all modules\n\n Looks for a bioconda tool version in the `main.nf` file of the module and checks whether\n are more recent version is available. If yes, then tries to get docker/singularity\n container links and replace the bioconda version and the container links in the main.nf file\n of the respective module.\n\n Args:\n module: a specific module to update\n all_modules: whether to bump versions for all modules\n \"\"\"\n self.up_to_date = []\n self.updated = []\n self.failed = []\n self.ignored = []\n self.show_up_to_date = show_uptodate\n\n # Verify that this is not a pipeline\n self.dir, repo_type = nf_core.modules.module_utils.get_repo_type(self.dir)\n if not repo_type == \"modules\":\n raise nf_core.modules.module_utils.ModuleException(\n \"This command only works on the nf-core/modules repository, not on pipelines!\"\n )\n\n # Get list of all modules\n _, nfcore_modules = nf_core.modules.module_utils.get_installed_modules(self.dir)\n\n # Load the .nf-core.yml config\n self.tools_config = nf_core.utils.load_tools_config(self.dir)\n\n # Prompt for module or all\n if module is None and not all_modules:\n question = {\n \"type\": \"list\",\n \"name\": \"all_modules\",\n \"message\": \"Bump versions for all modules or a single named module?\",\n \"choices\": [\"All modules\", \"Named module\"],\n }\n answer = questionary.unsafe_prompt([question], style=nf_core.utils.nfcore_question_style)\n if answer[\"all_modules\"] == \"All modules\":\n all_modules = True\n else:\n module = questionary.autocomplete(\n \"Tool name:\",\n choices=[m.module_name for m in nfcore_modules],\n style=nf_core.utils.nfcore_question_style,\n ).unsafe_ask()\n\n if module:\n self.show_up_to_date = True\n if all_modules:\n raise nf_core.modules.module_utils.ModuleException(\n \"You cannot specify a tool and request all tools to be bumped.\"\n )\n nfcore_modules = [m for m in nfcore_modules if m.module_name == module]\n if len(nfcore_modules) == 0:\n raise nf_core.modules.module_utils.ModuleException(f\"Could not find the specified module: '{module}'\")\n\n progress_bar = rich.progress.Progress(\n \"[bold blue]{task.description}\",\n rich.progress.BarColumn(bar_width=None),\n 
\"[magenta]{task.completed} of {task.total}[reset] \u00bb [bold yellow]{task.fields[test_name]}\",\n transient=True,\n )\n with progress_bar:\n bump_progress = progress_bar.add_task(\n \"Bumping nf-core modules versions\", total=len(nfcore_modules), test_name=nfcore_modules[0].module_name\n )\n for mod in nfcore_modules:\n progress_bar.update(bump_progress, advance=1, test_name=mod.module_name)\n self.bump_module_version(mod)\n\n self._print_results()\n\n def bump_module_version(self, module: NFCoreModule):\n \"\"\"\n Bump the bioconda and container version of a single NFCoreModule\n\n Args:\n module: NFCoreModule\n \"\"\"\n config_version = None\n # Extract bioconda version from `main.nf`\n bioconda_packages = self.get_bioconda_version(module)\n\n # If multiple versions - don't update! (can't update mulled containers)\n if not bioconda_packages or len(bioconda_packages) > 1:\n self.failed.append((f\"Ignoring mulled container\", module.module_name))\n return False\n\n # Don't update if blocked in blacklist\n self.bump_versions_config = self.tools_config.get(\"bump-versions\", {})\n if module.module_name in self.bump_versions_config:\n config_version = self.bump_versions_config[module.module_name]\n if not config_version:\n self.ignored.append((f\"Omitting module due to config.\", module.module_name))\n return False\n\n # check for correct version and newer versions\n bioconda_tool_name = bioconda_packages[0].split(\"=\")[0].replace(\"bioconda::\", \"\").strip(\"'\").strip('\"')\n bp = bioconda_packages[0]\n bp = bp.strip(\"'\").strip('\"')\n bioconda_version = bp.split(\"=\")[1]\n\n if not config_version:\n try:\n response = nf_core.utils.anaconda_package(bp)\n except (LookupError, ValueError) as e:\n self.failed.append((f\"Conda version not specified correctly: {module.main_nf}\", module.module_name))\n return False\n\n # Check that required version is available at all\n if bioconda_version not in response.get(\"versions\"):\n self.failed.append((f\"Conda package had unknown version: `{module.main_nf}`\", module.module_name))\n return False\n\n # Check version is latest available\n last_ver = response.get(\"latest_version\")\n else:\n last_ver = config_version\n\n if last_ver is not None and last_ver != bioconda_version:\n log.debug(f\"Updating version for {module.module_name}\")\n # Get docker and singularity container links\n try:\n docker_img, singularity_img = nf_core.utils.get_biocontainer_tag(bioconda_tool_name, last_ver)\n except LookupError as e:\n self.failed.append((f\"Could not download container tags: {e}\", module.module_name))\n return False\n\n patterns = [\n (bioconda_packages[0], f\"'bioconda::{bioconda_tool_name}={last_ver}'\"),\n (r\"quay.io/biocontainers/{}:[^'\\\"\\s]+\".format(bioconda_tool_name), docker_img),\n (\n r\"https://depot.galaxyproject.org/singularity/{}:[^'\\\"\\s]+\".format(bioconda_tool_name),\n singularity_img,\n ),\n ]\n\n with open(module.main_nf, \"r\") as fh:\n content = fh.read()\n\n # Go over file content of main.nf and find replacements\n for pattern in patterns:\n found_match = False\n newcontent = []\n for line in content.splitlines():\n\n # Match the pattern\n matches_pattern = re.findall(\"^.*{}.*$\".format(pattern[0]), line)\n if matches_pattern:\n found_match = True\n\n # Replace the match\n newline = re.sub(pattern[0], pattern[1], line)\n newcontent.append(newline)\n # No match, keep line as it is\n else:\n newcontent.append(line)\n\n if found_match:\n content = \"\\n\".join(newcontent) + \"\\n\"\n else:\n self.failed.append(\n (f\"Did 
not find pattern {pattern[0]} in module {module.module_name}\", module.module_name)\n )\n return False\n\n # Write new content to the file\n with open(module.main_nf, \"w\") as fh:\n fh.write(content)\n\n self.updated.append(\n (\n f\"Module updated: {bioconda_version} --> {last_ver}\",\n module.module_name,\n )\n )\n return True\n\n else:\n self.up_to_date.append((f\"Module version up to date: {module.module_name}\", module.module_name))\n return True\n\n def get_bioconda_version(self, module):\n \"\"\"\n Extract the bioconda version from a module\n \"\"\"\n # Check whether file exists and load it\n bioconda_packages = None\n try:\n with open(module.main_nf, \"r\") as fh:\n lines = fh.readlines()\n except FileNotFoundError as e:\n log.error(f\"Could not read `main.nf` of {module.module_name} module.\")\n return False\n\n for l in lines:\n if re.search(\"bioconda::\", l):\n bioconda_packages = [b for b in l.split() if \"bioconda::\" in b]\n if re.search(\"org/singularity\", l):\n singularity_tag = l.split(\"/\")[-1].replace('\"', \"\").replace(\"'\", \"\").split(\"--\")[-1].strip()\n if re.search(\"biocontainers\", l):\n docker_tag = l.split(\"/\")[-1].replace('\"', \"\").replace(\"'\", \"\").split(\"--\")[-1].strip()\n\n if bioconda_packages:\n return bioconda_packages\n else:\n return False\n\n def _print_results(self):\n \"\"\"\n Print the results for the bump_versions command\n Uses the ``rich`` library to print a set of formatted tables to the command line\n summarising the linting results.\n \"\"\"\n\n log.debug(\"Printing bump_versions results\")\n\n console = Console(force_terminal=rich_force_colors())\n # Find maximum module name length\n max_mod_name_len = 40\n for m in [self.up_to_date, self.updated, self.failed]:\n try:\n max_mod_name_len = max(len(m[2]), max_mod_name_len)\n except:\n pass\n\n def _s(some_list):\n if len(some_list) > 1:\n return \"s\"\n return \"\"\n\n def format_result(module_updates, table):\n \"\"\"\n Create rows for module updates\n \"\"\"\n # TODO: Row styles don't work current as table-level style overrides.\n # I'd like to make an issue about this on the rich repo so leaving here in case there is a future fix\n last_modname = False\n row_style = None\n for module_update in module_updates:\n if last_modname and module_update[1] != last_modname:\n if row_style:\n row_style = None\n else:\n row_style = \"magenta\"\n last_modname = module_update[1]\n table.add_row(\n Markdown(f\"{module_update[1]}\"),\n Markdown(f\"{module_update[0]}\"),\n style=row_style,\n )\n return table\n\n # Table of up to date modules\n if len(self.up_to_date) > 0 and self.show_up_to_date:\n console.print(\n rich.panel.Panel(\n r\"[!] {} Module{} version{} up to date.\".format(\n len(self.up_to_date), _s(self.up_to_date), _s(self.up_to_date)\n ),\n style=\"bold green\",\n )\n )\n table = Table(style=\"green\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update Message\")\n table = format_result(self.up_to_date, table)\n console.print(table)\n\n # Table of updated modules\n if len(self.updated) > 0:\n console.print(\n rich.panel.Panel(\n r\"[!] 
{} Module{} updated\".format(len(self.updated), _s(self.updated)), style=\"bold yellow\"\n )\n )\n table = Table(style=\"yellow\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update message\")\n table = format_result(self.updated, table)\n console.print(table)\n\n # Table of modules that couldn't be updated\n if len(self.failed) > 0:\n console.print(\n rich.panel.Panel(\n r\"[!] {} Module update{} failed\".format(len(self.failed), _s(self.failed)), style=\"bold red\"\n )\n )\n table = Table(style=\"red\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update message\")\n table = format_result(self.failed, table)\n console.print(table)\n\n # Table of modules ignored due to `.nf-core.yml`\n if len(self.ignored) > 0:\n console.print(\n rich.panel.Panel(\n r\"[!] {} Module update{} ignored\".format(len(self.ignored), _s(self.ignored)), style=\"grey58\"\n )\n )\n table = Table(style=\"grey58\", box=rich.box.ROUNDED)\n table.add_column(\"Module name\", width=max_mod_name_len)\n table.add_column(\"Update message\")\n table = format_result(self.ignored, table)\n console.print(table)\n", "path": "nf_core/modules/bump_versions.py"}]}
3,968
383
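The nf-core patch in the row above reports ignored modules through a grey58 rich Panel followed by a Table. A minimal, self-contained sketch of that reporting pattern with the rich library on its own is below; the module name and message in `ignored` are invented placeholders, not values from the dataset, and only the Panel/Table pattern mirrors the diff.

```python
# Standalone sketch of the "ignored modules" summary added by the patch above.
# The data in `ignored` is invented for illustration.
import rich.box
import rich.panel
from rich.console import Console
from rich.table import Table

ignored = [("Omitting module due to config.", "samtools/sort")]  # (message, module_name) placeholders

console = Console()
console.print(rich.panel.Panel(f"[!] {len(ignored)} Module update(s) ignored", style="grey58"))

table = Table(style="grey58", box=rich.box.ROUNDED)
table.add_column("Module name", width=40)
table.add_column("Update message")
for message, module_name in ignored:
    table.add_row(module_name, message)
console.print(table)
```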
gh_patches_debug_9412
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-3099
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- allow_remote=false does not prevent remote access ##### Steps to reproduce the problem: 1. Run mitmproxy on a publicly routable host, with default configuration including `listen_host=""`, `listen_port=8080`, and `allow_remote=false`. 2. From a host on a different network, send a request through that instance of mitmproxy, e.g. with `curl --proxy http://your-host.example:8080` The default `allow_remote=false` should prevent this request from succeeding. However, it is served by mitmproxy just fine. ##### Any other comments? What have you tried so far? I have a laptop sitting in the “DMZ” of a home router, which is globally IPv4 routable. I also have a VPS which is globally IPv4 routable. Both the laptop and the VPS are running Ubuntu 16.04 “Xenial Xerus”. I can reproduce the problem with mitmproxy running on the VPS and curl on the laptop, as well as vice-versa. Both tcpdump and mitmproxy’s own Details pane show the request as originating from a remote network. I only noticed this because I saw strange flows in a mitmproxy instance that I spun up on the laptop. ##### System information Mitmproxy: 3.0.3 binary Python: 3.5.2 OpenSSL: OpenSSL 1.1.0g 2 Nov 2017 Platform: Linux-4.4.0-116-generic-x86_64-with-debian-stretch-sid --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/addons/allowremote.py` Content: ``` 1 import ipaddress 2 from mitmproxy import ctx 3 4 5 class AllowRemote: 6 def load(self, loader): 7 loader.add_option( 8 "allow_remote", bool, False, 9 """ 10 Allow remote clients to connect to proxy. If set to false, 11 client will not be able to connect to proxy unless it is on the same network 12 or the proxyauth option is set 13 """ 14 ) 15 16 def clientconnect(self, layer): 17 address = layer.client_conn.address 18 19 accept_connection = ( 20 ctx.options.allow_remote or 21 ipaddress.ip_address(address[0]).is_private or 22 ctx.options.proxyauth is not None 23 ) 24 25 if not accept_connection: 26 layer.reply.kill() 27 ctx.log.warn("Client connection was killed because allow_remote option is set to false, " 28 "client IP was not a private IP and proxyauth was not set.\n" 29 "To allow remote connections set allow_remote option to true or set proxyauth option.") 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/addons/allowremote.py b/mitmproxy/addons/allowremote.py --- a/mitmproxy/addons/allowremote.py +++ b/mitmproxy/addons/allowremote.py @@ -14,11 +14,13 @@ ) def clientconnect(self, layer): - address = layer.client_conn.address + address = ipaddress.ip_address(layer.client_conn.address[0]) + if isinstance(address, ipaddress.IPv6Address): + address = address.ipv4_mapped or address accept_connection = ( ctx.options.allow_remote or - ipaddress.ip_address(address[0]).is_private or + ipaddress.ip_address(address).is_private or ctx.options.proxyauth is not None )
{"golden_diff": "diff --git a/mitmproxy/addons/allowremote.py b/mitmproxy/addons/allowremote.py\n--- a/mitmproxy/addons/allowremote.py\n+++ b/mitmproxy/addons/allowremote.py\n@@ -14,11 +14,13 @@\n )\n \n def clientconnect(self, layer):\n- address = layer.client_conn.address\n+ address = ipaddress.ip_address(layer.client_conn.address[0])\n+ if isinstance(address, ipaddress.IPv6Address):\n+ address = address.ipv4_mapped or address\n \n accept_connection = (\n ctx.options.allow_remote or\n- ipaddress.ip_address(address[0]).is_private or\n+ ipaddress.ip_address(address).is_private or\n ctx.options.proxyauth is not None\n )\n", "issue": "allow_remote=false does not prevent remote access\n##### Steps to reproduce the problem:\r\n\r\n1. Run mitmproxy on a publicly routable host, with default configuration including `listen_host=\"\"`, `listen_port=8080`, and `allow_remote=false`.\r\n2. From a host on a different network, send a request through that instance of mitmproxy, e.g. with `curl --proxy http://your-host.example:8080`\r\n\r\nThe default `allow_remote=false` should prevent this request from succeeding. However, it is served by mitmproxy just fine.\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI have a laptop sitting in the \u201cDMZ\u201d of a home router, which is globally IPv4 routable. I also have a VPS which is globally IPv4 routable. Both the laptop and the VPS are running Ubuntu 16.04 \u201cXenial Xerus\u201d. I can reproduce the problem with mitmproxy running on the VPS and curl on the laptop, as well as vice-versa.\r\n\r\nBoth tcpdump and mitmproxy\u2019s own Details pane show the request as originating from a remote network.\r\n\r\nI only noticed this because I saw strange flows in a mitmproxy instance that I spun up on the laptop.\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.3 binary\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\n\n", "before_files": [{"content": "import ipaddress\nfrom mitmproxy import ctx\n\n\nclass AllowRemote:\n def load(self, loader):\n loader.add_option(\n \"allow_remote\", bool, False,\n \"\"\"\n Allow remote clients to connect to proxy. If set to false,\n client will not be able to connect to proxy unless it is on the same network\n or the proxyauth option is set\n \"\"\"\n )\n\n def clientconnect(self, layer):\n address = layer.client_conn.address\n\n accept_connection = (\n ctx.options.allow_remote or\n ipaddress.ip_address(address[0]).is_private or\n ctx.options.proxyauth is not None\n )\n\n if not accept_connection:\n layer.reply.kill()\n ctx.log.warn(\"Client connection was killed because allow_remote option is set to false, \"\n \"client IP was not a private IP and proxyauth was not set.\\n\"\n \"To allow remote connections set allow_remote option to true or set proxyauth option.\")\n", "path": "mitmproxy/addons/allowremote.py"}], "after_files": [{"content": "import ipaddress\nfrom mitmproxy import ctx\n\n\nclass AllowRemote:\n def load(self, loader):\n loader.add_option(\n \"allow_remote\", bool, False,\n \"\"\"\n Allow remote clients to connect to proxy. 
If set to false,\n client will not be able to connect to proxy unless it is on the same network\n or the proxyauth option is set\n \"\"\"\n )\n\n def clientconnect(self, layer):\n address = ipaddress.ip_address(layer.client_conn.address[0])\n if isinstance(address, ipaddress.IPv6Address):\n address = address.ipv4_mapped or address\n\n accept_connection = (\n ctx.options.allow_remote or\n ipaddress.ip_address(address).is_private or\n ctx.options.proxyauth is not None\n )\n\n if not accept_connection:\n layer.reply.kill()\n ctx.log.warn(\"Client connection was killed because allow_remote option is set to false, \"\n \"client IP was not a private IP and proxyauth was not set.\\n\"\n \"To allow remote connections set allow_remote option to true or set proxyauth option.\")\n", "path": "mitmproxy/addons/allowremote.py"}]}
864
170
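The mitmproxy fix in the row above hinges on dual-stack listeners handing clients over as IPv4-mapped IPv6 addresses (`::ffff:a.b.c.d`). A small standard-library sketch of the unwrapping step from the diff follows; the client address is an arbitrary public example, and the remark about the unpatched behaviour applies to the Python releases current at the time of the report.

```python
# Sketch of the patched address check, standard library only.
import ipaddress

client_host = "::ffff:8.8.8.8"  # illustrative: a public IPv4 client seen through a dual-stack socket

address = ipaddress.ip_address(client_host)
# Unpatched, this IPv6Address passed the .is_private test on the Python releases of
# that era (::ffff:0:0/96 sits on the IPv6 special-purpose list), letting remote clients in.
if isinstance(address, ipaddress.IPv6Address):
    address = address.ipv4_mapped or address  # IPv4Address('8.8.8.8') here; unchanged if not mapped

print(address.is_private)  # False -> connection killed when allow_remote is false
```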
gh_patches_debug_10238
rasdani/github-patches
git_diff
quantumlib__Cirq-4003
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- optimized_for_sycamore fails when you measure more than 16 qubits ```python def thing(): q = cirq.LineQubit.range(17) c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z')) c = cg.optimized_for_sycamore(c) return c thing() ``` ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-44-accaace1183a> in <module> 4 c = cg.optimized_for_sycamore(c) 5 return c ----> 6 thing() <ipython-input-44-accaace1183a> in thing() 2 q = cirq.LineQubit.range(17) 3 c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z')) ----> 4 c = cg.optimized_for_sycamore(c) 5 return c 6 thing() ~/ecc/cirq/cirq/google/optimizers/optimize_for_sycamore.py in optimized_for_sycamore(circuit, new_device, qubit_map, optimizer_type, tolerance, tabulation_resolution) 159 opts = _OPTIMIZER_TYPES[optimizer_type](tolerance=tolerance, tabulation=tabulation) 160 for optimizer in opts: --> 161 optimizer(copy) 162 163 return circuits.Circuit( ~/ecc/cirq/cirq/optimizers/drop_negligible.py in optimize_circuit(self, circuit) 37 for moment_index, moment in enumerate(circuit): 38 for op in moment.operations: ---> 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: 40 deletions.append((moment_index, op)) 41 circuit.batch_remove(deletions) ~/ecc/cirq/cirq/protocols/trace_distance_bound.py in trace_distance_bound(val) 71 72 for strat in strats: ---> 73 result = strat(val) 74 if result is None: 75 break ~/ecc/cirq/cirq/protocols/trace_distance_bound.py in _strat_distance_from_unitary(val) 96 def _strat_distance_from_unitary(val: Any) -> Optional[float]: 97 """Attempts to compute a value's trace_distance_bound from its unitary.""" ---> 98 u = unitary_protocol.unitary(val, default=None) 99 100 if u is None: ~/ecc/cirq/cirq/protocols/unitary_protocol.py in unitary(val, default) 127 ] 128 for strat in strats: --> 129 result = strat(val) 130 if result is None: 131 break ~/ecc/cirq/cirq/protocols/unitary_protocol.py in _strat_unitary_from_apply_unitary(val) 173 174 # Apply unitary effect to an identity matrix. --> 175 state = qis.eye_tensor(val_qid_shape, dtype=np.complex128) 176 buffer = np.empty_like(state) 177 result = method(ApplyUnitaryArgs(state, buffer, range(len(val_qid_shape)))) ~/ecc/cirq/cirq/qis/states.py in eye_tensor(half_shape, dtype) 669 """ 670 identity = np.eye(np.prod(half_shape, dtype=int), dtype=dtype) --> 671 identity.shape = half_shape * 2 672 return identity ValueError: maximum supported dimension for an ndarray is 32, found 34 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/optimizers/drop_negligible.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """An optimization pass that removes operations with tiny effects.""" 16 17 from typing import List, Tuple, TYPE_CHECKING 18 19 from cirq import protocols 20 from cirq.circuits import circuit as _circuit 21 22 if TYPE_CHECKING: 23 from cirq import ops 24 25 26 class DropNegligible: 27 """An optimization pass that removes operations with tiny effects.""" 28 29 def __init__(self, tolerance: float = 1e-8) -> None: 30 self.tolerance = tolerance 31 32 def __call__(self, circuit: _circuit.Circuit): 33 self.optimize_circuit(circuit) 34 35 def optimize_circuit(self, circuit: _circuit.Circuit) -> None: 36 deletions: List[Tuple[int, ops.Operation]] = [] 37 for moment_index, moment in enumerate(circuit): 38 for op in moment.operations: 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: 40 deletions.append((moment_index, op)) 41 circuit.batch_remove(deletions) 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/optimizers/drop_negligible.py b/cirq/optimizers/drop_negligible.py --- a/cirq/optimizers/drop_negligible.py +++ b/cirq/optimizers/drop_negligible.py @@ -36,6 +36,8 @@ deletions: List[Tuple[int, ops.Operation]] = [] for moment_index, moment in enumerate(circuit): for op in moment.operations: - if op is not None and protocols.trace_distance_bound(op) <= self.tolerance: + if protocols.is_measurement(op): + continue + if protocols.trace_distance_bound(op) <= self.tolerance: deletions.append((moment_index, op)) circuit.batch_remove(deletions)
{"golden_diff": "diff --git a/cirq/optimizers/drop_negligible.py b/cirq/optimizers/drop_negligible.py\n--- a/cirq/optimizers/drop_negligible.py\n+++ b/cirq/optimizers/drop_negligible.py\n@@ -36,6 +36,8 @@\n deletions: List[Tuple[int, ops.Operation]] = []\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n- if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\n+ if protocols.is_measurement(op):\n+ continue\n+ if protocols.trace_distance_bound(op) <= self.tolerance:\n deletions.append((moment_index, op))\n circuit.batch_remove(deletions)\n", "issue": "optimized_for_sycamore fails when you measure more than 16 qubits\n```python\r\ndef thing():\r\n q = cirq.LineQubit.range(17)\r\n c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z'))\r\n c = cg.optimized_for_sycamore(c)\r\n return c\r\nthing() \r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-44-accaace1183a> in <module>\r\n 4 c = cg.optimized_for_sycamore(c)\r\n 5 return c\r\n----> 6 thing()\r\n\r\n<ipython-input-44-accaace1183a> in thing()\r\n 2 q = cirq.LineQubit.range(17)\r\n 3 c = cirq.Circuit(cirq.H.on_each(*q), cirq.measure(*q, key='z'))\r\n----> 4 c = cg.optimized_for_sycamore(c)\r\n 5 return c\r\n 6 thing()\r\n\r\n~/ecc/cirq/cirq/google/optimizers/optimize_for_sycamore.py in optimized_for_sycamore(circuit, new_device, qubit_map, optimizer_type, tolerance, tabulation_resolution)\r\n 159 opts = _OPTIMIZER_TYPES[optimizer_type](tolerance=tolerance, tabulation=tabulation)\r\n 160 for optimizer in opts:\r\n--> 161 optimizer(copy)\r\n 162 \r\n 163 return circuits.Circuit(\r\n\r\n~/ecc/cirq/cirq/optimizers/drop_negligible.py in optimize_circuit(self, circuit)\r\n 37 for moment_index, moment in enumerate(circuit):\r\n 38 for op in moment.operations:\r\n---> 39 if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\r\n 40 deletions.append((moment_index, op))\r\n 41 circuit.batch_remove(deletions)\r\n\r\n~/ecc/cirq/cirq/protocols/trace_distance_bound.py in trace_distance_bound(val)\r\n 71 \r\n 72 for strat in strats:\r\n---> 73 result = strat(val)\r\n 74 if result is None:\r\n 75 break\r\n\r\n~/ecc/cirq/cirq/protocols/trace_distance_bound.py in _strat_distance_from_unitary(val)\r\n 96 def _strat_distance_from_unitary(val: Any) -> Optional[float]:\r\n 97 \"\"\"Attempts to compute a value's trace_distance_bound from its unitary.\"\"\"\r\n---> 98 u = unitary_protocol.unitary(val, default=None)\r\n 99 \r\n 100 if u is None:\r\n\r\n~/ecc/cirq/cirq/protocols/unitary_protocol.py in unitary(val, default)\r\n 127 ]\r\n 128 for strat in strats:\r\n--> 129 result = strat(val)\r\n 130 if result is None:\r\n 131 break\r\n\r\n~/ecc/cirq/cirq/protocols/unitary_protocol.py in _strat_unitary_from_apply_unitary(val)\r\n 173 \r\n 174 # Apply unitary effect to an identity matrix.\r\n--> 175 state = qis.eye_tensor(val_qid_shape, dtype=np.complex128)\r\n 176 buffer = np.empty_like(state)\r\n 177 result = method(ApplyUnitaryArgs(state, buffer, range(len(val_qid_shape))))\r\n\r\n~/ecc/cirq/cirq/qis/states.py in eye_tensor(half_shape, dtype)\r\n 669 \"\"\"\r\n 670 identity = np.eye(np.prod(half_shape, dtype=int), dtype=dtype)\r\n--> 671 identity.shape = half_shape * 2\r\n 672 return identity\r\n\r\nValueError: maximum supported dimension for an ndarray is 32, found 34\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the 
Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\nfrom typing import List, Tuple, TYPE_CHECKING\n\nfrom cirq import protocols\nfrom cirq.circuits import circuit as _circuit\n\nif TYPE_CHECKING:\n from cirq import ops\n\n\nclass DropNegligible:\n \"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\n def __init__(self, tolerance: float = 1e-8) -> None:\n self.tolerance = tolerance\n\n def __call__(self, circuit: _circuit.Circuit):\n self.optimize_circuit(circuit)\n\n def optimize_circuit(self, circuit: _circuit.Circuit) -> None:\n deletions: List[Tuple[int, ops.Operation]] = []\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n if op is not None and protocols.trace_distance_bound(op) <= self.tolerance:\n deletions.append((moment_index, op))\n circuit.batch_remove(deletions)\n", "path": "cirq/optimizers/drop_negligible.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\nfrom typing import List, Tuple, TYPE_CHECKING\n\nfrom cirq import protocols\nfrom cirq.circuits import circuit as _circuit\n\nif TYPE_CHECKING:\n from cirq import ops\n\n\nclass DropNegligible:\n \"\"\"An optimization pass that removes operations with tiny effects.\"\"\"\n\n def __init__(self, tolerance: float = 1e-8) -> None:\n self.tolerance = tolerance\n\n def __call__(self, circuit: _circuit.Circuit):\n self.optimize_circuit(circuit)\n\n def optimize_circuit(self, circuit: _circuit.Circuit) -> None:\n deletions: List[Tuple[int, ops.Operation]] = []\n for moment_index, moment in enumerate(circuit):\n for op in moment.operations:\n if protocols.is_measurement(op):\n continue\n if protocols.trace_distance_bound(op) <= self.tolerance:\n deletions.append((moment_index, op))\n circuit.batch_remove(deletions)\n", "path": "cirq/optimizers/drop_negligible.py"}]}
1,573
163
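The Cirq traceback in the row above bottoms out in `eye_tensor`, which reshapes an identity matrix into a rank-2n tensor for an n-qubit operation; under NumPy 1.x's 32-dimension cap that breaks for any operation touching 17 or more qubits (2 * 17 = 34), which is why 16-qubit measurements still worked. The merged fix simply has `DropNegligible` skip measurement operations. The snippet below reproduces the NumPy limit cheaply with a size-1 placeholder instead of the real 2**17 x 2**17 matrix.

```python
# Cheap reproduction of the NumPy rank limit behind the traceback above.
# A size-1 placeholder stands in for the real 2**17 x 2**17 identity matrix.
import numpy as np

n_qubits = 17
half_shape = (1,) * n_qubits         # stand-in for the (2,) * 17 qid shape
identity = np.zeros(1)

try:
    identity.shape = half_shape * 2  # rank 34: the same assignment that fails in eye_tensor
except ValueError as exc:
    print(exc)  # e.g. "maximum supported dimension for an ndarray is 32, found 34"
```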
gh_patches_debug_32634
rasdani/github-patches
git_diff
huggingface__trl-555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StackLLAMA examples uses sentiment model as reward model The script to train the StackLLAMA model uses a sentiment pipeline for training the reward model while it should use the model that was trained before. https://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/rl_training.py#L216 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/research_projects/stack_llama/scripts/rl_training.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 from dataclasses import dataclass, field 16 from typing import Optional 17 18 import torch 19 from accelerate import Accelerator 20 from datasets import load_dataset 21 from peft import LoraConfig 22 from tqdm import tqdm 23 from transformers import Adafactor, AutoTokenizer, HfArgumentParser, pipeline 24 25 from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, set_seed 26 from trl.core import LengthSampler 27 28 29 tqdm.pandas() 30 31 32 @dataclass 33 class ScriptArguments: 34 """ 35 The name of the Casual LM model we wish to fine with PPO 36 """ 37 38 # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode 39 # models like gpt-neo* models are more suitable. 
40 model_name: Optional[str] = field(default="", metadata={"help": "the model name"}) 41 tokenizer_name: Optional[str] = field(default="", metadata={"help": "the tokenizer name"}) 42 reward_model_name: Optional[str] = field(default="", metadata={"help": "the reward model name"}) 43 log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) 44 learning_rate: Optional[float] = field(default=1.41e-5, metadata={"help": "the learning rate"}) 45 output_max_length: Optional[int] = field(default=128, metadata={"help": "maximum length for generation"}) 46 mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) 47 batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) 48 ppo_epochs: Optional[int] = field(default=4, metadata={"help": "the number of ppo epochs"}) 49 gradient_accumulation_steps: Optional[int] = field( 50 default=4, metadata={"help": "the number of gradient accumulation steps"} 51 ) 52 adafactor: Optional[bool] = field(default=False, metadata={"help": "whether to use the adafactor optimizer"}) 53 early_stopping: Optional[bool] = field(default=False, metadata={"help": "whether to early stop"}) 54 target_kl: Optional[float] = field(default=0.1, metadata={"help": "kl target for early stopping"}) 55 reward_baseline: Optional[float] = field( 56 default=0.0, 57 metadata={"help": "a baseline value that is subtracted from the reward"}, 58 ) 59 batched_gen: Optional[bool] = field(default=False, metadata={"help": "whether to use the batched text gen"}) 60 save_freq: Optional[int] = field(default=None, metadata={"help": "n steps to save the model"}) 61 output_dir: Optional[str] = field(default="runs/", metadata={"help": "n steps to save the model"}) 62 seed: Optional[int] = field(default=0, metadata={"help": "the seed"}) 63 steps: Optional[int] = field(default=20000, metadata={"help": "number of epochs"}) 64 init_kl_coef: Optional[float] = field( 65 default=0.2, 66 metadata={"help": "Initial KL penalty coefficient (used for adaptive and linear control)"}, 67 ) 68 69 adap_kl_ctrl: Optional[bool] = field(default=True, metadata={"help": "Use adaptive KL control, otherwise linear"}) 70 71 72 parser = HfArgumentParser(ScriptArguments) 73 script_args: ScriptArguments = parser.parse_args_into_dataclasses()[0] 74 reward_model_name = script_args.reward_model_name 75 dataset_name = "lvwerra/stack-exchange-paired" 76 config = PPOConfig( 77 steps=script_args.steps, 78 model_name=script_args.model_name, 79 learning_rate=script_args.learning_rate, 80 log_with=script_args.log_with, 81 batch_size=script_args.batch_size, 82 mini_batch_size=script_args.mini_batch_size, 83 gradient_accumulation_steps=script_args.gradient_accumulation_steps, 84 optimize_cuda_cache=True, 85 early_stopping=script_args.early_stopping, 86 target_kl=script_args.target_kl, 87 ppo_epochs=script_args.ppo_epochs, 88 seed=script_args.seed, 89 init_kl_coef=script_args.init_kl_coef, 90 adap_kl_ctrl=script_args.adap_kl_ctrl, 91 ) 92 93 train_dataset = load_dataset("lvwerra/stack-exchange-paired", data_dir="data/rl", split="train") 94 train_dataset = train_dataset.select(range(100000)) 95 # We then define the arguments to pass to the sentiment analysis pipeline. 96 # We set `return_all_scores` to True to get the sentiment score for each token. 
97 sent_kwargs = { 98 "return_all_scores": True, 99 "function_to_apply": "none", 100 "batch_size": 16, 101 "truncation": True, 102 } 103 104 tokenizer = AutoTokenizer.from_pretrained(script_args.tokenizer_name) 105 # GPT-2 tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. 106 # only for this model. 107 108 if getattr(tokenizer, "pad_token", None) is None: 109 tokenizer.pad_token = tokenizer.eos_token 110 111 112 # Below is an example function to build the dataset. In our case, we use the IMDB dataset 113 # from the `datasets` library. One should customize this function to train the model on 114 # its own dataset. 115 def build_dataset( 116 tokenizer, 117 dataset_name="lvwerra/stack-exchange-paired", 118 ): 119 """ 120 Build dataset for training. This builds the dataset from `load_dataset`, one should 121 customize this function to train the model on its own dataset. 122 123 Args: 124 dataset_name (`str`): 125 The name of the dataset to be loaded. 126 127 Returns: 128 dataloader (`torch.utils.data.DataLoader`): 129 The dataloader for the dataset. 130 """ 131 132 # load imdb with datasets 133 ds = load_dataset(dataset_name, data_dir="data/rl", split="train") 134 original_columns = ds.column_names 135 num_proc = 24 136 137 def preprocess_function(examples): 138 new_examples = { 139 "query": [], 140 "input_ids": [], 141 } 142 for question in examples["question"]: 143 query = "Question: " + question + "\n\nAnswer: " 144 tokenized_question = tokenizer(query, truncation=True) 145 new_examples["query"].append(query) 146 new_examples["input_ids"].append(tokenized_question["input_ids"]) 147 148 return new_examples 149 150 ds = train_dataset.map( 151 preprocess_function, 152 batched=True, 153 num_proc=num_proc, 154 remove_columns=original_columns, 155 ) 156 ds = ds.filter(lambda x: len(x["input_ids"]) < 512, batched=False) 157 158 ds.set_format(type="torch") 159 return ds 160 161 162 # We retrieve the dataloader by calling the `build_dataset` function. 163 dataset = build_dataset(tokenizer) 164 165 166 def collator(data): 167 return dict((key, [d[key] for d in data]) for key in data[0]) 168 169 170 # set seed before initializing value head for deterministic eval 171 set_seed(config.seed) 172 173 # Now let's build the model, the reference model, and the tokenizer. 174 current_device = Accelerator().local_process_index 175 176 lora_config = LoraConfig( 177 r=16, 178 lora_alpha=32, 179 lora_dropout=0.05, 180 bias="none", 181 task_type="CAUSAL_LM", 182 ) 183 model = AutoModelForCausalLMWithValueHead.from_pretrained( 184 config.model_name, 185 load_in_8bit=True, 186 device_map={"": current_device}, 187 peft_config=lora_config, 188 ) 189 190 optimizer = None 191 if script_args.adafactor: 192 optimizer = Adafactor( 193 filter(lambda p: p.requires_grad, model.parameters()), 194 scale_parameter=False, 195 relative_step=False, 196 warmup_init=False, 197 lr=config.learning_rate, 198 ) 199 # We then build the PPOTrainer, passing the model, the reference model, the tokenizer 200 ppo_trainer = PPOTrainer( 201 config, 202 model, 203 ref_model=None, 204 tokenizer=tokenizer, 205 dataset=dataset, 206 data_collator=collator, 207 optimizer=optimizer, 208 ) 209 210 # We then build the sentiment analysis pipeline, passing the model name and the 211 # sentiment analysis pipeline arguments. Let's also make sure to set the device 212 # to the same device as the PPOTrainer. 
213 device = ppo_trainer.accelerator.device 214 if ppo_trainer.accelerator.num_processes == 1: 215 device = 0 if torch.cuda.is_available() else "cpu" # to avoid a ` pipeline` bug 216 sentiment_pipe = pipeline( 217 "sentiment-analysis", 218 model=reward_model_name, 219 device_map={"": current_device}, 220 model_kwargs={"load_in_8bit": True}, 221 tokenizer=tokenizer, 222 return_token_type_ids=False, 223 ) 224 225 # We then define the arguments to pass to the `generate` function. These arguments 226 # are passed to the `generate` function of the PPOTrainer, which is a wrapper around 227 # the `generate` function of the trained model. 228 generation_kwargs = { 229 # "min_length": -1, 230 "top_k": 0.0, 231 "top_p": 1.0, 232 "do_sample": True, 233 "pad_token_id": tokenizer.pad_token_id, 234 "eos_token_id": 100_000, 235 } 236 output_min_length = 32 237 output_max_length = script_args.output_max_length 238 output_length_sampler = LengthSampler(output_min_length, output_max_length) 239 240 for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): 241 if epoch >= config.total_ppo_epochs: 242 break 243 244 question_tensors = batch["input_ids"] 245 246 response_tensors = ppo_trainer.generate( 247 question_tensors, 248 return_prompt=False, 249 length_sampler=output_length_sampler, 250 **generation_kwargs, 251 ) 252 batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) 253 254 # Compute sentiment score 255 texts = [q + r for q, r in zip(batch["query"], batch["response"])] 256 pipe_outputs = sentiment_pipe(texts, **sent_kwargs) 257 rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs] 258 259 # Run PPO step 260 stats = ppo_trainer.step(question_tensors, response_tensors, rewards) 261 ppo_trainer.log_stats(stats, batch, rewards) 262 263 if script_args.save_freq and epoch and epoch % script_args.save_freq == 0: 264 ppo_trainer.save_pretrained(script_args.output_dir + f"step_{epoch}") 265 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/research_projects/stack_llama/scripts/rl_training.py b/examples/research_projects/stack_llama/scripts/rl_training.py --- a/examples/research_projects/stack_llama/scripts/rl_training.py +++ b/examples/research_projects/stack_llama/scripts/rl_training.py @@ -92,6 +92,7 @@ train_dataset = load_dataset("lvwerra/stack-exchange-paired", data_dir="data/rl", split="train") train_dataset = train_dataset.select(range(100000)) + # We then define the arguments to pass to the sentiment analysis pipeline. # We set `return_all_scores` to True to get the sentiment score for each token. sent_kwargs = { @@ -207,9 +208,9 @@ optimizer=optimizer, ) -# We then build the sentiment analysis pipeline, passing the model name and the -# sentiment analysis pipeline arguments. Let's also make sure to set the device -# to the same device as the PPOTrainer. +# We then build the sentiment analysis pipeline using our reward model, passing the +# model name and the sentiment analysis pipeline arguments. Let's also make sure to +# set the device to the same device as the PPOTrainer. device = ppo_trainer.accelerator.device if ppo_trainer.accelerator.num_processes == 1: device = 0 if torch.cuda.is_available() else "cpu" # to avoid a ` pipeline` bug @@ -251,7 +252,7 @@ ) batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True) - # Compute sentiment score + # Compute reward score (using the sentiment analysis pipeline) texts = [q + r for q, r in zip(batch["query"], batch["response"])] pipe_outputs = sentiment_pipe(texts, **sent_kwargs) rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]
{"golden_diff": "diff --git a/examples/research_projects/stack_llama/scripts/rl_training.py b/examples/research_projects/stack_llama/scripts/rl_training.py\n--- a/examples/research_projects/stack_llama/scripts/rl_training.py\n+++ b/examples/research_projects/stack_llama/scripts/rl_training.py\n@@ -92,6 +92,7 @@\n \n train_dataset = load_dataset(\"lvwerra/stack-exchange-paired\", data_dir=\"data/rl\", split=\"train\")\n train_dataset = train_dataset.select(range(100000))\n+\n # We then define the arguments to pass to the sentiment analysis pipeline.\n # We set `return_all_scores` to True to get the sentiment score for each token.\n sent_kwargs = {\n@@ -207,9 +208,9 @@\n optimizer=optimizer,\n )\n \n-# We then build the sentiment analysis pipeline, passing the model name and the\n-# sentiment analysis pipeline arguments. Let's also make sure to set the device\n-# to the same device as the PPOTrainer.\n+# We then build the sentiment analysis pipeline using our reward model, passing the\n+# model name and the sentiment analysis pipeline arguments. Let's also make sure to\n+# set the device to the same device as the PPOTrainer.\n device = ppo_trainer.accelerator.device\n if ppo_trainer.accelerator.num_processes == 1:\n device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a ` pipeline` bug\n@@ -251,7 +252,7 @@\n )\n batch[\"response\"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)\n \n- # Compute sentiment score\n+ # Compute reward score (using the sentiment analysis pipeline)\n texts = [q + r for q, r in zip(batch[\"query\"], batch[\"response\"])]\n pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n rewards = [torch.tensor(output[0][\"score\"] - script_args.reward_baseline) for output in pipe_outputs]\n", "issue": "StackLLAMA examples uses sentiment model as reward model\nThe script to train the StackLLAMA model uses a sentiment pipeline for training the reward model while it should use the model that was trained before. \r\n\r\nhttps://github.com/lvwerra/trl/blob/main/examples/stack_llama/scripts/rl_training.py#L216\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport torch\nfrom accelerate import Accelerator\nfrom datasets import load_dataset\nfrom peft import LoraConfig\nfrom tqdm import tqdm\nfrom transformers import Adafactor, AutoTokenizer, HfArgumentParser, pipeline\n\nfrom trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, set_seed\nfrom trl.core import LengthSampler\n\n\ntqdm.pandas()\n\n\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The name of the Casual LM model we wish to fine with PPO\n \"\"\"\n\n # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode\n # models like gpt-neo* models are more suitable.\n model_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the model name\"})\n tokenizer_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the tokenizer name\"})\n reward_model_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the reward model name\"})\n log_with: Optional[str] = field(default=None, metadata={\"help\": \"use 'wandb' to log with wandb\"})\n learning_rate: Optional[float] = field(default=1.41e-5, metadata={\"help\": \"the learning rate\"})\n output_max_length: Optional[int] = field(default=128, metadata={\"help\": \"maximum length for generation\"})\n mini_batch_size: Optional[int] = field(default=1, metadata={\"help\": \"the PPO minibatch size\"})\n batch_size: Optional[int] = field(default=32, metadata={\"help\": \"the batch size\"})\n ppo_epochs: Optional[int] = field(default=4, metadata={\"help\": \"the number of ppo epochs\"})\n gradient_accumulation_steps: Optional[int] = field(\n default=4, metadata={\"help\": \"the number of gradient accumulation steps\"}\n )\n adafactor: Optional[bool] = field(default=False, metadata={\"help\": \"whether to use the adafactor optimizer\"})\n early_stopping: Optional[bool] = field(default=False, metadata={\"help\": \"whether to early stop\"})\n target_kl: Optional[float] = field(default=0.1, metadata={\"help\": \"kl target for early stopping\"})\n reward_baseline: Optional[float] = field(\n default=0.0,\n metadata={\"help\": \"a baseline value that is subtracted from the reward\"},\n )\n batched_gen: Optional[bool] = field(default=False, metadata={\"help\": \"whether to use the batched text gen\"})\n save_freq: Optional[int] = field(default=None, metadata={\"help\": \"n steps to save the model\"})\n output_dir: Optional[str] = field(default=\"runs/\", metadata={\"help\": \"n steps to save the model\"})\n seed: Optional[int] = field(default=0, metadata={\"help\": \"the seed\"})\n steps: Optional[int] = field(default=20000, metadata={\"help\": \"number of epochs\"})\n init_kl_coef: Optional[float] = field(\n default=0.2,\n metadata={\"help\": \"Initial KL penalty coefficient (used for adaptive and linear control)\"},\n )\n\n adap_kl_ctrl: Optional[bool] = field(default=True, metadata={\"help\": \"Use adaptive KL control, otherwise 
linear\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args: ScriptArguments = parser.parse_args_into_dataclasses()[0]\nreward_model_name = script_args.reward_model_name\ndataset_name = \"lvwerra/stack-exchange-paired\"\nconfig = PPOConfig(\n steps=script_args.steps,\n model_name=script_args.model_name,\n learning_rate=script_args.learning_rate,\n log_with=script_args.log_with,\n batch_size=script_args.batch_size,\n mini_batch_size=script_args.mini_batch_size,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n optimize_cuda_cache=True,\n early_stopping=script_args.early_stopping,\n target_kl=script_args.target_kl,\n ppo_epochs=script_args.ppo_epochs,\n seed=script_args.seed,\n init_kl_coef=script_args.init_kl_coef,\n adap_kl_ctrl=script_args.adap_kl_ctrl,\n)\n\ntrain_dataset = load_dataset(\"lvwerra/stack-exchange-paired\", data_dir=\"data/rl\", split=\"train\")\ntrain_dataset = train_dataset.select(range(100000))\n# We then define the arguments to pass to the sentiment analysis pipeline.\n# We set `return_all_scores` to True to get the sentiment score for each token.\nsent_kwargs = {\n \"return_all_scores\": True,\n \"function_to_apply\": \"none\",\n \"batch_size\": 16,\n \"truncation\": True,\n}\n\ntokenizer = AutoTokenizer.from_pretrained(script_args.tokenizer_name)\n# GPT-2 tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token.\n# only for this model.\n\nif getattr(tokenizer, \"pad_token\", None) is None:\n tokenizer.pad_token = tokenizer.eos_token\n\n\n# Below is an example function to build the dataset. In our case, we use the IMDB dataset\n# from the `datasets` library. One should customize this function to train the model on\n# its own dataset.\ndef build_dataset(\n tokenizer,\n dataset_name=\"lvwerra/stack-exchange-paired\",\n):\n \"\"\"\n Build dataset for training. 
This builds the dataset from `load_dataset`, one should\n customize this function to train the model on its own dataset.\n\n Args:\n dataset_name (`str`):\n The name of the dataset to be loaded.\n\n Returns:\n dataloader (`torch.utils.data.DataLoader`):\n The dataloader for the dataset.\n \"\"\"\n\n # load imdb with datasets\n ds = load_dataset(dataset_name, data_dir=\"data/rl\", split=\"train\")\n original_columns = ds.column_names\n num_proc = 24\n\n def preprocess_function(examples):\n new_examples = {\n \"query\": [],\n \"input_ids\": [],\n }\n for question in examples[\"question\"]:\n query = \"Question: \" + question + \"\\n\\nAnswer: \"\n tokenized_question = tokenizer(query, truncation=True)\n new_examples[\"query\"].append(query)\n new_examples[\"input_ids\"].append(tokenized_question[\"input_ids\"])\n\n return new_examples\n\n ds = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=num_proc,\n remove_columns=original_columns,\n )\n ds = ds.filter(lambda x: len(x[\"input_ids\"]) < 512, batched=False)\n\n ds.set_format(type=\"torch\")\n return ds\n\n\n# We retrieve the dataloader by calling the `build_dataset` function.\ndataset = build_dataset(tokenizer)\n\n\ndef collator(data):\n return dict((key, [d[key] for d in data]) for key in data[0])\n\n\n# set seed before initializing value head for deterministic eval\nset_seed(config.seed)\n\n# Now let's build the model, the reference model, and the tokenizer.\ncurrent_device = Accelerator().local_process_index\n\nlora_config = LoraConfig(\n r=16,\n lora_alpha=32,\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n)\nmodel = AutoModelForCausalLMWithValueHead.from_pretrained(\n config.model_name,\n load_in_8bit=True,\n device_map={\"\": current_device},\n peft_config=lora_config,\n)\n\noptimizer = None\nif script_args.adafactor:\n optimizer = Adafactor(\n filter(lambda p: p.requires_grad, model.parameters()),\n scale_parameter=False,\n relative_step=False,\n warmup_init=False,\n lr=config.learning_rate,\n )\n# We then build the PPOTrainer, passing the model, the reference model, the tokenizer\nppo_trainer = PPOTrainer(\n config,\n model,\n ref_model=None,\n tokenizer=tokenizer,\n dataset=dataset,\n data_collator=collator,\n optimizer=optimizer,\n)\n\n# We then build the sentiment analysis pipeline, passing the model name and the\n# sentiment analysis pipeline arguments. Let's also make sure to set the device\n# to the same device as the PPOTrainer.\ndevice = ppo_trainer.accelerator.device\nif ppo_trainer.accelerator.num_processes == 1:\n device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a ` pipeline` bug\nsentiment_pipe = pipeline(\n \"sentiment-analysis\",\n model=reward_model_name,\n device_map={\"\": current_device},\n model_kwargs={\"load_in_8bit\": True},\n tokenizer=tokenizer,\n return_token_type_ids=False,\n)\n\n# We then define the arguments to pass to the `generate` function. 
These arguments\n# are passed to the `generate` function of the PPOTrainer, which is a wrapper around\n# the `generate` function of the trained model.\ngeneration_kwargs = {\n # \"min_length\": -1,\n \"top_k\": 0.0,\n \"top_p\": 1.0,\n \"do_sample\": True,\n \"pad_token_id\": tokenizer.pad_token_id,\n \"eos_token_id\": 100_000,\n}\noutput_min_length = 32\noutput_max_length = script_args.output_max_length\noutput_length_sampler = LengthSampler(output_min_length, output_max_length)\n\nfor epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):\n if epoch >= config.total_ppo_epochs:\n break\n\n question_tensors = batch[\"input_ids\"]\n\n response_tensors = ppo_trainer.generate(\n question_tensors,\n return_prompt=False,\n length_sampler=output_length_sampler,\n **generation_kwargs,\n )\n batch[\"response\"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)\n\n # Compute sentiment score\n texts = [q + r for q, r in zip(batch[\"query\"], batch[\"response\"])]\n pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n rewards = [torch.tensor(output[0][\"score\"] - script_args.reward_baseline) for output in pipe_outputs]\n\n # Run PPO step\n stats = ppo_trainer.step(question_tensors, response_tensors, rewards)\n ppo_trainer.log_stats(stats, batch, rewards)\n\n if script_args.save_freq and epoch and epoch % script_args.save_freq == 0:\n ppo_trainer.save_pretrained(script_args.output_dir + f\"step_{epoch}\")\n", "path": "examples/research_projects/stack_llama/scripts/rl_training.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport torch\nfrom accelerate import Accelerator\nfrom datasets import load_dataset\nfrom peft import LoraConfig\nfrom tqdm import tqdm\nfrom transformers import Adafactor, AutoTokenizer, HfArgumentParser, pipeline\n\nfrom trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, set_seed\nfrom trl.core import LengthSampler\n\n\ntqdm.pandas()\n\n\n@dataclass\nclass ScriptArguments:\n \"\"\"\n The name of the Casual LM model we wish to fine with PPO\n \"\"\"\n\n # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode\n # models like gpt-neo* models are more suitable.\n model_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the model name\"})\n tokenizer_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the tokenizer name\"})\n reward_model_name: Optional[str] = field(default=\"\", metadata={\"help\": \"the reward model name\"})\n log_with: Optional[str] = field(default=None, metadata={\"help\": \"use 'wandb' to log with wandb\"})\n learning_rate: Optional[float] = field(default=1.41e-5, metadata={\"help\": \"the learning rate\"})\n output_max_length: Optional[int] = field(default=128, metadata={\"help\": \"maximum length for generation\"})\n mini_batch_size: Optional[int] = field(default=1, 
metadata={\"help\": \"the PPO minibatch size\"})\n batch_size: Optional[int] = field(default=32, metadata={\"help\": \"the batch size\"})\n ppo_epochs: Optional[int] = field(default=4, metadata={\"help\": \"the number of ppo epochs\"})\n gradient_accumulation_steps: Optional[int] = field(\n default=4, metadata={\"help\": \"the number of gradient accumulation steps\"}\n )\n adafactor: Optional[bool] = field(default=False, metadata={\"help\": \"whether to use the adafactor optimizer\"})\n early_stopping: Optional[bool] = field(default=False, metadata={\"help\": \"whether to early stop\"})\n target_kl: Optional[float] = field(default=0.1, metadata={\"help\": \"kl target for early stopping\"})\n reward_baseline: Optional[float] = field(\n default=0.0,\n metadata={\"help\": \"a baseline value that is subtracted from the reward\"},\n )\n batched_gen: Optional[bool] = field(default=False, metadata={\"help\": \"whether to use the batched text gen\"})\n save_freq: Optional[int] = field(default=None, metadata={\"help\": \"n steps to save the model\"})\n output_dir: Optional[str] = field(default=\"runs/\", metadata={\"help\": \"n steps to save the model\"})\n seed: Optional[int] = field(default=0, metadata={\"help\": \"the seed\"})\n steps: Optional[int] = field(default=20000, metadata={\"help\": \"number of epochs\"})\n init_kl_coef: Optional[float] = field(\n default=0.2,\n metadata={\"help\": \"Initial KL penalty coefficient (used for adaptive and linear control)\"},\n )\n\n adap_kl_ctrl: Optional[bool] = field(default=True, metadata={\"help\": \"Use adaptive KL control, otherwise linear\"})\n\n\nparser = HfArgumentParser(ScriptArguments)\nscript_args: ScriptArguments = parser.parse_args_into_dataclasses()[0]\nreward_model_name = script_args.reward_model_name\ndataset_name = \"lvwerra/stack-exchange-paired\"\nconfig = PPOConfig(\n steps=script_args.steps,\n model_name=script_args.model_name,\n learning_rate=script_args.learning_rate,\n log_with=script_args.log_with,\n batch_size=script_args.batch_size,\n mini_batch_size=script_args.mini_batch_size,\n gradient_accumulation_steps=script_args.gradient_accumulation_steps,\n optimize_cuda_cache=True,\n early_stopping=script_args.early_stopping,\n target_kl=script_args.target_kl,\n ppo_epochs=script_args.ppo_epochs,\n seed=script_args.seed,\n init_kl_coef=script_args.init_kl_coef,\n adap_kl_ctrl=script_args.adap_kl_ctrl,\n)\n\ntrain_dataset = load_dataset(\"lvwerra/stack-exchange-paired\", data_dir=\"data/rl\", split=\"train\")\ntrain_dataset = train_dataset.select(range(100000))\n\n# We then define the arguments to pass to the sentiment analysis pipeline.\n# We set `return_all_scores` to True to get the sentiment score for each token.\nsent_kwargs = {\n \"return_all_scores\": True,\n \"function_to_apply\": \"none\",\n \"batch_size\": 16,\n \"truncation\": True,\n}\n\ntokenizer = AutoTokenizer.from_pretrained(script_args.tokenizer_name)\n# GPT-2 tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token.\n# only for this model.\n\nif getattr(tokenizer, \"pad_token\", None) is None:\n tokenizer.pad_token = tokenizer.eos_token\n\n\n# Below is an example function to build the dataset. In our case, we use the IMDB dataset\n# from the `datasets` library. One should customize this function to train the model on\n# its own dataset.\ndef build_dataset(\n tokenizer,\n dataset_name=\"lvwerra/stack-exchange-paired\",\n):\n \"\"\"\n Build dataset for training. 
This builds the dataset from `load_dataset`, one should\n customize this function to train the model on its own dataset.\n\n Args:\n dataset_name (`str`):\n The name of the dataset to be loaded.\n\n Returns:\n dataloader (`torch.utils.data.DataLoader`):\n The dataloader for the dataset.\n \"\"\"\n\n # load imdb with datasets\n ds = load_dataset(dataset_name, data_dir=\"data/rl\", split=\"train\")\n original_columns = ds.column_names\n num_proc = 24\n\n def preprocess_function(examples):\n new_examples = {\n \"query\": [],\n \"input_ids\": [],\n }\n for question in examples[\"question\"]:\n query = \"Question: \" + question + \"\\n\\nAnswer: \"\n tokenized_question = tokenizer(query, truncation=True)\n new_examples[\"query\"].append(query)\n new_examples[\"input_ids\"].append(tokenized_question[\"input_ids\"])\n\n return new_examples\n\n ds = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=num_proc,\n remove_columns=original_columns,\n )\n ds = ds.filter(lambda x: len(x[\"input_ids\"]) < 512, batched=False)\n\n ds.set_format(type=\"torch\")\n return ds\n\n\n# We retrieve the dataloader by calling the `build_dataset` function.\ndataset = build_dataset(tokenizer)\n\n\ndef collator(data):\n return dict((key, [d[key] for d in data]) for key in data[0])\n\n\n# set seed before initializing value head for deterministic eval\nset_seed(config.seed)\n\n# Now let's build the model, the reference model, and the tokenizer.\ncurrent_device = Accelerator().local_process_index\n\nlora_config = LoraConfig(\n r=16,\n lora_alpha=32,\n lora_dropout=0.05,\n bias=\"none\",\n task_type=\"CAUSAL_LM\",\n)\nmodel = AutoModelForCausalLMWithValueHead.from_pretrained(\n config.model_name,\n load_in_8bit=True,\n device_map={\"\": current_device},\n peft_config=lora_config,\n)\n\noptimizer = None\nif script_args.adafactor:\n optimizer = Adafactor(\n filter(lambda p: p.requires_grad, model.parameters()),\n scale_parameter=False,\n relative_step=False,\n warmup_init=False,\n lr=config.learning_rate,\n )\n# We then build the PPOTrainer, passing the model, the reference model, the tokenizer\nppo_trainer = PPOTrainer(\n config,\n model,\n ref_model=None,\n tokenizer=tokenizer,\n dataset=dataset,\n data_collator=collator,\n optimizer=optimizer,\n)\n\n# We then build the sentiment analysis pipeline using our reward model, passing the\n# model name and the sentiment analysis pipeline arguments. Let's also make sure to\n# set the device to the same device as the PPOTrainer.\ndevice = ppo_trainer.accelerator.device\nif ppo_trainer.accelerator.num_processes == 1:\n device = 0 if torch.cuda.is_available() else \"cpu\" # to avoid a ` pipeline` bug\nsentiment_pipe = pipeline(\n \"sentiment-analysis\",\n model=reward_model_name,\n device_map={\"\": current_device},\n model_kwargs={\"load_in_8bit\": True},\n tokenizer=tokenizer,\n return_token_type_ids=False,\n)\n\n# We then define the arguments to pass to the `generate` function. 
These arguments\n# are passed to the `generate` function of the PPOTrainer, which is a wrapper around\n# the `generate` function of the trained model.\ngeneration_kwargs = {\n # \"min_length\": -1,\n \"top_k\": 0.0,\n \"top_p\": 1.0,\n \"do_sample\": True,\n \"pad_token_id\": tokenizer.pad_token_id,\n \"eos_token_id\": 100_000,\n}\noutput_min_length = 32\noutput_max_length = script_args.output_max_length\noutput_length_sampler = LengthSampler(output_min_length, output_max_length)\n\nfor epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):\n if epoch >= config.total_ppo_epochs:\n break\n\n question_tensors = batch[\"input_ids\"]\n\n response_tensors = ppo_trainer.generate(\n question_tensors,\n return_prompt=False,\n length_sampler=output_length_sampler,\n **generation_kwargs,\n )\n batch[\"response\"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)\n\n # Compute reward score (using the sentiment analysis pipeline)\n texts = [q + r for q, r in zip(batch[\"query\"], batch[\"response\"])]\n pipe_outputs = sentiment_pipe(texts, **sent_kwargs)\n rewards = [torch.tensor(output[0][\"score\"] - script_args.reward_baseline) for output in pipe_outputs]\n\n # Run PPO step\n stats = ppo_trainer.step(question_tensors, response_tensors, rewards)\n ppo_trainer.log_stats(stats, batch, rewards)\n\n if script_args.save_freq and epoch and epoch % script_args.save_freq == 0:\n ppo_trainer.save_pretrained(script_args.output_dir + f\"step_{epoch}\")\n", "path": "examples/research_projects/stack_llama/scripts/rl_training.py"}]}
3478
434
gh_patches_debug_42418
rasdani/github-patches
git_diff
vispy__vispy-698
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement a server-side GLIR cache This is backend-independent. The GL context needs to store a list of (almost) all GLIR commands emitted since initialization. Unnecessary GLIR commands (old commands superseded by new commands) are automatically discarded (pruning), so that this list should never be very long. - [ ] implement the cache (easy) - [ ] dynamically prune old GLIR commands that are superseded by new ones: `DRAW`, `UNIFORM`, `FUNC` (slightly less straightforward) - completely get rid of `on_initialize()` (can be done in another PR) - fix the `ipynb_webgl` backend so that the latest state is automatically reinitialized when the context is lost (can be done in another PR) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vispy/app/backends/_ipynb_webgl.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2014, Vispy Development Team. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 4 5 """ 6 Vispy backend for the IPython notebook (WebGL approach). 7 """ 8 9 from __future__ import division 10 11 from ..base import (BaseApplicationBackend, BaseCanvasBackend, 12 BaseTimerBackend) 13 from ._ipynb_util import create_glir_message 14 from ...util import logger, keys 15 from ...ext import six 16 from vispy.gloo.glir import BaseGlirParser 17 18 # Import for displaying Javascript on notebook 19 import os.path as op 20 21 # -------------------------------------------------------------------- init --- 22 23 capability = dict( # things that can be set by the backend 24 title=True, # But it only applies to the dummy window :P 25 size=True, # We cannot possibly say we dont, because Canvas always sets it 26 position=True, # Dito 27 show=True, 28 vsync=False, 29 resizable=True, 30 decorate=False, 31 fullscreen=True, 32 context=True, 33 multi_window=False, 34 scroll=True, 35 parent=False, 36 ) 37 38 39 # Init dummy objects needed to import this module withour errors. 40 # These are all overwritten with imports from IPython (on success) 41 DOMWidget = object 42 Unicode = Int = Float = Bool = lambda *args, **kwargs: None 43 44 # Try importing IPython 45 try: 46 import tornado 47 import IPython 48 IPYTHON_MAJOR_VERSION = IPython.version_info[0] 49 if IPYTHON_MAJOR_VERSION < 2: 50 raise RuntimeError('ipynb_webgl backend requires IPython >= 2.0') 51 from IPython.html.widgets import DOMWidget 52 from IPython.utils.traitlets import Unicode, Int 53 from IPython.display import display, Javascript 54 from IPython.html.nbextensions import install_nbextension 55 except Exception as exp: 56 # raise ImportError("The WebGL backend requires IPython >= 2.0") 57 available, testable, why_not, which = False, False, str(exp), None 58 else: 59 available, testable, why_not, which = True, False, None, None 60 61 62 # ------------------------------------------------------------- application --- 63 def _prepare_js(): 64 pkgdir = op.dirname(__file__) 65 jsdir = op.join(pkgdir, '../../html/static/js/') 66 # Make sure the JS files are installed to user directory (new argument 67 # in IPython 3.0). 
68 if IPYTHON_MAJOR_VERSION >= 3: 69 kwargs = {'user': True} 70 else: 71 kwargs = {} 72 install_nbextension([op.join(jsdir, 'vispy.min.js'), 73 op.join(jsdir, 'jquery.mousewheel.min.js')], 74 **kwargs) 75 backend_path = op.join(jsdir, 'webgl-backend.js') 76 with open(backend_path, 'r') as f: 77 script = f.read() 78 display(Javascript(script)) 79 80 81 class ApplicationBackend(BaseApplicationBackend): 82 83 def __init__(self): 84 BaseApplicationBackend.__init__(self) 85 _prepare_js() 86 87 def _vispy_reuse(self): 88 _prepare_js() 89 90 def _vispy_get_backend_name(self): 91 return 'ipynb_webgl' 92 93 def _vispy_process_events(self): 94 # TODO: may be implemented later. 95 raise NotImplementedError() 96 97 def _vispy_run(self): 98 pass 99 100 def _vispy_quit(self): 101 pass 102 103 def _vispy_get_native_app(self): 104 return self 105 106 107 # ------------------------------------------------------------------ canvas --- 108 class WebGLGlirParser(BaseGlirParser): 109 def __init__(self, widget): 110 self._widget = widget 111 112 def is_remote(self): 113 return True 114 115 def convert_shaders(self): 116 return 'es2' 117 118 def parse(self, commands): 119 self._widget.send_glir_commands(commands) 120 121 122 class CanvasBackend(BaseCanvasBackend): 123 # args are for BaseCanvasBackend, kwargs are for us. 124 def __init__(self, *args, **kwargs): 125 BaseCanvasBackend.__init__(self, *args) 126 # Maybe to ensure that exactly all arguments are passed? 127 title, size, position, show, vsync, resize, dec, fs, parent, context, \ 128 = self._process_backend_kwargs(kwargs) 129 self._context = context 130 131 # TODO: do something with context.config 132 # Take the context. 133 context.shared.add_ref('webgl', self) 134 if context.shared.ref is self: 135 pass # ok 136 else: 137 raise RuntimeError("WebGL doesn't yet support context sharing.") 138 139 self._create_widget(size=size) 140 141 def _create_widget(self, size=None): 142 self._widget = VispyWidget(self._gen_event, size=size) 143 # Set glir parser on context and context.shared 144 context = self._vispy_canvas.context 145 context.shared.parser = WebGLGlirParser(self._widget) 146 147 def _reinit_widget(self): 148 self._vispy_canvas.set_current() 149 150 self._vispy_canvas.events.initialize() 151 self._vispy_canvas.events.resize(size=(self._widget.width, 152 self._widget.height)) 153 self._vispy_canvas.events.draw() 154 155 def _vispy_warmup(self): 156 pass 157 158 # Uncommenting these makes the backend crash. 159 def _vispy_set_current(self): 160 pass 161 162 def _vispy_swap_buffers(self): 163 pass 164 165 def _vispy_set_title(self, title): 166 raise NotImplementedError() 167 168 def _vispy_get_fullscreen(self): 169 # We don't want error messages to show up when the user presses 170 # F11 to fullscreen the browser. 171 pass 172 173 def _vispy_set_fullscreen(self, fullscreen): 174 # We don't want error messages to show up when the user presses 175 # F11 to fullscreen the browser. 
176 pass 177 178 def _vispy_get_size(self): 179 return (self._widget.width, self._widget.height) 180 181 def _vispy_set_size(self, w, h): 182 self._widget.width = w 183 self._widget.height = h 184 185 def _vispy_get_position(self): 186 raise NotImplementedError() 187 188 def _vispy_set_position(self, x, y): 189 logger.warning('IPython notebook canvas cannot be repositioned.') 190 191 def _vispy_set_visible(self, visible): 192 if not visible: 193 logger.warning('IPython notebook canvas cannot be hidden.') 194 else: 195 display(self._widget) 196 self._reinit_widget() 197 198 def _vispy_update(self): 199 ioloop = tornado.ioloop.IOLoop.current() 200 ioloop.add_callback(self._draw_event) 201 202 def _draw_event(self): 203 self._vispy_canvas.set_current() 204 self._vispy_canvas.events.draw() 205 206 def _vispy_close(self): 207 raise NotImplementedError() 208 209 def _vispy_mouse_release(self, **kwds): 210 # HACK: override this method from the base canvas in order to 211 # avoid breaking other backends. 212 kwds.update(self._vispy_mouse_data) 213 ev = self._vispy_canvas.events.mouse_release(**kwds) 214 if ev is None: 215 return 216 self._vispy_mouse_data['press_event'] = None 217 # TODO: this is a bit ugly, need to improve mouse button handling in 218 # app 219 ev._button = None 220 self._vispy_mouse_data['buttons'] = [] 221 self._vispy_mouse_data['last_event'] = ev 222 return ev 223 224 # Generate vispy events according to upcoming JS events 225 _modifiers_map = { 226 'ctrl': keys.CONTROL, 227 'shift': keys.SHIFT, 228 'alt': keys.ALT, 229 } 230 231 def _gen_event(self, ev): 232 if self._vispy_canvas is None: 233 return 234 event_type = ev['type'] 235 key_code = ev.get('key_code', None) 236 if key_code is None: 237 key, key_text = None, None 238 else: 239 if hasattr(keys, key_code): 240 key = getattr(keys, key_code) 241 else: 242 key = keys.Key(key_code) 243 # Generate the key text to pass to the event handler. 244 if key_code == 'SPACE': 245 key_text = ' ' 246 else: 247 key_text = six.text_type(key_code) 248 # Process modifiers. 
249 modifiers = ev.get('modifiers', None) 250 if modifiers: 251 modifiers = tuple([self._modifiers_map[modifier] 252 for modifier in modifiers 253 if modifier in self._modifiers_map]) 254 if event_type == "mouse_move": 255 self._vispy_mouse_move(native=ev, 256 button=ev["button"], 257 pos=ev["pos"], 258 modifiers=modifiers, 259 ) 260 elif event_type == "mouse_press": 261 self._vispy_mouse_press(native=ev, 262 pos=ev["pos"], 263 button=ev["button"], 264 modifiers=modifiers, 265 ) 266 elif event_type == "mouse_release": 267 self._vispy_mouse_release(native=ev, 268 pos=ev["pos"], 269 button=ev["button"], 270 modifiers=modifiers, 271 ) 272 elif event_type == "mouse_wheel": 273 self._vispy_canvas.events.mouse_wheel(native=ev, 274 delta=ev["delta"], 275 pos=ev["pos"], 276 button=ev["button"], 277 modifiers=modifiers, 278 ) 279 elif event_type == "key_press": 280 self._vispy_canvas.events.key_press(native=ev, 281 key=key, 282 text=key_text, 283 modifiers=modifiers, 284 ) 285 elif event_type == "key_release": 286 self._vispy_canvas.events.key_release(native=ev, 287 key=key, 288 text=key_text, 289 modifiers=modifiers, 290 ) 291 elif event_type == "resize": 292 self._vispy_canvas.events.resize(native=ev, 293 size=ev["size"]) 294 elif event_type == "paint": 295 self._vispy_canvas.events.draw() 296 297 298 # ------------------------------------------------------------------- Timer --- 299 class TimerBackend(BaseTimerBackend): 300 def __init__(self, *args, **kwargs): 301 super(TimerBackend, self).__init__(*args, **kwargs) 302 self._timer = tornado.ioloop.PeriodicCallback( 303 self._vispy_timer._timeout, 304 1000) 305 306 def _vispy_start(self, interval): 307 self._timer.callback_time = interval * 1000 308 self._timer.start() 309 310 def _vispy_stop(self): 311 self._timer.stop() 312 313 314 # ---------------------------------------------------------- IPython Widget --- 315 class VispyWidget(DOMWidget): 316 _view_name = Unicode("VispyView", sync=True) 317 318 width = Int(sync=True) 319 height = Int(sync=True) 320 321 def __init__(self, gen_event, **kwargs): 322 super(VispyWidget, self).__init__(**kwargs) 323 w, h = kwargs.get('size', (500, 200)) 324 self.width = w 325 self.height = h 326 self.gen_event = gen_event 327 self.on_msg(self.events_received) 328 329 def events_received(self, _, msg): 330 if msg['msg_type'] == 'events': 331 events = msg['contents'] 332 for ev in events: 333 self.gen_event(ev) 334 335 def send_glir_commands(self, commands): 336 # TODO: check whether binary websocket is available (ipython >= 3) 337 # Until IPython 3.0 is released, use base64. 338 array_serialization = 'base64' 339 # array_serialization = 'binary' 340 if array_serialization == 'base64': 341 msg = create_glir_message(commands, 'base64') 342 msg['array_serialization'] = 'base64' 343 self.send(msg) 344 elif array_serialization == 'binary': 345 msg = create_glir_message(commands, 'binary') 346 msg['array_serialization'] = 'binary' 347 # Remove the buffers from the JSON message: they will be sent 348 # independently via binary WebSocket. 349 buffers = msg.pop('buffers') 350 self.comm.send({"method": "custom", "content": msg}, 351 buffers=buffers) 352 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vispy/app/backends/_ipynb_webgl.py b/vispy/app/backends/_ipynb_webgl.py --- a/vispy/app/backends/_ipynb_webgl.py +++ b/vispy/app/backends/_ipynb_webgl.py @@ -14,6 +14,7 @@ from ...util import logger, keys from ...ext import six from vispy.gloo.glir import BaseGlirParser +from vispy.app import Timer # Import for displaying Javascript on notebook import os.path as op @@ -60,7 +61,7 @@ # ------------------------------------------------------------- application --- -def _prepare_js(): +def _prepare_js(force=False): pkgdir = op.dirname(__file__) jsdir = op.join(pkgdir, '../../html/static/js/') # Make sure the JS files are installed to user directory (new argument @@ -69,8 +70,7 @@ kwargs = {'user': True} else: kwargs = {} - install_nbextension([op.join(jsdir, 'vispy.min.js'), - op.join(jsdir, 'jquery.mousewheel.min.js')], + install_nbextension(op.join(jsdir, 'vispy.min.js'), overwrite=force, **kwargs) backend_path = op.join(jsdir, 'webgl-backend.js') with open(backend_path, 'r') as f: @@ -139,7 +139,7 @@ self._create_widget(size=size) def _create_widget(self, size=None): - self._widget = VispyWidget(self._gen_event, size=size) + self._widget = VispyWidget(self, size=size) # Set glir parser on context and context.shared context = self._vispy_canvas.context context.shared.parser = WebGLGlirParser(self._widget) @@ -312,18 +312,32 @@ # ---------------------------------------------------------- IPython Widget --- +def _stop_timers(canvas): + """Stop all timers in a canvas.""" + for attr in dir(canvas): + try: + attr_obj = getattr(canvas, attr) + except NotImplementedError: + # This try/except is needed because canvas.position raises + # an error (it is not implemented in this backend). + attr_obj = None + if isinstance(attr_obj, Timer): + attr_obj.stop() + + class VispyWidget(DOMWidget): _view_name = Unicode("VispyView", sync=True) width = Int(sync=True) height = Int(sync=True) - def __init__(self, gen_event, **kwargs): + def __init__(self, canvas_backend, **kwargs): super(VispyWidget, self).__init__(**kwargs) w, h = kwargs.get('size', (500, 200)) self.width = w self.height = h - self.gen_event = gen_event + self.canvas_backend = canvas_backend + self.gen_event = canvas_backend._gen_event self.on_msg(self.events_received) def events_received(self, _, msg): @@ -331,6 +345,10 @@ events = msg['contents'] for ev in events: self.gen_event(ev) + elif msg['msg_type'] == 'status': + if msg['contents'] == 'removed': + # Stop all timers associated to the widget. + _stop_timers(self.canvas_backend._vispy_canvas) def send_glir_commands(self, commands): # TODO: check whether binary websocket is available (ipython >= 3)
{"golden_diff": "diff --git a/vispy/app/backends/_ipynb_webgl.py b/vispy/app/backends/_ipynb_webgl.py\n--- a/vispy/app/backends/_ipynb_webgl.py\n+++ b/vispy/app/backends/_ipynb_webgl.py\n@@ -14,6 +14,7 @@\n from ...util import logger, keys\n from ...ext import six\n from vispy.gloo.glir import BaseGlirParser\n+from vispy.app import Timer\n \n # Import for displaying Javascript on notebook\n import os.path as op\n@@ -60,7 +61,7 @@\n \n \n # ------------------------------------------------------------- application ---\n-def _prepare_js():\n+def _prepare_js(force=False):\n pkgdir = op.dirname(__file__)\n jsdir = op.join(pkgdir, '../../html/static/js/')\n # Make sure the JS files are installed to user directory (new argument\n@@ -69,8 +70,7 @@\n kwargs = {'user': True}\n else:\n kwargs = {}\n- install_nbextension([op.join(jsdir, 'vispy.min.js'),\n- op.join(jsdir, 'jquery.mousewheel.min.js')],\n+ install_nbextension(op.join(jsdir, 'vispy.min.js'), overwrite=force,\n **kwargs)\n backend_path = op.join(jsdir, 'webgl-backend.js')\n with open(backend_path, 'r') as f:\n@@ -139,7 +139,7 @@\n self._create_widget(size=size)\n \n def _create_widget(self, size=None):\n- self._widget = VispyWidget(self._gen_event, size=size)\n+ self._widget = VispyWidget(self, size=size)\n # Set glir parser on context and context.shared\n context = self._vispy_canvas.context\n context.shared.parser = WebGLGlirParser(self._widget)\n@@ -312,18 +312,32 @@\n \n \n # ---------------------------------------------------------- IPython Widget ---\n+def _stop_timers(canvas):\n+ \"\"\"Stop all timers in a canvas.\"\"\"\n+ for attr in dir(canvas):\n+ try:\n+ attr_obj = getattr(canvas, attr)\n+ except NotImplementedError:\n+ # This try/except is needed because canvas.position raises\n+ # an error (it is not implemented in this backend).\n+ attr_obj = None\n+ if isinstance(attr_obj, Timer):\n+ attr_obj.stop()\n+\n+\n class VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n \n width = Int(sync=True)\n height = Int(sync=True)\n \n- def __init__(self, gen_event, **kwargs):\n+ def __init__(self, canvas_backend, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n w, h = kwargs.get('size', (500, 200))\n self.width = w\n self.height = h\n- self.gen_event = gen_event\n+ self.canvas_backend = canvas_backend\n+ self.gen_event = canvas_backend._gen_event\n self.on_msg(self.events_received)\n \n def events_received(self, _, msg):\n@@ -331,6 +345,10 @@\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n+ elif msg['msg_type'] == 'status':\n+ if msg['contents'] == 'removed':\n+ # Stop all timers associated to the widget.\n+ _stop_timers(self.canvas_backend._vispy_canvas)\n \n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n", "issue": "Implement a server-side GLIR cache\nThis is backend-independent. The GL context needs to store a list of (almost) all GLIR commands emitted since initialization. 
Unnecessary GLIR commands (old commands superseded by new commands) are automatically discarded (pruning), so that this list should never be very long.\n- [ ] implement the cache (easy)\n- [ ] dynamically prune old GLIR commands that are superseded by new ones: `DRAW`, `UNIFORM`, `FUNC` (slightly less straightforward)\n- completely get rid of `on_initialize()` (can be done in another PR)\n- fix the `ipynb_webgl` backend so that the latest state is automatically reinitialized when the context is lost (can be done in another PR)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\nVispy backend for the IPython notebook (WebGL approach).\n\"\"\"\n\nfrom __future__ import division\n\nfrom ..base import (BaseApplicationBackend, BaseCanvasBackend,\n BaseTimerBackend)\nfrom ._ipynb_util import create_glir_message\nfrom ...util import logger, keys\nfrom ...ext import six\nfrom vispy.gloo.glir import BaseGlirParser\n\n# Import for displaying Javascript on notebook\nimport os.path as op\n\n# -------------------------------------------------------------------- init ---\n\ncapability = dict( # things that can be set by the backend\n title=True, # But it only applies to the dummy window :P\n size=True, # We cannot possibly say we dont, because Canvas always sets it\n position=True, # Dito\n show=True,\n vsync=False,\n resizable=True,\n decorate=False,\n fullscreen=True,\n context=True,\n multi_window=False,\n scroll=True,\n parent=False,\n)\n\n\n# Init dummy objects needed to import this module withour errors.\n# These are all overwritten with imports from IPython (on success)\nDOMWidget = object\nUnicode = Int = Float = Bool = lambda *args, **kwargs: None\n\n# Try importing IPython\ntry:\n import tornado\n import IPython\n IPYTHON_MAJOR_VERSION = IPython.version_info[0]\n if IPYTHON_MAJOR_VERSION < 2:\n raise RuntimeError('ipynb_webgl backend requires IPython >= 2.0')\n from IPython.html.widgets import DOMWidget\n from IPython.utils.traitlets import Unicode, Int\n from IPython.display import display, Javascript\n from IPython.html.nbextensions import install_nbextension\nexcept Exception as exp:\n # raise ImportError(\"The WebGL backend requires IPython >= 2.0\")\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n available, testable, why_not, which = True, False, None, None\n\n\n# ------------------------------------------------------------- application ---\ndef _prepare_js():\n pkgdir = op.dirname(__file__)\n jsdir = op.join(pkgdir, '../../html/static/js/')\n # Make sure the JS files are installed to user directory (new argument\n # in IPython 3.0).\n if IPYTHON_MAJOR_VERSION >= 3:\n kwargs = {'user': True}\n else:\n kwargs = {}\n install_nbextension([op.join(jsdir, 'vispy.min.js'),\n op.join(jsdir, 'jquery.mousewheel.min.js')],\n **kwargs)\n backend_path = op.join(jsdir, 'webgl-backend.js')\n with open(backend_path, 'r') as f:\n script = f.read()\n display(Javascript(script))\n\n\nclass ApplicationBackend(BaseApplicationBackend):\n\n def __init__(self):\n BaseApplicationBackend.__init__(self)\n _prepare_js()\n\n def _vispy_reuse(self):\n _prepare_js()\n\n def _vispy_get_backend_name(self):\n return 'ipynb_webgl'\n\n def _vispy_process_events(self):\n # TODO: may be implemented later.\n raise NotImplementedError()\n\n def _vispy_run(self):\n pass\n\n def _vispy_quit(self):\n pass\n\n def _vispy_get_native_app(self):\n return self\n\n\n# 
------------------------------------------------------------------ canvas ---\nclass WebGLGlirParser(BaseGlirParser):\n def __init__(self, widget):\n self._widget = widget\n\n def is_remote(self):\n return True\n\n def convert_shaders(self):\n return 'es2'\n\n def parse(self, commands):\n self._widget.send_glir_commands(commands)\n\n\nclass CanvasBackend(BaseCanvasBackend):\n # args are for BaseCanvasBackend, kwargs are for us.\n def __init__(self, *args, **kwargs):\n BaseCanvasBackend.__init__(self, *args)\n # Maybe to ensure that exactly all arguments are passed?\n title, size, position, show, vsync, resize, dec, fs, parent, context, \\\n = self._process_backend_kwargs(kwargs)\n self._context = context\n\n # TODO: do something with context.config\n # Take the context.\n context.shared.add_ref('webgl', self)\n if context.shared.ref is self:\n pass # ok\n else:\n raise RuntimeError(\"WebGL doesn't yet support context sharing.\")\n\n self._create_widget(size=size)\n\n def _create_widget(self, size=None):\n self._widget = VispyWidget(self._gen_event, size=size)\n # Set glir parser on context and context.shared\n context = self._vispy_canvas.context\n context.shared.parser = WebGLGlirParser(self._widget)\n\n def _reinit_widget(self):\n self._vispy_canvas.set_current()\n\n self._vispy_canvas.events.initialize()\n self._vispy_canvas.events.resize(size=(self._widget.width,\n self._widget.height))\n self._vispy_canvas.events.draw()\n\n def _vispy_warmup(self):\n pass\n\n # Uncommenting these makes the backend crash.\n def _vispy_set_current(self):\n pass\n\n def _vispy_swap_buffers(self):\n pass\n\n def _vispy_set_title(self, title):\n raise NotImplementedError()\n\n def _vispy_get_fullscreen(self):\n # We don't want error messages to show up when the user presses\n # F11 to fullscreen the browser.\n pass\n\n def _vispy_set_fullscreen(self, fullscreen):\n # We don't want error messages to show up when the user presses\n # F11 to fullscreen the browser.\n pass\n\n def _vispy_get_size(self):\n return (self._widget.width, self._widget.height)\n\n def _vispy_set_size(self, w, h):\n self._widget.width = w\n self._widget.height = h\n\n def _vispy_get_position(self):\n raise NotImplementedError()\n\n def _vispy_set_position(self, x, y):\n logger.warning('IPython notebook canvas cannot be repositioned.')\n\n def _vispy_set_visible(self, visible):\n if not visible:\n logger.warning('IPython notebook canvas cannot be hidden.')\n else:\n display(self._widget)\n self._reinit_widget()\n\n def _vispy_update(self):\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.add_callback(self._draw_event)\n\n def _draw_event(self):\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.draw()\n\n def _vispy_close(self):\n raise NotImplementedError()\n\n def _vispy_mouse_release(self, **kwds):\n # HACK: override this method from the base canvas in order to\n # avoid breaking other backends.\n kwds.update(self._vispy_mouse_data)\n ev = self._vispy_canvas.events.mouse_release(**kwds)\n if ev is None:\n return\n self._vispy_mouse_data['press_event'] = None\n # TODO: this is a bit ugly, need to improve mouse button handling in\n # app\n ev._button = None\n self._vispy_mouse_data['buttons'] = []\n self._vispy_mouse_data['last_event'] = ev\n return ev\n\n # Generate vispy events according to upcoming JS events\n _modifiers_map = {\n 'ctrl': keys.CONTROL,\n 'shift': keys.SHIFT,\n 'alt': keys.ALT,\n }\n\n def _gen_event(self, ev):\n if self._vispy_canvas is None:\n return\n event_type = ev['type']\n key_code = 
ev.get('key_code', None)\n if key_code is None:\n key, key_text = None, None\n else:\n if hasattr(keys, key_code):\n key = getattr(keys, key_code)\n else:\n key = keys.Key(key_code)\n # Generate the key text to pass to the event handler.\n if key_code == 'SPACE':\n key_text = ' '\n else:\n key_text = six.text_type(key_code)\n # Process modifiers.\n modifiers = ev.get('modifiers', None)\n if modifiers:\n modifiers = tuple([self._modifiers_map[modifier]\n for modifier in modifiers\n if modifier in self._modifiers_map])\n if event_type == \"mouse_move\":\n self._vispy_mouse_move(native=ev,\n button=ev[\"button\"],\n pos=ev[\"pos\"],\n modifiers=modifiers,\n )\n elif event_type == \"mouse_press\":\n self._vispy_mouse_press(native=ev,\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"mouse_release\":\n self._vispy_mouse_release(native=ev,\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"mouse_wheel\":\n self._vispy_canvas.events.mouse_wheel(native=ev,\n delta=ev[\"delta\"],\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"key_press\":\n self._vispy_canvas.events.key_press(native=ev,\n key=key,\n text=key_text,\n modifiers=modifiers,\n )\n elif event_type == \"key_release\":\n self._vispy_canvas.events.key_release(native=ev,\n key=key,\n text=key_text,\n modifiers=modifiers,\n )\n elif event_type == \"resize\":\n self._vispy_canvas.events.resize(native=ev,\n size=ev[\"size\"])\n elif event_type == \"paint\":\n self._vispy_canvas.events.draw()\n\n\n# ------------------------------------------------------------------- Timer ---\nclass TimerBackend(BaseTimerBackend):\n def __init__(self, *args, **kwargs):\n super(TimerBackend, self).__init__(*args, **kwargs)\n self._timer = tornado.ioloop.PeriodicCallback(\n self._vispy_timer._timeout,\n 1000)\n\n def _vispy_start(self, interval):\n self._timer.callback_time = interval * 1000\n self._timer.start()\n\n def _vispy_stop(self):\n self._timer.stop()\n\n\n# ---------------------------------------------------------- IPython Widget ---\nclass VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n\n width = Int(sync=True)\n height = Int(sync=True)\n\n def __init__(self, gen_event, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n w, h = kwargs.get('size', (500, 200))\n self.width = w\n self.height = h\n self.gen_event = gen_event\n self.on_msg(self.events_received)\n\n def events_received(self, _, msg):\n if msg['msg_type'] == 'events':\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n\n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n # Until IPython 3.0 is released, use base64.\n array_serialization = 'base64'\n # array_serialization = 'binary'\n if array_serialization == 'base64':\n msg = create_glir_message(commands, 'base64')\n msg['array_serialization'] = 'base64'\n self.send(msg)\n elif array_serialization == 'binary':\n msg = create_glir_message(commands, 'binary')\n msg['array_serialization'] = 'binary'\n # Remove the buffers from the JSON message: they will be sent\n # independently via binary WebSocket.\n buffers = msg.pop('buffers')\n self.comm.send({\"method\": \"custom\", \"content\": msg},\n buffers=buffers)\n", "path": "vispy/app/backends/_ipynb_webgl.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\n\"\"\"\nVispy backend for the IPython notebook (WebGL approach).\n\"\"\"\n\nfrom __future__ import division\n\nfrom ..base import (BaseApplicationBackend, BaseCanvasBackend,\n BaseTimerBackend)\nfrom ._ipynb_util import create_glir_message\nfrom ...util import logger, keys\nfrom ...ext import six\nfrom vispy.gloo.glir import BaseGlirParser\nfrom vispy.app import Timer\n\n# Import for displaying Javascript on notebook\nimport os.path as op\n\n# -------------------------------------------------------------------- init ---\n\ncapability = dict( # things that can be set by the backend\n title=True, # But it only applies to the dummy window :P\n size=True, # We cannot possibly say we dont, because Canvas always sets it\n position=True, # Dito\n show=True,\n vsync=False,\n resizable=True,\n decorate=False,\n fullscreen=True,\n context=True,\n multi_window=False,\n scroll=True,\n parent=False,\n)\n\n\n# Init dummy objects needed to import this module withour errors.\n# These are all overwritten with imports from IPython (on success)\nDOMWidget = object\nUnicode = Int = Float = Bool = lambda *args, **kwargs: None\n\n# Try importing IPython\ntry:\n import tornado\n import IPython\n IPYTHON_MAJOR_VERSION = IPython.version_info[0]\n if IPYTHON_MAJOR_VERSION < 2:\n raise RuntimeError('ipynb_webgl backend requires IPython >= 2.0')\n from IPython.html.widgets import DOMWidget\n from IPython.utils.traitlets import Unicode, Int\n from IPython.display import display, Javascript\n from IPython.html.nbextensions import install_nbextension\nexcept Exception as exp:\n # raise ImportError(\"The WebGL backend requires IPython >= 2.0\")\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n available, testable, why_not, which = True, False, None, None\n\n\n# ------------------------------------------------------------- application ---\ndef _prepare_js(force=False):\n pkgdir = op.dirname(__file__)\n jsdir = op.join(pkgdir, '../../html/static/js/')\n # Make sure the JS files are installed to user directory (new argument\n # in IPython 3.0).\n if IPYTHON_MAJOR_VERSION >= 3:\n kwargs = {'user': True}\n else:\n kwargs = {}\n install_nbextension(op.join(jsdir, 'vispy.min.js'), overwrite=force,\n **kwargs)\n backend_path = op.join(jsdir, 'webgl-backend.js')\n with open(backend_path, 'r') as f:\n script = f.read()\n display(Javascript(script))\n\n\nclass ApplicationBackend(BaseApplicationBackend):\n\n def __init__(self):\n BaseApplicationBackend.__init__(self)\n _prepare_js()\n\n def _vispy_reuse(self):\n _prepare_js()\n\n def _vispy_get_backend_name(self):\n return 'ipynb_webgl'\n\n def _vispy_process_events(self):\n # TODO: may be implemented later.\n raise NotImplementedError()\n\n def _vispy_run(self):\n pass\n\n def _vispy_quit(self):\n pass\n\n def _vispy_get_native_app(self):\n return self\n\n\n# ------------------------------------------------------------------ canvas ---\nclass WebGLGlirParser(BaseGlirParser):\n def __init__(self, widget):\n self._widget = widget\n\n def is_remote(self):\n return True\n\n def convert_shaders(self):\n return 'es2'\n\n def parse(self, commands):\n self._widget.send_glir_commands(commands)\n\n\nclass CanvasBackend(BaseCanvasBackend):\n # args are for BaseCanvasBackend, kwargs are for us.\n def __init__(self, *args, **kwargs):\n BaseCanvasBackend.__init__(self, *args)\n # Maybe to ensure that exactly all arguments are passed?\n title, size, position, show, vsync, resize, dec, fs, parent, context, \\\n = 
self._process_backend_kwargs(kwargs)\n self._context = context\n\n # TODO: do something with context.config\n # Take the context.\n context.shared.add_ref('webgl', self)\n if context.shared.ref is self:\n pass # ok\n else:\n raise RuntimeError(\"WebGL doesn't yet support context sharing.\")\n\n self._create_widget(size=size)\n\n def _create_widget(self, size=None):\n self._widget = VispyWidget(self, size=size)\n # Set glir parser on context and context.shared\n context = self._vispy_canvas.context\n context.shared.parser = WebGLGlirParser(self._widget)\n\n def _reinit_widget(self):\n self._vispy_canvas.set_current()\n\n self._vispy_canvas.events.initialize()\n self._vispy_canvas.events.resize(size=(self._widget.width,\n self._widget.height))\n self._vispy_canvas.events.draw()\n\n def _vispy_warmup(self):\n pass\n\n # Uncommenting these makes the backend crash.\n def _vispy_set_current(self):\n pass\n\n def _vispy_swap_buffers(self):\n pass\n\n def _vispy_set_title(self, title):\n raise NotImplementedError()\n\n def _vispy_get_fullscreen(self):\n # We don't want error messages to show up when the user presses\n # F11 to fullscreen the browser.\n pass\n\n def _vispy_set_fullscreen(self, fullscreen):\n # We don't want error messages to show up when the user presses\n # F11 to fullscreen the browser.\n pass\n\n def _vispy_get_size(self):\n return (self._widget.width, self._widget.height)\n\n def _vispy_set_size(self, w, h):\n self._widget.width = w\n self._widget.height = h\n\n def _vispy_get_position(self):\n raise NotImplementedError()\n\n def _vispy_set_position(self, x, y):\n logger.warning('IPython notebook canvas cannot be repositioned.')\n\n def _vispy_set_visible(self, visible):\n if not visible:\n logger.warning('IPython notebook canvas cannot be hidden.')\n else:\n display(self._widget)\n self._reinit_widget()\n\n def _vispy_update(self):\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.add_callback(self._draw_event)\n\n def _draw_event(self):\n self._vispy_canvas.set_current()\n self._vispy_canvas.events.draw()\n\n def _vispy_close(self):\n raise NotImplementedError()\n\n def _vispy_mouse_release(self, **kwds):\n # HACK: override this method from the base canvas in order to\n # avoid breaking other backends.\n kwds.update(self._vispy_mouse_data)\n ev = self._vispy_canvas.events.mouse_release(**kwds)\n if ev is None:\n return\n self._vispy_mouse_data['press_event'] = None\n # TODO: this is a bit ugly, need to improve mouse button handling in\n # app\n ev._button = None\n self._vispy_mouse_data['buttons'] = []\n self._vispy_mouse_data['last_event'] = ev\n return ev\n\n # Generate vispy events according to upcoming JS events\n _modifiers_map = {\n 'ctrl': keys.CONTROL,\n 'shift': keys.SHIFT,\n 'alt': keys.ALT,\n }\n\n def _gen_event(self, ev):\n if self._vispy_canvas is None:\n return\n event_type = ev['type']\n key_code = ev.get('key_code', None)\n if key_code is None:\n key, key_text = None, None\n else:\n if hasattr(keys, key_code):\n key = getattr(keys, key_code)\n else:\n key = keys.Key(key_code)\n # Generate the key text to pass to the event handler.\n if key_code == 'SPACE':\n key_text = ' '\n else:\n key_text = six.text_type(key_code)\n # Process modifiers.\n modifiers = ev.get('modifiers', None)\n if modifiers:\n modifiers = tuple([self._modifiers_map[modifier]\n for modifier in modifiers\n if modifier in self._modifiers_map])\n if event_type == \"mouse_move\":\n self._vispy_mouse_move(native=ev,\n button=ev[\"button\"],\n pos=ev[\"pos\"],\n modifiers=modifiers,\n )\n elif 
event_type == \"mouse_press\":\n self._vispy_mouse_press(native=ev,\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"mouse_release\":\n self._vispy_mouse_release(native=ev,\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"mouse_wheel\":\n self._vispy_canvas.events.mouse_wheel(native=ev,\n delta=ev[\"delta\"],\n pos=ev[\"pos\"],\n button=ev[\"button\"],\n modifiers=modifiers,\n )\n elif event_type == \"key_press\":\n self._vispy_canvas.events.key_press(native=ev,\n key=key,\n text=key_text,\n modifiers=modifiers,\n )\n elif event_type == \"key_release\":\n self._vispy_canvas.events.key_release(native=ev,\n key=key,\n text=key_text,\n modifiers=modifiers,\n )\n elif event_type == \"resize\":\n self._vispy_canvas.events.resize(native=ev,\n size=ev[\"size\"])\n elif event_type == \"paint\":\n self._vispy_canvas.events.draw()\n\n\n# ------------------------------------------------------------------- Timer ---\nclass TimerBackend(BaseTimerBackend):\n def __init__(self, *args, **kwargs):\n super(TimerBackend, self).__init__(*args, **kwargs)\n self._timer = tornado.ioloop.PeriodicCallback(\n self._vispy_timer._timeout,\n 1000)\n\n def _vispy_start(self, interval):\n self._timer.callback_time = interval * 1000\n self._timer.start()\n\n def _vispy_stop(self):\n self._timer.stop()\n\n\n# ---------------------------------------------------------- IPython Widget ---\ndef _stop_timers(canvas):\n \"\"\"Stop all timers in a canvas.\"\"\"\n for attr in dir(canvas):\n try:\n attr_obj = getattr(canvas, attr)\n except NotImplementedError:\n # This try/except is needed because canvas.position raises\n # an error (it is not implemented in this backend).\n attr_obj = None\n if isinstance(attr_obj, Timer):\n attr_obj.stop()\n\n\nclass VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n\n width = Int(sync=True)\n height = Int(sync=True)\n\n def __init__(self, canvas_backend, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n w, h = kwargs.get('size', (500, 200))\n self.width = w\n self.height = h\n self.canvas_backend = canvas_backend\n self.gen_event = canvas_backend._gen_event\n self.on_msg(self.events_received)\n\n def events_received(self, _, msg):\n if msg['msg_type'] == 'events':\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n elif msg['msg_type'] == 'status':\n if msg['contents'] == 'removed':\n # Stop all timers associated to the widget.\n _stop_timers(self.canvas_backend._vispy_canvas)\n\n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n # Until IPython 3.0 is released, use base64.\n array_serialization = 'base64'\n # array_serialization = 'binary'\n if array_serialization == 'base64':\n msg = create_glir_message(commands, 'base64')\n msg['array_serialization'] = 'base64'\n self.send(msg)\n elif array_serialization == 'binary':\n msg = create_glir_message(commands, 'binary')\n msg['array_serialization'] = 'binary'\n # Remove the buffers from the JSON message: they will be sent\n # independently via binary WebSocket.\n buffers = msg.pop('buffers')\n self.comm.send({\"method\": \"custom\", \"content\": msg},\n buffers=buffers)\n", "path": "vispy/app/backends/_ipynb_webgl.py"}]}
3984
795
gh_patches_debug_41774
rasdani/github-patches
git_diff
pantsbuild__pants-4696
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dict type options in config files One can also put a dict value into a config file (please add to http://www.pantsbuild.org/options.html): [scope] config: { "domain": "example.com", } This is not well handled though: ```shell ./pants options --options-scope=scope --options-name=config --options-output-format=json { "scope.config": { "source": "CONFIG", "value": "{\"domain\": \"example.com\",}" } } ``` expected: ```shell ./pants options --options-scope=scope --options-name=config --options-output-format=json { "scope.config": { "source": "CONFIG", "value": { "domain": "example.com" } } } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/core_tasks/explain_options_task.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function, 6 unicode_literals, with_statement) 7 8 import json 9 10 from colors import black, blue, cyan, green, magenta, red, white 11 from packaging.version import Version 12 13 from pants.option.ranked_value import RankedValue 14 from pants.task.console_task import ConsoleTask 15 from pants.version import PANTS_SEMVER 16 17 18 class ExplainOptionsTask(ConsoleTask): 19 """Display meta-information about options. 20 21 This "meta-information" includes what values options have, and what values they *used* to have 22 before they were overridden by a higher-rank value (eg, a HARDCODED value overridden by a CONFIG 23 value and then a cli FLAG value). 
24 """ 25 26 @classmethod 27 def register_options(cls, register): 28 super(ExplainOptionsTask, cls).register_options(register) 29 register('--scope', help='Only show options in this scope.') 30 register('--name', help='Only show options with this name.') 31 register('--rank', choices=RankedValue.get_names(), 32 help='Only show options with at least this importance.') 33 register('--show-history', type=bool, 34 help='Show the previous values options had before being overridden.') 35 register('--only-overridden', type=bool, 36 help='Only show values that overrode defaults.') 37 register('--skip-inherited', type=bool, default=True, 38 help='Do not show inherited options, unless their values differ from their parents.') 39 register('--output-format', choices=['text', 'json'], default='text', 40 help='Specify the format options will be printed.') 41 42 def _scope_filter(self, scope): 43 pattern = self.get_options().scope 44 return not pattern or scope.startswith(pattern) 45 46 def _option_filter(self, option): 47 pattern = self.get_options().name 48 if not pattern: 49 return True 50 pattern = pattern.replace('-', '_') 51 return option == pattern 52 53 def _rank_filter(self, rank): 54 pattern = self.get_options().rank 55 if not pattern: 56 return True 57 return rank >= RankedValue.get_rank_value(pattern) 58 59 def _rank_color(self, rank): 60 if not self.get_options().colors: 61 return lambda x: x 62 if rank == RankedValue.NONE: return white 63 if rank == RankedValue.HARDCODED: return white 64 if rank == RankedValue.ENVIRONMENT: return red 65 if rank == RankedValue.CONFIG: return blue 66 if rank == RankedValue.FLAG: return magenta 67 return black 68 69 def _format_scope(self, scope, option, no_color=False): 70 if no_color: 71 return '{scope}{option}'.format( 72 scope='{}.'.format(scope) if scope else '', 73 option=option, 74 ) 75 scope_color = cyan if self.get_options().colors else lambda x: x 76 option_color = blue if self.get_options().colors else lambda x: x 77 return '{scope}{option}'.format( 78 scope=scope_color('{}.'.format(scope) if scope else ''), 79 option=option_color(option), 80 ) 81 82 def _format_record(self, record): 83 value_color = green if self.get_options().colors else lambda x: x 84 rank_color = self._rank_color(record.rank) 85 simple_value = str(record.value) 86 formatted_value = value_color(simple_value) 87 simple_rank = RankedValue.get_rank_name(record.rank) 88 formatted_rank = '(from {rank}{details})'.format( 89 rank=simple_rank, 90 details=rank_color(' {}'.format(record.details)) if record.details else '', 91 ) 92 if self.is_json(): 93 return simple_value.replace('\n', ''), simple_rank 94 elif self.is_text(): 95 return '{value} {rank}'.format( 96 value=formatted_value, 97 rank=formatted_rank, 98 ) 99 100 def _show_history(self, history): 101 for record in reversed(list(history)[:-1]): 102 if record.rank > RankedValue.NONE: 103 yield ' overrode {}'.format(self._format_record(record)) 104 105 def _force_option_parsing(self): 106 scopes = filter(self._scope_filter, list(self.context.options.known_scope_to_info.keys())) 107 for scope in scopes: 108 self.context.options.for_scope(scope) 109 110 def _get_parent_scope_option(self, scope, name): 111 if not scope: 112 return None, None 113 parent_scope = '' 114 if '.' 
in scope: 115 parent_scope, _ = scope.rsplit('.', 1) 116 options = self.context.options.for_scope(parent_scope) 117 try: 118 return parent_scope, options[name] 119 except AttributeError: 120 return None, None 121 122 def is_json(self): 123 return self.get_options().output_format == 'json' 124 125 def is_text(self): 126 return self.get_options().output_format == 'text' 127 128 def console_output(self, targets): 129 self._force_option_parsing() 130 if self.is_json(): 131 output_map = {} 132 for scope, options in sorted(self.context.options.tracker.option_history_by_scope.items()): 133 if not self._scope_filter(scope): 134 continue 135 for option, history in sorted(options.items()): 136 if not self._option_filter(option): 137 continue 138 if not self._rank_filter(history.latest.rank): 139 continue 140 if self.get_options().only_overridden and not history.was_overridden: 141 continue 142 # Skip the option if it has already passed the deprecation period. 143 if history.latest.deprecation_version and PANTS_SEMVER >= Version( 144 history.latest.deprecation_version): 145 continue 146 if self.get_options().skip_inherited: 147 parent_scope, parent_value = self._get_parent_scope_option(scope, option) 148 if parent_scope is not None and parent_value == history.latest.value: 149 continue 150 if self.is_json(): 151 opt_vals = self._format_record(history.latest) 152 scope_key = self._format_scope(scope, option, True) 153 inner_map = dict(value=opt_vals[0], source=opt_vals[1]) 154 output_map[scope_key] = inner_map 155 elif self.is_text(): 156 yield '{} = {}'.format(self._format_scope(scope, option), 157 self._format_record(history.latest)) 158 if self.get_options().show_history: 159 history_list = [] 160 for line in self._show_history(history): 161 if self.is_text(): 162 yield line 163 elif self.is_json(): 164 history_list.append(line.strip()) 165 if self.is_json(): 166 inner_map["history"] = history_list 167 if self.is_json(): 168 yield json.dumps(output_map, indent=2, sort_keys=True) 169 ``` Path: `src/python/pants/backend/jvm/tasks/scalafmt.py` Content: ``` 1 # coding=utf-8 2 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function, 6 unicode_literals, with_statement) 7 8 from abc import abstractproperty 9 10 from pants.backend.jvm.tasks.nailgun_task import NailgunTask 11 from pants.base.exceptions import TaskError 12 from pants.java.jar.jar_dependency import JarDependency 13 from pants.option.custom_types import file_option 14 from pants.util.memo import memoized_property 15 from pants.util.meta import AbstractClass 16 17 18 class ScalaFmt(NailgunTask, AbstractClass): 19 """Abstract class to run ScalaFmt commands. 
20 21 Classes that inherit from this should override get_command_args and 22 process_results to run different scalafmt commands 23 24 :API: public 25 """ 26 _SCALAFMT_MAIN = 'org.scalafmt.cli.Cli' 27 _SCALA_SOURCE_EXTENSION = '.scala' 28 29 @classmethod 30 def register_options(cls, register): 31 super(ScalaFmt, cls).register_options(register) 32 register('--skip', type=bool, fingerprint=False, help='Skip Scalafmt Check') 33 register('--configuration', advanced=True, type=file_option, fingerprint=False, 34 help='Path to scalafmt config file, if not specified default scalafmt config used') 35 register('--target-types', 36 default={'scala_library', 'junit_tests', 'java_tests'}, 37 advanced=True, 38 type=set, 39 help='The target types to apply formatting to.') 40 cls.register_jvm_tool(register, 41 'scalafmt', 42 classpath=[ 43 JarDependency(org='com.geirsson', 44 name='scalafmt-cli_2.11', 45 rev='0.2.11') 46 ]) 47 48 @memoized_property 49 def _formatted_target_types(self): 50 aliases = self.get_options().target_types 51 registered_aliases = self.context.build_file_parser.registered_aliases() 52 return tuple({target_type 53 for alias in aliases 54 for target_type in registered_aliases.target_types_by_alias[alias]}) 55 56 def execute(self): 57 """Runs Scalafmt on all found Scala Source Files.""" 58 if self.get_options().skip: 59 return 60 61 targets = self.get_non_synthetic_scala_targets(self.context.targets()) 62 sources = self.calculate_sources(targets) 63 64 if sources: 65 files = ",".join(sources) 66 67 result = self.runjava(classpath=self.tool_classpath('scalafmt'), 68 main=self._SCALAFMT_MAIN, 69 args=self.get_command_args(files), 70 workunit_name='scalafmt') 71 72 self.process_results(result) 73 74 @abstractproperty 75 def get_command_args(self, files): 76 """Returns the arguments used to run Scalafmt command. 77 78 The return value should be an array of strings. For 79 example, to run the Scalafmt help command: 80 ['--help'] 81 """ 82 83 @abstractproperty 84 def process_results(self, result): 85 """This method processes the results of the scalafmt command. 86 87 No return value is expected. If an error occurs running 88 Scalafmt raising a TaskError is recommended. 89 """ 90 91 def get_non_synthetic_scala_targets(self, targets): 92 return filter( 93 lambda target: isinstance(target, self._formatted_target_types) 94 and target.has_sources(self._SCALA_SOURCE_EXTENSION) 95 and (not target.is_synthetic), 96 targets) 97 98 def calculate_sources(self, targets): 99 sources = set() 100 for target in targets: 101 sources.update(source for source in target.sources_relative_to_buildroot() 102 if source.endswith(self._SCALA_SOURCE_EXTENSION)) 103 return sources 104 105 106 class ScalaFmtCheckFormat(ScalaFmt): 107 """This Task checks that all scala files in the target are formatted 108 correctly. 109 110 If the files are not formatted correctly an error is raised 111 including the command to run to format the files correctly 112 113 :API: public 114 """ 115 deprecated_options_scope = 'compile.scalafmt' 116 deprecated_options_scope_removal_version = '1.5.0.dev0' 117 118 def get_command_args(self, files): 119 # If no config file is specified use default scalafmt config. 
120 config_file = self.get_options().configuration 121 args = ['--test', '--files', files] 122 if config_file!= None: 123 args.extend(['--config', config_file]) 124 125 return args 126 127 def process_results(self, result): 128 if result != 0: 129 raise TaskError('Scalafmt failed with exit code {}; to fix run: ' 130 '`./pants fmt <targets>`'.format(result), exit_code=result) 131 132 133 class ScalaFmtFormat(ScalaFmt): 134 """This Task reads all scala files in the target and emits 135 the source in a standard style as specified by the configuration 136 file. 137 138 This task mutates the underlying flies. 139 140 :API: public 141 """ 142 143 def get_command_args(self, files): 144 # If no config file is specified use default scalafmt config. 145 config_file = self.get_options().configuration 146 args = ['-i', '--files', files] 147 if config_file!= None: 148 args.extend(['--config', config_file]) 149 150 return args 151 152 def process_results(self, result): 153 # Processes the results of running the scalafmt command. 154 if result != 0: 155 raise TaskError('Scalafmt failed to format files', exit_code=result) 156 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/jvm/tasks/scalafmt.py b/src/python/pants/backend/jvm/tasks/scalafmt.py --- a/src/python/pants/backend/jvm/tasks/scalafmt.py +++ b/src/python/pants/backend/jvm/tasks/scalafmt.py @@ -33,9 +33,9 @@ register('--configuration', advanced=True, type=file_option, fingerprint=False, help='Path to scalafmt config file, if not specified default scalafmt config used') register('--target-types', - default={'scala_library', 'junit_tests', 'java_tests'}, + default=['scala_library', 'junit_tests', 'java_tests'], advanced=True, - type=set, + type=list, help='The target types to apply formatting to.') cls.register_jvm_tool(register, 'scalafmt', @@ -47,7 +47,7 @@ @memoized_property def _formatted_target_types(self): - aliases = self.get_options().target_types + aliases = set(self.get_options().target_types) registered_aliases = self.context.build_file_parser.registered_aliases() return tuple({target_type for alias in aliases diff --git a/src/python/pants/core_tasks/explain_options_task.py b/src/python/pants/core_tasks/explain_options_task.py --- a/src/python/pants/core_tasks/explain_options_task.py +++ b/src/python/pants/core_tasks/explain_options_task.py @@ -80,18 +80,18 @@ ) def _format_record(self, record): - value_color = green if self.get_options().colors else lambda x: x - rank_color = self._rank_color(record.rank) - simple_value = str(record.value) - formatted_value = value_color(simple_value) simple_rank = RankedValue.get_rank_name(record.rank) - formatted_rank = '(from {rank}{details})'.format( - rank=simple_rank, - details=rank_color(' {}'.format(record.details)) if record.details else '', - ) if self.is_json(): - return simple_value.replace('\n', ''), simple_rank + return record.value, simple_rank elif self.is_text(): + simple_value = str(record.value) + value_color = green if self.get_options().colors else lambda x: x + formatted_value = value_color(simple_value) + rank_color = self._rank_color(record.rank) + formatted_rank = '(from {rank}{details})'.format( + rank=simple_rank, + details=rank_color(' {}'.format(record.details)) if record.details else '', + ) return '{value} {rank}'.format( value=formatted_value, rank=formatted_rank, @@ -148,9 +148,16 @@ if parent_scope is not None and parent_value == history.latest.value: continue if self.is_json(): - opt_vals = self._format_record(history.latest) + value, rank_name = self._format_record(history.latest) scope_key = self._format_scope(scope, option, True) - inner_map = dict(value=opt_vals[0], source=opt_vals[1]) + # We rely on the fact that option values are restricted to a set of types compatible with + # json. In particular, we expect dict, list, str, bool, int and float, and so do no + # processing here. + # TODO(John Sirois): The option parsing system currently lets options of unexpected types + # slide by, which can lead to un-overridable values and which would also blow up below in + # json encoding, fix options to restrict the allowed `type`s: + # https://github.com/pantsbuild/pants/issues/4695 + inner_map = dict(value=value, source=rank_name) output_map[scope_key] = inner_map elif self.is_text(): yield '{} = {}'.format(self._format_scope(scope, option),
{"golden_diff": "diff --git a/src/python/pants/backend/jvm/tasks/scalafmt.py b/src/python/pants/backend/jvm/tasks/scalafmt.py\n--- a/src/python/pants/backend/jvm/tasks/scalafmt.py\n+++ b/src/python/pants/backend/jvm/tasks/scalafmt.py\n@@ -33,9 +33,9 @@\n register('--configuration', advanced=True, type=file_option, fingerprint=False,\n help='Path to scalafmt config file, if not specified default scalafmt config used')\n register('--target-types',\n- default={'scala_library', 'junit_tests', 'java_tests'},\n+ default=['scala_library', 'junit_tests', 'java_tests'],\n advanced=True,\n- type=set,\n+ type=list,\n help='The target types to apply formatting to.')\n cls.register_jvm_tool(register,\n 'scalafmt',\n@@ -47,7 +47,7 @@\n \n @memoized_property\n def _formatted_target_types(self):\n- aliases = self.get_options().target_types\n+ aliases = set(self.get_options().target_types)\n registered_aliases = self.context.build_file_parser.registered_aliases()\n return tuple({target_type\n for alias in aliases\ndiff --git a/src/python/pants/core_tasks/explain_options_task.py b/src/python/pants/core_tasks/explain_options_task.py\n--- a/src/python/pants/core_tasks/explain_options_task.py\n+++ b/src/python/pants/core_tasks/explain_options_task.py\n@@ -80,18 +80,18 @@\n )\n \n def _format_record(self, record):\n- value_color = green if self.get_options().colors else lambda x: x\n- rank_color = self._rank_color(record.rank)\n- simple_value = str(record.value)\n- formatted_value = value_color(simple_value)\n simple_rank = RankedValue.get_rank_name(record.rank)\n- formatted_rank = '(from {rank}{details})'.format(\n- rank=simple_rank,\n- details=rank_color(' {}'.format(record.details)) if record.details else '',\n- )\n if self.is_json():\n- return simple_value.replace('\\n', ''), simple_rank\n+ return record.value, simple_rank\n elif self.is_text():\n+ simple_value = str(record.value)\n+ value_color = green if self.get_options().colors else lambda x: x\n+ formatted_value = value_color(simple_value)\n+ rank_color = self._rank_color(record.rank)\n+ formatted_rank = '(from {rank}{details})'.format(\n+ rank=simple_rank,\n+ details=rank_color(' {}'.format(record.details)) if record.details else '',\n+ )\n return '{value} {rank}'.format(\n value=formatted_value,\n rank=formatted_rank,\n@@ -148,9 +148,16 @@\n if parent_scope is not None and parent_value == history.latest.value:\n continue\n if self.is_json():\n- opt_vals = self._format_record(history.latest)\n+ value, rank_name = self._format_record(history.latest)\n scope_key = self._format_scope(scope, option, True)\n- inner_map = dict(value=opt_vals[0], source=opt_vals[1])\n+ # We rely on the fact that option values are restricted to a set of types compatible with\n+ # json. 
In particular, we expect dict, list, str, bool, int and float, and so do no\n+ # processing here.\n+ # TODO(John Sirois): The option parsing system currently lets options of unexpected types\n+ # slide by, which can lead to un-overridable values and which would also blow up below in\n+ # json encoding, fix options to restrict the allowed `type`s:\n+ # https://github.com/pantsbuild/pants/issues/4695\n+ inner_map = dict(value=value, source=rank_name)\n output_map[scope_key] = inner_map\n elif self.is_text():\n yield '{} = {}'.format(self._format_scope(scope, option),\n", "issue": "dict type options in config files\nOne can also put a dict value into a config file (please add to http://www.pantsbuild.org/options.html):\r\n\r\n [scope]\r\n config: {\r\n \"domain\": \"example.com\",\r\n }\r\n\r\nThis is not well handled though:\r\n```shell\r\n./pants options --options-scope=scope --options-name=config --options-output-format=json\r\n{\r\n \"scope.config\": {\r\n \"source\": \"CONFIG\", \r\n \"value\": \"{\\\"domain\\\": \\\"example.com\\\",}\"\r\n }\r\n}\r\n```\r\nexpected:\r\n```shell\r\n./pants options --options-scope=scope --options-name=config --options-output-format=json\r\n{\r\n \"scope.config\": {\r\n \"source\": \"CONFIG\", \r\n \"value\": {\r\n \"domain\": \"example.com\"\r\n }\r\n }\r\n}\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport json\n\nfrom colors import black, blue, cyan, green, magenta, red, white\nfrom packaging.version import Version\n\nfrom pants.option.ranked_value import RankedValue\nfrom pants.task.console_task import ConsoleTask\nfrom pants.version import PANTS_SEMVER\n\n\nclass ExplainOptionsTask(ConsoleTask):\n \"\"\"Display meta-information about options.\n\n This \"meta-information\" includes what values options have, and what values they *used* to have\n before they were overridden by a higher-rank value (eg, a HARDCODED value overridden by a CONFIG\n value and then a cli FLAG value).\n \"\"\"\n\n @classmethod\n def register_options(cls, register):\n super(ExplainOptionsTask, cls).register_options(register)\n register('--scope', help='Only show options in this scope.')\n register('--name', help='Only show options with this name.')\n register('--rank', choices=RankedValue.get_names(),\n help='Only show options with at least this importance.')\n register('--show-history', type=bool,\n help='Show the previous values options had before being overridden.')\n register('--only-overridden', type=bool,\n help='Only show values that overrode defaults.')\n register('--skip-inherited', type=bool, default=True,\n help='Do not show inherited options, unless their values differ from their parents.')\n register('--output-format', choices=['text', 'json'], default='text',\n help='Specify the format options will be printed.')\n\n def _scope_filter(self, scope):\n pattern = self.get_options().scope\n return not pattern or scope.startswith(pattern)\n\n def _option_filter(self, option):\n pattern = self.get_options().name\n if not pattern:\n return True\n pattern = pattern.replace('-', '_')\n return option == pattern\n\n def _rank_filter(self, rank):\n pattern = self.get_options().rank\n if not pattern:\n return True\n return rank >= RankedValue.get_rank_value(pattern)\n\n def _rank_color(self, 
rank):\n if not self.get_options().colors:\n return lambda x: x\n if rank == RankedValue.NONE: return white\n if rank == RankedValue.HARDCODED: return white\n if rank == RankedValue.ENVIRONMENT: return red\n if rank == RankedValue.CONFIG: return blue\n if rank == RankedValue.FLAG: return magenta\n return black\n\n def _format_scope(self, scope, option, no_color=False):\n if no_color:\n return '{scope}{option}'.format(\n scope='{}.'.format(scope) if scope else '',\n option=option,\n )\n scope_color = cyan if self.get_options().colors else lambda x: x\n option_color = blue if self.get_options().colors else lambda x: x\n return '{scope}{option}'.format(\n scope=scope_color('{}.'.format(scope) if scope else ''),\n option=option_color(option),\n )\n\n def _format_record(self, record):\n value_color = green if self.get_options().colors else lambda x: x\n rank_color = self._rank_color(record.rank)\n simple_value = str(record.value)\n formatted_value = value_color(simple_value)\n simple_rank = RankedValue.get_rank_name(record.rank)\n formatted_rank = '(from {rank}{details})'.format(\n rank=simple_rank,\n details=rank_color(' {}'.format(record.details)) if record.details else '',\n )\n if self.is_json():\n return simple_value.replace('\\n', ''), simple_rank\n elif self.is_text():\n return '{value} {rank}'.format(\n value=formatted_value,\n rank=formatted_rank,\n )\n\n def _show_history(self, history):\n for record in reversed(list(history)[:-1]):\n if record.rank > RankedValue.NONE:\n yield ' overrode {}'.format(self._format_record(record))\n\n def _force_option_parsing(self):\n scopes = filter(self._scope_filter, list(self.context.options.known_scope_to_info.keys()))\n for scope in scopes:\n self.context.options.for_scope(scope)\n\n def _get_parent_scope_option(self, scope, name):\n if not scope:\n return None, None\n parent_scope = ''\n if '.' 
in scope:\n parent_scope, _ = scope.rsplit('.', 1)\n options = self.context.options.for_scope(parent_scope)\n try:\n return parent_scope, options[name]\n except AttributeError:\n return None, None\n\n def is_json(self):\n return self.get_options().output_format == 'json'\n\n def is_text(self):\n return self.get_options().output_format == 'text'\n\n def console_output(self, targets):\n self._force_option_parsing()\n if self.is_json():\n output_map = {}\n for scope, options in sorted(self.context.options.tracker.option_history_by_scope.items()):\n if not self._scope_filter(scope):\n continue\n for option, history in sorted(options.items()):\n if not self._option_filter(option):\n continue\n if not self._rank_filter(history.latest.rank):\n continue\n if self.get_options().only_overridden and not history.was_overridden:\n continue\n # Skip the option if it has already passed the deprecation period.\n if history.latest.deprecation_version and PANTS_SEMVER >= Version(\n history.latest.deprecation_version):\n continue\n if self.get_options().skip_inherited:\n parent_scope, parent_value = self._get_parent_scope_option(scope, option)\n if parent_scope is not None and parent_value == history.latest.value:\n continue\n if self.is_json():\n opt_vals = self._format_record(history.latest)\n scope_key = self._format_scope(scope, option, True)\n inner_map = dict(value=opt_vals[0], source=opt_vals[1])\n output_map[scope_key] = inner_map\n elif self.is_text():\n yield '{} = {}'.format(self._format_scope(scope, option),\n self._format_record(history.latest))\n if self.get_options().show_history:\n history_list = []\n for line in self._show_history(history):\n if self.is_text():\n yield line\n elif self.is_json():\n history_list.append(line.strip())\n if self.is_json():\n inner_map[\"history\"] = history_list\n if self.is_json():\n yield json.dumps(output_map, indent=2, sort_keys=True)\n", "path": "src/python/pants/core_tasks/explain_options_task.py"}, {"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nfrom abc import abstractproperty\n\nfrom pants.backend.jvm.tasks.nailgun_task import NailgunTask\nfrom pants.base.exceptions import TaskError\nfrom pants.java.jar.jar_dependency import JarDependency\nfrom pants.option.custom_types import file_option\nfrom pants.util.memo import memoized_property\nfrom pants.util.meta import AbstractClass\n\n\nclass ScalaFmt(NailgunTask, AbstractClass):\n \"\"\"Abstract class to run ScalaFmt commands.\n\n Classes that inherit from this should override get_command_args and\n process_results to run different scalafmt commands\n\n :API: public\n \"\"\"\n _SCALAFMT_MAIN = 'org.scalafmt.cli.Cli'\n _SCALA_SOURCE_EXTENSION = '.scala'\n\n @classmethod\n def register_options(cls, register):\n super(ScalaFmt, cls).register_options(register)\n register('--skip', type=bool, fingerprint=False, help='Skip Scalafmt Check')\n register('--configuration', advanced=True, type=file_option, fingerprint=False,\n help='Path to scalafmt config file, if not specified default scalafmt config used')\n register('--target-types',\n default={'scala_library', 'junit_tests', 'java_tests'},\n advanced=True,\n type=set,\n help='The target types to apply formatting to.')\n cls.register_jvm_tool(register,\n 'scalafmt',\n classpath=[\n JarDependency(org='com.geirsson',\n 
name='scalafmt-cli_2.11',\n rev='0.2.11')\n ])\n\n @memoized_property\n def _formatted_target_types(self):\n aliases = self.get_options().target_types\n registered_aliases = self.context.build_file_parser.registered_aliases()\n return tuple({target_type\n for alias in aliases\n for target_type in registered_aliases.target_types_by_alias[alias]})\n\n def execute(self):\n \"\"\"Runs Scalafmt on all found Scala Source Files.\"\"\"\n if self.get_options().skip:\n return\n\n targets = self.get_non_synthetic_scala_targets(self.context.targets())\n sources = self.calculate_sources(targets)\n\n if sources:\n files = \",\".join(sources)\n\n result = self.runjava(classpath=self.tool_classpath('scalafmt'),\n main=self._SCALAFMT_MAIN,\n args=self.get_command_args(files),\n workunit_name='scalafmt')\n\n self.process_results(result)\n\n @abstractproperty\n def get_command_args(self, files):\n \"\"\"Returns the arguments used to run Scalafmt command.\n\n The return value should be an array of strings. For\n example, to run the Scalafmt help command:\n ['--help']\n \"\"\"\n\n @abstractproperty\n def process_results(self, result):\n \"\"\"This method processes the results of the scalafmt command.\n\n No return value is expected. If an error occurs running\n Scalafmt raising a TaskError is recommended.\n \"\"\"\n\n def get_non_synthetic_scala_targets(self, targets):\n return filter(\n lambda target: isinstance(target, self._formatted_target_types)\n and target.has_sources(self._SCALA_SOURCE_EXTENSION)\n and (not target.is_synthetic),\n targets)\n\n def calculate_sources(self, targets):\n sources = set()\n for target in targets:\n sources.update(source for source in target.sources_relative_to_buildroot()\n if source.endswith(self._SCALA_SOURCE_EXTENSION))\n return sources\n\n\nclass ScalaFmtCheckFormat(ScalaFmt):\n \"\"\"This Task checks that all scala files in the target are formatted\n correctly.\n\n If the files are not formatted correctly an error is raised\n including the command to run to format the files correctly\n\n :API: public\n \"\"\"\n deprecated_options_scope = 'compile.scalafmt'\n deprecated_options_scope_removal_version = '1.5.0.dev0'\n\n def get_command_args(self, files):\n # If no config file is specified use default scalafmt config.\n config_file = self.get_options().configuration\n args = ['--test', '--files', files]\n if config_file!= None:\n args.extend(['--config', config_file])\n\n return args\n\n def process_results(self, result):\n if result != 0:\n raise TaskError('Scalafmt failed with exit code {}; to fix run: '\n '`./pants fmt <targets>`'.format(result), exit_code=result)\n\n\nclass ScalaFmtFormat(ScalaFmt):\n \"\"\"This Task reads all scala files in the target and emits\n the source in a standard style as specified by the configuration\n file.\n\n This task mutates the underlying flies.\n\n :API: public\n \"\"\"\n\n def get_command_args(self, files):\n # If no config file is specified use default scalafmt config.\n config_file = self.get_options().configuration\n args = ['-i', '--files', files]\n if config_file!= None:\n args.extend(['--config', config_file])\n\n return args\n\n def process_results(self, result):\n # Processes the results of running the scalafmt command.\n if result != 0:\n raise TaskError('Scalafmt failed to format files', exit_code=result)\n", "path": "src/python/pants/backend/jvm/tasks/scalafmt.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 
2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport json\n\nfrom colors import black, blue, cyan, green, magenta, red, white\nfrom packaging.version import Version\n\nfrom pants.option.ranked_value import RankedValue\nfrom pants.task.console_task import ConsoleTask\nfrom pants.version import PANTS_SEMVER\n\n\nclass ExplainOptionsTask(ConsoleTask):\n \"\"\"Display meta-information about options.\n\n This \"meta-information\" includes what values options have, and what values they *used* to have\n before they were overridden by a higher-rank value (eg, a HARDCODED value overridden by a CONFIG\n value and then a cli FLAG value).\n \"\"\"\n\n @classmethod\n def register_options(cls, register):\n super(ExplainOptionsTask, cls).register_options(register)\n register('--scope', help='Only show options in this scope.')\n register('--name', help='Only show options with this name.')\n register('--rank', choices=RankedValue.get_names(),\n help='Only show options with at least this importance.')\n register('--show-history', type=bool,\n help='Show the previous values options had before being overridden.')\n register('--only-overridden', type=bool,\n help='Only show values that overrode defaults.')\n register('--skip-inherited', type=bool, default=True,\n help='Do not show inherited options, unless their values differ from their parents.')\n register('--output-format', choices=['text', 'json'], default='text',\n help='Specify the format options will be printed.')\n\n def _scope_filter(self, scope):\n pattern = self.get_options().scope\n return not pattern or scope.startswith(pattern)\n\n def _option_filter(self, option):\n pattern = self.get_options().name\n if not pattern:\n return True\n pattern = pattern.replace('-', '_')\n return option == pattern\n\n def _rank_filter(self, rank):\n pattern = self.get_options().rank\n if not pattern:\n return True\n return rank >= RankedValue.get_rank_value(pattern)\n\n def _rank_color(self, rank):\n if not self.get_options().colors:\n return lambda x: x\n if rank == RankedValue.NONE: return white\n if rank == RankedValue.HARDCODED: return white\n if rank == RankedValue.ENVIRONMENT: return red\n if rank == RankedValue.CONFIG: return blue\n if rank == RankedValue.FLAG: return magenta\n return black\n\n def _format_scope(self, scope, option, no_color=False):\n if no_color:\n return '{scope}{option}'.format(\n scope='{}.'.format(scope) if scope else '',\n option=option,\n )\n scope_color = cyan if self.get_options().colors else lambda x: x\n option_color = blue if self.get_options().colors else lambda x: x\n return '{scope}{option}'.format(\n scope=scope_color('{}.'.format(scope) if scope else ''),\n option=option_color(option),\n )\n\n def _format_record(self, record):\n simple_rank = RankedValue.get_rank_name(record.rank)\n if self.is_json():\n return record.value, simple_rank\n elif self.is_text():\n simple_value = str(record.value)\n value_color = green if self.get_options().colors else lambda x: x\n formatted_value = value_color(simple_value)\n rank_color = self._rank_color(record.rank)\n formatted_rank = '(from {rank}{details})'.format(\n rank=simple_rank,\n details=rank_color(' {}'.format(record.details)) if record.details else '',\n )\n return '{value} {rank}'.format(\n value=formatted_value,\n rank=formatted_rank,\n )\n\n def _show_history(self, history):\n for record in reversed(list(history)[:-1]):\n if record.rank > RankedValue.NONE:\n yield ' 
overrode {}'.format(self._format_record(record))\n\n def _force_option_parsing(self):\n scopes = filter(self._scope_filter, list(self.context.options.known_scope_to_info.keys()))\n for scope in scopes:\n self.context.options.for_scope(scope)\n\n def _get_parent_scope_option(self, scope, name):\n if not scope:\n return None, None\n parent_scope = ''\n if '.' in scope:\n parent_scope, _ = scope.rsplit('.', 1)\n options = self.context.options.for_scope(parent_scope)\n try:\n return parent_scope, options[name]\n except AttributeError:\n return None, None\n\n def is_json(self):\n return self.get_options().output_format == 'json'\n\n def is_text(self):\n return self.get_options().output_format == 'text'\n\n def console_output(self, targets):\n self._force_option_parsing()\n if self.is_json():\n output_map = {}\n for scope, options in sorted(self.context.options.tracker.option_history_by_scope.items()):\n if not self._scope_filter(scope):\n continue\n for option, history in sorted(options.items()):\n if not self._option_filter(option):\n continue\n if not self._rank_filter(history.latest.rank):\n continue\n if self.get_options().only_overridden and not history.was_overridden:\n continue\n # Skip the option if it has already passed the deprecation period.\n if history.latest.deprecation_version and PANTS_SEMVER >= Version(\n history.latest.deprecation_version):\n continue\n if self.get_options().skip_inherited:\n parent_scope, parent_value = self._get_parent_scope_option(scope, option)\n if parent_scope is not None and parent_value == history.latest.value:\n continue\n if self.is_json():\n value, rank_name = self._format_record(history.latest)\n scope_key = self._format_scope(scope, option, True)\n # We rely on the fact that option values are restricted to a set of types compatible with\n # json. 
In particular, we expect dict, list, str, bool, int and float, and so do no\n # processing here.\n # TODO(John Sirois): The option parsing system currently lets options of unexpected types\n # slide by, which can lead to un-overridable values and which would also blow up below in\n # json encoding, fix options to restrict the allowed `type`s:\n # https://github.com/pantsbuild/pants/issues/4695\n inner_map = dict(value=value, source=rank_name)\n output_map[scope_key] = inner_map\n elif self.is_text():\n yield '{} = {}'.format(self._format_scope(scope, option),\n self._format_record(history.latest))\n if self.get_options().show_history:\n history_list = []\n for line in self._show_history(history):\n if self.is_text():\n yield line\n elif self.is_json():\n history_list.append(line.strip())\n if self.is_json():\n inner_map[\"history\"] = history_list\n if self.is_json():\n yield json.dumps(output_map, indent=2, sort_keys=True)\n", "path": "src/python/pants/core_tasks/explain_options_task.py"}, {"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nfrom abc import abstractproperty\n\nfrom pants.backend.jvm.tasks.nailgun_task import NailgunTask\nfrom pants.base.exceptions import TaskError\nfrom pants.java.jar.jar_dependency import JarDependency\nfrom pants.option.custom_types import file_option\nfrom pants.util.memo import memoized_property\nfrom pants.util.meta import AbstractClass\n\n\nclass ScalaFmt(NailgunTask, AbstractClass):\n \"\"\"Abstract class to run ScalaFmt commands.\n\n Classes that inherit from this should override get_command_args and\n process_results to run different scalafmt commands\n\n :API: public\n \"\"\"\n _SCALAFMT_MAIN = 'org.scalafmt.cli.Cli'\n _SCALA_SOURCE_EXTENSION = '.scala'\n\n @classmethod\n def register_options(cls, register):\n super(ScalaFmt, cls).register_options(register)\n register('--skip', type=bool, fingerprint=False, help='Skip Scalafmt Check')\n register('--configuration', advanced=True, type=file_option, fingerprint=False,\n help='Path to scalafmt config file, if not specified default scalafmt config used')\n register('--target-types',\n default=['scala_library', 'junit_tests', 'java_tests'],\n advanced=True,\n type=list,\n help='The target types to apply formatting to.')\n cls.register_jvm_tool(register,\n 'scalafmt',\n classpath=[\n JarDependency(org='com.geirsson',\n name='scalafmt-cli_2.11',\n rev='0.2.11')\n ])\n\n @memoized_property\n def _formatted_target_types(self):\n aliases = set(self.get_options().target_types)\n registered_aliases = self.context.build_file_parser.registered_aliases()\n return tuple({target_type\n for alias in aliases\n for target_type in registered_aliases.target_types_by_alias[alias]})\n\n def execute(self):\n \"\"\"Runs Scalafmt on all found Scala Source Files.\"\"\"\n if self.get_options().skip:\n return\n\n targets = self.get_non_synthetic_scala_targets(self.context.targets())\n sources = self.calculate_sources(targets)\n\n if sources:\n files = \",\".join(sources)\n\n result = self.runjava(classpath=self.tool_classpath('scalafmt'),\n main=self._SCALAFMT_MAIN,\n args=self.get_command_args(files),\n workunit_name='scalafmt')\n\n self.process_results(result)\n\n @abstractproperty\n def get_command_args(self, files):\n \"\"\"Returns the arguments used to run Scalafmt command.\n\n The 
return value should be an array of strings. For\n example, to run the Scalafmt help command:\n ['--help']\n \"\"\"\n\n @abstractproperty\n def process_results(self, result):\n \"\"\"This method processes the results of the scalafmt command.\n\n No return value is expected. If an error occurs running\n Scalafmt raising a TaskError is recommended.\n \"\"\"\n\n def get_non_synthetic_scala_targets(self, targets):\n return filter(\n lambda target: isinstance(target, self._formatted_target_types)\n and target.has_sources(self._SCALA_SOURCE_EXTENSION)\n and (not target.is_synthetic),\n targets)\n\n def calculate_sources(self, targets):\n sources = set()\n for target in targets:\n sources.update(source for source in target.sources_relative_to_buildroot()\n if source.endswith(self._SCALA_SOURCE_EXTENSION))\n return sources\n\n\nclass ScalaFmtCheckFormat(ScalaFmt):\n \"\"\"This Task checks that all scala files in the target are formatted\n correctly.\n\n If the files are not formatted correctly an error is raised\n including the command to run to format the files correctly\n\n :API: public\n \"\"\"\n deprecated_options_scope = 'compile.scalafmt'\n deprecated_options_scope_removal_version = '1.5.0.dev0'\n\n def get_command_args(self, files):\n # If no config file is specified use default scalafmt config.\n config_file = self.get_options().configuration\n args = ['--test', '--files', files]\n if config_file!= None:\n args.extend(['--config', config_file])\n\n return args\n\n def process_results(self, result):\n if result != 0:\n raise TaskError('Scalafmt failed with exit code {}; to fix run: '\n '`./pants fmt <targets>`'.format(result), exit_code=result)\n\n\nclass ScalaFmtFormat(ScalaFmt):\n \"\"\"This Task reads all scala files in the target and emits\n the source in a standard style as specified by the configuration\n file.\n\n This task mutates the underlying flies.\n\n :API: public\n \"\"\"\n\n def get_command_args(self, files):\n # If no config file is specified use default scalafmt config.\n config_file = self.get_options().configuration\n args = ['-i', '--files', files]\n if config_file!= None:\n args.extend(['--config', config_file])\n\n return args\n\n def process_results(self, result):\n # Processes the results of running the scalafmt command.\n if result != 0:\n raise TaskError('Scalafmt failed to format files', exit_code=result)\n", "path": "src/python/pants/backend/jvm/tasks/scalafmt.py"}]}
3,835
878
gh_patches_debug_1184
rasdani/github-patches
git_diff
voxel51__fiftyone-3297
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] fiftyone forces starlette=0.16.0 and it breaks integrations with applications that use FastAPI in newer versions. ### Instructions Thank you for submitting an issue. Please refer to our [issue policy](https://www.github.com/voxel51/fiftyone/blob/develop/ISSUE_POLICY.md) for information on what types of issues we address. **Please fill in this template to ensure a timely and thorough response.** - Place an "x" between the brackets next to an option if it applies. Example: - [x] Selected option - Please delete this section (all content above this line) before submitting the issue ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: linux 18.04 - **FiftyOne installed from (pip or source)**:pip - **FiftyOne version (run `fiftyone --version`)**:0.17.2 - **Python version**:3.8 ### Commands to reproduce As thoroughly as possible, please provide the Python and/or shell commands used to encounter the issue. Application steps can be described in the next section. requriements.txt: fastapi==0.79.0 fiftyone==0.17.2 ``` pip install -r requirments.txt ``` ### Describe the problem fiftyone cannot be used with the newer versions of fastapi, because it forces starlette to be in the version starlette=0.16.0 Is it possible to add a condition like: starlette>=0.16.0. In this way it would not break apps that use fiftyone ### Code to reproduce issue fastapi==0.79.0 fiftyone==0.17.2 pip install -r requirments.txt ### Other info / logs es. #0 388.1 #0 388.1 The conflict is caused by: #0 388.1 bentoml 1.0.4 depends on starlette #0 388.1 fastapi 0.79.0 depends on starlette==0.19.1 #0 388.1 fiftyone 0.17.2 depends on starlette==0.16.0 #0 388.1 #0 388.1 To fix this you could try to: #0 388.1 1. loosen the range of package versions you've specified #0 388.1 2. remove package versions to allow pip attempt to solve the dependency conflict #0 388.1 #0 388.1 ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/topics/dependency-resolution/#dealing-with-dependency-conflicts #0 388.1 WARNING: You are using pip version 22.0.4; however, version 22.3 is available. #0 388.1 You should consider upgrading via the '/usr/bin/python -m pip install --upgrade pip' command. ------ ### What areas of FiftyOne does this bug affect? - [X ] `App`: FiftyOne application issue - [ ] `Core`: Core `fiftyone` Python library issue - [ ] `Server`: Fiftyone server issue ### Willingness to contribute The FiftyOne Community encourages bug fix contributions. Would you or another member of your organization be willing to contribute a fix for this bug to the FiftyOne codebase? - [ X] Yes. I can contribute a fix for this bug independently. - [ ] Yes. I would be willing to contribute a fix for this bug with guidance from the FiftyOne community. - [ ] No. I cannot contribute a bug fix at this time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 """ 3 Installs FiftyOne. 4 5 | Copyright 2017-2023, Voxel51, Inc. 
6 | `voxel51.com <https://voxel51.com/>`_ 7 | 8 """ 9 try: 10 from importlib import metadata 11 except ImportError: 12 import importlib_metadata as metadata 13 14 import os 15 import re 16 from setuptools import setup, find_packages 17 18 19 VERSION = "0.21.3" 20 21 22 def get_version(): 23 if "RELEASE_VERSION" in os.environ: 24 version = os.environ["RELEASE_VERSION"] 25 if not version.startswith(VERSION): 26 raise ValueError( 27 "Release version does not match version: %s and %s" 28 % (version, VERSION) 29 ) 30 return version 31 32 return VERSION 33 34 35 INSTALL_REQUIRES = [ 36 # third-party packages 37 "aiofiles", 38 "argcomplete", 39 "boto3", 40 "cachetools", 41 "dacite>=1.6.0,<1.8.0", 42 "Deprecated", 43 "eventlet", 44 "ftfy", 45 "future", 46 "hypercorn>=0.13.2", 47 "importlib-metadata; python_version<'3.8'", 48 "Jinja2>=3", 49 "kaleido", 50 "matplotlib", 51 "mongoengine==0.24.2", 52 "motor>=2.5", 53 "numpy", 54 "packaging", 55 "pandas", 56 "Pillow>=6.2", 57 "plotly>=4.14", 58 "pprintpp", 59 "psutil", 60 "pymongo>=3.12", 61 "pytz", 62 "PyYAML", 63 "regex", 64 "retrying", 65 "scikit-learn", 66 "scikit-image", 67 "setuptools", 68 "sseclient-py>=1.7.2,<2", 69 "sse-starlette>=0.10.3,<1", 70 "starlette>=0.24.0,<0.27", 71 "strawberry-graphql==0.138.1", 72 "tabulate", 73 "xmltodict", 74 "universal-analytics-python3>=1.0.1,<2", 75 # internal packages 76 "fiftyone-brain>=0.13,<0.14", 77 "fiftyone-db>=0.4,<0.5", 78 "voxel51-eta>=0.10,<0.11", 79 ] 80 81 82 CHOOSE_INSTALL_REQUIRES = [ 83 ( 84 ( 85 "opencv-python", 86 "opencv-contrib-python", 87 "opencv-contrib-python-headless", 88 ), 89 "opencv-python-headless", 90 ) 91 ] 92 93 94 def choose_requirement(mains, secondary): 95 chosen = secondary 96 for main in mains: 97 try: 98 name = re.split(r"[!<>=]", main)[0] 99 metadata.version(name) 100 chosen = main 101 break 102 except metadata.PackageNotFoundError: 103 pass 104 105 return str(chosen) 106 107 108 def get_install_requirements(install_requires, choose_install_requires): 109 for mains, secondary in choose_install_requires: 110 install_requires.append(choose_requirement(mains, secondary)) 111 112 return install_requires 113 114 115 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.28.2,<0.29"]} 116 117 118 with open("README.md", "r") as fh: 119 long_description = fh.read() 120 121 122 setup( 123 name="fiftyone", 124 version=get_version(), 125 description=( 126 "FiftyOne: the open-source tool for building high-quality datasets " 127 "and computer vision models" 128 ), 129 author="Voxel51, Inc.", 130 author_email="[email protected]", 131 url="https://github.com/voxel51/fiftyone", 132 extras_require=EXTRAS_REQUIREMENTS, 133 license="Apache", 134 long_description=long_description, 135 long_description_content_type="text/markdown", 136 packages=find_packages( 137 exclude=["app", "eta", "package", "requirements", "tests", "tools"] 138 ) 139 + ["fiftyone.recipes", "fiftyone.tutorials"], 140 package_dir={ 141 "fiftyone.recipes": "docs/source/recipes", 142 "fiftyone.tutorials": "docs/source/tutorials", 143 }, 144 install_requires=get_install_requirements( 145 INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES 146 ), 147 include_package_data=True, 148 classifiers=[ 149 "Development Status :: 4 - Beta", 150 "Intended Audience :: Developers", 151 "Intended Audience :: Science/Research", 152 "License :: OSI Approved :: Apache Software License", 153 "Topic :: Scientific/Engineering :: Artificial Intelligence", 154 "Topic :: Scientific/Engineering :: Image Processing", 155 "Topic :: Scientific/Engineering :: 
Image Recognition", 156 "Topic :: Scientific/Engineering :: Information Analysis", 157 "Topic :: Scientific/Engineering :: Visualization", 158 "Operating System :: MacOS :: MacOS X", 159 "Operating System :: POSIX :: Linux", 160 "Operating System :: Microsoft :: Windows", 161 "Programming Language :: Python :: 3", 162 "Programming Language :: Python :: 3.7", 163 "Programming Language :: Python :: 3.8", 164 "Programming Language :: Python :: 3.9", 165 "Programming Language :: Python :: 3.10", 166 ], 167 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]}, 168 python_requires=">=3.7", 169 ) 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ "setuptools", "sseclient-py>=1.7.2,<2", "sse-starlette>=0.10.3,<1", - "starlette>=0.24.0,<0.27", + "starlette>=0.24.0", "strawberry-graphql==0.138.1", "tabulate", "xmltodict",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,7 +67,7 @@\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n- \"starlette>=0.24.0,<0.27\",\n+ \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n", "issue": "[BUG] fiftyone forces starlette=0.16.0 and it breaks integrations with applications that use FastAPI in newer versions.\n### Instructions\r\n\r\nThank you for submitting an issue. Please refer to our\r\n[issue policy](https://www.github.com/voxel51/fiftyone/blob/develop/ISSUE_POLICY.md)\r\nfor information on what types of issues we address.\r\n\r\n**Please fill in this template to ensure a timely and thorough response.**\r\n\r\n- Place an \"x\" between the brackets next to an option if it applies. Example:\r\n - [x] Selected option\r\n- Please delete this section (all content above this line) before submitting\r\n the issue\r\n\r\n### System information\r\n\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: linux 18.04\r\n- **FiftyOne installed from (pip or source)**:pip\r\n- **FiftyOne version (run `fiftyone --version`)**:0.17.2\r\n- **Python version**:3.8\r\n\r\n### Commands to reproduce\r\n\r\nAs thoroughly as possible, please provide the Python and/or shell commands used\r\nto encounter the issue. Application steps can be described in the next section.\r\nrequriements.txt:\r\nfastapi==0.79.0\r\nfiftyone==0.17.2\r\n\r\n```\r\npip install -r requirments.txt\r\n```\r\n\r\n### Describe the problem\r\n\r\nfiftyone cannot be used with the newer versions of fastapi, because it forces starlette to be in the version starlette=0.16.0\r\nIs it possible to add a condition like: starlette>=0.16.0. In this way it would not break apps that use fiftyone\r\n\r\n### Code to reproduce issue\r\nfastapi==0.79.0\r\nfiftyone==0.17.2\r\n\r\npip install -r requirments.txt\r\n\r\n### Other info / logs\r\n\r\nes.\r\n#0 388.1 \r\n#0 388.1 The conflict is caused by:\r\n#0 388.1 bentoml 1.0.4 depends on starlette\r\n#0 388.1 fastapi 0.79.0 depends on starlette==0.19.1\r\n#0 388.1 fiftyone 0.17.2 depends on starlette==0.16.0\r\n#0 388.1 \r\n#0 388.1 To fix this you could try to:\r\n#0 388.1 1. loosen the range of package versions you've specified\r\n#0 388.1 2. remove package versions to allow pip attempt to solve the dependency conflict\r\n#0 388.1 \r\n#0 388.1 ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/topics/dependency-resolution/#dealing-with-dependency-conflicts\r\n#0 388.1 WARNING: You are using pip version 22.0.4; however, version 22.3 is available.\r\n#0 388.1 You should consider upgrading via the '/usr/bin/python -m pip install --upgrade pip' command.\r\n------\r\n\r\n### What areas of FiftyOne does this bug affect?\r\n\r\n- [X ] `App`: FiftyOne application issue\r\n- [ ] `Core`: Core `fiftyone` Python library issue\r\n- [ ] `Server`: Fiftyone server issue\r\n\r\n### Willingness to contribute\r\n\r\nThe FiftyOne Community encourages bug fix contributions. Would you or another\r\nmember of your organization be willing to contribute a fix for this bug to the\r\nFiftyOne codebase?\r\n\r\n- [ X] Yes. I can contribute a fix for this bug independently.\r\n- [ ] Yes. I would be willing to contribute a fix for this bug with guidance\r\n from the FiftyOne community.\r\n- [ ] No. 
I cannot contribute a bug fix at this time.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.3\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0,<0.27\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial 
Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.3\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n 
long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}]}
num_tokens: 2,707
num_tokens_diff: 125
problem_id: gh_patches_debug_22077
source: rasdani/github-patches
task_type: git_diff
in_source_id: spack__spack-13605
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pybind11: fix get_include() The `py-pybind11` package has a python module helper of the form: ```python import pybind11 as py print(py.get_include()) ``` which helps downstream, if they are not using the CMake config package (which probably everyone is using), to query the install location. `get_include()` currently points to the spec's Python include location instead of the package's include. Diff for a new unit test: ```diff diff --git a/var/spack/repos/builtin/packages/py-pybind11/package.py b/var/spack/repos/builtin/packages/py-pybind11/package.py index cd482ceb3..0a86afb97 100644 --- a/var/spack/repos/builtin/packages/py-pybind11/package.py +++ b/var/spack/repos/builtin/packages/py-pybind11/package.py @@ -60,3 +60,17 @@ class PyPybind11(CMakePackage): super(PyPybind11, self).install(spec, prefix) setup_py('install', '--single-version-externally-managed', '--root=/', '--prefix={0}'.format(prefix)) + + @run_after('install') + @on_package_attributes(run_tests=True) + def test(self): + with working_dir('spack-test', create=True): + # test include helper points to right location + module = "pybind11" + python = Executable(self.spec['python'].command.path) + python('-c', 'import {0}'.format(module)) + inc = python('-c', 'import {0} as py; print(py.get_include())'.format( + module), output=str) + print("INC:") + print(inc) + assert inc == self.prefix.include ``` Similar to: - https://github.com/conda-forge/pybind11-feedstock/pull/32 - https://github.com/pybind/pybind11/pull/1877 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/py-pybind11/package.py` Content: ``` 1 # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class PyPybind11(CMakePackage): 10 """pybind11 -- Seamless operability between C++11 and Python. 11 12 pybind11 is a lightweight header-only library that exposes C++ types in 13 Python and vice versa, mainly to create Python bindings of existing C++ 14 code. 
Its goals and syntax are similar to the excellent Boost.Python 15 library by David Abrahams: to minimize boilerplate code in traditional 16 extension modules by inferring type information using compile-time 17 introspection.""" 18 19 homepage = "https://pybind11.readthedocs.io" 20 url = "https://github.com/pybind/pybind11/archive/v2.1.0.tar.gz" 21 git = "https://github.com/pybind/pybind11.git" 22 23 maintainers = ['ax3l'] 24 25 version('master', branch='master') 26 version('2.4.3', sha256='1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d') 27 version('2.3.0', sha256='0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8') 28 version('2.2.4', sha256='b69e83658513215b8d1443544d0549b7d231b9f201f6fc787a2b2218b408181e') 29 version('2.2.3', sha256='3a3b7b651afab1c5ba557f4c37d785a522b8030dfc765da26adc2ecd1de940ea') 30 version('2.2.2', sha256='b639a2b2cbf1c467849660801c4665ffc1a4d0a9e153ae1996ed6f21c492064e') 31 version('2.2.1', sha256='f8bd1509578b2a1e7407d52e6ee8afe64268909a1bbda620ca407318598927e7') 32 version('2.2.0', sha256='1b0fda17c650c493f5862902e90f426df6751da8c0b58c05983ab009951ed769') 33 version('2.1.1', sha256='f2c6874f1ea5b4ad4ffffe352413f7d2cd1a49f9050940805c2a082348621540') 34 version('2.1.0', sha256='2860f2b8d0c9f65f0698289a161385f59d099b7ead1bf64e8993c486f2b93ee0') 35 36 depends_on('py-pytest', type='test') 37 depends_on('py-setuptools', type='build') 38 39 extends('python') 40 41 # compiler support 42 conflicts('%gcc@:4.7') 43 conflicts('%clang@:3.2') 44 conflicts('%intel@:16') 45 46 def cmake_args(self): 47 args = [] 48 args.append('-DPYTHON_EXECUTABLE:FILEPATH=%s' 49 % self.spec['python'].command.path) 50 args += [ 51 '-DPYBIND11_TEST:BOOL={0}'.format( 52 'ON' if self.run_tests else 'OFF') 53 ] 54 return args 55 56 def setup_build_environment(self, env): 57 env.set('PYBIND11_USE_CMAKE', 1) 58 59 def install(self, spec, prefix): 60 super(PyPybind11, self).install(spec, prefix) 61 setup_py('install', '--single-version-externally-managed', '--root=/', 62 '--prefix={0}'.format(prefix)) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/py-pybind11/package.py b/var/spack/repos/builtin/packages/py-pybind11/package.py --- a/var/spack/repos/builtin/packages/py-pybind11/package.py +++ b/var/spack/repos/builtin/packages/py-pybind11/package.py @@ -56,7 +56,27 @@ def setup_build_environment(self, env): env.set('PYBIND11_USE_CMAKE', 1) + def patch(self): + """ see https://github.com/spack/spack/issues/13559 """ + filter_file('import sys', + 'import sys; return "{0}"'.format(self.prefix.include), + 'pybind11/__init__.py', + string=True) + def install(self, spec, prefix): super(PyPybind11, self).install(spec, prefix) setup_py('install', '--single-version-externally-managed', '--root=/', '--prefix={0}'.format(prefix)) + + @run_after('install') + @on_package_attributes(run_tests=True) + def test(self): + with working_dir('spack-test', create=True): + # test include helper points to right location + python = self.spec['python'].command + inc = python( + '-c', + 'import pybind11 as py; ' + + self.spec['python'].package.print_string('py.get_include()'), + output=str) + assert inc.strip() == str(self.prefix.include)
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-pybind11/package.py b/var/spack/repos/builtin/packages/py-pybind11/package.py\n--- a/var/spack/repos/builtin/packages/py-pybind11/package.py\n+++ b/var/spack/repos/builtin/packages/py-pybind11/package.py\n@@ -56,7 +56,27 @@\n def setup_build_environment(self, env):\n env.set('PYBIND11_USE_CMAKE', 1)\n \n+ def patch(self):\n+ \"\"\" see https://github.com/spack/spack/issues/13559 \"\"\"\n+ filter_file('import sys',\n+ 'import sys; return \"{0}\"'.format(self.prefix.include),\n+ 'pybind11/__init__.py',\n+ string=True)\n+\n def install(self, spec, prefix):\n super(PyPybind11, self).install(spec, prefix)\n setup_py('install', '--single-version-externally-managed', '--root=/',\n '--prefix={0}'.format(prefix))\n+\n+ @run_after('install')\n+ @on_package_attributes(run_tests=True)\n+ def test(self):\n+ with working_dir('spack-test', create=True):\n+ # test include helper points to right location\n+ python = self.spec['python'].command\n+ inc = python(\n+ '-c',\n+ 'import pybind11 as py; ' +\n+ self.spec['python'].package.print_string('py.get_include()'),\n+ output=str)\n+ assert inc.strip() == str(self.prefix.include)\n", "issue": "pybind11: fix get_include()\nThe `py-pybind11` package has a python module helper of the form:\r\n\r\n```python\r\nimport pybind11 as py\r\nprint(py.get_include())\r\n```\r\n\r\nwhich helps downstream, if they are not using the CMake config package (which probably everyone is using), to query the install location.\r\n\r\n`get_include()` currently points to the spec's Python include location instead of the package's include.\r\n\r\nDiff for a new unit test:\r\n```diff\r\ndiff --git a/var/spack/repos/builtin/packages/py-pybind11/package.py b/var/spack/repos/builtin/packages/py-pybind11/package.py\r\nindex cd482ceb3..0a86afb97 100644\r\n--- a/var/spack/repos/builtin/packages/py-pybind11/package.py\r\n+++ b/var/spack/repos/builtin/packages/py-pybind11/package.py\r\n@@ -60,3 +60,17 @@ class PyPybind11(CMakePackage):\r\n super(PyPybind11, self).install(spec, prefix)\r\n setup_py('install', '--single-version-externally-managed', '--root=/',\r\n '--prefix={0}'.format(prefix))\r\n+\r\n+ @run_after('install')\r\n+ @on_package_attributes(run_tests=True)\r\n+ def test(self):\r\n+ with working_dir('spack-test', create=True):\r\n+ # test include helper points to right location\r\n+ module = \"pybind11\"\r\n+ python = Executable(self.spec['python'].command.path)\r\n+ python('-c', 'import {0}'.format(module))\r\n+ inc = python('-c', 'import {0} as py; print(py.get_include())'.format(\r\n+ module), output=str)\r\n+ print(\"INC:\")\r\n+ print(inc)\r\n+ assert inc == self.prefix.include\r\n```\r\n\r\nSimilar to:\r\n- https://github.com/conda-forge/pybind11-feedstock/pull/32\r\n- https://github.com/pybind/pybind11/pull/1877\n", "before_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyPybind11(CMakePackage):\n \"\"\"pybind11 -- Seamless operability between C++11 and Python.\n\n pybind11 is a lightweight header-only library that exposes C++ types in\n Python and vice versa, mainly to create Python bindings of existing C++\n code. 
Its goals and syntax are similar to the excellent Boost.Python\n library by David Abrahams: to minimize boilerplate code in traditional\n extension modules by inferring type information using compile-time\n introspection.\"\"\"\n\n homepage = \"https://pybind11.readthedocs.io\"\n url = \"https://github.com/pybind/pybind11/archive/v2.1.0.tar.gz\"\n git = \"https://github.com/pybind/pybind11.git\"\n\n maintainers = ['ax3l']\n\n version('master', branch='master')\n version('2.4.3', sha256='1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d')\n version('2.3.0', sha256='0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8')\n version('2.2.4', sha256='b69e83658513215b8d1443544d0549b7d231b9f201f6fc787a2b2218b408181e')\n version('2.2.3', sha256='3a3b7b651afab1c5ba557f4c37d785a522b8030dfc765da26adc2ecd1de940ea')\n version('2.2.2', sha256='b639a2b2cbf1c467849660801c4665ffc1a4d0a9e153ae1996ed6f21c492064e')\n version('2.2.1', sha256='f8bd1509578b2a1e7407d52e6ee8afe64268909a1bbda620ca407318598927e7')\n version('2.2.0', sha256='1b0fda17c650c493f5862902e90f426df6751da8c0b58c05983ab009951ed769')\n version('2.1.1', sha256='f2c6874f1ea5b4ad4ffffe352413f7d2cd1a49f9050940805c2a082348621540')\n version('2.1.0', sha256='2860f2b8d0c9f65f0698289a161385f59d099b7ead1bf64e8993c486f2b93ee0')\n\n depends_on('py-pytest', type='test')\n depends_on('py-setuptools', type='build')\n\n extends('python')\n\n # compiler support\n conflicts('%gcc@:4.7')\n conflicts('%clang@:3.2')\n conflicts('%intel@:16')\n\n def cmake_args(self):\n args = []\n args.append('-DPYTHON_EXECUTABLE:FILEPATH=%s'\n % self.spec['python'].command.path)\n args += [\n '-DPYBIND11_TEST:BOOL={0}'.format(\n 'ON' if self.run_tests else 'OFF')\n ]\n return args\n\n def setup_build_environment(self, env):\n env.set('PYBIND11_USE_CMAKE', 1)\n\n def install(self, spec, prefix):\n super(PyPybind11, self).install(spec, prefix)\n setup_py('install', '--single-version-externally-managed', '--root=/',\n '--prefix={0}'.format(prefix))\n", "path": "var/spack/repos/builtin/packages/py-pybind11/package.py"}], "after_files": [{"content": "# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyPybind11(CMakePackage):\n \"\"\"pybind11 -- Seamless operability between C++11 and Python.\n\n pybind11 is a lightweight header-only library that exposes C++ types in\n Python and vice versa, mainly to create Python bindings of existing C++\n code. 
Its goals and syntax are similar to the excellent Boost.Python\n library by David Abrahams: to minimize boilerplate code in traditional\n extension modules by inferring type information using compile-time\n introspection.\"\"\"\n\n homepage = \"https://pybind11.readthedocs.io\"\n url = \"https://github.com/pybind/pybind11/archive/v2.1.0.tar.gz\"\n git = \"https://github.com/pybind/pybind11.git\"\n\n maintainers = ['ax3l']\n\n version('master', branch='master')\n version('2.4.3', sha256='1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d')\n version('2.3.0', sha256='0f34838f2c8024a6765168227ba587b3687729ebf03dc912f88ff75c7aa9cfe8')\n version('2.2.4', sha256='b69e83658513215b8d1443544d0549b7d231b9f201f6fc787a2b2218b408181e')\n version('2.2.3', sha256='3a3b7b651afab1c5ba557f4c37d785a522b8030dfc765da26adc2ecd1de940ea')\n version('2.2.2', sha256='b639a2b2cbf1c467849660801c4665ffc1a4d0a9e153ae1996ed6f21c492064e')\n version('2.2.1', sha256='f8bd1509578b2a1e7407d52e6ee8afe64268909a1bbda620ca407318598927e7')\n version('2.2.0', sha256='1b0fda17c650c493f5862902e90f426df6751da8c0b58c05983ab009951ed769')\n version('2.1.1', sha256='f2c6874f1ea5b4ad4ffffe352413f7d2cd1a49f9050940805c2a082348621540')\n version('2.1.0', sha256='2860f2b8d0c9f65f0698289a161385f59d099b7ead1bf64e8993c486f2b93ee0')\n\n depends_on('py-pytest', type='test')\n depends_on('py-setuptools', type='build')\n\n extends('python')\n\n # compiler support\n conflicts('%gcc@:4.7')\n conflicts('%clang@:3.2')\n conflicts('%intel@:16')\n\n def cmake_args(self):\n args = []\n args.append('-DPYTHON_EXECUTABLE:FILEPATH=%s'\n % self.spec['python'].command.path)\n args += [\n '-DPYBIND11_TEST:BOOL={0}'.format(\n 'ON' if self.run_tests else 'OFF')\n ]\n return args\n\n def setup_build_environment(self, env):\n env.set('PYBIND11_USE_CMAKE', 1)\n\n def patch(self):\n \"\"\" see https://github.com/spack/spack/issues/13559 \"\"\"\n filter_file('import sys',\n 'import sys; return \"{0}\"'.format(self.prefix.include),\n 'pybind11/__init__.py',\n string=True)\n\n def install(self, spec, prefix):\n super(PyPybind11, self).install(spec, prefix)\n setup_py('install', '--single-version-externally-managed', '--root=/',\n '--prefix={0}'.format(prefix))\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def test(self):\n with working_dir('spack-test', create=True):\n # test include helper points to right location\n python = self.spec['python'].command\n inc = python(\n '-c',\n 'import pybind11 as py; ' +\n self.spec['python'].package.print_string('py.get_include()'),\n output=str)\n assert inc.strip() == str(self.prefix.include)\n", "path": "var/spack/repos/builtin/packages/py-pybind11/package.py"}]}
num_tokens: 1,962
num_tokens_diff: 347
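A note on why this record's patch matters beyond Spack itself: projects that build pybind11 extensions without the CMake config package usually locate the headers through `pybind11.get_include()`, so the helper has to resolve to the installed package's own `include/` directory rather than the interpreter's. A minimal sketch of that downstream pattern, with placeholder module and source names not taken from the record:

```python
# Hypothetical downstream setup.py relying on pybind11.get_include();
# "example" and example.cpp are placeholder names for illustration only.
from setuptools import Extension, setup
import pybind11

ext_modules = [
    Extension(
        "example",
        sources=["example.cpp"],
        include_dirs=[pybind11.get_include()],  # must point at pybind11's own headers
        language="c++",
    )
]

setup(name="example", version="0.0.1", ext_modules=ext_modules)
```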
problem_id: gh_patches_debug_25698
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-3378
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `ddtrace.opentracer.span.Span` method return types not compliant with `opentracing.span.Span` I'll be happy to submit a PR for this--it should be a small fix--but wanted to make sure this isn't a known issue first. ### Which version of dd-trace-py are you using? 0.59.0 ### Which version of pip are you using? 22.0.3 ### Explanation `opentracing.span.Span` defines a handful of methods that return `self` for the instance their invoked on, in order to support call chaining. `ddtrace.opentracer.span.Span` does not implement this interface for `set_tag` or `set_operation_name`. https://github.com/DataDog/dd-trace-py/blob/1a6bad3ec2cbfffab0f83ebf5b72fe786b76dbed/ddtrace/opentracer/span.py#L137-L138 https://github.com/DataDog/dd-trace-py/blob/1a6bad3ec2cbfffab0f83ebf5b72fe786b76dbed/ddtrace/opentracer/span.py#L98-L99 See the `opentracing` interfaces for [set_tag](https://github.com/opentracing/opentracing-python/blob/1d495f7b1c89e3577c0ce4911ed2962c8dcf1482/opentracing/span.py#L120-L139) and [set_operation_name](https://github.com/opentracing/opentracing-python/blob/1d495f7b1c89e3577c0ce4911ed2962c8dcf1482/opentracing/span.py#L95-L104). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/opentracer/span.py` Content: ``` 1 import threading 2 from typing import Any 3 from typing import Dict 4 from typing import Optional 5 from typing import TYPE_CHECKING 6 from typing import Text 7 from typing import Union 8 9 from opentracing import Span as OpenTracingSpan 10 from opentracing.ext import tags as OTTags 11 12 from ddtrace.constants import ERROR_MSG 13 from ddtrace.constants import ERROR_STACK 14 from ddtrace.constants import ERROR_TYPE 15 from ddtrace.context import Context as DatadogContext 16 from ddtrace.internal.compat import NumericType 17 from ddtrace.span import Span as DatadogSpan 18 19 from .span_context import SpanContext 20 from .tags import Tags 21 22 23 if TYPE_CHECKING: 24 from .tracer import Tracer 25 26 27 _TagNameType = Union[Text, bytes] 28 29 30 class Span(OpenTracingSpan): 31 """Datadog implementation of :class:`opentracing.Span`""" 32 33 def __init__(self, tracer, context, operation_name): 34 # type: (Tracer, Optional[SpanContext], str) -> None 35 if context is not None: 36 context = SpanContext(ddcontext=context._dd_context, baggage=context.baggage) 37 else: 38 context = SpanContext() 39 40 super(Span, self).__init__(tracer, context) 41 42 self.finished = False 43 self._lock = threading.Lock() 44 # use a datadog span 45 self._dd_span = DatadogSpan(operation_name, context=context._dd_context) 46 47 def finish(self, finish_time=None): 48 # type: (Optional[float]) -> None 49 """Finish the span. 50 51 This calls finish on the ddspan. 52 53 :param finish_time: specify a custom finish time with a unix timestamp 54 per time.time() 55 :type timestamp: float 56 """ 57 if self.finished: 58 return 59 60 # finish the datadog span 61 self._dd_span.finish(finish_time) 62 self.finished = True 63 64 def set_baggage_item(self, key, value): 65 # type: (str, Any) -> Span 66 """Sets a baggage item in the span context of this span. 67 68 Baggage is used to propagate state between spans. 
69 70 :param key: baggage item key 71 :type key: str 72 73 :param value: baggage item value 74 :type value: a type that can be compat.stringify()'d 75 76 :rtype: Span 77 :return: itself for chaining calls 78 """ 79 new_ctx = self.context.with_baggage_item(key, value) 80 with self._lock: 81 self._context = new_ctx 82 return self 83 84 def get_baggage_item(self, key): 85 # type: (str) -> Optional[str] 86 """Gets a baggage item from the span context of this span. 87 88 :param key: baggage item key 89 :type key: str 90 91 :rtype: str 92 :return: the baggage value for the given key or ``None``. 93 """ 94 return self.context.get_baggage_item(key) 95 96 def set_operation_name(self, operation_name): 97 # type: (str) -> None 98 """Set the operation name.""" 99 self._dd_span.name = operation_name 100 101 def log_kv(self, key_values, timestamp=None): 102 # type: (Dict[_TagNameType, Any], Optional[float]) -> Span 103 """Add a log record to this span. 104 105 Passes on relevant opentracing key values onto the datadog span. 106 107 :param key_values: a dict of string keys and values of any type 108 :type key_values: dict 109 110 :param timestamp: a unix timestamp per time.time() 111 :type timestamp: float 112 113 :return: the span itself, for call chaining 114 :rtype: Span 115 """ 116 117 # match opentracing defined keys to datadog functionality 118 # opentracing/specification/blob/1be630515dafd4d2a468d083300900f89f28e24d/semantic_conventions.md#log-fields-table 119 for key, val in key_values.items(): 120 if key == "event" and val == "error": 121 # TODO: not sure if it's actually necessary to set the error manually 122 self._dd_span.error = 1 123 self.set_tag("error", 1) 124 elif key == "error" or key == "error.object": 125 self.set_tag(ERROR_TYPE, val) 126 elif key == "message": 127 self.set_tag(ERROR_MSG, val) 128 elif key == "stack": 129 self.set_tag(ERROR_STACK, val) 130 else: 131 pass 132 133 return self 134 135 def set_tag(self, key, value): 136 # type: (_TagNameType, Any) -> None 137 """Set a tag on the span. 138 139 This sets the tag on the underlying datadog span. 140 """ 141 if key == Tags.SPAN_TYPE: 142 self._dd_span.span_type = value 143 elif key == Tags.SERVICE_NAME: 144 self._dd_span.service = value 145 elif key == Tags.RESOURCE_NAME or key == OTTags.DATABASE_STATEMENT: 146 self._dd_span.resource = value 147 elif key == OTTags.PEER_HOSTNAME: 148 self._dd_span.set_tag(Tags.TARGET_HOST, value) 149 elif key == OTTags.PEER_PORT: 150 self._dd_span.set_tag(Tags.TARGET_PORT, value) 151 elif key == Tags.SAMPLING_PRIORITY: 152 self._dd_span.context.sampling_priority = value 153 else: 154 self._dd_span.set_tag(key, value) 155 156 def _get_tag(self, key): 157 # type: (_TagNameType) -> Optional[Text] 158 """Gets a tag from the span. 159 160 This method retrieves the tag from the underlying datadog span. 161 """ 162 return self._dd_span.get_tag(key) 163 164 def _get_metric(self, key): 165 # type: (_TagNameType) -> Optional[NumericType] 166 """Gets a metric from the span. 167 168 This method retrieves the metric from the underlying datadog span. 
169 """ 170 return self._dd_span.get_metric(key) 171 172 def __enter__(self): 173 return self 174 175 def __exit__(self, exc_type, exc_val, exc_tb): 176 if exc_type: 177 self._dd_span.set_exc_info(exc_type, exc_val, exc_tb) 178 179 # note: self.finish() AND _dd_span.__exit__ will call _span.finish() but 180 # it is idempotent 181 self._dd_span.__exit__(exc_type, exc_val, exc_tb) 182 self.finish() 183 184 def _associate_dd_span(self, ddspan): 185 # type: (DatadogSpan) -> None 186 """Associates a DD span with this span.""" 187 # get the datadog span context 188 self._dd_span = ddspan 189 self.context._dd_context = ddspan.context 190 191 @property 192 def _dd_context(self): 193 # type: () -> DatadogContext 194 return self._dd_span.context 195 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/opentracer/span.py b/ddtrace/opentracer/span.py --- a/ddtrace/opentracer/span.py +++ b/ddtrace/opentracer/span.py @@ -94,9 +94,10 @@ return self.context.get_baggage_item(key) def set_operation_name(self, operation_name): - # type: (str) -> None + # type: (str) -> Span """Set the operation name.""" self._dd_span.name = operation_name + return self def log_kv(self, key_values, timestamp=None): # type: (Dict[_TagNameType, Any], Optional[float]) -> Span @@ -133,7 +134,7 @@ return self def set_tag(self, key, value): - # type: (_TagNameType, Any) -> None + # type: (_TagNameType, Any) -> Span """Set a tag on the span. This sets the tag on the underlying datadog span. @@ -152,6 +153,7 @@ self._dd_span.context.sampling_priority = value else: self._dd_span.set_tag(key, value) + return self def _get_tag(self, key): # type: (_TagNameType) -> Optional[Text]
{"golden_diff": "diff --git a/ddtrace/opentracer/span.py b/ddtrace/opentracer/span.py\n--- a/ddtrace/opentracer/span.py\n+++ b/ddtrace/opentracer/span.py\n@@ -94,9 +94,10 @@\n return self.context.get_baggage_item(key)\n \n def set_operation_name(self, operation_name):\n- # type: (str) -> None\n+ # type: (str) -> Span\n \"\"\"Set the operation name.\"\"\"\n self._dd_span.name = operation_name\n+ return self\n \n def log_kv(self, key_values, timestamp=None):\n # type: (Dict[_TagNameType, Any], Optional[float]) -> Span\n@@ -133,7 +134,7 @@\n return self\n \n def set_tag(self, key, value):\n- # type: (_TagNameType, Any) -> None\n+ # type: (_TagNameType, Any) -> Span\n \"\"\"Set a tag on the span.\n \n This sets the tag on the underlying datadog span.\n@@ -152,6 +153,7 @@\n self._dd_span.context.sampling_priority = value\n else:\n self._dd_span.set_tag(key, value)\n+ return self\n \n def _get_tag(self, key):\n # type: (_TagNameType) -> Optional[Text]\n", "issue": "`ddtrace.opentracer.span.Span` method return types not compliant with `opentracing.span.Span`\nI'll be happy to submit a PR for this--it should be a small fix--but wanted to make sure this isn't a known issue first.\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n0.59.0\r\n\r\n### Which version of pip are you using?\r\n\r\n22.0.3\r\n\r\n### Explanation\r\n\r\n`opentracing.span.Span` defines a handful of methods that return `self` for the instance their invoked on, in order to support call chaining.\r\n\r\n`ddtrace.opentracer.span.Span` does not implement this interface for `set_tag` or `set_operation_name`.\r\n\r\nhttps://github.com/DataDog/dd-trace-py/blob/1a6bad3ec2cbfffab0f83ebf5b72fe786b76dbed/ddtrace/opentracer/span.py#L137-L138\r\nhttps://github.com/DataDog/dd-trace-py/blob/1a6bad3ec2cbfffab0f83ebf5b72fe786b76dbed/ddtrace/opentracer/span.py#L98-L99\r\n\r\nSee the `opentracing` interfaces for [set_tag](https://github.com/opentracing/opentracing-python/blob/1d495f7b1c89e3577c0ce4911ed2962c8dcf1482/opentracing/span.py#L120-L139) and [set_operation_name](https://github.com/opentracing/opentracing-python/blob/1d495f7b1c89e3577c0ce4911ed2962c8dcf1482/opentracing/span.py#L95-L104).\r\n\n", "before_files": [{"content": "import threading\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\nfrom typing import Text\nfrom typing import Union\n\nfrom opentracing import Span as OpenTracingSpan\nfrom opentracing.ext import tags as OTTags\n\nfrom ddtrace.constants import ERROR_MSG\nfrom ddtrace.constants import ERROR_STACK\nfrom ddtrace.constants import ERROR_TYPE\nfrom ddtrace.context import Context as DatadogContext\nfrom ddtrace.internal.compat import NumericType\nfrom ddtrace.span import Span as DatadogSpan\n\nfrom .span_context import SpanContext\nfrom .tags import Tags\n\n\nif TYPE_CHECKING:\n from .tracer import Tracer\n\n\n_TagNameType = Union[Text, bytes]\n\n\nclass Span(OpenTracingSpan):\n \"\"\"Datadog implementation of :class:`opentracing.Span`\"\"\"\n\n def __init__(self, tracer, context, operation_name):\n # type: (Tracer, Optional[SpanContext], str) -> None\n if context is not None:\n context = SpanContext(ddcontext=context._dd_context, baggage=context.baggage)\n else:\n context = SpanContext()\n\n super(Span, self).__init__(tracer, context)\n\n self.finished = False\n self._lock = threading.Lock()\n # use a datadog span\n self._dd_span = DatadogSpan(operation_name, context=context._dd_context)\n\n def finish(self, finish_time=None):\n # type: 
(Optional[float]) -> None\n \"\"\"Finish the span.\n\n This calls finish on the ddspan.\n\n :param finish_time: specify a custom finish time with a unix timestamp\n per time.time()\n :type timestamp: float\n \"\"\"\n if self.finished:\n return\n\n # finish the datadog span\n self._dd_span.finish(finish_time)\n self.finished = True\n\n def set_baggage_item(self, key, value):\n # type: (str, Any) -> Span\n \"\"\"Sets a baggage item in the span context of this span.\n\n Baggage is used to propagate state between spans.\n\n :param key: baggage item key\n :type key: str\n\n :param value: baggage item value\n :type value: a type that can be compat.stringify()'d\n\n :rtype: Span\n :return: itself for chaining calls\n \"\"\"\n new_ctx = self.context.with_baggage_item(key, value)\n with self._lock:\n self._context = new_ctx\n return self\n\n def get_baggage_item(self, key):\n # type: (str) -> Optional[str]\n \"\"\"Gets a baggage item from the span context of this span.\n\n :param key: baggage item key\n :type key: str\n\n :rtype: str\n :return: the baggage value for the given key or ``None``.\n \"\"\"\n return self.context.get_baggage_item(key)\n\n def set_operation_name(self, operation_name):\n # type: (str) -> None\n \"\"\"Set the operation name.\"\"\"\n self._dd_span.name = operation_name\n\n def log_kv(self, key_values, timestamp=None):\n # type: (Dict[_TagNameType, Any], Optional[float]) -> Span\n \"\"\"Add a log record to this span.\n\n Passes on relevant opentracing key values onto the datadog span.\n\n :param key_values: a dict of string keys and values of any type\n :type key_values: dict\n\n :param timestamp: a unix timestamp per time.time()\n :type timestamp: float\n\n :return: the span itself, for call chaining\n :rtype: Span\n \"\"\"\n\n # match opentracing defined keys to datadog functionality\n # opentracing/specification/blob/1be630515dafd4d2a468d083300900f89f28e24d/semantic_conventions.md#log-fields-table\n for key, val in key_values.items():\n if key == \"event\" and val == \"error\":\n # TODO: not sure if it's actually necessary to set the error manually\n self._dd_span.error = 1\n self.set_tag(\"error\", 1)\n elif key == \"error\" or key == \"error.object\":\n self.set_tag(ERROR_TYPE, val)\n elif key == \"message\":\n self.set_tag(ERROR_MSG, val)\n elif key == \"stack\":\n self.set_tag(ERROR_STACK, val)\n else:\n pass\n\n return self\n\n def set_tag(self, key, value):\n # type: (_TagNameType, Any) -> None\n \"\"\"Set a tag on the span.\n\n This sets the tag on the underlying datadog span.\n \"\"\"\n if key == Tags.SPAN_TYPE:\n self._dd_span.span_type = value\n elif key == Tags.SERVICE_NAME:\n self._dd_span.service = value\n elif key == Tags.RESOURCE_NAME or key == OTTags.DATABASE_STATEMENT:\n self._dd_span.resource = value\n elif key == OTTags.PEER_HOSTNAME:\n self._dd_span.set_tag(Tags.TARGET_HOST, value)\n elif key == OTTags.PEER_PORT:\n self._dd_span.set_tag(Tags.TARGET_PORT, value)\n elif key == Tags.SAMPLING_PRIORITY:\n self._dd_span.context.sampling_priority = value\n else:\n self._dd_span.set_tag(key, value)\n\n def _get_tag(self, key):\n # type: (_TagNameType) -> Optional[Text]\n \"\"\"Gets a tag from the span.\n\n This method retrieves the tag from the underlying datadog span.\n \"\"\"\n return self._dd_span.get_tag(key)\n\n def _get_metric(self, key):\n # type: (_TagNameType) -> Optional[NumericType]\n \"\"\"Gets a metric from the span.\n\n This method retrieves the metric from the underlying datadog span.\n \"\"\"\n return self._dd_span.get_metric(key)\n\n def 
__enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self._dd_span.set_exc_info(exc_type, exc_val, exc_tb)\n\n # note: self.finish() AND _dd_span.__exit__ will call _span.finish() but\n # it is idempotent\n self._dd_span.__exit__(exc_type, exc_val, exc_tb)\n self.finish()\n\n def _associate_dd_span(self, ddspan):\n # type: (DatadogSpan) -> None\n \"\"\"Associates a DD span with this span.\"\"\"\n # get the datadog span context\n self._dd_span = ddspan\n self.context._dd_context = ddspan.context\n\n @property\n def _dd_context(self):\n # type: () -> DatadogContext\n return self._dd_span.context\n", "path": "ddtrace/opentracer/span.py"}], "after_files": [{"content": "import threading\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\nfrom typing import Text\nfrom typing import Union\n\nfrom opentracing import Span as OpenTracingSpan\nfrom opentracing.ext import tags as OTTags\n\nfrom ddtrace.constants import ERROR_MSG\nfrom ddtrace.constants import ERROR_STACK\nfrom ddtrace.constants import ERROR_TYPE\nfrom ddtrace.context import Context as DatadogContext\nfrom ddtrace.internal.compat import NumericType\nfrom ddtrace.span import Span as DatadogSpan\n\nfrom .span_context import SpanContext\nfrom .tags import Tags\n\n\nif TYPE_CHECKING:\n from .tracer import Tracer\n\n\n_TagNameType = Union[Text, bytes]\n\n\nclass Span(OpenTracingSpan):\n \"\"\"Datadog implementation of :class:`opentracing.Span`\"\"\"\n\n def __init__(self, tracer, context, operation_name):\n # type: (Tracer, Optional[SpanContext], str) -> None\n if context is not None:\n context = SpanContext(ddcontext=context._dd_context, baggage=context.baggage)\n else:\n context = SpanContext()\n\n super(Span, self).__init__(tracer, context)\n\n self.finished = False\n self._lock = threading.Lock()\n # use a datadog span\n self._dd_span = DatadogSpan(operation_name, context=context._dd_context)\n\n def finish(self, finish_time=None):\n # type: (Optional[float]) -> None\n \"\"\"Finish the span.\n\n This calls finish on the ddspan.\n\n :param finish_time: specify a custom finish time with a unix timestamp\n per time.time()\n :type timestamp: float\n \"\"\"\n if self.finished:\n return\n\n # finish the datadog span\n self._dd_span.finish(finish_time)\n self.finished = True\n\n def set_baggage_item(self, key, value):\n # type: (str, Any) -> Span\n \"\"\"Sets a baggage item in the span context of this span.\n\n Baggage is used to propagate state between spans.\n\n :param key: baggage item key\n :type key: str\n\n :param value: baggage item value\n :type value: a type that can be compat.stringify()'d\n\n :rtype: Span\n :return: itself for chaining calls\n \"\"\"\n new_ctx = self.context.with_baggage_item(key, value)\n with self._lock:\n self._context = new_ctx\n return self\n\n def get_baggage_item(self, key):\n # type: (str) -> Optional[str]\n \"\"\"Gets a baggage item from the span context of this span.\n\n :param key: baggage item key\n :type key: str\n\n :rtype: str\n :return: the baggage value for the given key or ``None``.\n \"\"\"\n return self.context.get_baggage_item(key)\n\n def set_operation_name(self, operation_name):\n # type: (str) -> Span\n \"\"\"Set the operation name.\"\"\"\n self._dd_span.name = operation_name\n return self\n\n def log_kv(self, key_values, timestamp=None):\n # type: (Dict[_TagNameType, Any], Optional[float]) -> Span\n \"\"\"Add a log record to this span.\n\n Passes on relevant opentracing key values onto 
the datadog span.\n\n :param key_values: a dict of string keys and values of any type\n :type key_values: dict\n\n :param timestamp: a unix timestamp per time.time()\n :type timestamp: float\n\n :return: the span itself, for call chaining\n :rtype: Span\n \"\"\"\n\n # match opentracing defined keys to datadog functionality\n # opentracing/specification/blob/1be630515dafd4d2a468d083300900f89f28e24d/semantic_conventions.md#log-fields-table\n for key, val in key_values.items():\n if key == \"event\" and val == \"error\":\n # TODO: not sure if it's actually necessary to set the error manually\n self._dd_span.error = 1\n self.set_tag(\"error\", 1)\n elif key == \"error\" or key == \"error.object\":\n self.set_tag(ERROR_TYPE, val)\n elif key == \"message\":\n self.set_tag(ERROR_MSG, val)\n elif key == \"stack\":\n self.set_tag(ERROR_STACK, val)\n else:\n pass\n\n return self\n\n def set_tag(self, key, value):\n # type: (_TagNameType, Any) -> Span\n \"\"\"Set a tag on the span.\n\n This sets the tag on the underlying datadog span.\n \"\"\"\n if key == Tags.SPAN_TYPE:\n self._dd_span.span_type = value\n elif key == Tags.SERVICE_NAME:\n self._dd_span.service = value\n elif key == Tags.RESOURCE_NAME or key == OTTags.DATABASE_STATEMENT:\n self._dd_span.resource = value\n elif key == OTTags.PEER_HOSTNAME:\n self._dd_span.set_tag(Tags.TARGET_HOST, value)\n elif key == OTTags.PEER_PORT:\n self._dd_span.set_tag(Tags.TARGET_PORT, value)\n elif key == Tags.SAMPLING_PRIORITY:\n self._dd_span.context.sampling_priority = value\n else:\n self._dd_span.set_tag(key, value)\n return self\n\n def _get_tag(self, key):\n # type: (_TagNameType) -> Optional[Text]\n \"\"\"Gets a tag from the span.\n\n This method retrieves the tag from the underlying datadog span.\n \"\"\"\n return self._dd_span.get_tag(key)\n\n def _get_metric(self, key):\n # type: (_TagNameType) -> Optional[NumericType]\n \"\"\"Gets a metric from the span.\n\n This method retrieves the metric from the underlying datadog span.\n \"\"\"\n return self._dd_span.get_metric(key)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type:\n self._dd_span.set_exc_info(exc_type, exc_val, exc_tb)\n\n # note: self.finish() AND _dd_span.__exit__ will call _span.finish() but\n # it is idempotent\n self._dd_span.__exit__(exc_type, exc_val, exc_tb)\n self.finish()\n\n def _associate_dd_span(self, ddspan):\n # type: (DatadogSpan) -> None\n \"\"\"Associates a DD span with this span.\"\"\"\n # get the datadog span context\n self._dd_span = ddspan\n self.context._dd_context = ddspan.context\n\n @property\n def _dd_context(self):\n # type: () -> DatadogContext\n return self._dd_span.context\n", "path": "ddtrace/opentracer/span.py"}]}
num_tokens: 2,679
num_tokens_diff: 294
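The interface contract behind this record is easiest to see from the caller's side: once `set_operation_name` and `set_tag` return the span, the call chaining that `opentracing.Span` documents works as intended. A small illustrative sketch, assuming a placeholder service name and placeholder operation/tag values:

```python
# Illustrative chaining made possible by the patch; the service, operation,
# and tag names below are placeholders, not taken from the record.
from ddtrace.opentracer import Tracer

tracer = Tracer(service_name="example-service")
span = tracer.start_span("request")
span.set_operation_name("db.query").set_tag("db.type", "postgres").set_tag("component", "example")
span.finish()
```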
problem_id: gh_patches_debug_24504
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyca__cryptography-5438
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make OpenSSL 1.0.2 error (+ env var fallback) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/hazmat/bindings/openssl/binding.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import collections 8 import threading 9 import types 10 import warnings 11 12 import cryptography 13 from cryptography import utils 14 from cryptography.exceptions import InternalError 15 from cryptography.hazmat.bindings._openssl import ffi, lib 16 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES 17 18 _OpenSSLErrorWithText = collections.namedtuple( 19 "_OpenSSLErrorWithText", ["code", "lib", "func", "reason", "reason_text"] 20 ) 21 22 23 class _OpenSSLError(object): 24 def __init__(self, code, lib, func, reason): 25 self._code = code 26 self._lib = lib 27 self._func = func 28 self._reason = reason 29 30 def _lib_reason_match(self, lib, reason): 31 return lib == self.lib and reason == self.reason 32 33 code = utils.read_only_property("_code") 34 lib = utils.read_only_property("_lib") 35 func = utils.read_only_property("_func") 36 reason = utils.read_only_property("_reason") 37 38 39 def _consume_errors(lib): 40 errors = [] 41 while True: 42 code = lib.ERR_get_error() 43 if code == 0: 44 break 45 46 err_lib = lib.ERR_GET_LIB(code) 47 err_func = lib.ERR_GET_FUNC(code) 48 err_reason = lib.ERR_GET_REASON(code) 49 50 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason)) 51 52 return errors 53 54 55 def _errors_with_text(errors): 56 errors_with_text = [] 57 for err in errors: 58 buf = ffi.new("char[]", 256) 59 lib.ERR_error_string_n(err.code, buf, len(buf)) 60 err_text_reason = ffi.string(buf) 61 62 errors_with_text.append( 63 _OpenSSLErrorWithText( 64 err.code, err.lib, err.func, err.reason, err_text_reason 65 ) 66 ) 67 68 return errors_with_text 69 70 71 def _consume_errors_with_text(lib): 72 return _errors_with_text(_consume_errors(lib)) 73 74 75 def _openssl_assert(lib, ok, errors=None): 76 if not ok: 77 if errors is None: 78 errors = _consume_errors(lib) 79 errors_with_text = _errors_with_text(errors) 80 81 raise InternalError( 82 "Unknown OpenSSL error. This error is commonly encountered when " 83 "another library is not cleaning up the OpenSSL error stack. If " 84 "you are using cryptography with another library that uses " 85 "OpenSSL try disabling it before reporting a bug. Otherwise " 86 "please file an issue at https://github.com/pyca/cryptography/" 87 "issues with information on how to reproduce " 88 "this. ({0!r})".format(errors_with_text), 89 errors_with_text, 90 ) 91 92 93 def build_conditional_library(lib, conditional_names): 94 conditional_lib = types.ModuleType("lib") 95 conditional_lib._original_lib = lib 96 excluded_names = set() 97 for condition, names_cb in conditional_names.items(): 98 if not getattr(lib, condition): 99 excluded_names.update(names_cb()) 100 101 for attr in dir(lib): 102 if attr not in excluded_names: 103 setattr(conditional_lib, attr, getattr(lib, attr)) 104 105 return conditional_lib 106 107 108 class Binding(object): 109 """ 110 OpenSSL API wrapper. 
111 """ 112 113 lib = None 114 ffi = ffi 115 _lib_loaded = False 116 _init_lock = threading.Lock() 117 _lock_init_lock = threading.Lock() 118 119 def __init__(self): 120 self._ensure_ffi_initialized() 121 122 @classmethod 123 def _register_osrandom_engine(cls): 124 # Clear any errors extant in the queue before we start. In many 125 # scenarios other things may be interacting with OpenSSL in the same 126 # process space and it has proven untenable to assume that they will 127 # reliably clear the error queue. Once we clear it here we will 128 # error on any subsequent unexpected item in the stack. 129 cls.lib.ERR_clear_error() 130 if cls.lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE: 131 result = cls.lib.Cryptography_add_osrandom_engine() 132 _openssl_assert(cls.lib, result in (1, 2)) 133 134 @classmethod 135 def _ensure_ffi_initialized(cls): 136 with cls._init_lock: 137 if not cls._lib_loaded: 138 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES) 139 cls._lib_loaded = True 140 # initialize the SSL library 141 cls.lib.SSL_library_init() 142 # adds all ciphers/digests for EVP 143 cls.lib.OpenSSL_add_all_algorithms() 144 # loads error strings for libcrypto and libssl functions 145 cls.lib.SSL_load_error_strings() 146 cls._register_osrandom_engine() 147 148 @classmethod 149 def init_static_locks(cls): 150 with cls._lock_init_lock: 151 cls._ensure_ffi_initialized() 152 # Use Python's implementation if available, importing _ssl triggers 153 # the setup for this. 154 __import__("_ssl") 155 156 if ( 157 not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS 158 or cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL 159 ): 160 return 161 162 # If nothing else has setup a locking callback already, we set up 163 # our own 164 res = lib.Cryptography_setup_ssl_threads() 165 _openssl_assert(cls.lib, res == 1) 166 167 168 def _verify_openssl_version(lib): 169 if ( 170 lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 171 and not lib.CRYPTOGRAPHY_IS_LIBRESSL 172 ): 173 warnings.warn( 174 "OpenSSL version 1.0.2 is no longer supported by the OpenSSL " 175 "project, please upgrade. The next version of cryptography will " 176 "drop support for it.", 177 utils.CryptographyDeprecationWarning, 178 ) 179 180 181 def _verify_package_version(version): 182 # Occasionally we run into situations where the version of the Python 183 # package does not match the version of the shared object that is loaded. 184 # This may occur in environments where multiple versions of cryptography 185 # are installed and available in the python path. To avoid errors cropping 186 # up later this code checks that the currently imported package and the 187 # shared object that were loaded have the same version and raise an 188 # ImportError if they do not 189 so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION) 190 if version.encode("ascii") != so_package_version: 191 raise ImportError( 192 "The version of cryptography does not match the loaded " 193 "shared object. This can happen if you have multiple copies of " 194 "cryptography installed in your Python path. Please try creating " 195 "a new virtual environment to resolve this issue. " 196 "Loaded python version: {}, shared object version: {}".format( 197 version, so_package_version 198 ) 199 ) 200 201 202 _verify_package_version(cryptography.__version__) 203 204 # OpenSSL is not thread safe until the locks are initialized. We call this 205 # method in module scope so that it executes with the import lock. 
On 206 # Pythons < 3.4 this import lock is a global lock, which can prevent a race 207 # condition registering the OpenSSL locks. On Python 3.4+ the import lock 208 # is per module so this approach will not work. 209 Binding.init_static_locks() 210 211 _verify_openssl_version(Binding.lib) 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py --- a/src/cryptography/hazmat/bindings/openssl/binding.py +++ b/src/cryptography/hazmat/bindings/openssl/binding.py @@ -5,6 +5,7 @@ from __future__ import absolute_import, division, print_function import collections +import os import threading import types import warnings @@ -170,12 +171,19 @@ lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 and not lib.CRYPTOGRAPHY_IS_LIBRESSL ): - warnings.warn( - "OpenSSL version 1.0.2 is no longer supported by the OpenSSL " - "project, please upgrade. The next version of cryptography will " - "drop support for it.", - utils.CryptographyDeprecationWarning, - ) + if os.environ.get("CRYPTOGRAPHY_ALLOW_OPENSSL_102"): + warnings.warn( + "OpenSSL version 1.0.2 is no longer supported by the OpenSSL " + "project, please upgrade. The next version of cryptography " + "will completely remove support for it.", + utils.CryptographyDeprecationWarning, + ) + else: + raise RuntimeError( + "You are linking against OpenSSL 1.0.2, which is no longer " + "supported by the OpenSSL project. You need to upgrade to a " + "newer version of OpenSSL." + ) def _verify_package_version(version):
{"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -5,6 +5,7 @@\n from __future__ import absolute_import, division, print_function\n \n import collections\n+import os\n import threading\n import types\n import warnings\n@@ -170,12 +171,19 @@\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_110\n and not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n- warnings.warn(\n- \"OpenSSL version 1.0.2 is no longer supported by the OpenSSL \"\n- \"project, please upgrade. The next version of cryptography will \"\n- \"drop support for it.\",\n- utils.CryptographyDeprecationWarning,\n- )\n+ if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_102\"):\n+ warnings.warn(\n+ \"OpenSSL version 1.0.2 is no longer supported by the OpenSSL \"\n+ \"project, please upgrade. The next version of cryptography \"\n+ \"will completely remove support for it.\",\n+ utils.CryptographyDeprecationWarning,\n+ )\n+ else:\n+ raise RuntimeError(\n+ \"You are linking against OpenSSL 1.0.2, which is no longer \"\n+ \"supported by the OpenSSL project. You need to upgrade to a \"\n+ \"newer version of OpenSSL.\"\n+ )\n \n \n def _verify_package_version(version):\n", "issue": "Make OpenSSL 1.0.2 error (+ env var fallback)\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport threading\nimport types\nimport warnings\n\nimport cryptography\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _errors_with_text(errors):\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n return errors_with_text\n\n\ndef _consume_errors_with_text(lib):\n return _errors_with_text(_consume_errors(lib))\n\n\ndef _openssl_assert(lib, ok, errors=None):\n if not ok:\n if errors is None:\n errors = _consume_errors(lib)\n errors_with_text = _errors_with_text(errors)\n\n raise InternalError(\n \"Unknown OpenSSL error. 
This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text,\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n if cls.lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE:\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (\n not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS\n or cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL\n ):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_110\n and not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n warnings.warn(\n \"OpenSSL version 1.0.2 is no longer supported by the OpenSSL \"\n \"project, please upgrade. The next version of cryptography will \"\n \"drop support for it.\",\n utils.CryptographyDeprecationWarning,\n )\n\n\ndef _verify_package_version(version):\n # Occasionally we run into situations where the version of the Python\n # package does not match the version of the shared object that is loaded.\n # This may occur in environments where multiple versions of cryptography\n # are installed and available in the python path. 
To avoid errors cropping\n # up later this code checks that the currently imported package and the\n # shared object that were loaded have the same version and raise an\n # ImportError if they do not\n so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n if version.encode(\"ascii\") != so_package_version:\n raise ImportError(\n \"The version of cryptography does not match the loaded \"\n \"shared object. This can happen if you have multiple copies of \"\n \"cryptography installed in your Python path. Please try creating \"\n \"a new virtual environment to resolve this issue. \"\n \"Loaded python version: {}, shared object version: {}\".format(\n version, so_package_version\n )\n )\n\n\n_verify_package_version(cryptography.__version__)\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport os\nimport threading\nimport types\nimport warnings\n\nimport cryptography\nfrom cryptography import utils\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n_OpenSSLErrorWithText = collections.namedtuple(\n \"_OpenSSLErrorWithText\", [\"code\", \"lib\", \"func\", \"reason\", \"reason_text\"]\n)\n\n\nclass _OpenSSLError(object):\n def __init__(self, code, lib, func, reason):\n self._code = code\n self._lib = lib\n self._func = func\n self._reason = reason\n\n def _lib_reason_match(self, lib, reason):\n return lib == self.lib and reason == self.reason\n\n code = utils.read_only_property(\"_code\")\n lib = utils.read_only_property(\"_lib\")\n func = utils.read_only_property(\"_func\")\n reason = utils.read_only_property(\"_reason\")\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n\n return errors\n\n\ndef _errors_with_text(errors):\n errors_with_text = []\n for err in errors:\n buf = ffi.new(\"char[]\", 256)\n lib.ERR_error_string_n(err.code, buf, len(buf))\n err_text_reason = ffi.string(buf)\n\n errors_with_text.append(\n _OpenSSLErrorWithText(\n err.code, err.lib, err.func, err.reason, err_text_reason\n )\n )\n\n return errors_with_text\n\n\ndef _consume_errors_with_text(lib):\n return _errors_with_text(_consume_errors(lib))\n\n\ndef _openssl_assert(lib, ok, errors=None):\n if not ok:\n if errors is None:\n errors = _consume_errors(lib)\n errors_with_text = _errors_with_text(errors)\n\n raise InternalError(\n \"Unknown OpenSSL error. This error is commonly encountered when \"\n \"another library is not cleaning up the OpenSSL error stack. 
If \"\n \"you are using cryptography with another library that uses \"\n \"OpenSSL try disabling it before reporting a bug. Otherwise \"\n \"please file an issue at https://github.com/pyca/cryptography/\"\n \"issues with information on how to reproduce \"\n \"this. ({0!r})\".format(errors_with_text),\n errors_with_text,\n )\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n conditional_lib._original_lib = lib\n excluded_names = set()\n for condition, names_cb in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names.update(names_cb())\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n\n lib = None\n ffi = ffi\n _lib_loaded = False\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n # Clear any errors extant in the queue before we start. In many\n # scenarios other things may be interacting with OpenSSL in the same\n # process space and it has proven untenable to assume that they will\n # reliably clear the error queue. Once we clear it here we will\n # error on any subsequent unexpected item in the stack.\n cls.lib.ERR_clear_error()\n if cls.lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE:\n result = cls.lib.Cryptography_add_osrandom_engine()\n _openssl_assert(cls.lib, result in (1, 2))\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if (\n not cls.lib.Cryptography_HAS_LOCKING_CALLBACKS\n or cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL\n ):\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n res = lib.Cryptography_setup_ssl_threads()\n _openssl_assert(cls.lib, res == 1)\n\n\ndef _verify_openssl_version(lib):\n if (\n lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_110\n and not lib.CRYPTOGRAPHY_IS_LIBRESSL\n ):\n if os.environ.get(\"CRYPTOGRAPHY_ALLOW_OPENSSL_102\"):\n warnings.warn(\n \"OpenSSL version 1.0.2 is no longer supported by the OpenSSL \"\n \"project, please upgrade. The next version of cryptography \"\n \"will completely remove support for it.\",\n utils.CryptographyDeprecationWarning,\n )\n else:\n raise RuntimeError(\n \"You are linking against OpenSSL 1.0.2, which is no longer \"\n \"supported by the OpenSSL project. You need to upgrade to a \"\n \"newer version of OpenSSL.\"\n )\n\n\ndef _verify_package_version(version):\n # Occasionally we run into situations where the version of the Python\n # package does not match the version of the shared object that is loaded.\n # This may occur in environments where multiple versions of cryptography\n # are installed and available in the python path. 
To avoid errors cropping\n # up later this code checks that the currently imported package and the\n # shared object that were loaded have the same version and raise an\n # ImportError if they do not\n so_package_version = ffi.string(lib.CRYPTOGRAPHY_PACKAGE_VERSION)\n if version.encode(\"ascii\") != so_package_version:\n raise ImportError(\n \"The version of cryptography does not match the loaded \"\n \"shared object. This can happen if you have multiple copies of \"\n \"cryptography installed in your Python path. Please try creating \"\n \"a new virtual environment to resolve this issue. \"\n \"Loaded python version: {}, shared object version: {}\".format(\n version, so_package_version\n )\n )\n\n\n_verify_package_version(cryptography.__version__)\n\n# OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n\n_verify_openssl_version(Binding.lib)\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]}
2,424
353
gh_patches_debug_38672
rasdani/github-patches
git_diff
dask__dask-4160
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use key names rather than function names in dask.visualize When creating task graph images we currently inspect the task graph to find functions and use those function names in the graph. Now that atop produces `subgraph_callables` all the time, this is less informative than it used to be. We could either dig deeper here, or we could use the keynames instead. This is what is done in most of the other diagnostics. It might be a good time to switch over? cc @jcrist --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dask/dot.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 import re 4 import os 5 from functools import partial 6 7 from .compatibility import apply 8 from .core import istask, get_dependencies, ishashable 9 from .utils import funcname, import_required 10 11 12 graphviz = import_required("graphviz", "Drawing dask graphs requires the " 13 "`graphviz` python library and the " 14 "`graphviz` system library to be " 15 "installed.") 16 17 18 def task_label(task): 19 """Label for a task on a dot graph. 20 21 Examples 22 -------- 23 >>> from operator import add 24 >>> task_label((add, 1, 2)) 25 'add' 26 >>> task_label((add, (add, 1, 2), 3)) 27 'add(...)' 28 """ 29 func = task[0] 30 if func is apply: 31 func = task[1] 32 if hasattr(func, 'funcs'): 33 if len(func.funcs) > 1: 34 return '{0}(...)'.format(funcname(func.funcs[0])) 35 else: 36 head = funcname(func.funcs[0]) 37 else: 38 head = funcname(func) 39 if any(has_sub_tasks(i) for i in task[1:]): 40 return '{0}(...)'.format(head) 41 else: 42 return head 43 44 45 def has_sub_tasks(task): 46 """Returns True if the task has sub tasks""" 47 if istask(task): 48 return True 49 elif isinstance(task, list): 50 return any(has_sub_tasks(i) for i in task) 51 else: 52 return False 53 54 55 def name(x): 56 try: 57 return str(hash(x)) 58 except TypeError: 59 return str(hash(str(x))) 60 61 62 _HASHPAT = re.compile('([0-9a-z]{32})') 63 _UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})') 64 65 66 def label(x, cache=None): 67 """ 68 69 >>> label('x') 70 'x' 71 72 >>> label(('x', 1)) 73 "('x', 1)" 74 75 >>> from hashlib import md5 76 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest() 77 >>> x 78 'x-81dc9bdb52d04dc20036dbd8313ed055-hello' 79 80 >>> label(x) 81 'x-#-hello' 82 83 >>> from uuid import uuid1 84 >>> x = 'x-%s-hello' % uuid1() 85 >>> x # doctest: +SKIP 86 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello' 87 88 >>> label(x) 89 'x-#-hello' 90 """ 91 s = str(x) 92 for pattern in (_HASHPAT, _UUIDPAT): 93 m = re.search(pattern, s) 94 if m is not None: 95 for h in m.groups(): 96 if cache is not None: 97 n = cache.get(h, len(cache)) 98 label = '#{0}'.format(n) 99 # cache will be overwritten destructively 100 cache[h] = n 101 else: 102 label = '#' 103 s = s.replace(h, label) 104 return s 105 106 107 def to_graphviz(dsk, data_attributes=None, function_attributes=None, 108 rankdir='BT', graph_attr={}, node_attr=None, edge_attr=None, **kwargs): 109 if data_attributes is None: 110 data_attributes = {} 111 if function_attributes is None: 112 function_attributes = {} 113 114 graph_attr = graph_attr or {} 115 graph_attr['rankdir'] = rankdir 116 graph_attr.update(kwargs) 117 g = graphviz.Digraph(graph_attr=graph_attr, 118 node_attr=node_attr, 119 edge_attr=edge_attr) 120 121 seen = set() 
122 cache = {} 123 124 for k, v in dsk.items(): 125 k_name = name(k) 126 if k_name not in seen: 127 seen.add(k_name) 128 attrs = data_attributes.get(k, {}) 129 attrs.setdefault('label', label(k, cache=cache)) 130 attrs.setdefault('shape', 'box') 131 g.node(k_name, **attrs) 132 133 if istask(v): 134 func_name = name((k, 'function')) 135 if func_name not in seen: 136 seen.add(func_name) 137 attrs = function_attributes.get(k, {}) 138 attrs.setdefault('label', task_label(v)) 139 attrs.setdefault('shape', 'circle') 140 g.node(func_name, **attrs) 141 g.edge(func_name, k_name) 142 143 for dep in get_dependencies(dsk, k): 144 dep_name = name(dep) 145 if dep_name not in seen: 146 seen.add(dep_name) 147 attrs = data_attributes.get(dep, {}) 148 attrs.setdefault('label', label(dep, cache=cache)) 149 attrs.setdefault('shape', 'box') 150 g.node(dep_name, **attrs) 151 g.edge(dep_name, func_name) 152 elif ishashable(v) and v in dsk: 153 g.edge(name(v), k_name) 154 return g 155 156 157 IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png']) 158 IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf']) 159 160 161 def _get_display_cls(format): 162 """ 163 Get the appropriate IPython display class for `format`. 164 165 Returns `IPython.display.SVG` if format=='svg', otherwise 166 `IPython.display.Image`. 167 168 If IPython is not importable, return dummy function that swallows its 169 arguments and returns None. 170 """ 171 dummy = lambda *args, **kwargs: None 172 try: 173 import IPython.display as display 174 except ImportError: 175 # Can't return a display object if no IPython. 176 return dummy 177 178 if format in IPYTHON_NO_DISPLAY_FORMATS: 179 # IPython can't display this format natively, so just return None. 180 return dummy 181 elif format in IPYTHON_IMAGE_FORMATS: 182 # Partially apply `format` so that `Image` and `SVG` supply a uniform 183 # interface to the caller. 184 return partial(display.Image, format=format) 185 elif format == 'svg': 186 return display.SVG 187 else: 188 raise ValueError("Unknown format '%s' passed to `dot_graph`" % format) 189 190 191 def dot_graph(dsk, filename='mydask', format=None, **kwargs): 192 """ 193 Render a task graph using dot. 194 195 If `filename` is not None, write a file to disk with that name in the 196 format specified by `format`. `filename` should not include an extension. 197 198 Parameters 199 ---------- 200 dsk : dict 201 The graph to display. 202 filename : str or None, optional 203 The name (without an extension) of the file to write to disk. If 204 `filename` is None, no file will be written, and we communicate with 205 dot using only pipes. Default is 'mydask'. 206 format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional 207 Format in which to write output file. Default is 'png'. 208 **kwargs 209 Additional keyword arguments to forward to `to_graphviz`. 210 211 Returns 212 ------- 213 result : None or IPython.display.Image or IPython.display.SVG (See below.) 214 215 Notes 216 ----- 217 If IPython is installed, we return an IPython.display object in the 218 requested format. If IPython is not installed, we just return None. 219 220 We always return None if format is 'pdf' or 'dot', because IPython can't 221 display these formats natively. Passing these formats with filename=None 222 will not produce any useful output. 
223 224 See Also 225 -------- 226 dask.dot.to_graphviz 227 """ 228 g = to_graphviz(dsk, **kwargs) 229 230 fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg'] 231 if format is None and any(filename.lower().endswith(fmt) for fmt in fmts): 232 filename, format = os.path.splitext(filename) 233 format = format[1:].lower() 234 235 if format is None: 236 format = 'png' 237 238 data = g.pipe(format=format) 239 if not data: 240 raise RuntimeError("Graphviz failed to properly produce an image. " 241 "This probably means your installation of graphviz " 242 "is missing png support. See: " 243 "https://github.com/ContinuumIO/anaconda-issues/" 244 "issues/485 for more information.") 245 246 display_cls = _get_display_cls(format) 247 248 if not filename: 249 return display_cls(data=data) 250 251 full_filename = '.'.join([filename, format]) 252 with open(full_filename, 'wb') as f: 253 f.write(data) 254 255 return display_cls(filename=full_filename) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dask/dot.py b/dask/dot.py --- a/dask/dot.py +++ b/dask/dot.py @@ -6,7 +6,7 @@ from .compatibility import apply from .core import istask, get_dependencies, ishashable -from .utils import funcname, import_required +from .utils import funcname, import_required, key_split graphviz = import_required("graphviz", "Drawing dask graphs requires the " @@ -104,6 +104,25 @@ return s +def box_label(key): + """ Label boxes in graph by chunk index + + >>> box_label(('x', 1, 2, 3)) + '(1, 2, 3)' + >>> box_label(('x', 123)) + '123' + >>> box_label('x') + '' + """ + if isinstance(key, tuple): + key = key[1:] + if len(key) == 1: + [key] = key + return str(key) + else: + return "" + + def to_graphviz(dsk, data_attributes=None, function_attributes=None, rankdir='BT', graph_attr={}, node_attr=None, edge_attr=None, **kwargs): if data_attributes is None: @@ -119,14 +138,13 @@ edge_attr=edge_attr) seen = set() - cache = {} for k, v in dsk.items(): k_name = name(k) if k_name not in seen: seen.add(k_name) attrs = data_attributes.get(k, {}) - attrs.setdefault('label', label(k, cache=cache)) + attrs.setdefault('label', box_label(k)) attrs.setdefault('shape', 'box') g.node(k_name, **attrs) @@ -135,7 +153,7 @@ if func_name not in seen: seen.add(func_name) attrs = function_attributes.get(k, {}) - attrs.setdefault('label', task_label(v)) + attrs.setdefault('label', key_split(k)) attrs.setdefault('shape', 'circle') g.node(func_name, **attrs) g.edge(func_name, k_name) @@ -145,7 +163,7 @@ if dep_name not in seen: seen.add(dep_name) attrs = data_attributes.get(dep, {}) - attrs.setdefault('label', label(dep, cache=cache)) + attrs.setdefault('label', box_label(dep)) attrs.setdefault('shape', 'box') g.node(dep_name, **attrs) g.edge(dep_name, func_name)
{"golden_diff": "diff --git a/dask/dot.py b/dask/dot.py\n--- a/dask/dot.py\n+++ b/dask/dot.py\n@@ -6,7 +6,7 @@\n \n from .compatibility import apply\n from .core import istask, get_dependencies, ishashable\n-from .utils import funcname, import_required\n+from .utils import funcname, import_required, key_split\n \n \n graphviz = import_required(\"graphviz\", \"Drawing dask graphs requires the \"\n@@ -104,6 +104,25 @@\n return s\n \n \n+def box_label(key):\n+ \"\"\" Label boxes in graph by chunk index\n+\n+ >>> box_label(('x', 1, 2, 3))\n+ '(1, 2, 3)'\n+ >>> box_label(('x', 123))\n+ '123'\n+ >>> box_label('x')\n+ ''\n+ \"\"\"\n+ if isinstance(key, tuple):\n+ key = key[1:]\n+ if len(key) == 1:\n+ [key] = key\n+ return str(key)\n+ else:\n+ return \"\"\n+\n+\n def to_graphviz(dsk, data_attributes=None, function_attributes=None,\n rankdir='BT', graph_attr={}, node_attr=None, edge_attr=None, **kwargs):\n if data_attributes is None:\n@@ -119,14 +138,13 @@\n edge_attr=edge_attr)\n \n seen = set()\n- cache = {}\n \n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n attrs = data_attributes.get(k, {})\n- attrs.setdefault('label', label(k, cache=cache))\n+ attrs.setdefault('label', box_label(k))\n attrs.setdefault('shape', 'box')\n g.node(k_name, **attrs)\n \n@@ -135,7 +153,7 @@\n if func_name not in seen:\n seen.add(func_name)\n attrs = function_attributes.get(k, {})\n- attrs.setdefault('label', task_label(v))\n+ attrs.setdefault('label', key_split(k))\n attrs.setdefault('shape', 'circle')\n g.node(func_name, **attrs)\n g.edge(func_name, k_name)\n@@ -145,7 +163,7 @@\n if dep_name not in seen:\n seen.add(dep_name)\n attrs = data_attributes.get(dep, {})\n- attrs.setdefault('label', label(dep, cache=cache))\n+ attrs.setdefault('label', box_label(dep))\n attrs.setdefault('shape', 'box')\n g.node(dep_name, **attrs)\n g.edge(dep_name, func_name)\n", "issue": "Use key names rather than function names in dask.visualize\nWhen creating task graph images we currently inspect the task graph to find functions and use those function names in the graph. Now that atop produces `subgraph_callables` all the time, this is less informative than it used to be. We could either dig deeper here, or we could use the keynames instead. This is what is done in most of the other diagnostics. 
It might be a good time to switch over?\r\n\r\ncc @jcrist \n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nimport os\nfrom functools import partial\n\nfrom .compatibility import apply\nfrom .core import istask, get_dependencies, ishashable\nfrom .utils import funcname, import_required\n\n\ngraphviz = import_required(\"graphviz\", \"Drawing dask graphs requires the \"\n \"`graphviz` python library and the \"\n \"`graphviz` system library to be \"\n \"installed.\")\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if func is apply:\n func = task[1]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(func)\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n\n >>> from uuid import uuid1\n >>> x = 'x-%s-hello' % uuid1()\n >>> x # doctest: +SKIP\n 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n for pattern in (_HASHPAT, _UUIDPAT):\n m = re.search(pattern, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None,\n rankdir='BT', graph_attr={}, node_attr=None, edge_attr=None, **kwargs):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n graph_attr = graph_attr or {}\n graph_attr['rankdir'] = rankdir\n graph_attr.update(kwargs)\n g = graphviz.Digraph(graph_attr=graph_attr,\n node_attr=node_attr,\n edge_attr=edge_attr)\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n attrs = data_attributes.get(k, {})\n attrs.setdefault('label', label(k, cache=cache))\n attrs.setdefault('shape', 'box')\n g.node(k_name, **attrs)\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n attrs = function_attributes.get(k, {})\n attrs.setdefault('label', task_label(v))\n attrs.setdefault('shape', 'circle')\n g.node(func_name, **attrs)\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n attrs = data_attributes.get(dep, {})\n attrs.setdefault('label', label(dep, cache=cache))\n attrs.setdefault('shape', 'box')\n 
g.node(dep_name, **attrs)\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\nIPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == 'svg':\n return display.SVG\n else:\n raise ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)\n\n\ndef dot_graph(dsk, filename='mydask', format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with that name in the\n format specified by `format`. `filename` should not include an extension.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name (without an extension) of the file to write to disk. If\n `filename` is None, no file will be written, and we communicate with\n dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n\n fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']\n if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):\n filename, format = os.path.splitext(filename)\n format = format[1:].lower()\n\n if format is None:\n format = 'png'\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. 
See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\")\n\n display_cls = _get_display_cls(format)\n\n if not filename:\n return display_cls(data=data)\n\n full_filename = '.'.join([filename, format])\n with open(full_filename, 'wb') as f:\n f.write(data)\n\n return display_cls(filename=full_filename)\n", "path": "dask/dot.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nimport os\nfrom functools import partial\n\nfrom .compatibility import apply\nfrom .core import istask, get_dependencies, ishashable\nfrom .utils import funcname, import_required, key_split\n\n\ngraphviz = import_required(\"graphviz\", \"Drawing dask graphs requires the \"\n \"`graphviz` python library and the \"\n \"`graphviz` system library to be \"\n \"installed.\")\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if func is apply:\n func = task[1]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(func)\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n_UUIDPAT = re.compile('([0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n\n >>> from uuid import uuid1\n >>> x = 'x-%s-hello' % uuid1()\n >>> x # doctest: +SKIP\n 'x-4c1a3d7e-0b45-11e6-8334-54ee75105593-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n s = str(x)\n for pattern in (_HASHPAT, _UUIDPAT):\n m = re.search(pattern, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef box_label(key):\n \"\"\" Label boxes in graph by chunk index\n\n >>> box_label(('x', 1, 2, 3))\n '(1, 2, 3)'\n >>> box_label(('x', 123))\n '123'\n >>> box_label('x')\n ''\n \"\"\"\n if isinstance(key, tuple):\n key = key[1:]\n if len(key) == 1:\n [key] = key\n return str(key)\n else:\n return \"\"\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None,\n rankdir='BT', graph_attr={}, node_attr=None, edge_attr=None, **kwargs):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n graph_attr = graph_attr or {}\n graph_attr['rankdir'] = rankdir\n graph_attr.update(kwargs)\n g = graphviz.Digraph(graph_attr=graph_attr,\n node_attr=node_attr,\n edge_attr=edge_attr)\n\n seen = set()\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n attrs = data_attributes.get(k, {})\n 
attrs.setdefault('label', box_label(k))\n attrs.setdefault('shape', 'box')\n g.node(k_name, **attrs)\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n attrs = function_attributes.get(k, {})\n attrs.setdefault('label', key_split(k))\n attrs.setdefault('shape', 'circle')\n g.node(func_name, **attrs)\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n attrs = data_attributes.get(dep, {})\n attrs.setdefault('label', box_label(dep))\n attrs.setdefault('shape', 'box')\n g.node(dep_name, **attrs)\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\nIPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])\nIPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])\n\n\ndef _get_display_cls(format):\n \"\"\"\n Get the appropriate IPython display class for `format`.\n\n Returns `IPython.display.SVG` if format=='svg', otherwise\n `IPython.display.Image`.\n\n If IPython is not importable, return dummy function that swallows its\n arguments and returns None.\n \"\"\"\n dummy = lambda *args, **kwargs: None\n try:\n import IPython.display as display\n except ImportError:\n # Can't return a display object if no IPython.\n return dummy\n\n if format in IPYTHON_NO_DISPLAY_FORMATS:\n # IPython can't display this format natively, so just return None.\n return dummy\n elif format in IPYTHON_IMAGE_FORMATS:\n # Partially apply `format` so that `Image` and `SVG` supply a uniform\n # interface to the caller.\n return partial(display.Image, format=format)\n elif format == 'svg':\n return display.SVG\n else:\n raise ValueError(\"Unknown format '%s' passed to `dot_graph`\" % format)\n\n\ndef dot_graph(dsk, filename='mydask', format=None, **kwargs):\n \"\"\"\n Render a task graph using dot.\n\n If `filename` is not None, write a file to disk with that name in the\n format specified by `format`. `filename` should not include an extension.\n\n Parameters\n ----------\n dsk : dict\n The graph to display.\n filename : str or None, optional\n The name (without an extension) of the file to write to disk. If\n `filename` is None, no file will be written, and we communicate with\n dot using only pipes. Default is 'mydask'.\n format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional\n Format in which to write output file. Default is 'png'.\n **kwargs\n Additional keyword arguments to forward to `to_graphviz`.\n\n Returns\n -------\n result : None or IPython.display.Image or IPython.display.SVG (See below.)\n\n Notes\n -----\n If IPython is installed, we return an IPython.display object in the\n requested format. If IPython is not installed, we just return None.\n\n We always return None if format is 'pdf' or 'dot', because IPython can't\n display these formats natively. Passing these formats with filename=None\n will not produce any useful output.\n\n See Also\n --------\n dask.dot.to_graphviz\n \"\"\"\n g = to_graphviz(dsk, **kwargs)\n\n fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']\n if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):\n filename, format = os.path.splitext(filename)\n format = format[1:].lower()\n\n if format is None:\n format = 'png'\n\n data = g.pipe(format=format)\n if not data:\n raise RuntimeError(\"Graphviz failed to properly produce an image. \"\n \"This probably means your installation of graphviz \"\n \"is missing png support. 
See: \"\n \"https://github.com/ContinuumIO/anaconda-issues/\"\n \"issues/485 for more information.\")\n\n display_cls = _get_display_cls(format)\n\n if not filename:\n return display_cls(data=data)\n\n full_filename = '.'.join([filename, format])\n with open(full_filename, 'wb') as f:\n f.write(data)\n\n return display_cls(filename=full_filename)\n", "path": "dask/dot.py"}]}
2,993
591
gh_patches_debug_9405
rasdani/github-patches
git_diff
ivy-llc__ivy-17873
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dropout1d --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/torch/nn/functional/dropout_functions.py` Content: ``` 1 # local 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 5 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 6 7 8 @to_ivy_arrays_and_back 9 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 10 def dropout(input, p=0.5, training=True, inplace=False): 11 return ivy.dropout(input, p, training=training) 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py --- a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py +++ b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py @@ -9,3 +9,11 @@ @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") def dropout(input, p=0.5, training=True, inplace=False): return ivy.dropout(input, p, training=training) + + +@to_ivy_arrays_and_back +@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") +def dropout1d(input, p=0.5, training=True, inplace=False): + if inplace: + return ivy.dropout1d(input, p, training=training, data_format="NCW", out=input) + return ivy.dropout1d(input, p, training=training, data_format="NCW")
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n--- a/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n+++ b/ivy/functional/frontends/torch/nn/functional/dropout_functions.py\n@@ -9,3 +9,11 @@\n @with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n def dropout(input, p=0.5, training=True, inplace=False):\n return ivy.dropout(input, p, training=training)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n+def dropout1d(input, p=0.5, training=True, inplace=False):\n+ if inplace:\n+ return ivy.dropout1d(input, p, training=training, data_format=\"NCW\", out=input)\n+ return ivy.dropout1d(input, p, training=training, data_format=\"NCW\")\n", "issue": "dropout1d\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef dropout(input, p=0.5, training=True, inplace=False):\n return ivy.dropout(input, p, training=training)\n", "path": "ivy/functional/frontends/torch/nn/functional/dropout_functions.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef dropout(input, p=0.5, training=True, inplace=False):\n return ivy.dropout(input, p, training=training)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef dropout1d(input, p=0.5, training=True, inplace=False):\n if inplace:\n return ivy.dropout1d(input, p, training=training, data_format=\"NCW\", out=input)\n return ivy.dropout1d(input, p, training=training, data_format=\"NCW\")\n", "path": "ivy/functional/frontends/torch/nn/functional/dropout_functions.py"}]}
390
251
gh_patches_debug_34828
rasdani/github-patches
git_diff
PaddlePaddle__models-1879
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 文本分类text_classification训练选项不匹配 1. train.py中batch_size参数冗余 2. 命令行选项gru配置错误 3. GRU Layer embedding学习率默认配置过大,导致模型训练补课收敛。 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fluid/PaddleNLP/text_classification/train.py` Content: ``` 1 import os 2 import six 3 import sys 4 import time 5 import unittest 6 import contextlib 7 8 import paddle 9 import paddle.fluid as fluid 10 11 import utils 12 from nets import bow_net 13 from nets import cnn_net 14 from nets import lstm_net 15 from nets import gru_net 16 17 18 def train(train_reader, 19 word_dict, 20 network, 21 use_cuda, 22 parallel, 23 save_dirname, 24 lr=0.2, 25 batch_size=128, 26 pass_num=30): 27 """ 28 train network 29 """ 30 data = fluid.layers.data( 31 name="words", shape=[1], dtype="int64", lod_level=1) 32 33 label = fluid.layers.data(name="label", shape=[1], dtype="int64") 34 35 if not parallel: 36 cost, acc, prediction = network(data, label, len(word_dict)) 37 else: 38 places = fluid.layers.device.get_places(device_count=2) 39 pd = fluid.layers.ParallelDo(places) 40 with pd.do(): 41 cost, acc, prediction = network( 42 pd.read_input(data), pd.read_input(label), len(word_dict)) 43 44 pd.write_output(cost) 45 pd.write_output(acc) 46 47 cost, acc = pd() 48 cost = fluid.layers.mean(cost) 49 acc = fluid.layers.mean(acc) 50 51 sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=lr) 52 sgd_optimizer.minimize(cost) 53 54 place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() 55 exe = fluid.Executor(place) 56 feeder = fluid.DataFeeder(feed_list=[data, label], place=place) 57 58 # For internal continuous evaluation 59 if "CE_MODE_X" in os.environ: 60 fluid.default_startup_program().random_seed = 110 61 exe.run(fluid.default_startup_program()) 62 for pass_id in six.moves.xrange(pass_num): 63 pass_start = time.time() 64 data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0 65 for data in train_reader(): 66 avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(), 67 feed=feeder.feed(data), 68 fetch_list=[cost, acc]) 69 data_size = len(data) 70 total_acc += data_size * avg_acc_np 71 total_cost += data_size * avg_cost_np 72 data_count += data_size 73 avg_cost = total_cost / data_count 74 75 avg_acc = total_acc / data_count 76 print("pass_id: %d, avg_acc: %f, avg_cost: %f, pass_time_cost: %f" % 77 (pass_id, avg_acc, avg_cost, time.time() - pass_start)) 78 79 epoch_model = save_dirname + "/" + "epoch" + str(pass_id) 80 fluid.io.save_inference_model(epoch_model, ["words", "label"], acc, exe) 81 82 pass_end = time.time() 83 # For internal continuous evaluation 84 if "CE_MODE_X" in os.environ: 85 print("kpis train_acc %f" % avg_acc) 86 print("kpis train_cost %f" % avg_cost) 87 print("kpis train_duration %f" % (pass_end - pass_start)) 88 89 90 def train_net(): 91 word_dict, train_reader, test_reader = utils.prepare_data( 92 "imdb", self_dict=False, batch_size=128, buf_size=50000) 93 94 if sys.argv[1] == "bow": 95 train( 96 train_reader, 97 word_dict, 98 bow_net, 99 use_cuda=False, 100 parallel=False, 101 save_dirname="bow_model", 102 lr=0.002, 103 pass_num=30, 104 batch_size=4) 105 elif sys.argv[1] == "cnn": 106 train( 107 train_reader, 108 word_dict, 109 cnn_net, 110 use_cuda=True, 111 parallel=False, 112 save_dirname="cnn_model", 113 lr=0.01, 114 pass_num=30, 115 batch_size=4) 116 elif sys.argv[1] == "lstm": 117 train( 118 
train_reader, 119 word_dict, 120 lstm_net, 121 use_cuda=True, 122 parallel=False, 123 save_dirname="lstm_model", 124 lr=0.05, 125 pass_num=30, 126 batch_size=4) 127 elif sys.argv[1] == "gru": 128 train( 129 train_reader, 130 word_dict, 131 lstm_net, 132 use_cuda=True, 133 parallel=False, 134 save_dirname="gru_model", 135 lr=0.05, 136 pass_num=30, 137 batch_size=4) 138 else: 139 print("network name cannot be found!") 140 sys.exit(1) 141 142 143 if __name__ == "__main__": 144 train_net() 145 ``` Path: `fluid/PaddleNLP/text_classification/nets.py` Content: ``` 1 import sys 2 import time 3 import numpy as np 4 5 import paddle 6 import paddle.fluid as fluid 7 8 9 def bow_net(data, 10 label, 11 dict_dim, 12 emb_dim=128, 13 hid_dim=128, 14 hid_dim2=96, 15 class_dim=2): 16 """ 17 bow net 18 """ 19 emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) 20 bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') 21 bow_tanh = fluid.layers.tanh(bow) 22 fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") 23 fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") 24 prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") 25 cost = fluid.layers.cross_entropy(input=prediction, label=label) 26 avg_cost = fluid.layers.mean(x=cost) 27 acc = fluid.layers.accuracy(input=prediction, label=label) 28 29 return avg_cost, acc, prediction 30 31 32 def cnn_net(data, 33 label, 34 dict_dim, 35 emb_dim=128, 36 hid_dim=128, 37 hid_dim2=96, 38 class_dim=2, 39 win_size=3): 40 """ 41 conv net 42 """ 43 emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) 44 45 conv_3 = fluid.nets.sequence_conv_pool( 46 input=emb, 47 num_filters=hid_dim, 48 filter_size=win_size, 49 act="tanh", 50 pool_type="max") 51 52 fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2) 53 54 prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act="softmax") 55 cost = fluid.layers.cross_entropy(input=prediction, label=label) 56 avg_cost = fluid.layers.mean(x=cost) 57 acc = fluid.layers.accuracy(input=prediction, label=label) 58 59 return avg_cost, acc, prediction 60 61 62 def lstm_net(data, 63 label, 64 dict_dim, 65 emb_dim=128, 66 hid_dim=128, 67 hid_dim2=96, 68 class_dim=2, 69 emb_lr=30.0): 70 """ 71 lstm net 72 """ 73 emb = fluid.layers.embedding( 74 input=data, 75 size=[dict_dim, emb_dim], 76 param_attr=fluid.ParamAttr(learning_rate=emb_lr)) 77 78 fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4) 79 80 lstm_h, c = fluid.layers.dynamic_lstm( 81 input=fc0, size=hid_dim * 4, is_reverse=False) 82 83 lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max') 84 lstm_max_tanh = fluid.layers.tanh(lstm_max) 85 86 fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') 87 88 prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') 89 90 cost = fluid.layers.cross_entropy(input=prediction, label=label) 91 avg_cost = fluid.layers.mean(x=cost) 92 acc = fluid.layers.accuracy(input=prediction, label=label) 93 94 return avg_cost, acc, prediction 95 96 97 def gru_net(data, 98 label, 99 dict_dim, 100 emb_dim=128, 101 hid_dim=128, 102 hid_dim2=96, 103 class_dim=2, 104 emb_lr=400.0): 105 """ 106 gru net 107 """ 108 emb = fluid.layers.embedding( 109 input=data, 110 size=[dict_dim, emb_dim], 111 param_attr=fluid.ParamAttr(learning_rate=emb_lr)) 112 113 fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3) 114 gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False) 115 gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max') 116 
gru_max_tanh = fluid.layers.tanh(gru_max) 117 fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh') 118 prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') 119 120 cost = fluid.layers.cross_entropy(input=prediction, label=label) 121 avg_cost = fluid.layers.mean(x=cost) 122 acc = fluid.layers.accuracy(input=prediction, label=label) 123 124 return avg_cost, acc, prediction 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fluid/PaddleNLP/text_classification/nets.py b/fluid/PaddleNLP/text_classification/nets.py --- a/fluid/PaddleNLP/text_classification/nets.py +++ b/fluid/PaddleNLP/text_classification/nets.py @@ -101,7 +101,7 @@ hid_dim=128, hid_dim2=96, class_dim=2, - emb_lr=400.0): + emb_lr=30.0): """ gru net """ diff --git a/fluid/PaddleNLP/text_classification/train.py b/fluid/PaddleNLP/text_classification/train.py --- a/fluid/PaddleNLP/text_classification/train.py +++ b/fluid/PaddleNLP/text_classification/train.py @@ -22,7 +22,6 @@ parallel, save_dirname, lr=0.2, - batch_size=128, pass_num=30): """ train network @@ -100,8 +99,7 @@ parallel=False, save_dirname="bow_model", lr=0.002, - pass_num=30, - batch_size=4) + pass_num=30) elif sys.argv[1] == "cnn": train( train_reader, @@ -111,8 +109,7 @@ parallel=False, save_dirname="cnn_model", lr=0.01, - pass_num=30, - batch_size=4) + pass_num=30) elif sys.argv[1] == "lstm": train( train_reader, @@ -122,19 +119,17 @@ parallel=False, save_dirname="lstm_model", lr=0.05, - pass_num=30, - batch_size=4) + pass_num=30) elif sys.argv[1] == "gru": train( train_reader, word_dict, - lstm_net, + gru_net, use_cuda=True, parallel=False, save_dirname="gru_model", lr=0.05, - pass_num=30, - batch_size=4) + pass_num=30) else: print("network name cannot be found!") sys.exit(1)
{"golden_diff": "diff --git a/fluid/PaddleNLP/text_classification/nets.py b/fluid/PaddleNLP/text_classification/nets.py\n--- a/fluid/PaddleNLP/text_classification/nets.py\n+++ b/fluid/PaddleNLP/text_classification/nets.py\n@@ -101,7 +101,7 @@\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n- emb_lr=400.0):\n+ emb_lr=30.0):\n \"\"\"\n gru net\n \"\"\"\ndiff --git a/fluid/PaddleNLP/text_classification/train.py b/fluid/PaddleNLP/text_classification/train.py\n--- a/fluid/PaddleNLP/text_classification/train.py\n+++ b/fluid/PaddleNLP/text_classification/train.py\n@@ -22,7 +22,6 @@\n parallel,\n save_dirname,\n lr=0.2,\n- batch_size=128,\n pass_num=30):\n \"\"\"\n train network\n@@ -100,8 +99,7 @@\n parallel=False,\n save_dirname=\"bow_model\",\n lr=0.002,\n- pass_num=30,\n- batch_size=4)\n+ pass_num=30)\n elif sys.argv[1] == \"cnn\":\n train(\n train_reader,\n@@ -111,8 +109,7 @@\n parallel=False,\n save_dirname=\"cnn_model\",\n lr=0.01,\n- pass_num=30,\n- batch_size=4)\n+ pass_num=30)\n elif sys.argv[1] == \"lstm\":\n train(\n train_reader,\n@@ -122,19 +119,17 @@\n parallel=False,\n save_dirname=\"lstm_model\",\n lr=0.05,\n- pass_num=30,\n- batch_size=4)\n+ pass_num=30)\n elif sys.argv[1] == \"gru\":\n train(\n train_reader,\n word_dict,\n- lstm_net,\n+ gru_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"gru_model\",\n lr=0.05,\n- pass_num=30,\n- batch_size=4)\n+ pass_num=30)\n else:\n print(\"network name cannot be found!\")\n sys.exit(1)\n", "issue": "\u6587\u672c\u5206\u7c7btext_classification\u8bad\u7ec3\u9009\u9879\u4e0d\u5339\u914d\n1. train.py\u4e2dbatch_size\u53c2\u6570\u5197\u4f59\r\n\r\n2. \u547d\u4ee4\u884c\u9009\u9879gru\u914d\u7f6e\u9519\u8bef\r\n\r\n3. GRU Layer embedding\u5b66\u4e60\u7387\u9ed8\u8ba4\u914d\u7f6e\u8fc7\u5927\uff0c\u5bfc\u81f4\u6a21\u578b\u8bad\u7ec3\u8865\u8bfe\u6536\u655b\u3002\n", "before_files": [{"content": "import os\nimport six\nimport sys\nimport time\nimport unittest\nimport contextlib\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport utils\nfrom nets import bow_net\nfrom nets import cnn_net\nfrom nets import lstm_net\nfrom nets import gru_net\n\n\ndef train(train_reader,\n word_dict,\n network,\n use_cuda,\n parallel,\n save_dirname,\n lr=0.2,\n batch_size=128,\n pass_num=30):\n \"\"\"\n train network\n \"\"\"\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n if not parallel:\n cost, acc, prediction = network(data, label, len(word_dict))\n else:\n places = fluid.layers.device.get_places(device_count=2)\n pd = fluid.layers.ParallelDo(places)\n with pd.do():\n cost, acc, prediction = network(\n pd.read_input(data), pd.read_input(label), len(word_dict))\n\n pd.write_output(cost)\n pd.write_output(acc)\n\n cost, acc = pd()\n cost = fluid.layers.mean(cost)\n acc = fluid.layers.mean(acc)\n\n sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=lr)\n sgd_optimizer.minimize(cost)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n\n # For internal continuous evaluation\n if \"CE_MODE_X\" in os.environ:\n fluid.default_startup_program().random_seed = 110\n exe.run(fluid.default_startup_program())\n for pass_id in six.moves.xrange(pass_num):\n pass_start = time.time()\n data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0\n for data in train_reader():\n avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),\n 
feed=feeder.feed(data),\n fetch_list=[cost, acc])\n data_size = len(data)\n total_acc += data_size * avg_acc_np\n total_cost += data_size * avg_cost_np\n data_count += data_size\n avg_cost = total_cost / data_count\n\n avg_acc = total_acc / data_count\n print(\"pass_id: %d, avg_acc: %f, avg_cost: %f, pass_time_cost: %f\" %\n (pass_id, avg_acc, avg_cost, time.time() - pass_start))\n\n epoch_model = save_dirname + \"/\" + \"epoch\" + str(pass_id)\n fluid.io.save_inference_model(epoch_model, [\"words\", \"label\"], acc, exe)\n\n pass_end = time.time()\n # For internal continuous evaluation\n if \"CE_MODE_X\" in os.environ:\n print(\"kpis\ttrain_acc\t%f\" % avg_acc)\n print(\"kpis\ttrain_cost\t%f\" % avg_cost)\n print(\"kpis\ttrain_duration\t%f\" % (pass_end - pass_start))\n\n\ndef train_net():\n word_dict, train_reader, test_reader = utils.prepare_data(\n \"imdb\", self_dict=False, batch_size=128, buf_size=50000)\n\n if sys.argv[1] == \"bow\":\n train(\n train_reader,\n word_dict,\n bow_net,\n use_cuda=False,\n parallel=False,\n save_dirname=\"bow_model\",\n lr=0.002,\n pass_num=30,\n batch_size=4)\n elif sys.argv[1] == \"cnn\":\n train(\n train_reader,\n word_dict,\n cnn_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"cnn_model\",\n lr=0.01,\n pass_num=30,\n batch_size=4)\n elif sys.argv[1] == \"lstm\":\n train(\n train_reader,\n word_dict,\n lstm_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"lstm_model\",\n lr=0.05,\n pass_num=30,\n batch_size=4)\n elif sys.argv[1] == \"gru\":\n train(\n train_reader,\n word_dict,\n lstm_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"gru_model\",\n lr=0.05,\n pass_num=30,\n batch_size=4)\n else:\n print(\"network name cannot be found!\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n train_net()\n", "path": "fluid/PaddleNLP/text_classification/train.py"}, {"content": "import sys\nimport time\nimport numpy as np\n\nimport paddle\nimport paddle.fluid as fluid\n\n\ndef bow_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2):\n \"\"\"\n bow net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')\n bow_tanh = fluid.layers.tanh(bow)\n fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act=\"tanh\")\n fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act=\"tanh\")\n prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef cnn_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n win_size=3):\n \"\"\"\n conv net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n\n conv_3 = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=hid_dim,\n filter_size=win_size,\n act=\"tanh\",\n pool_type=\"max\")\n\n fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)\n\n prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef lstm_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n lstm net\n \"\"\"\n emb = 
fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n lstm_h, c = fluid.layers.dynamic_lstm(\n input=fc0, size=hid_dim * 4, is_reverse=False)\n\n lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')\n lstm_max_tanh = fluid.layers.tanh(lstm_max)\n\n fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')\n\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef gru_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=400.0):\n \"\"\"\n gru net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)\n gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)\n gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')\n gru_max_tanh = fluid.layers.tanh(gru_max)\n fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n", "path": "fluid/PaddleNLP/text_classification/nets.py"}], "after_files": [{"content": "import os\nimport six\nimport sys\nimport time\nimport unittest\nimport contextlib\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport utils\nfrom nets import bow_net\nfrom nets import cnn_net\nfrom nets import lstm_net\nfrom nets import gru_net\n\n\ndef train(train_reader,\n word_dict,\n network,\n use_cuda,\n parallel,\n save_dirname,\n lr=0.2,\n pass_num=30):\n \"\"\"\n train network\n \"\"\"\n data = fluid.layers.data(\n name=\"words\", shape=[1], dtype=\"int64\", lod_level=1)\n\n label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\n if not parallel:\n cost, acc, prediction = network(data, label, len(word_dict))\n else:\n places = fluid.layers.device.get_places(device_count=2)\n pd = fluid.layers.ParallelDo(places)\n with pd.do():\n cost, acc, prediction = network(\n pd.read_input(data), pd.read_input(label), len(word_dict))\n\n pd.write_output(cost)\n pd.write_output(acc)\n\n cost, acc = pd()\n cost = fluid.layers.mean(cost)\n acc = fluid.layers.mean(acc)\n\n sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=lr)\n sgd_optimizer.minimize(cost)\n\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=[data, label], place=place)\n\n # For internal continuous evaluation\n if \"CE_MODE_X\" in os.environ:\n fluid.default_startup_program().random_seed = 110\n exe.run(fluid.default_startup_program())\n for pass_id in six.moves.xrange(pass_num):\n pass_start = time.time()\n data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0\n for data in train_reader():\n avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[cost, acc])\n data_size = len(data)\n total_acc += data_size * avg_acc_np\n total_cost += data_size * avg_cost_np\n data_count += data_size\n avg_cost = 
total_cost / data_count\n\n avg_acc = total_acc / data_count\n print(\"pass_id: %d, avg_acc: %f, avg_cost: %f, pass_time_cost: %f\" %\n (pass_id, avg_acc, avg_cost, time.time() - pass_start))\n\n epoch_model = save_dirname + \"/\" + \"epoch\" + str(pass_id)\n fluid.io.save_inference_model(epoch_model, [\"words\", \"label\"], acc, exe)\n\n pass_end = time.time()\n # For internal continuous evaluation\n if \"CE_MODE_X\" in os.environ:\n print(\"kpis\ttrain_acc\t%f\" % avg_acc)\n print(\"kpis\ttrain_cost\t%f\" % avg_cost)\n print(\"kpis\ttrain_duration\t%f\" % (pass_end - pass_start))\n\n\ndef train_net():\n word_dict, train_reader, test_reader = utils.prepare_data(\n \"imdb\", self_dict=False, batch_size=128, buf_size=50000)\n\n if sys.argv[1] == \"bow\":\n train(\n train_reader,\n word_dict,\n bow_net,\n use_cuda=False,\n parallel=False,\n save_dirname=\"bow_model\",\n lr=0.002,\n pass_num=30)\n elif sys.argv[1] == \"cnn\":\n train(\n train_reader,\n word_dict,\n cnn_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"cnn_model\",\n lr=0.01,\n pass_num=30)\n elif sys.argv[1] == \"lstm\":\n train(\n train_reader,\n word_dict,\n lstm_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"lstm_model\",\n lr=0.05,\n pass_num=30)\n elif sys.argv[1] == \"gru\":\n train(\n train_reader,\n word_dict,\n gru_net,\n use_cuda=True,\n parallel=False,\n save_dirname=\"gru_model\",\n lr=0.05,\n pass_num=30)\n else:\n print(\"network name cannot be found!\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n train_net()\n", "path": "fluid/PaddleNLP/text_classification/train.py"}, {"content": "import sys\nimport time\nimport numpy as np\n\nimport paddle\nimport paddle.fluid as fluid\n\n\ndef bow_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2):\n \"\"\"\n bow net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')\n bow_tanh = fluid.layers.tanh(bow)\n fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act=\"tanh\")\n fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act=\"tanh\")\n prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef cnn_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n win_size=3):\n \"\"\"\n conv net\n \"\"\"\n emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])\n\n conv_3 = fluid.nets.sequence_conv_pool(\n input=emb,\n num_filters=hid_dim,\n filter_size=win_size,\n act=\"tanh\",\n pool_type=\"max\")\n\n fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2)\n\n prediction = fluid.layers.fc(input=[fc_1], size=class_dim, act=\"softmax\")\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef lstm_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n lstm net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)\n\n lstm_h, c = fluid.layers.dynamic_lstm(\n input=fc0, size=hid_dim * 4, is_reverse=False)\n\n lstm_max 
= fluid.layers.sequence_pool(input=lstm_h, pool_type='max')\n lstm_max_tanh = fluid.layers.tanh(lstm_max)\n\n fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')\n\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n\n\ndef gru_net(data,\n label,\n dict_dim,\n emb_dim=128,\n hid_dim=128,\n hid_dim2=96,\n class_dim=2,\n emb_lr=30.0):\n \"\"\"\n gru net\n \"\"\"\n emb = fluid.layers.embedding(\n input=data,\n size=[dict_dim, emb_dim],\n param_attr=fluid.ParamAttr(learning_rate=emb_lr))\n\n fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)\n gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)\n gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')\n gru_max_tanh = fluid.layers.tanh(gru_max)\n fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')\n prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')\n\n cost = fluid.layers.cross_entropy(input=prediction, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n acc = fluid.layers.accuracy(input=prediction, label=label)\n\n return avg_cost, acc, prediction\n", "path": "fluid/PaddleNLP/text_classification/nets.py"}]}
2,968
537
gh_patches_debug_21209
rasdani/github-patches
git_diff
quantumlib__Cirq-2004
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ControlledOperation objects do not (un)pickle Python version 3.6.8 Cirq Version = 0.6.0.dev (also in 0.5.0) Attempting to unpickle any ops.ControlledOperation object, either by pickle or by dill, results in an error. Code to reproduce: ``` import pickle import cirq import sympy dump=pickle.dumps(cirq.Rx(sympy.Symbol('param')).on(cirq.LineQubit(0)).controlled_by(cirq.LineQubit(1))) load=pickle.loads(dump) ``` Error message: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py", line 1558, in _loads encoding=encoding, errors=errors).load() File "/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py", line 1050, in load dispatch[key[0]](self) File "/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py", line 1323, in load_newobj obj = cls.__new__(cls, *args) TypeError: __new__() missing 2 required positional arguments: 'controls' and 'sub_operation' ``` Extra info/commentary: Though this issue doesn't reside exclusively in cirq, it does restrict the (full) use of cirq with some standard python modules. In my particular use case, it prevents use of the multiprocessing module to parallelize the simulation of circuits. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/ops/controlled_operation.py` Content: ``` 1 # Copyright 2019 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 from typing import Union, Any, Optional, List, Sequence 15 16 import numpy as np 17 18 from cirq import protocols, linalg, value 19 from cirq.ops import raw_types, gate_operation 20 from cirq.type_workarounds import NotImplementedType 21 from cirq.protocols import trace_distance_from_angle_list 22 23 24 @value.value_equality 25 class ControlledOperation(raw_types.Operation): 26 def __new__(cls, 27 controls: Sequence[raw_types.Qid], 28 sub_operation: raw_types.Operation): 29 """Auto-flatten nested controlled operations.""" 30 if isinstance(sub_operation, ControlledOperation): 31 return ControlledOperation( 32 tuple(controls) + sub_operation.controls, 33 sub_operation.sub_operation) 34 return super().__new__(cls) 35 36 def __init__(self, 37 controls: Sequence[raw_types.Qid], 38 sub_operation: raw_types.Operation): 39 self.controls = tuple(controls) 40 self.sub_operation = sub_operation 41 42 @property 43 def qubits(self): 44 return self.controls + self.sub_operation.qubits 45 46 def with_qubits(self, *new_qubits): 47 n = len(self.controls) 48 return ControlledOperation( 49 new_qubits[:n], 50 self.sub_operation.with_qubits(*new_qubits[n:])) 51 52 def _decompose_(self): 53 result = protocols.decompose_once(self.sub_operation, NotImplemented) 54 if result is NotImplemented: 55 return NotImplemented 56 57 return [ControlledOperation(self.controls, op) for op in result] 58 59 def _value_equality_values_(self): 60 return frozenset(self.controls), self.sub_operation 61 62 def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs) -> np.ndarray: 63 n = len(self.controls) 64 control_axes = args.axes[:n] 65 sub_axes = args.axes[n:] 66 active = linalg.slice_for_qubits_equal_to(control_axes, -1) 67 view_axes = _positions_after_removals_at( 68 initial_positions=sub_axes, 69 removals=control_axes) 70 target_view = args.target_tensor[active] 71 buffer_view = args.available_buffer[active] 72 result = protocols.apply_unitary( 73 self.sub_operation, 74 protocols.ApplyUnitaryArgs( 75 target_view, 76 buffer_view, 77 view_axes), 78 default=NotImplemented) 79 80 if result is NotImplemented: 81 return NotImplemented 82 83 if result is target_view: 84 return args.target_tensor 85 86 # HACK: assume they didn't somehow escape the slice view and edit the 87 # rest of target_tensor. 
88 args.target_tensor[active] = result 89 return args.target_tensor 90 91 def _has_unitary_(self) -> bool: 92 return protocols.has_unitary(self.sub_operation) 93 94 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]: 95 sub_matrix = protocols.unitary(self.sub_operation, None) 96 if sub_matrix is None: 97 return NotImplemented 98 return linalg.block_diag( 99 np.eye(pow(2, len(self.qubits))-sub_matrix.shape[0]), 100 sub_matrix) 101 102 def __str__(self): 103 if isinstance(self.sub_operation, gate_operation.GateOperation): 104 return '{}{}({})'.format( 105 'C' * len(self.controls), 106 self.sub_operation.gate, 107 ', '.join(map(str, self.qubits))) 108 return 'C({}, {})'.format(', '.join(str(q) for q in self.controls), 109 str(self.sub_operation)) 110 111 def __repr__(self): 112 return ('cirq.ControlledOperation(controls={!r}, ' 113 'sub_operation={!r})'.format(self.controls, 114 self.sub_operation)) 115 116 def _is_parameterized_(self) -> bool: 117 return protocols.is_parameterized(self.sub_operation) 118 119 def _resolve_parameters_(self, resolver): 120 new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver) 121 return ControlledOperation(self.controls, new_sub_op) 122 123 def _trace_distance_bound_(self) -> Optional[float]: 124 if self._is_parameterized_(): 125 return None 126 u = protocols.unitary(self.sub_operation, default=None) 127 if u is None: 128 return NotImplemented 129 angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0) 130 return trace_distance_from_angle_list(angle_list) 131 132 def __pow__(self, exponent: Any) -> 'ControlledOperation': 133 new_sub_op = protocols.pow(self.sub_operation, 134 exponent, 135 NotImplemented) 136 if new_sub_op is NotImplemented: 137 return NotImplemented 138 return ControlledOperation(self.controls, new_sub_op) 139 140 def _circuit_diagram_info_(self, 141 args: protocols.CircuitDiagramInfoArgs 142 ) -> Optional[protocols.CircuitDiagramInfo]: 143 n = len(self.controls) 144 145 sub_args = protocols.CircuitDiagramInfoArgs( 146 known_qubit_count=(args.known_qubit_count - n 147 if args.known_qubit_count is not None else None), 148 known_qubits=(args.known_qubits[n:] 149 if args.known_qubits is not None else None), 150 use_unicode_characters=args.use_unicode_characters, 151 precision=args.precision, 152 qubit_map=args.qubit_map) 153 sub_info = protocols.circuit_diagram_info(self.sub_operation, 154 sub_args, 155 None) 156 if sub_info is None: 157 return NotImplemented 158 159 return protocols.CircuitDiagramInfo(wire_symbols=('@',) * n + 160 sub_info.wire_symbols, 161 exponent=sub_info.exponent) 162 163 164 def _positions_after_removals_at(initial_positions: Sequence[int], 165 removals: Sequence[int]) -> List[int]: 166 # TODO: O(n lg n) instead of O(n**2) 167 result = [] 168 for p in initial_positions: 169 change = len([1 for r in removals if r < p]) 170 result.append(p - change) 171 return result 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/ops/controlled_operation.py b/cirq/ops/controlled_operation.py --- a/cirq/ops/controlled_operation.py +++ b/cirq/ops/controlled_operation.py @@ -23,21 +23,17 @@ @value.value_equality class ControlledOperation(raw_types.Operation): - def __new__(cls, - controls: Sequence[raw_types.Qid], - sub_operation: raw_types.Operation): - """Auto-flatten nested controlled operations.""" - if isinstance(sub_operation, ControlledOperation): - return ControlledOperation( - tuple(controls) + sub_operation.controls, - sub_operation.sub_operation) - return super().__new__(cls) def __init__(self, controls: Sequence[raw_types.Qid], sub_operation: raw_types.Operation): - self.controls = tuple(controls) - self.sub_operation = sub_operation + if not isinstance(sub_operation, ControlledOperation): + self.controls = tuple(controls) + self.sub_operation = sub_operation + else: + # Auto-flatten nested controlled operations. + self.controls = tuple(controls) + sub_operation.controls + self.sub_operation = sub_operation.sub_operation @property def qubits(self):
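To see why the patch above restores picklability, here is a minimal, self-contained sketch. It is not Cirq code: the class names, fields, and flattening rule are made up to mirror the pattern only. Python's default unpickling for protocol 2+ recreates an instance via `cls.__new__(cls)` with no extra arguments and only then restores `__dict__`, so a `__new__` that demands positional arguments fails exactly as in the traceback, while the same logic placed in `__init__` is harmless.

```python
import pickle


class BadNode:
    """Old pattern: auto-flattening lives in a __new__ with required args."""

    def __new__(cls, children, payload):
        if isinstance(payload, BadNode):
            # Flatten nested nodes by building a new instance.
            return BadNode(tuple(children) + payload.children, payload.payload)
        return super().__new__(cls)

    def __init__(self, children, payload):
        self.children = tuple(children)
        self.payload = payload


class GoodNode:
    """Patched pattern: __new__ is untouched, flattening happens in __init__."""

    def __init__(self, children, payload):
        if isinstance(payload, GoodNode):
            self.children = tuple(children) + payload.children
            self.payload = payload.payload
        else:
            self.children = tuple(children)
            self.payload = payload


good = GoodNode((1, 2), "x")
assert pickle.loads(pickle.dumps(good)).children == (1, 2)  # round-trips fine

bad = BadNode((1, 2), "x")
try:
    pickle.loads(pickle.dumps(bad))  # dumps succeeds; loads calls __new__(cls) bare
except TypeError as exc:
    print("unpickling failed:", exc)
```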
{"golden_diff": "diff --git a/cirq/ops/controlled_operation.py b/cirq/ops/controlled_operation.py\n--- a/cirq/ops/controlled_operation.py\n+++ b/cirq/ops/controlled_operation.py\n@@ -23,21 +23,17 @@\n \n @value.value_equality\n class ControlledOperation(raw_types.Operation):\n- def __new__(cls,\n- controls: Sequence[raw_types.Qid],\n- sub_operation: raw_types.Operation):\n- \"\"\"Auto-flatten nested controlled operations.\"\"\"\n- if isinstance(sub_operation, ControlledOperation):\n- return ControlledOperation(\n- tuple(controls) + sub_operation.controls,\n- sub_operation.sub_operation)\n- return super().__new__(cls)\n \n def __init__(self,\n controls: Sequence[raw_types.Qid],\n sub_operation: raw_types.Operation):\n- self.controls = tuple(controls)\n- self.sub_operation = sub_operation\n+ if not isinstance(sub_operation, ControlledOperation):\n+ self.controls = tuple(controls)\n+ self.sub_operation = sub_operation\n+ else:\n+ # Auto-flatten nested controlled operations.\n+ self.controls = tuple(controls) + sub_operation.controls\n+ self.sub_operation = sub_operation.sub_operation\n \n @property\n def qubits(self):\n", "issue": "ControlledOperation objects do not (un)pickle\nPython version 3.6.8\r\nCirq Version = 0.6.0.dev (also in 0.5.0)\r\n\r\nAttempting to unpickle any ops.ControlledOperation object, either by pickle or by dill, results in an error.\r\n\r\nCode to reproduce:\r\n\r\n```\r\nimport pickle\r\nimport cirq\r\nimport sympy\r\n\r\ndump=pickle.dumps(cirq.Rx(sympy.Symbol('param')).on(cirq.LineQubit(0)).controlled_by(cirq.LineQubit(1)))\r\nload=pickle.loads(dump)\r\n```\r\n\r\nError message:\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py\", line 1558, in _loads\r\n encoding=encoding, errors=errors).load()\r\n File \"/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py\", line 1050, in load\r\n dispatch[key[0]](self)\r\n File \"/Users/sumneralperin-lea/anaconda3/envs/quantum/lib/python3.6/pickle.py\", line 1323, in load_newobj\r\n obj = cls.__new__(cls, *args)\r\nTypeError: __new__() missing 2 required positional arguments: 'controls' and 'sub_operation'\r\n```\r\nExtra info/commentary:\r\n\r\nThough this issue doesn't reside exclusively in cirq, it does restrict the (full) use of cirq with some standard python modules. 
In my particular use case, it prevents use of the multiprocessing module to parallelize the simulation of circuits.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Union, Any, Optional, List, Sequence\n\nimport numpy as np\n\nfrom cirq import protocols, linalg, value\nfrom cirq.ops import raw_types, gate_operation\nfrom cirq.type_workarounds import NotImplementedType\nfrom cirq.protocols import trace_distance_from_angle_list\n\n\[email protected]_equality\nclass ControlledOperation(raw_types.Operation):\n def __new__(cls,\n controls: Sequence[raw_types.Qid],\n sub_operation: raw_types.Operation):\n \"\"\"Auto-flatten nested controlled operations.\"\"\"\n if isinstance(sub_operation, ControlledOperation):\n return ControlledOperation(\n tuple(controls) + sub_operation.controls,\n sub_operation.sub_operation)\n return super().__new__(cls)\n\n def __init__(self,\n controls: Sequence[raw_types.Qid],\n sub_operation: raw_types.Operation):\n self.controls = tuple(controls)\n self.sub_operation = sub_operation\n\n @property\n def qubits(self):\n return self.controls + self.sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n n = len(self.controls)\n return ControlledOperation(\n new_qubits[:n],\n self.sub_operation.with_qubits(*new_qubits[n:]))\n\n def _decompose_(self):\n result = protocols.decompose_once(self.sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ControlledOperation(self.controls, op) for op in result]\n\n def _value_equality_values_(self):\n return frozenset(self.controls), self.sub_operation\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs) -> np.ndarray:\n n = len(self.controls)\n control_axes = args.axes[:n]\n sub_axes = args.axes[n:]\n active = linalg.slice_for_qubits_equal_to(control_axes, -1)\n view_axes = _positions_after_removals_at(\n initial_positions=sub_axes,\n removals=control_axes)\n target_view = args.target_tensor[active]\n buffer_view = args.available_buffer[active]\n result = protocols.apply_unitary(\n self.sub_operation,\n protocols.ApplyUnitaryArgs(\n target_view,\n buffer_view,\n view_axes),\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is target_view:\n return args.target_tensor\n\n # HACK: assume they didn't somehow escape the slice view and edit the\n # rest of target_tensor.\n args.target_tensor[active] = result\n return args.target_tensor\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_operation)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_operation, None)\n if sub_matrix is None:\n return NotImplemented\n return linalg.block_diag(\n np.eye(pow(2, len(self.qubits))-sub_matrix.shape[0]),\n sub_matrix)\n\n def __str__(self):\n if isinstance(self.sub_operation, gate_operation.GateOperation):\n return '{}{}({})'.format(\n 'C' * len(self.controls),\n 
self.sub_operation.gate,\n ', '.join(map(str, self.qubits)))\n return 'C({}, {})'.format(', '.join(str(q) for q in self.controls),\n str(self.sub_operation))\n\n def __repr__(self):\n return ('cirq.ControlledOperation(controls={!r}, '\n 'sub_operation={!r})'.format(self.controls,\n self.sub_operation))\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.sub_operation)\n\n def _resolve_parameters_(self, resolver):\n new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)\n return ControlledOperation(self.controls, new_sub_op)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n u = protocols.unitary(self.sub_operation, default=None)\n if u is None:\n return NotImplemented\n angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)\n return trace_distance_from_angle_list(angle_list)\n\n def __pow__(self, exponent: Any) -> 'ControlledOperation':\n new_sub_op = protocols.pow(self.sub_operation,\n exponent,\n NotImplemented)\n if new_sub_op is NotImplemented:\n return NotImplemented\n return ControlledOperation(self.controls, new_sub_op)\n\n def _circuit_diagram_info_(self,\n args: protocols.CircuitDiagramInfoArgs\n ) -> Optional[protocols.CircuitDiagramInfo]:\n n = len(self.controls)\n\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=(args.known_qubit_count - n\n if args.known_qubit_count is not None else None),\n known_qubits=(args.known_qubits[n:]\n if args.known_qubits is not None else None),\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n qubit_map=args.qubit_map)\n sub_info = protocols.circuit_diagram_info(self.sub_operation,\n sub_args,\n None)\n if sub_info is None:\n return NotImplemented\n\n return protocols.CircuitDiagramInfo(wire_symbols=('@',) * n +\n sub_info.wire_symbols,\n exponent=sub_info.exponent)\n\n\ndef _positions_after_removals_at(initial_positions: Sequence[int],\n removals: Sequence[int]) -> List[int]:\n # TODO: O(n lg n) instead of O(n**2)\n result = []\n for p in initial_positions:\n change = len([1 for r in removals if r < p])\n result.append(p - change)\n return result\n", "path": "cirq/ops/controlled_operation.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Union, Any, Optional, List, Sequence\n\nimport numpy as np\n\nfrom cirq import protocols, linalg, value\nfrom cirq.ops import raw_types, gate_operation\nfrom cirq.type_workarounds import NotImplementedType\nfrom cirq.protocols import trace_distance_from_angle_list\n\n\[email protected]_equality\nclass ControlledOperation(raw_types.Operation):\n\n def __init__(self,\n controls: Sequence[raw_types.Qid],\n sub_operation: raw_types.Operation):\n if not isinstance(sub_operation, ControlledOperation):\n self.controls = tuple(controls)\n self.sub_operation = sub_operation\n else:\n # Auto-flatten nested controlled operations.\n self.controls = tuple(controls) + 
sub_operation.controls\n self.sub_operation = sub_operation.sub_operation\n\n @property\n def qubits(self):\n return self.controls + self.sub_operation.qubits\n\n def with_qubits(self, *new_qubits):\n n = len(self.controls)\n return ControlledOperation(\n new_qubits[:n],\n self.sub_operation.with_qubits(*new_qubits[n:]))\n\n def _decompose_(self):\n result = protocols.decompose_once(self.sub_operation, NotImplemented)\n if result is NotImplemented:\n return NotImplemented\n\n return [ControlledOperation(self.controls, op) for op in result]\n\n def _value_equality_values_(self):\n return frozenset(self.controls), self.sub_operation\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs) -> np.ndarray:\n n = len(self.controls)\n control_axes = args.axes[:n]\n sub_axes = args.axes[n:]\n active = linalg.slice_for_qubits_equal_to(control_axes, -1)\n view_axes = _positions_after_removals_at(\n initial_positions=sub_axes,\n removals=control_axes)\n target_view = args.target_tensor[active]\n buffer_view = args.available_buffer[active]\n result = protocols.apply_unitary(\n self.sub_operation,\n protocols.ApplyUnitaryArgs(\n target_view,\n buffer_view,\n view_axes),\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is target_view:\n return args.target_tensor\n\n # HACK: assume they didn't somehow escape the slice view and edit the\n # rest of target_tensor.\n args.target_tensor[active] = result\n return args.target_tensor\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_operation)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_operation, None)\n if sub_matrix is None:\n return NotImplemented\n return linalg.block_diag(\n np.eye(pow(2, len(self.qubits))-sub_matrix.shape[0]),\n sub_matrix)\n\n def __str__(self):\n if isinstance(self.sub_operation, gate_operation.GateOperation):\n return '{}{}({})'.format(\n 'C' * len(self.controls),\n self.sub_operation.gate,\n ', '.join(map(str, self.qubits)))\n return 'C({}, {})'.format(', '.join(str(q) for q in self.controls),\n str(self.sub_operation))\n\n def __repr__(self):\n return ('cirq.ControlledOperation(controls={!r}, '\n 'sub_operation={!r})'.format(self.controls,\n self.sub_operation))\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.sub_operation)\n\n def _resolve_parameters_(self, resolver):\n new_sub_op = protocols.resolve_parameters(self.sub_operation, resolver)\n return ControlledOperation(self.controls, new_sub_op)\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n u = protocols.unitary(self.sub_operation, default=None)\n if u is None:\n return NotImplemented\n angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)\n return trace_distance_from_angle_list(angle_list)\n\n def __pow__(self, exponent: Any) -> 'ControlledOperation':\n new_sub_op = protocols.pow(self.sub_operation,\n exponent,\n NotImplemented)\n if new_sub_op is NotImplemented:\n return NotImplemented\n return ControlledOperation(self.controls, new_sub_op)\n\n def _circuit_diagram_info_(self,\n args: protocols.CircuitDiagramInfoArgs\n ) -> Optional[protocols.CircuitDiagramInfo]:\n n = len(self.controls)\n\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=(args.known_qubit_count - n\n if args.known_qubit_count is not None else None),\n known_qubits=(args.known_qubits[n:]\n if args.known_qubits is not None else None),\n 
use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n qubit_map=args.qubit_map)\n sub_info = protocols.circuit_diagram_info(self.sub_operation,\n sub_args,\n None)\n if sub_info is None:\n return NotImplemented\n\n return protocols.CircuitDiagramInfo(wire_symbols=('@',) * n +\n sub_info.wire_symbols,\n exponent=sub_info.exponent)\n\n\ndef _positions_after_removals_at(initial_positions: Sequence[int],\n removals: Sequence[int]) -> List[int]:\n # TODO: O(n lg n) instead of O(n**2)\n result = []\n for p in initial_positions:\n change = len([1 for r in removals if r < p])\n result.append(p - change)\n return result\n", "path": "cirq/ops/controlled_operation.py"}]}
2,417
275
gh_patches_debug_25242
rasdani/github-patches
git_diff
fedora-infra__bodhi-5479
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Updates sometimes get stuck in pending state From quick look, it seems that the composer does: 1. move from `f*-updates-candidate` to `f*-updates-testing` 2. do stuff 3. untag from `f*-updates-testing-pending` 4. mark update state as testing If the composer hang on 2 the update remains stuck in pending as the builds are not tagged anymore in `f*-updates-candidate`. We should find a solution. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi-server/bodhi/server/tasks/check_signed_builds.py` Content: ``` 1 # Copyright © 2017 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 19 """ 20 Avoid Updates being stuck in pending. 21 22 It may happen that Bodhi misses fedora-messaging messages announcing builds 23 have been signed. 24 In these cases, the Update remain stuck in pending until a manual intervention. 25 26 This script will cycle through builds of Updates in pending status and update 27 the signed status in the db to match the tags found in Koji. 
28 """ 29 30 import logging 31 from datetime import datetime, timedelta 32 33 from bodhi.server import buildsys, models 34 from bodhi.server.config import config 35 from bodhi.server.util import transactional_session_maker 36 37 38 log = logging.getLogger(__name__) 39 40 41 def main(): 42 """Check build tags and sign those we missed.""" 43 db_factory = transactional_session_maker() 44 older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay')) 45 with db_factory() as session: 46 updates = models.Update.query.filter( 47 models.Update.status == models.UpdateStatus.pending 48 ).filter( 49 models.Update.release_id == models.Release.id 50 ).filter( 51 models.Release.state.in_([ 52 models.ReleaseState.current, 53 models.ReleaseState.pending, 54 models.ReleaseState.frozen, 55 ]) 56 ).all() 57 58 if len(updates) == 0: 59 log.debug('No stuck Updates found') 60 return 61 62 kc = buildsys.get_session() 63 stuck_builds = [] 64 overlooked_builds = [] 65 66 for update in updates: 67 # Let Bodhi have its times 68 if update.date_submitted >= older_than: 69 continue 70 builds = update.builds 71 # Clean Updates with no builds 72 if len(builds) == 0: 73 log.debug(f'Obsoleting empty update {update.alias}') 74 update.obsolete(session) 75 session.flush() 76 continue 77 pending_signing_tag = update.release.pending_signing_tag 78 pending_testing_tag = update.release.pending_testing_tag 79 for build in builds: 80 if build.signed: 81 log.debug(f'{build.nvr} already marked as signed') 82 continue 83 build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] 84 if pending_signing_tag not in build_tags and pending_testing_tag in build_tags: 85 # Our composer missed the message that the build got signed 86 log.debug(f'Changing signed status of {build.nvr}') 87 build.signed = True 88 elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags: 89 # autosign missed the message that the build is waiting to be signed 90 log.debug(f'{build.nvr} is stuck waiting to be signed, let\'s try again') 91 stuck_builds.append((build.nvr, pending_signing_tag)) 92 elif (pending_signing_tag not in build_tags 93 and pending_testing_tag not in build_tags): 94 # this means that an update has been created but we never tagged the build 95 # as pending-signing 96 log.debug(f'Oh, no! We\'ve never sent {build.nvr} for signing, let\'s fix it') 97 overlooked_builds.append((build.nvr, pending_signing_tag)) 98 session.flush() 99 100 if stuck_builds: 101 kc.multicall = True 102 for b, t in stuck_builds: 103 kc.untagBuild(t, b, force=True) 104 kc.multiCall() 105 for b, t in stuck_builds: 106 kc.tagBuild(t, b, force=True) 107 kc.multiCall() 108 109 if overlooked_builds: 110 kc.multicall = True 111 for b, t in overlooked_builds: 112 kc.tagBuild(t, b, force=True) 113 kc.multiCall() 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi-server/bodhi/server/tasks/check_signed_builds.py b/bodhi-server/bodhi/server/tasks/check_signed_builds.py --- a/bodhi-server/bodhi/server/tasks/check_signed_builds.py +++ b/bodhi-server/bodhi/server/tasks/check_signed_builds.py @@ -77,10 +77,16 @@ pending_signing_tag = update.release.pending_signing_tag pending_testing_tag = update.release.pending_testing_tag for build in builds: + build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] if build.signed: log.debug(f'{build.nvr} already marked as signed') + if (update.release.testing_tag in build_tags + and update.release.candidate_tag not in build_tags): + # The update was probably ejected from a compose and is stuck + log.debug(f'Resubmitting {update.alias} to testing') + update.set_request(session, models.UpdateRequest.testing, 'bodhi') + break continue - build_tags = [t['name'] for t in kc.listTags(build=build.nvr)] if pending_signing_tag not in build_tags and pending_testing_tag in build_tags: # Our composer missed the message that the build got signed log.debug(f'Changing signed status of {build.nvr}')
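The heart of the patch above is a decision made purely from the Koji tags a build carries. The sketch below pulls that decision out into a standalone function so it can be read and unit-tested in isolation; it is a simplified paraphrase, not Bodhi code, and the release tag names are invented placeholders.

```python
def classify_build(build_tags, signed, release_tags):
    """Return the action check_signed_builds should take for one build.

    ``release_tags`` maps the roles 'candidate', 'pending_signing',
    'pending_testing' and 'testing' to hypothetical Koji tag names.
    """
    if signed:
        if (release_tags['testing'] in build_tags
                and release_tags['candidate'] not in build_tags):
            # New case added by the patch: the composer already moved the
            # build to testing but hung before marking the update testing.
            return 'resubmit_update_to_testing'
        return 'nothing_to_do'
    if (release_tags['pending_signing'] not in build_tags
            and release_tags['pending_testing'] in build_tags):
        return 'mark_signed'          # we missed the "signed" message
    if (release_tags['pending_signing'] in build_tags
            and release_tags['pending_testing'] not in build_tags):
        return 'retag_for_signing'    # autosign missed the message
    if (release_tags['pending_signing'] not in build_tags
            and release_tags['pending_testing'] not in build_tags):
        return 'send_for_signing'     # never tagged pending-signing at all
    return 'nothing_to_do'


tags = {
    'candidate': 'f39-updates-candidate',
    'pending_signing': 'f39-signing-pending',
    'pending_testing': 'f39-updates-testing-pending',
    'testing': 'f39-updates-testing',
}
# The situation from the issue: the composer hung after moving the build.
assert classify_build(['f39-updates-testing'], True, tags) == 'resubmit_update_to_testing'
assert classify_build(['f39-updates-candidate', 'f39-signing-pending'], False, tags) == 'retag_for_signing'
```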
{"golden_diff": "diff --git a/bodhi-server/bodhi/server/tasks/check_signed_builds.py b/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n--- a/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n+++ b/bodhi-server/bodhi/server/tasks/check_signed_builds.py\n@@ -77,10 +77,16 @@\n pending_signing_tag = update.release.pending_signing_tag\n pending_testing_tag = update.release.pending_testing_tag\n for build in builds:\n+ build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if build.signed:\n log.debug(f'{build.nvr} already marked as signed')\n+ if (update.release.testing_tag in build_tags\n+ and update.release.candidate_tag not in build_tags):\n+ # The update was probably ejected from a compose and is stuck\n+ log.debug(f'Resubmitting {update.alias} to testing')\n+ update.set_request(session, models.UpdateRequest.testing, 'bodhi')\n+ break\n continue\n- build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if pending_signing_tag not in build_tags and pending_testing_tag in build_tags:\n # Our composer missed the message that the build got signed\n log.debug(f'Changing signed status of {build.nvr}')\n", "issue": "Updates sometimes get stuck in pending state\nFrom quick look, it seems that the composer does:\r\n1. move from `f*-updates-candidate` to `f*-updates-testing`\r\n2. do stuff\r\n3. untag from `f*-updates-testing-pending`\r\n4. mark update state as testing\r\n\r\nIf the composer hang on 2 the update remains stuck in pending as the builds are not tagged anymore in `f*-updates-candidate`. We should find a solution.\n", "before_files": [{"content": "# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nAvoid Updates being stuck in pending.\n\nIt may happen that Bodhi misses fedora-messaging messages announcing builds\nhave been signed.\nIn these cases, the Update remain stuck in pending until a manual intervention.\n\nThis script will cycle through builds of Updates in pending status and update\nthe signed status in the db to match the tags found in Koji.\n\"\"\"\n\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom bodhi.server import buildsys, models\nfrom bodhi.server.config import config\nfrom bodhi.server.util import transactional_session_maker\n\n\nlog = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"Check build tags and sign those we missed.\"\"\"\n db_factory = transactional_session_maker()\n older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay'))\n with db_factory() as session:\n updates = models.Update.query.filter(\n models.Update.status == models.UpdateStatus.pending\n ).filter(\n models.Update.release_id == models.Release.id\n ).filter(\n models.Release.state.in_([\n models.ReleaseState.current,\n models.ReleaseState.pending,\n models.ReleaseState.frozen,\n ])\n ).all()\n\n if len(updates) == 0:\n log.debug('No stuck Updates found')\n return\n\n kc = buildsys.get_session()\n stuck_builds = []\n overlooked_builds = []\n\n for update in updates:\n # Let Bodhi have its times\n if update.date_submitted >= older_than:\n continue\n builds = update.builds\n # Clean Updates with no builds\n if len(builds) == 0:\n log.debug(f'Obsoleting empty update {update.alias}')\n update.obsolete(session)\n session.flush()\n continue\n pending_signing_tag = update.release.pending_signing_tag\n pending_testing_tag = update.release.pending_testing_tag\n for build in builds:\n if build.signed:\n log.debug(f'{build.nvr} already marked as signed')\n continue\n build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if pending_signing_tag not in build_tags and pending_testing_tag in build_tags:\n # Our composer missed the message that the build got signed\n log.debug(f'Changing signed status of {build.nvr}')\n build.signed = True\n elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags:\n # autosign missed the message that the build is waiting to be signed\n log.debug(f'{build.nvr} is stuck waiting to be signed, let\\'s try again')\n stuck_builds.append((build.nvr, pending_signing_tag))\n elif (pending_signing_tag not in build_tags\n and pending_testing_tag not in build_tags):\n # this means that an update has been created but we never tagged the build\n # as pending-signing\n log.debug(f'Oh, no! 
We\\'ve never sent {build.nvr} for signing, let\\'s fix it')\n overlooked_builds.append((build.nvr, pending_signing_tag))\n session.flush()\n\n if stuck_builds:\n kc.multicall = True\n for b, t in stuck_builds:\n kc.untagBuild(t, b, force=True)\n kc.multiCall()\n for b, t in stuck_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n\n if overlooked_builds:\n kc.multicall = True\n for b, t in overlooked_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n", "path": "bodhi-server/bodhi/server/tasks/check_signed_builds.py"}], "after_files": [{"content": "# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nAvoid Updates being stuck in pending.\n\nIt may happen that Bodhi misses fedora-messaging messages announcing builds\nhave been signed.\nIn these cases, the Update remain stuck in pending until a manual intervention.\n\nThis script will cycle through builds of Updates in pending status and update\nthe signed status in the db to match the tags found in Koji.\n\"\"\"\n\nimport logging\nfrom datetime import datetime, timedelta\n\nfrom bodhi.server import buildsys, models\nfrom bodhi.server.config import config\nfrom bodhi.server.util import transactional_session_maker\n\n\nlog = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"Check build tags and sign those we missed.\"\"\"\n db_factory = transactional_session_maker()\n older_than = datetime.utcnow() - timedelta(days=config.get('check_signed_builds_delay'))\n with db_factory() as session:\n updates = models.Update.query.filter(\n models.Update.status == models.UpdateStatus.pending\n ).filter(\n models.Update.release_id == models.Release.id\n ).filter(\n models.Release.state.in_([\n models.ReleaseState.current,\n models.ReleaseState.pending,\n models.ReleaseState.frozen,\n ])\n ).all()\n\n if len(updates) == 0:\n log.debug('No stuck Updates found')\n return\n\n kc = buildsys.get_session()\n stuck_builds = []\n overlooked_builds = []\n\n for update in updates:\n # Let Bodhi have its times\n if update.date_submitted >= older_than:\n continue\n builds = update.builds\n # Clean Updates with no builds\n if len(builds) == 0:\n log.debug(f'Obsoleting empty update {update.alias}')\n update.obsolete(session)\n session.flush()\n continue\n pending_signing_tag = update.release.pending_signing_tag\n pending_testing_tag = update.release.pending_testing_tag\n for build in builds:\n build_tags = [t['name'] for t in kc.listTags(build=build.nvr)]\n if build.signed:\n log.debug(f'{build.nvr} already marked as signed')\n if (update.release.testing_tag in build_tags\n and update.release.candidate_tag not in build_tags):\n # The update was probably ejected from a compose and is stuck\n log.debug(f'Resubmitting {update.alias} to testing')\n update.set_request(session, models.UpdateRequest.testing, 'bodhi')\n 
break\n continue\n if pending_signing_tag not in build_tags and pending_testing_tag in build_tags:\n # Our composer missed the message that the build got signed\n log.debug(f'Changing signed status of {build.nvr}')\n build.signed = True\n elif pending_signing_tag in build_tags and pending_testing_tag not in build_tags:\n # autosign missed the message that the build is waiting to be signed\n log.debug(f'{build.nvr} is stuck waiting to be signed, let\\'s try again')\n stuck_builds.append((build.nvr, pending_signing_tag))\n elif (pending_signing_tag not in build_tags\n and pending_testing_tag not in build_tags):\n # this means that an update has been created but we never tagged the build\n # as pending-signing\n log.debug(f'Oh, no! We\\'ve never sent {build.nvr} for signing, let\\'s fix it')\n overlooked_builds.append((build.nvr, pending_signing_tag))\n session.flush()\n\n if stuck_builds:\n kc.multicall = True\n for b, t in stuck_builds:\n kc.untagBuild(t, b, force=True)\n kc.multiCall()\n for b, t in stuck_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n\n if overlooked_builds:\n kc.multicall = True\n for b, t in overlooked_builds:\n kc.tagBuild(t, b, force=True)\n kc.multiCall()\n", "path": "bodhi-server/bodhi/server/tasks/check_signed_builds.py"}]}
1,561
300
gh_patches_debug_25859
rasdani/github-patches
git_diff
ansible__ansible-35363
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ios_command module : can not use "configure revert" commands ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME - ios_command ##### ANSIBLE VERSION - 2.x ##### SUMMARY: ios_command module should not filter out all configure commands ios_command is a wonderful module, however i found a minor issue : On line 165 there is a sanity check that verify that the user is not sending a configuration command ` elif item['command'].startswith('conf'):` With this test, it is not possible to send the following commands : - configure revert timer 5 - configure confirm - configure replace - configure network which ARE valid exec commands and do NOT enter in configuration mode. Hopefully (for me), a basic workaround is to insert a space (may be it is intented) : - [space] configure revert timer 5 I propose to change the test and use something like (sorry i am not familiar with python) : ` elif re.match("^\s*conf\S*\s+t", item['command']) ` just to ensure that the user has NOT entered one of various configure terminal flavors. ##### STEPS TO REPRODUCE Use the following playbook : <pre> --- - name: try hosts: all connection: local gather_facts: no vars: cli: host: "{{ inventory_hostname }}" username: cisco password: cisco transport: cli - name: This work (note the space at the beginning of the command line) ios_command: commands: - clear configuration lock - " configure revert timer 8" - name: This will not work (but should) ios_command: commands: - clear configuration lock - "configure revert timer 8" </pre> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/modules/network/ios/ios_command.py` Content: ``` 1 #!/usr/bin/python 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 # 18 19 ANSIBLE_METADATA = {'metadata_version': '1.1', 20 'status': ['preview'], 21 'supported_by': 'network'} 22 23 24 DOCUMENTATION = """ 25 --- 26 module: ios_command 27 version_added: "2.1" 28 author: "Peter Sprygada (@privateip)" 29 short_description: Run commands on remote devices running Cisco IOS 30 description: 31 - Sends arbitrary commands to an ios node and returns the results 32 read from the device. This module includes an 33 argument that will cause the module to wait for a specific condition 34 before returning or timing out if the condition is not met. 35 - This module does not support running commands in configuration mode. 36 Please use M(ios_config) to configure IOS devices. 37 extends_documentation_fragment: ios 38 notes: 39 - Tested against IOS 15.6 40 options: 41 commands: 42 description: 43 - List of commands to send to the remote ios device over the 44 configured provider. The resulting output from the command 45 is returned. 
If the I(wait_for) argument is provided, the 46 module is not returned until the condition is satisfied or 47 the number of retries has expired. 48 required: true 49 wait_for: 50 description: 51 - List of conditions to evaluate against the output of the 52 command. The task will wait for each condition to be true 53 before moving forward. If the conditional is not true 54 within the configured number of retries, the task fails. 55 See examples. 56 required: false 57 default: null 58 aliases: ['waitfor'] 59 version_added: "2.2" 60 match: 61 description: 62 - The I(match) argument is used in conjunction with the 63 I(wait_for) argument to specify the match policy. Valid 64 values are C(all) or C(any). If the value is set to C(all) 65 then all conditionals in the wait_for must be satisfied. If 66 the value is set to C(any) then only one of the values must be 67 satisfied. 68 required: false 69 default: all 70 choices: ['any', 'all'] 71 version_added: "2.2" 72 retries: 73 description: 74 - Specifies the number of retries a command should by tried 75 before it is considered failed. The command is run on the 76 target device every retry and evaluated against the 77 I(wait_for) conditions. 78 required: false 79 default: 10 80 interval: 81 description: 82 - Configures the interval in seconds to wait between retries 83 of the command. If the command does not pass the specified 84 conditions, the interval indicates how long to wait before 85 trying the command again. 86 required: false 87 default: 1 88 """ 89 90 EXAMPLES = """ 91 tasks: 92 - name: run show version on remote devices 93 ios_command: 94 commands: show version 95 96 - name: run show version and check to see if output contains IOS 97 ios_command: 98 commands: show version 99 wait_for: result[0] contains IOS 100 101 - name: run multiple commands on remote nodes 102 ios_command: 103 commands: 104 - show version 105 - show interfaces 106 107 - name: run multiple commands and evaluate the output 108 ios_command: 109 commands: 110 - show version 111 - show interfaces 112 wait_for: 113 - result[0] contains IOS 114 - result[1] contains Loopback0 115 """ 116 117 RETURN = """ 118 stdout: 119 description: The set of responses from the commands 120 returned: always apart from low level errors (such as action plugin) 121 type: list 122 sample: ['...', '...'] 123 stdout_lines: 124 description: The value of stdout split into a list 125 returned: always apart from low level errors (such as action plugin) 126 type: list 127 sample: [['...', '...'], ['...'], ['...']] 128 failed_conditions: 129 description: The list of conditionals that have failed 130 returned: failed 131 type: list 132 sample: ['...', '...'] 133 """ 134 import time 135 136 from ansible.module_utils.network.ios.ios import run_commands 137 from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args 138 from ansible.module_utils.basic import AnsibleModule 139 from ansible.module_utils.network.common.utils import ComplexList 140 from ansible.module_utils.network.common.parsing import Conditional 141 from ansible.module_utils.six import string_types 142 143 144 def to_lines(stdout): 145 for item in stdout: 146 if isinstance(item, string_types): 147 item = str(item).split('\n') 148 yield item 149 150 151 def parse_commands(module, warnings): 152 command = ComplexList(dict( 153 command=dict(key=True), 154 prompt=dict(), 155 answer=dict() 156 ), module) 157 commands = command(module.params['commands']) 158 for item in list(commands): 159 if module.check_mode and not 
item['command'].startswith('show'): 160 warnings.append( 161 'only show commands are supported when using check mode, not ' 162 'executing `%s`' % item['command'] 163 ) 164 commands.remove(item) 165 elif item['command'].startswith('conf'): 166 module.fail_json( 167 msg='ios_command does not support running config mode ' 168 'commands. Please use ios_config instead' 169 ) 170 return commands 171 172 173 def main(): 174 """main entry point for module execution 175 """ 176 argument_spec = dict( 177 commands=dict(type='list', required=True), 178 179 wait_for=dict(type='list', aliases=['waitfor']), 180 match=dict(default='all', choices=['all', 'any']), 181 182 retries=dict(default=10, type='int'), 183 interval=dict(default=1, type='int') 184 ) 185 186 argument_spec.update(ios_argument_spec) 187 188 module = AnsibleModule(argument_spec=argument_spec, 189 supports_check_mode=True) 190 191 result = {'changed': False} 192 193 warnings = list() 194 check_args(module, warnings) 195 commands = parse_commands(module, warnings) 196 result['warnings'] = warnings 197 198 wait_for = module.params['wait_for'] or list() 199 conditionals = [Conditional(c) for c in wait_for] 200 201 retries = module.params['retries'] 202 interval = module.params['interval'] 203 match = module.params['match'] 204 205 while retries > 0: 206 responses = run_commands(module, commands) 207 208 for item in list(conditionals): 209 if item(responses): 210 if match == 'any': 211 conditionals = list() 212 break 213 conditionals.remove(item) 214 215 if not conditionals: 216 break 217 218 time.sleep(interval) 219 retries -= 1 220 221 if conditionals: 222 failed_conditions = [item.raw for item in conditionals] 223 msg = 'One or more conditional statements have not be satisfied' 224 module.fail_json(msg=msg, failed_conditions=failed_conditions) 225 226 result.update({ 227 'changed': False, 228 'stdout': responses, 229 'stdout_lines': list(to_lines(responses)) 230 }) 231 232 module.exit_json(**result) 233 234 235 if __name__ == '__main__': 236 main() 237 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/modules/network/ios/ios_command.py b/lib/ansible/modules/network/ios/ios_command.py --- a/lib/ansible/modules/network/ios/ios_command.py +++ b/lib/ansible/modules/network/ios/ios_command.py @@ -131,6 +131,7 @@ type: list sample: ['...', '...'] """ +import re import time from ansible.module_utils.network.ios.ios import run_commands @@ -156,13 +157,14 @@ ), module) commands = command(module.params['commands']) for item in list(commands): + configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command']) if module.check_mode and not item['command'].startswith('show'): warnings.append( 'only show commands are supported when using check mode, not ' 'executing `%s`' % item['command'] ) commands.remove(item) - elif item['command'].startswith('conf'): + elif configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'): module.fail_json( msg='ios_command does not support running config mode ' 'commands. Please use ios_config instead'
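The regular expression introduced by the patch above, `conf(?:\w*)(?:\s+(\w+))?`, captures the first word after any `conf*` token and rejects the command only when that word is not one of the exec-mode subcommands. A standalone sketch follows; the sample IOS command strings are illustrative only.

```python
import re

EXEC_MODE_SUBCOMMANDS = ('confirm', 'replace', 'revert', 'network')


def is_config_mode_command(command):
    """True only for 'configure terminal'-style commands that must be rejected."""
    configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', command)
    return bool(configure_type) and configure_type.group(1) not in EXEC_MODE_SUBCOMMANDS


for cmd in ('configure terminal', 'conf t', 'configure revert timer 8',
            'configure confirm', 'configure replace flash:backup.cfg',
            'show version'):
    print(f'{cmd!r:40} rejected={is_config_mode_command(cmd)}')

# Only 'configure terminal' and 'conf t' are rejected; the exec-mode
# 'configure revert/confirm/replace' commands now pass through.
```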
{"golden_diff": "diff --git a/lib/ansible/modules/network/ios/ios_command.py b/lib/ansible/modules/network/ios/ios_command.py\n--- a/lib/ansible/modules/network/ios/ios_command.py\n+++ b/lib/ansible/modules/network/ios/ios_command.py\n@@ -131,6 +131,7 @@\n type: list\n sample: ['...', '...']\n \"\"\"\n+import re\n import time\n \n from ansible.module_utils.network.ios.ios import run_commands\n@@ -156,13 +157,14 @@\n ), module)\n commands = command(module.params['commands'])\n for item in list(commands):\n+ configure_type = re.match(r'conf(?:\\w*)(?:\\s+(\\w+))?', item['command'])\n if module.check_mode and not item['command'].startswith('show'):\n warnings.append(\n 'only show commands are supported when using check mode, not '\n 'executing `%s`' % item['command']\n )\n commands.remove(item)\n- elif item['command'].startswith('conf'):\n+ elif configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):\n module.fail_json(\n msg='ios_command does not support running config mode '\n 'commands. Please use ios_config instead'\n", "issue": "ios_command module : can not use \"configure revert\" commands\n##### ISSUE TYPE \r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\n- ios_command\r\n\r\n##### ANSIBLE VERSION\r\n- 2.x\r\n\r\n##### SUMMARY: ios_command module should not filter out all configure commands\r\n\r\nios_command is a wonderful module, however i found a minor issue :\r\n\r\nOn line 165 there is a sanity check that verify that the user is not sending a configuration command\r\n ` elif item['command'].startswith('conf'):` \r\n\r\nWith this test, it is not possible to send the following commands :\r\n - configure revert timer 5\r\n - configure confirm\r\n - configure replace\r\n - configure network\r\nwhich ARE valid exec commands and do NOT enter in configuration mode.\r\n\r\nHopefully (for me), a basic workaround is to insert a space (may be it is intented) :\r\n - [space] configure revert timer 5\r\n\r\nI propose to change the test and use something like (sorry i am not familiar with python) :\r\n` elif re.match(\"^\\s*conf\\S*\\s+t\", item['command']) `\r\njust to ensure that the user has NOT entered one of various configure terminal flavors.\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\nUse the following playbook : \r\n\r\n<pre> \r\n---\r\n- name: try\r\n hosts: all\r\n connection: local\r\n gather_facts: no\r\n\r\n vars:\r\n cli:\r\n host: \"{{ inventory_hostname }}\"\r\n username: cisco\r\n password: cisco\r\n transport: cli\r\n\r\n - name: This work (note the space at the beginning of the command line)\r\n ios_command:\r\n commands:\r\n - clear configuration lock\r\n - \" configure revert timer 8\"\r\n\r\n - name: This will not work (but should)\r\n ios_command:\r\n commands:\r\n - clear configuration lock\r\n - \"configure revert timer 8\"\r\n</pre> \r\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ios_command\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Run commands on remote devices running Cisco IOS\ndescription:\n - Sends arbitrary commands to an ios node and returns the results\n read from the device. This module includes an\n argument that will cause the module to wait for a specific condition\n before returning or timing out if the condition is not met.\n - This module does not support running commands in configuration mode.\n Please use M(ios_config) to configure IOS devices.\nextends_documentation_fragment: ios\nnotes:\n - Tested against IOS 15.6\noptions:\n commands:\n description:\n - List of commands to send to the remote ios device over the\n configured provider. The resulting output from the command\n is returned. If the I(wait_for) argument is provided, the\n module is not returned until the condition is satisfied or\n the number of retries has expired.\n required: true\n wait_for:\n description:\n - List of conditions to evaluate against the output of the\n command. The task will wait for each condition to be true\n before moving forward. If the conditional is not true\n within the configured number of retries, the task fails.\n See examples.\n required: false\n default: null\n aliases: ['waitfor']\n version_added: \"2.2\"\n match:\n description:\n - The I(match) argument is used in conjunction with the\n I(wait_for) argument to specify the match policy. Valid\n values are C(all) or C(any). If the value is set to C(all)\n then all conditionals in the wait_for must be satisfied. If\n the value is set to C(any) then only one of the values must be\n satisfied.\n required: false\n default: all\n choices: ['any', 'all']\n version_added: \"2.2\"\n retries:\n description:\n - Specifies the number of retries a command should by tried\n before it is considered failed. The command is run on the\n target device every retry and evaluated against the\n I(wait_for) conditions.\n required: false\n default: 10\n interval:\n description:\n - Configures the interval in seconds to wait between retries\n of the command. 
If the command does not pass the specified\n conditions, the interval indicates how long to wait before\n trying the command again.\n required: false\n default: 1\n\"\"\"\n\nEXAMPLES = \"\"\"\ntasks:\n - name: run show version on remote devices\n ios_command:\n commands: show version\n\n - name: run show version and check to see if output contains IOS\n ios_command:\n commands: show version\n wait_for: result[0] contains IOS\n\n - name: run multiple commands on remote nodes\n ios_command:\n commands:\n - show version\n - show interfaces\n\n - name: run multiple commands and evaluate the output\n ios_command:\n commands:\n - show version\n - show interfaces\n wait_for:\n - result[0] contains IOS\n - result[1] contains Loopback0\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The set of responses from the commands\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: ['...', '...']\nstdout_lines:\n description: The value of stdout split into a list\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: [['...', '...'], ['...'], ['...']]\nfailed_conditions:\n description: The list of conditionals that have failed\n returned: failed\n type: list\n sample: ['...', '...']\n\"\"\"\nimport time\n\nfrom ansible.module_utils.network.ios.ios import run_commands\nfrom ansible.module_utils.network.ios.ios import ios_argument_spec, check_args\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.network.common.utils import ComplexList\nfrom ansible.module_utils.network.common.parsing import Conditional\nfrom ansible.module_utils.six import string_types\n\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, string_types):\n item = str(item).split('\\n')\n yield item\n\n\ndef parse_commands(module, warnings):\n command = ComplexList(dict(\n command=dict(key=True),\n prompt=dict(),\n answer=dict()\n ), module)\n commands = command(module.params['commands'])\n for item in list(commands):\n if module.check_mode and not item['command'].startswith('show'):\n warnings.append(\n 'only show commands are supported when using check mode, not '\n 'executing `%s`' % item['command']\n )\n commands.remove(item)\n elif item['command'].startswith('conf'):\n module.fail_json(\n msg='ios_command does not support running config mode '\n 'commands. 
Please use ios_config instead'\n )\n return commands\n\n\ndef main():\n \"\"\"main entry point for module execution\n \"\"\"\n argument_spec = dict(\n commands=dict(type='list', required=True),\n\n wait_for=dict(type='list', aliases=['waitfor']),\n match=dict(default='all', choices=['all', 'any']),\n\n retries=dict(default=10, type='int'),\n interval=dict(default=1, type='int')\n )\n\n argument_spec.update(ios_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n result = {'changed': False}\n\n warnings = list()\n check_args(module, warnings)\n commands = parse_commands(module, warnings)\n result['warnings'] = warnings\n\n wait_for = module.params['wait_for'] or list()\n conditionals = [Conditional(c) for c in wait_for]\n\n retries = module.params['retries']\n interval = module.params['interval']\n match = module.params['match']\n\n while retries > 0:\n responses = run_commands(module, commands)\n\n for item in list(conditionals):\n if item(responses):\n if match == 'any':\n conditionals = list()\n break\n conditionals.remove(item)\n\n if not conditionals:\n break\n\n time.sleep(interval)\n retries -= 1\n\n if conditionals:\n failed_conditions = [item.raw for item in conditionals]\n msg = 'One or more conditional statements have not be satisfied'\n module.fail_json(msg=msg, failed_conditions=failed_conditions)\n\n result.update({\n 'changed': False,\n 'stdout': responses,\n 'stdout_lines': list(to_lines(responses))\n })\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/ios/ios_command.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: ios_command\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Run commands on remote devices running Cisco IOS\ndescription:\n - Sends arbitrary commands to an ios node and returns the results\n read from the device. This module includes an\n argument that will cause the module to wait for a specific condition\n before returning or timing out if the condition is not met.\n - This module does not support running commands in configuration mode.\n Please use M(ios_config) to configure IOS devices.\nextends_documentation_fragment: ios\nnotes:\n - Tested against IOS 15.6\noptions:\n commands:\n description:\n - List of commands to send to the remote ios device over the\n configured provider. The resulting output from the command\n is returned. If the I(wait_for) argument is provided, the\n module is not returned until the condition is satisfied or\n the number of retries has expired.\n required: true\n wait_for:\n description:\n - List of conditions to evaluate against the output of the\n command. 
The task will wait for each condition to be true\n before moving forward. If the conditional is not true\n within the configured number of retries, the task fails.\n See examples.\n required: false\n default: null\n aliases: ['waitfor']\n version_added: \"2.2\"\n match:\n description:\n - The I(match) argument is used in conjunction with the\n I(wait_for) argument to specify the match policy. Valid\n values are C(all) or C(any). If the value is set to C(all)\n then all conditionals in the wait_for must be satisfied. If\n the value is set to C(any) then only one of the values must be\n satisfied.\n required: false\n default: all\n choices: ['any', 'all']\n version_added: \"2.2\"\n retries:\n description:\n - Specifies the number of retries a command should by tried\n before it is considered failed. The command is run on the\n target device every retry and evaluated against the\n I(wait_for) conditions.\n required: false\n default: 10\n interval:\n description:\n - Configures the interval in seconds to wait between retries\n of the command. If the command does not pass the specified\n conditions, the interval indicates how long to wait before\n trying the command again.\n required: false\n default: 1\n\"\"\"\n\nEXAMPLES = \"\"\"\ntasks:\n - name: run show version on remote devices\n ios_command:\n commands: show version\n\n - name: run show version and check to see if output contains IOS\n ios_command:\n commands: show version\n wait_for: result[0] contains IOS\n\n - name: run multiple commands on remote nodes\n ios_command:\n commands:\n - show version\n - show interfaces\n\n - name: run multiple commands and evaluate the output\n ios_command:\n commands:\n - show version\n - show interfaces\n wait_for:\n - result[0] contains IOS\n - result[1] contains Loopback0\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The set of responses from the commands\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: ['...', '...']\nstdout_lines:\n description: The value of stdout split into a list\n returned: always apart from low level errors (such as action plugin)\n type: list\n sample: [['...', '...'], ['...'], ['...']]\nfailed_conditions:\n description: The list of conditionals that have failed\n returned: failed\n type: list\n sample: ['...', '...']\n\"\"\"\nimport re\nimport time\n\nfrom ansible.module_utils.network.ios.ios import run_commands\nfrom ansible.module_utils.network.ios.ios import ios_argument_spec, check_args\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.network.common.utils import ComplexList\nfrom ansible.module_utils.network.common.parsing import Conditional\nfrom ansible.module_utils.six import string_types\n\n\ndef to_lines(stdout):\n for item in stdout:\n if isinstance(item, string_types):\n item = str(item).split('\\n')\n yield item\n\n\ndef parse_commands(module, warnings):\n command = ComplexList(dict(\n command=dict(key=True),\n prompt=dict(),\n answer=dict()\n ), module)\n commands = command(module.params['commands'])\n for item in list(commands):\n configure_type = re.match(r'conf(?:\\w*)(?:\\s+(\\w+))?', item['command'])\n if module.check_mode and not item['command'].startswith('show'):\n warnings.append(\n 'only show commands are supported when using check mode, not '\n 'executing `%s`' % item['command']\n )\n commands.remove(item)\n elif configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):\n module.fail_json(\n msg='ios_command does not support running 
config mode '\n 'commands. Please use ios_config instead'\n )\n return commands\n\n\ndef main():\n \"\"\"main entry point for module execution\n \"\"\"\n argument_spec = dict(\n commands=dict(type='list', required=True),\n\n wait_for=dict(type='list', aliases=['waitfor']),\n match=dict(default='all', choices=['all', 'any']),\n\n retries=dict(default=10, type='int'),\n interval=dict(default=1, type='int')\n )\n\n argument_spec.update(ios_argument_spec)\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n result = {'changed': False}\n\n warnings = list()\n check_args(module, warnings)\n commands = parse_commands(module, warnings)\n result['warnings'] = warnings\n\n wait_for = module.params['wait_for'] or list()\n conditionals = [Conditional(c) for c in wait_for]\n\n retries = module.params['retries']\n interval = module.params['interval']\n match = module.params['match']\n\n while retries > 0:\n responses = run_commands(module, commands)\n\n for item in list(conditionals):\n if item(responses):\n if match == 'any':\n conditionals = list()\n break\n conditionals.remove(item)\n\n if not conditionals:\n break\n\n time.sleep(interval)\n retries -= 1\n\n if conditionals:\n failed_conditions = [item.raw for item in conditionals]\n msg = 'One or more conditional statements have not be satisfied'\n module.fail_json(msg=msg, failed_conditions=failed_conditions)\n\n result.update({\n 'changed': False,\n 'stdout': responses,\n 'stdout_lines': list(to_lines(responses))\n })\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/ios/ios_command.py"}]}
2,922
285
gh_patches_debug_10905
rasdani/github-patches
git_diff
saleor__saleor-1775
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improving product variant behaviour ### Steps to reproduce the problem 1. Choose any product in dashboard 2. Remove all variants 3. Try to go to the product page in storefront ### What I expected to happen Product page in storefront with "UNAVAILABLE" label. ### What happened instead/how it failed 500 code. It is getting crashed with `list index out of range` error, because in method `update_field_data` in `VariantChoiceField` lines ``` if self.queryset.count() < 2: self.widget = forms.HiddenInput( {'value': variants.all()[0].pk}) ``` are trying to get index of empty query set. Possible solution would be to omit this block of code if `variants.all()` is `False`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/product/forms.py` Content: ``` 1 import json 2 3 from django import forms 4 from django.utils.encoding import smart_text 5 from django.utils.translation import pgettext_lazy 6 from django_prices.templatetags.prices_i18n import gross 7 8 from ..cart.forms import AddToCartForm 9 10 11 class VariantChoiceField(forms.ModelChoiceField): 12 discounts = None 13 14 def label_from_instance(self, obj): 15 variant_label = smart_text(obj) 16 label = pgettext_lazy( 17 'Variant choice field label', 18 '%(variant_label)s - %(price)s') % { 19 'variant_label': variant_label, 20 'price': gross( 21 obj.get_price_per_item(discounts=self.discounts))} 22 return label 23 24 def update_field_data(self, variants, cart): 25 """Initialize variant picker metadata.""" 26 self.queryset = variants 27 self.discounts = cart.discounts 28 self.empty_label = None 29 images_map = { 30 variant.pk: [ 31 vi.image.image.url for vi in variant.variant_images.all()] 32 for variant in variants.all()} 33 self.widget.attrs['data-images'] = json.dumps(images_map) 34 # Don't display select input if there are less than two variants 35 if self.queryset.count() < 2: 36 self.widget = forms.HiddenInput( 37 {'value': variants.all()[0].pk}) 38 39 40 class ProductForm(AddToCartForm): 41 variant = VariantChoiceField(queryset=None) 42 43 def __init__(self, *args, **kwargs): 44 super().__init__(*args, **kwargs) 45 variant_field = self.fields['variant'] 46 variant_field.update_field_data(self.product.variants, self.cart) 47 48 def get_variant(self, cleaned_data): 49 return cleaned_data.get('variant') 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/product/forms.py b/saleor/product/forms.py --- a/saleor/product/forms.py +++ b/saleor/product/forms.py @@ -31,8 +31,8 @@ vi.image.image.url for vi in variant.variant_images.all()] for variant in variants.all()} self.widget.attrs['data-images'] = json.dumps(images_map) - # Don't display select input if there are less than two variants - if self.queryset.count() < 2: + # Don't display select input if there is only one variant. + if self.queryset.count() == 1: self.widget = forms.HiddenInput( {'value': variants.all()[0].pk})
{"golden_diff": "diff --git a/saleor/product/forms.py b/saleor/product/forms.py\n--- a/saleor/product/forms.py\n+++ b/saleor/product/forms.py\n@@ -31,8 +31,8 @@\n vi.image.image.url for vi in variant.variant_images.all()]\n for variant in variants.all()}\n self.widget.attrs['data-images'] = json.dumps(images_map)\n- # Don't display select input if there are less than two variants\n- if self.queryset.count() < 2:\n+ # Don't display select input if there is only one variant.\n+ if self.queryset.count() == 1:\n self.widget = forms.HiddenInput(\n {'value': variants.all()[0].pk})\n", "issue": "Improving product variant behaviour\n### Steps to reproduce the problem\r\n\r\n1. Choose any product in dashboard\r\n2. Remove all variants\r\n3. Try to go to the product page in storefront\r\n\r\n### What I expected to happen\r\n\r\nProduct page in storefront with \"UNAVAILABLE\" label.\r\n\r\n### What happened instead/how it failed\r\n500 code. \r\n\r\n\r\nIt is getting crashed with `list index out of range` error, because in method `update_field_data` in `VariantChoiceField` lines\r\n```\r\n if self.queryset.count() < 2:\r\n self.widget = forms.HiddenInput(\r\n {'value': variants.all()[0].pk})\r\n```\r\nare trying to get index of empty query set.\r\nPossible solution would be to omit this block of code if `variants.all()` is `False`.\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\nfrom django_prices.templatetags.prices_i18n import gross\n\nfrom ..cart.forms import AddToCartForm\n\n\nclass VariantChoiceField(forms.ModelChoiceField):\n discounts = None\n\n def label_from_instance(self, obj):\n variant_label = smart_text(obj)\n label = pgettext_lazy(\n 'Variant choice field label',\n '%(variant_label)s - %(price)s') % {\n 'variant_label': variant_label,\n 'price': gross(\n obj.get_price_per_item(discounts=self.discounts))}\n return label\n\n def update_field_data(self, variants, cart):\n \"\"\"Initialize variant picker metadata.\"\"\"\n self.queryset = variants\n self.discounts = cart.discounts\n self.empty_label = None\n images_map = {\n variant.pk: [\n vi.image.image.url for vi in variant.variant_images.all()]\n for variant in variants.all()}\n self.widget.attrs['data-images'] = json.dumps(images_map)\n # Don't display select input if there are less than two variants\n if self.queryset.count() < 2:\n self.widget = forms.HiddenInput(\n {'value': variants.all()[0].pk})\n\n\nclass ProductForm(AddToCartForm):\n variant = VariantChoiceField(queryset=None)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n variant_field = self.fields['variant']\n variant_field.update_field_data(self.product.variants, self.cart)\n\n def get_variant(self, cleaned_data):\n return cleaned_data.get('variant')\n", "path": "saleor/product/forms.py"}], "after_files": [{"content": "import json\n\nfrom django import forms\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\nfrom django_prices.templatetags.prices_i18n import gross\n\nfrom ..cart.forms import AddToCartForm\n\n\nclass VariantChoiceField(forms.ModelChoiceField):\n discounts = None\n\n def label_from_instance(self, obj):\n variant_label = smart_text(obj)\n label = pgettext_lazy(\n 'Variant choice field label',\n '%(variant_label)s - %(price)s') % {\n 'variant_label': variant_label,\n 'price': gross(\n obj.get_price_per_item(discounts=self.discounts))}\n return label\n\n def 
update_field_data(self, variants, cart):\n \"\"\"Initialize variant picker metadata.\"\"\"\n self.queryset = variants\n self.discounts = cart.discounts\n self.empty_label = None\n images_map = {\n variant.pk: [\n vi.image.image.url for vi in variant.variant_images.all()]\n for variant in variants.all()}\n self.widget.attrs['data-images'] = json.dumps(images_map)\n # Don't display select input if there is only one variant.\n if self.queryset.count() == 1:\n self.widget = forms.HiddenInput(\n {'value': variants.all()[0].pk})\n\n\nclass ProductForm(AddToCartForm):\n variant = VariantChoiceField(queryset=None)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n variant_field = self.fields['variant']\n variant_field.update_field_data(self.product.variants, self.cart)\n\n def get_variant(self, cleaned_data):\n return cleaned_data.get('variant')\n", "path": "saleor/product/forms.py"}]}
877
156
gh_patches_debug_34809
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4915
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Versions aren't deleted if there is one with the same commit If we create two versions pointing to the same commit (say, master and a tag on master). If we delete the tag, when syncing the repositories that tag isn't deleted from the versions list (because master has the same commit). This is also seeing when creating two tags that point to the same commit. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/restapi/utils.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Utility functions that are used by both views and celery tasks.""" 3 4 from __future__ import ( 5 absolute_import, division, print_function, unicode_literals) 6 7 import hashlib 8 import logging 9 10 from rest_framework.pagination import PageNumberPagination 11 12 from readthedocs.builds.constants import (LATEST, LATEST_VERBOSE_NAME, 13 NON_REPOSITORY_VERSIONS, STABLE, 14 STABLE_VERBOSE_NAME) 15 from readthedocs.builds.models import Version 16 from readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex 17 18 log = logging.getLogger(__name__) 19 20 21 def sync_versions(project, versions, type): # pylint: disable=redefined-builtin 22 """Update the database with the current versions from the repository.""" 23 old_version_values = project.versions.filter(type=type).values_list( 24 'verbose_name', 'identifier' 25 ) 26 old_versions = dict(old_version_values) 27 28 # Add new versions 29 added = set() 30 has_user_stable = False 31 has_user_latest = False 32 for version in versions: 33 version_id = version['identifier'] 34 version_name = version['verbose_name'] 35 if version_name == STABLE_VERBOSE_NAME: 36 has_user_stable = True 37 created_version, created = set_or_create_version( 38 project=project, 39 slug=STABLE, 40 version_id=version_id, 41 verbose_name=version_name, 42 type_=type 43 ) 44 if created: 45 added.add(created_version.slug) 46 elif version_name == LATEST_VERBOSE_NAME: 47 has_user_latest = True 48 created_version, created = set_or_create_version( 49 project=project, 50 slug=LATEST, 51 version_id=version_id, 52 verbose_name=version_name, 53 type_=type 54 ) 55 if created: 56 added.add(created_version.slug) 57 elif version_name in old_versions: 58 if version_id == old_versions[version_name]: 59 # Version is correct 60 continue 61 else: 62 # Update slug with new identifier 63 Version.objects.filter( 64 project=project, verbose_name=version_name).update( 65 identifier=version_id, 66 type=type, 67 machine=False, 68 ) # noqa 69 70 log.info( 71 '(Sync Versions) Updated Version: [%s=%s] ', 72 version_name, 73 version_id, 74 ) 75 else: 76 # New Version 77 created_version = Version.objects.create( 78 project=project, 79 type=type, 80 identifier=version_id, 81 verbose_name=version_name, 82 ) 83 added.add(created_version.slug) 84 if not has_user_stable: 85 stable_version = ( 86 project.versions 87 .filter(slug=STABLE, type=type) 88 .first() 89 ) 90 if stable_version: 91 # Put back the RTD's stable version 92 stable_version.machine = True 93 stable_version.save() 94 if not has_user_latest: 95 latest_version = ( 96 project.versions 97 .filter(slug=LATEST, type=type) 98 .first() 99 ) 100 if latest_version: 101 # Put back the RTD's latest version 102 latest_version.machine = True 103 latest_version.identifier = project.get_default_branch() 104 latest_version.verbose_name = LATEST_VERBOSE_NAME 105 
latest_version.save() 106 if added: 107 log.info('(Sync Versions) Added Versions: [%s] ', ' '.join(added)) 108 return added 109 110 111 def set_or_create_version(project, slug, version_id, verbose_name, type_): 112 """Search or create a version and set its machine attribute to false.""" 113 version = ( 114 project.versions 115 .filter(slug=slug) 116 .first() 117 ) 118 if version: 119 version.identifier = version_id 120 version.machine = False 121 version.type = type_ 122 version.save() 123 else: 124 created_version = Version.objects.create( 125 project=project, 126 type=type_, 127 identifier=version_id, 128 verbose_name=verbose_name, 129 ) 130 return created_version, True 131 return version, False 132 133 134 def delete_versions(project, version_data): 135 """Delete all versions not in the current repo.""" 136 current_versions = [] 137 if 'tags' in version_data: 138 for version in version_data['tags']: 139 current_versions.append(version['identifier']) 140 if 'branches' in version_data: 141 for version in version_data['branches']: 142 current_versions.append(version['identifier']) 143 to_delete_qs = project.versions.all() 144 to_delete_qs = to_delete_qs.exclude(identifier__in=current_versions) 145 to_delete_qs = to_delete_qs.exclude(uploaded=True) 146 to_delete_qs = to_delete_qs.exclude(active=True) 147 to_delete_qs = to_delete_qs.exclude(slug__in=NON_REPOSITORY_VERSIONS) 148 149 if to_delete_qs.count(): 150 ret_val = {obj.slug for obj in to_delete_qs} 151 log.info('(Sync Versions) Deleted Versions: [%s]', ' '.join(ret_val)) 152 to_delete_qs.delete() 153 return ret_val 154 return set() 155 156 157 def index_search_request( 158 version, page_list, commit, project_scale, page_scale, section=True, 159 delete=True): 160 """ 161 Update search indexes with build output JSON. 162 163 In order to keep sub-projects all indexed on the same shard, indexes will be 164 updated using the parent project's slug as the routing value. 
165 """ 166 # TODO refactor this function 167 # pylint: disable=too-many-locals 168 project = version.project 169 170 log_msg = ' '.join([page['path'] for page in page_list]) 171 log.info( 172 'Updating search index: project=%s pages=[%s]', 173 project.slug, 174 log_msg, 175 ) 176 177 project_obj = ProjectIndex() 178 project_obj.index_document( 179 data={ 180 'id': project.pk, 181 'name': project.name, 182 'slug': project.slug, 183 'description': project.description, 184 'lang': project.language, 185 'author': [user.username for user in project.users.all()], 186 'url': project.get_absolute_url(), 187 'tags': None, 188 'weight': project_scale, 189 }) 190 191 page_obj = PageIndex() 192 section_obj = SectionIndex() 193 index_list = [] 194 section_index_list = [] 195 routes = [project.slug] 196 routes.extend([p.parent.slug for p in project.superprojects.all()]) 197 for page in page_list: 198 log.debug('Indexing page: %s:%s', project.slug, page['path']) 199 to_hash = '-'.join([project.slug, version.slug, page['path']]) 200 page_id = hashlib.md5(to_hash.encode('utf-8')).hexdigest() 201 index_list.append({ 202 'id': page_id, 203 'project': project.slug, 204 'version': version.slug, 205 'path': page['path'], 206 'title': page['title'], 207 'headers': page['headers'], 208 'content': page['content'], 209 'taxonomy': None, 210 'commit': commit, 211 'weight': page_scale + project_scale, 212 }) 213 if section: 214 for sect in page['sections']: 215 id_to_hash = '-'.join([ 216 project.slug, 217 version.slug, 218 page['path'], 219 sect['id'], 220 ]) 221 section_index_list.append({ 222 'id': (hashlib.md5(id_to_hash.encode('utf-8')).hexdigest()), 223 'project': project.slug, 224 'version': version.slug, 225 'path': page['path'], 226 'page_id': sect['id'], 227 'title': sect['title'], 228 'content': sect['content'], 229 'weight': page_scale, 230 }) 231 for route in routes: 232 section_obj.bulk_index( 233 section_index_list, 234 parent=page_id, 235 routing=route, 236 ) 237 238 for route in routes: 239 page_obj.bulk_index(index_list, parent=project.slug, routing=route) 240 241 if delete: 242 log.info('Deleting files not in commit: %s', commit) 243 # TODO: AK Make sure this works 244 delete_query = { 245 'query': { 246 'bool': { 247 'must': [ 248 { 249 'term': { 250 'project': project.slug, 251 }, 252 }, 253 { 254 'term': { 255 'version': version.slug, 256 }, 257 }, 258 ], 259 'must_not': { 260 'term': { 261 'commit': commit, 262 }, 263 }, 264 }, 265 }, 266 } 267 page_obj.delete_document(body=delete_query) 268 269 270 class RemoteOrganizationPagination(PageNumberPagination): 271 page_size = 25 272 273 274 class RemoteProjectPagination(PageNumberPagination): 275 page_size = 15 276 277 278 class ProjectPagination(PageNumberPagination): 279 page_size = 100 280 max_page_size = 1000 281 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/restapi/utils.py b/readthedocs/restapi/utils.py --- a/readthedocs/restapi/utils.py +++ b/readthedocs/restapi/utils.py @@ -2,16 +2,26 @@ """Utility functions that are used by both views and celery tasks.""" from __future__ import ( - absolute_import, division, print_function, unicode_literals) + absolute_import, + division, + print_function, + unicode_literals, +) import hashlib import logging from rest_framework.pagination import PageNumberPagination -from readthedocs.builds.constants import (LATEST, LATEST_VERBOSE_NAME, - NON_REPOSITORY_VERSIONS, STABLE, - STABLE_VERBOSE_NAME) +from readthedocs.builds.constants import ( + BRANCH, + LATEST, + LATEST_VERBOSE_NAME, + NON_REPOSITORY_VERSIONS, + STABLE, + STABLE_VERBOSE_NAME, + TAG, +) from readthedocs.builds.models import Version from readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex @@ -133,15 +143,25 @@ def delete_versions(project, version_data): """Delete all versions not in the current repo.""" - current_versions = [] - if 'tags' in version_data: - for version in version_data['tags']: - current_versions.append(version['identifier']) - if 'branches' in version_data: - for version in version_data['branches']: - current_versions.append(version['identifier']) + # We use verbose_name for tags + # because several tags can point to the same identifier. + versions_tags = [ + version['verbose_name'] + for version in version_data.get('tags', []) + ] + versions_branches = [ + version['identifier'] + for version in version_data.get('branches', []) + ] to_delete_qs = project.versions.all() - to_delete_qs = to_delete_qs.exclude(identifier__in=current_versions) + to_delete_qs = to_delete_qs.exclude( + type=TAG, + verbose_name__in=versions_tags, + ) + to_delete_qs = to_delete_qs.exclude( + type=BRANCH, + identifier__in=versions_branches, + ) to_delete_qs = to_delete_qs.exclude(uploaded=True) to_delete_qs = to_delete_qs.exclude(active=True) to_delete_qs = to_delete_qs.exclude(slug__in=NON_REPOSITORY_VERSIONS)
{"golden_diff": "diff --git a/readthedocs/restapi/utils.py b/readthedocs/restapi/utils.py\n--- a/readthedocs/restapi/utils.py\n+++ b/readthedocs/restapi/utils.py\n@@ -2,16 +2,26 @@\n \"\"\"Utility functions that are used by both views and celery tasks.\"\"\"\n \n from __future__ import (\n- absolute_import, division, print_function, unicode_literals)\n+ absolute_import,\n+ division,\n+ print_function,\n+ unicode_literals,\n+)\n \n import hashlib\n import logging\n \n from rest_framework.pagination import PageNumberPagination\n \n-from readthedocs.builds.constants import (LATEST, LATEST_VERBOSE_NAME,\n- NON_REPOSITORY_VERSIONS, STABLE,\n- STABLE_VERBOSE_NAME)\n+from readthedocs.builds.constants import (\n+ BRANCH,\n+ LATEST,\n+ LATEST_VERBOSE_NAME,\n+ NON_REPOSITORY_VERSIONS,\n+ STABLE,\n+ STABLE_VERBOSE_NAME,\n+ TAG,\n+)\n from readthedocs.builds.models import Version\n from readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex\n \n@@ -133,15 +143,25 @@\n \n def delete_versions(project, version_data):\n \"\"\"Delete all versions not in the current repo.\"\"\"\n- current_versions = []\n- if 'tags' in version_data:\n- for version in version_data['tags']:\n- current_versions.append(version['identifier'])\n- if 'branches' in version_data:\n- for version in version_data['branches']:\n- current_versions.append(version['identifier'])\n+ # We use verbose_name for tags\n+ # because several tags can point to the same identifier.\n+ versions_tags = [\n+ version['verbose_name']\n+ for version in version_data.get('tags', [])\n+ ]\n+ versions_branches = [\n+ version['identifier']\n+ for version in version_data.get('branches', [])\n+ ]\n to_delete_qs = project.versions.all()\n- to_delete_qs = to_delete_qs.exclude(identifier__in=current_versions)\n+ to_delete_qs = to_delete_qs.exclude(\n+ type=TAG,\n+ verbose_name__in=versions_tags,\n+ )\n+ to_delete_qs = to_delete_qs.exclude(\n+ type=BRANCH,\n+ identifier__in=versions_branches,\n+ )\n to_delete_qs = to_delete_qs.exclude(uploaded=True)\n to_delete_qs = to_delete_qs.exclude(active=True)\n to_delete_qs = to_delete_qs.exclude(slug__in=NON_REPOSITORY_VERSIONS)\n", "issue": "Versions aren't deleted if there is one with the same commit\nIf we create two versions pointing to the same commit (say, master and a tag on master). 
If we delete the tag, when syncing the repositories that tag isn't deleted from the versions list (because master has the same commit).\r\n\r\nThis is also seeing when creating two tags that point to the same commit.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utility functions that are used by both views and celery tasks.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport hashlib\nimport logging\n\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom readthedocs.builds.constants import (LATEST, LATEST_VERBOSE_NAME,\n NON_REPOSITORY_VERSIONS, STABLE,\n STABLE_VERBOSE_NAME)\nfrom readthedocs.builds.models import Version\nfrom readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex\n\nlog = logging.getLogger(__name__)\n\n\ndef sync_versions(project, versions, type): # pylint: disable=redefined-builtin\n \"\"\"Update the database with the current versions from the repository.\"\"\"\n old_version_values = project.versions.filter(type=type).values_list(\n 'verbose_name', 'identifier'\n )\n old_versions = dict(old_version_values)\n\n # Add new versions\n added = set()\n has_user_stable = False\n has_user_latest = False\n for version in versions:\n version_id = version['identifier']\n version_name = version['verbose_name']\n if version_name == STABLE_VERBOSE_NAME:\n has_user_stable = True\n created_version, created = set_or_create_version(\n project=project,\n slug=STABLE,\n version_id=version_id,\n verbose_name=version_name,\n type_=type\n )\n if created:\n added.add(created_version.slug)\n elif version_name == LATEST_VERBOSE_NAME:\n has_user_latest = True\n created_version, created = set_or_create_version(\n project=project,\n slug=LATEST,\n version_id=version_id,\n verbose_name=version_name,\n type_=type\n )\n if created:\n added.add(created_version.slug)\n elif version_name in old_versions:\n if version_id == old_versions[version_name]:\n # Version is correct\n continue\n else:\n # Update slug with new identifier\n Version.objects.filter(\n project=project, verbose_name=version_name).update(\n identifier=version_id,\n type=type,\n machine=False,\n ) # noqa\n\n log.info(\n '(Sync Versions) Updated Version: [%s=%s] ',\n version_name,\n version_id,\n )\n else:\n # New Version\n created_version = Version.objects.create(\n project=project,\n type=type,\n identifier=version_id,\n verbose_name=version_name,\n )\n added.add(created_version.slug)\n if not has_user_stable:\n stable_version = (\n project.versions\n .filter(slug=STABLE, type=type)\n .first()\n )\n if stable_version:\n # Put back the RTD's stable version\n stable_version.machine = True\n stable_version.save()\n if not has_user_latest:\n latest_version = (\n project.versions\n .filter(slug=LATEST, type=type)\n .first()\n )\n if latest_version:\n # Put back the RTD's latest version\n latest_version.machine = True\n latest_version.identifier = project.get_default_branch()\n latest_version.verbose_name = LATEST_VERBOSE_NAME\n latest_version.save()\n if added:\n log.info('(Sync Versions) Added Versions: [%s] ', ' '.join(added))\n return added\n\n\ndef set_or_create_version(project, slug, version_id, verbose_name, type_):\n \"\"\"Search or create a version and set its machine attribute to false.\"\"\"\n version = (\n project.versions\n .filter(slug=slug)\n .first()\n )\n if version:\n version.identifier = version_id\n version.machine = False\n version.type = type_\n version.save()\n else:\n created_version = Version.objects.create(\n 
project=project,\n type=type_,\n identifier=version_id,\n verbose_name=verbose_name,\n )\n return created_version, True\n return version, False\n\n\ndef delete_versions(project, version_data):\n \"\"\"Delete all versions not in the current repo.\"\"\"\n current_versions = []\n if 'tags' in version_data:\n for version in version_data['tags']:\n current_versions.append(version['identifier'])\n if 'branches' in version_data:\n for version in version_data['branches']:\n current_versions.append(version['identifier'])\n to_delete_qs = project.versions.all()\n to_delete_qs = to_delete_qs.exclude(identifier__in=current_versions)\n to_delete_qs = to_delete_qs.exclude(uploaded=True)\n to_delete_qs = to_delete_qs.exclude(active=True)\n to_delete_qs = to_delete_qs.exclude(slug__in=NON_REPOSITORY_VERSIONS)\n\n if to_delete_qs.count():\n ret_val = {obj.slug for obj in to_delete_qs}\n log.info('(Sync Versions) Deleted Versions: [%s]', ' '.join(ret_val))\n to_delete_qs.delete()\n return ret_val\n return set()\n\n\ndef index_search_request(\n version, page_list, commit, project_scale, page_scale, section=True,\n delete=True):\n \"\"\"\n Update search indexes with build output JSON.\n\n In order to keep sub-projects all indexed on the same shard, indexes will be\n updated using the parent project's slug as the routing value.\n \"\"\"\n # TODO refactor this function\n # pylint: disable=too-many-locals\n project = version.project\n\n log_msg = ' '.join([page['path'] for page in page_list])\n log.info(\n 'Updating search index: project=%s pages=[%s]',\n project.slug,\n log_msg,\n )\n\n project_obj = ProjectIndex()\n project_obj.index_document(\n data={\n 'id': project.pk,\n 'name': project.name,\n 'slug': project.slug,\n 'description': project.description,\n 'lang': project.language,\n 'author': [user.username for user in project.users.all()],\n 'url': project.get_absolute_url(),\n 'tags': None,\n 'weight': project_scale,\n })\n\n page_obj = PageIndex()\n section_obj = SectionIndex()\n index_list = []\n section_index_list = []\n routes = [project.slug]\n routes.extend([p.parent.slug for p in project.superprojects.all()])\n for page in page_list:\n log.debug('Indexing page: %s:%s', project.slug, page['path'])\n to_hash = '-'.join([project.slug, version.slug, page['path']])\n page_id = hashlib.md5(to_hash.encode('utf-8')).hexdigest()\n index_list.append({\n 'id': page_id,\n 'project': project.slug,\n 'version': version.slug,\n 'path': page['path'],\n 'title': page['title'],\n 'headers': page['headers'],\n 'content': page['content'],\n 'taxonomy': None,\n 'commit': commit,\n 'weight': page_scale + project_scale,\n })\n if section:\n for sect in page['sections']:\n id_to_hash = '-'.join([\n project.slug,\n version.slug,\n page['path'],\n sect['id'],\n ])\n section_index_list.append({\n 'id': (hashlib.md5(id_to_hash.encode('utf-8')).hexdigest()),\n 'project': project.slug,\n 'version': version.slug,\n 'path': page['path'],\n 'page_id': sect['id'],\n 'title': sect['title'],\n 'content': sect['content'],\n 'weight': page_scale,\n })\n for route in routes:\n section_obj.bulk_index(\n section_index_list,\n parent=page_id,\n routing=route,\n )\n\n for route in routes:\n page_obj.bulk_index(index_list, parent=project.slug, routing=route)\n\n if delete:\n log.info('Deleting files not in commit: %s', commit)\n # TODO: AK Make sure this works\n delete_query = {\n 'query': {\n 'bool': {\n 'must': [\n {\n 'term': {\n 'project': project.slug,\n },\n },\n {\n 'term': {\n 'version': version.slug,\n },\n },\n ],\n 'must_not': {\n 
'term': {\n 'commit': commit,\n },\n },\n },\n },\n }\n page_obj.delete_document(body=delete_query)\n\n\nclass RemoteOrganizationPagination(PageNumberPagination):\n page_size = 25\n\n\nclass RemoteProjectPagination(PageNumberPagination):\n page_size = 15\n\n\nclass ProjectPagination(PageNumberPagination):\n page_size = 100\n max_page_size = 1000\n", "path": "readthedocs/restapi/utils.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utility functions that are used by both views and celery tasks.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport hashlib\nimport logging\n\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom readthedocs.builds.constants import (\n BRANCH,\n LATEST,\n LATEST_VERBOSE_NAME,\n NON_REPOSITORY_VERSIONS,\n STABLE,\n STABLE_VERBOSE_NAME,\n TAG,\n)\nfrom readthedocs.builds.models import Version\nfrom readthedocs.search.indexes import PageIndex, ProjectIndex, SectionIndex\n\nlog = logging.getLogger(__name__)\n\n\ndef sync_versions(project, versions, type): # pylint: disable=redefined-builtin\n \"\"\"Update the database with the current versions from the repository.\"\"\"\n old_version_values = project.versions.filter(type=type).values_list(\n 'verbose_name', 'identifier'\n )\n old_versions = dict(old_version_values)\n\n # Add new versions\n added = set()\n has_user_stable = False\n has_user_latest = False\n for version in versions:\n version_id = version['identifier']\n version_name = version['verbose_name']\n if version_name == STABLE_VERBOSE_NAME:\n has_user_stable = True\n created_version, created = set_or_create_version(\n project=project,\n slug=STABLE,\n version_id=version_id,\n verbose_name=version_name,\n type_=type\n )\n if created:\n added.add(created_version.slug)\n elif version_name == LATEST_VERBOSE_NAME:\n has_user_latest = True\n created_version, created = set_or_create_version(\n project=project,\n slug=LATEST,\n version_id=version_id,\n verbose_name=version_name,\n type_=type\n )\n if created:\n added.add(created_version.slug)\n elif version_name in old_versions:\n if version_id == old_versions[version_name]:\n # Version is correct\n continue\n else:\n # Update slug with new identifier\n Version.objects.filter(\n project=project, verbose_name=version_name).update(\n identifier=version_id,\n type=type,\n machine=False,\n ) # noqa\n\n log.info(\n '(Sync Versions) Updated Version: [%s=%s] ',\n version_name,\n version_id,\n )\n else:\n # New Version\n created_version = Version.objects.create(\n project=project,\n type=type,\n identifier=version_id,\n verbose_name=version_name,\n )\n added.add(created_version.slug)\n if not has_user_stable:\n stable_version = (\n project.versions\n .filter(slug=STABLE, type=type)\n .first()\n )\n if stable_version:\n # Put back the RTD's stable version\n stable_version.machine = True\n stable_version.save()\n if not has_user_latest:\n latest_version = (\n project.versions\n .filter(slug=LATEST, type=type)\n .first()\n )\n if latest_version:\n # Put back the RTD's latest version\n latest_version.machine = True\n latest_version.identifier = project.get_default_branch()\n latest_version.verbose_name = LATEST_VERBOSE_NAME\n latest_version.save()\n if added:\n log.info('(Sync Versions) Added Versions: [%s] ', ' '.join(added))\n return added\n\n\ndef set_or_create_version(project, slug, version_id, verbose_name, type_):\n \"\"\"Search or create a version and set its machine attribute to false.\"\"\"\n version = (\n project.versions\n 
.filter(slug=slug)\n .first()\n )\n if version:\n version.identifier = version_id\n version.machine = False\n version.type = type_\n version.save()\n else:\n created_version = Version.objects.create(\n project=project,\n type=type_,\n identifier=version_id,\n verbose_name=verbose_name,\n )\n return created_version, True\n return version, False\n\n\ndef delete_versions(project, version_data):\n \"\"\"Delete all versions not in the current repo.\"\"\"\n # We use verbose_name for tags\n # because several tags can point to the same identifier.\n versions_tags = [\n version['verbose_name']\n for version in version_data.get('tags', [])\n ]\n versions_branches = [\n version['identifier']\n for version in version_data.get('branches', [])\n ]\n to_delete_qs = project.versions.all()\n to_delete_qs = to_delete_qs.exclude(\n type=TAG,\n verbose_name__in=versions_tags,\n )\n to_delete_qs = to_delete_qs.exclude(\n type=BRANCH,\n identifier__in=versions_branches,\n )\n to_delete_qs = to_delete_qs.exclude(uploaded=True)\n to_delete_qs = to_delete_qs.exclude(active=True)\n to_delete_qs = to_delete_qs.exclude(slug__in=NON_REPOSITORY_VERSIONS)\n\n if to_delete_qs.count():\n ret_val = {obj.slug for obj in to_delete_qs}\n log.info('(Sync Versions) Deleted Versions: [%s]', ' '.join(ret_val))\n to_delete_qs.delete()\n return ret_val\n return set()\n\n\ndef index_search_request(\n version, page_list, commit, project_scale, page_scale, section=True,\n delete=True):\n \"\"\"\n Update search indexes with build output JSON.\n\n In order to keep sub-projects all indexed on the same shard, indexes will be\n updated using the parent project's slug as the routing value.\n \"\"\"\n # TODO refactor this function\n # pylint: disable=too-many-locals\n project = version.project\n\n log_msg = ' '.join([page['path'] for page in page_list])\n log.info(\n 'Updating search index: project=%s pages=[%s]',\n project.slug,\n log_msg,\n )\n\n project_obj = ProjectIndex()\n project_obj.index_document(\n data={\n 'id': project.pk,\n 'name': project.name,\n 'slug': project.slug,\n 'description': project.description,\n 'lang': project.language,\n 'author': [user.username for user in project.users.all()],\n 'url': project.get_absolute_url(),\n 'tags': None,\n 'weight': project_scale,\n })\n\n page_obj = PageIndex()\n section_obj = SectionIndex()\n index_list = []\n section_index_list = []\n routes = [project.slug]\n routes.extend([p.parent.slug for p in project.superprojects.all()])\n for page in page_list:\n log.debug('Indexing page: %s:%s', project.slug, page['path'])\n to_hash = '-'.join([project.slug, version.slug, page['path']])\n page_id = hashlib.md5(to_hash.encode('utf-8')).hexdigest()\n index_list.append({\n 'id': page_id,\n 'project': project.slug,\n 'version': version.slug,\n 'path': page['path'],\n 'title': page['title'],\n 'headers': page['headers'],\n 'content': page['content'],\n 'taxonomy': None,\n 'commit': commit,\n 'weight': page_scale + project_scale,\n })\n if section:\n for sect in page['sections']:\n id_to_hash = '-'.join([\n project.slug,\n version.slug,\n page['path'],\n sect['id'],\n ])\n section_index_list.append({\n 'id': (hashlib.md5(id_to_hash.encode('utf-8')).hexdigest()),\n 'project': project.slug,\n 'version': version.slug,\n 'path': page['path'],\n 'page_id': sect['id'],\n 'title': sect['title'],\n 'content': sect['content'],\n 'weight': page_scale,\n })\n for route in routes:\n section_obj.bulk_index(\n section_index_list,\n parent=page_id,\n routing=route,\n )\n\n for route in routes:\n 
page_obj.bulk_index(index_list, parent=project.slug, routing=route)\n\n if delete:\n log.info('Deleting files not in commit: %s', commit)\n # TODO: AK Make sure this works\n delete_query = {\n 'query': {\n 'bool': {\n 'must': [\n {\n 'term': {\n 'project': project.slug,\n },\n },\n {\n 'term': {\n 'version': version.slug,\n },\n },\n ],\n 'must_not': {\n 'term': {\n 'commit': commit,\n },\n },\n },\n },\n }\n page_obj.delete_document(body=delete_query)\n\n\nclass RemoteOrganizationPagination(PageNumberPagination):\n page_size = 25\n\n\nclass RemoteProjectPagination(PageNumberPagination):\n page_size = 15\n\n\nclass ProjectPagination(PageNumberPagination):\n page_size = 100\n max_page_size = 1000\n", "path": "readthedocs/restapi/utils.py"}]}
2,919
547
gh_patches_debug_22682
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1748
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pydantic converted types have no way to deprecate fields Since we recreate the field object for the values that use simple fields, there isn't a clean way to add a deprecation_reason to a field from a pydantic class. The same is true for permission_classes and directives. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/experimental/pydantic/object_type.py` Content: ``` 1 from __future__ import annotations 2 3 import builtins 4 import dataclasses 5 import warnings 6 from functools import partial 7 from typing import ( 8 TYPE_CHECKING, 9 Any, 10 Callable, 11 Dict, 12 List, 13 Optional, 14 Sequence, 15 Set, 16 Type, 17 cast, 18 ) 19 20 from pydantic import BaseModel 21 from pydantic.fields import ModelField 22 from typing_extensions import Literal 23 24 from graphql import GraphQLResolveInfo 25 26 import strawberry 27 from strawberry.arguments import UNSET 28 from strawberry.experimental.pydantic.conversion import ( 29 convert_pydantic_model_to_strawberry_class, 30 convert_strawberry_class_to_pydantic_model, 31 ) 32 from strawberry.experimental.pydantic.fields import get_basic_type 33 from strawberry.experimental.pydantic.utils import ( 34 DataclassCreationFields, 35 ensure_all_auto_fields_in_pydantic, 36 get_default_factory_for_field, 37 get_private_fields, 38 sort_creation_fields, 39 ) 40 from strawberry.field import StrawberryField 41 from strawberry.object_type import _process_type, _wrap_dataclass 42 from strawberry.schema_directive import StrawberrySchemaDirective 43 from strawberry.types.type_resolver import _get_fields 44 from strawberry.types.types import TypeDefinition 45 46 from .exceptions import MissingFieldsListError, UnregisteredTypeException 47 48 49 def replace_pydantic_types(type_: Any, is_input: bool): 50 origin = getattr(type_, "__origin__", None) 51 if origin is Literal: 52 # Literal does not have types in its __args__ so we return early 53 return type_ 54 if hasattr(type_, "__args__"): 55 replaced_type = type_.copy_with( 56 tuple(replace_pydantic_types(t, is_input) for t in type_.__args__) 57 ) 58 59 if isinstance(replaced_type, TypeDefinition): 60 # TODO: Not sure if this is necessary. 
No coverage in tests 61 # TODO: Unnecessary with StrawberryObject 62 63 replaced_type = builtins.type( 64 replaced_type.name, 65 (), 66 {"_type_definition": replaced_type}, 67 ) 68 69 return replaced_type 70 71 if issubclass(type_, BaseModel): 72 attr = "_strawberry_input_type" if is_input else "_strawberry_type" 73 if hasattr(type_, attr): 74 return getattr(type_, attr) 75 else: 76 raise UnregisteredTypeException(type_) 77 78 return type_ 79 80 81 def get_type_for_field(field: ModelField, is_input: bool): 82 outer_type = field.outer_type_ 83 basic_type = get_basic_type(outer_type) 84 replaced_type = replace_pydantic_types(basic_type, is_input) 85 86 if not field.required: 87 return Optional[replaced_type] 88 else: 89 return replaced_type 90 91 92 def _build_dataclass_creation_fields( 93 field: ModelField, 94 is_input: bool, 95 existing_fields: Dict[str, StrawberryField], 96 auto_fields_set: Set[str], 97 use_pydantic_alias: bool, 98 ) -> DataclassCreationFields: 99 type_annotation = ( 100 get_type_for_field(field, is_input) 101 if field.name in auto_fields_set 102 else existing_fields[field.name].type 103 ) 104 105 if ( 106 field.name in existing_fields 107 and existing_fields[field.name].base_resolver is not None 108 ): 109 # if the user has defined a resolver for this field, always use it 110 strawberry_field = existing_fields[field.name] 111 else: 112 # otherwise we build an appropriate strawberry field that resolves it 113 strawberry_field = StrawberryField( 114 python_name=field.name, 115 graphql_name=field.alias 116 if field.has_alias and use_pydantic_alias 117 else None, 118 # always unset because we use default_factory instead 119 default=UNSET, 120 default_factory=get_default_factory_for_field(field), 121 type_annotation=type_annotation, 122 description=field.field_info.description, 123 ) 124 125 return DataclassCreationFields( 126 name=field.name, 127 type_annotation=type_annotation, 128 field=strawberry_field, 129 ) 130 131 132 if TYPE_CHECKING: 133 from strawberry.experimental.pydantic.conversion_types import ( 134 PydanticModel, 135 StrawberryTypeFromPydantic, 136 ) 137 138 139 def type( 140 model: Type[PydanticModel], 141 *, 142 fields: Optional[List[str]] = None, 143 name: Optional[str] = None, 144 is_input: bool = False, 145 is_interface: bool = False, 146 description: Optional[str] = None, 147 directives: Optional[Sequence[StrawberrySchemaDirective]] = (), 148 all_fields: bool = False, 149 use_pydantic_alias: bool = True, 150 ) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]: 151 def wrap(cls: Any) -> Type[StrawberryTypeFromPydantic[PydanticModel]]: 152 model_fields = model.__fields__ 153 original_fields_set = set(fields) if fields else set([]) 154 155 if fields: 156 warnings.warn( 157 "`fields` is deprecated, use `auto` type annotations instead", 158 DeprecationWarning, 159 ) 160 161 existing_fields = getattr(cls, "__annotations__", {}) 162 # these are the fields that matched a field name in the pydantic model 163 # and should copy their alias from the pydantic model 164 fields_set = original_fields_set.union( 165 set(name for name, _ in existing_fields.items() if name in model_fields) 166 ) 167 # these are the fields that were marked with strawberry.auto and 168 # should copy their type from the pydantic model 169 auto_fields_set = original_fields_set.union( 170 set(name for name, typ in existing_fields.items() if typ == strawberry.auto) 171 ) 172 173 if all_fields: 174 if fields_set: 175 warnings.warn( 176 "Using all_fields overrides any explicitly 
defined fields " 177 "in the model, using both is likely a bug", 178 stacklevel=2, 179 ) 180 fields_set = set(model_fields.keys()) 181 auto_fields_set = set(model_fields.keys()) 182 183 if not fields_set: 184 raise MissingFieldsListError(cls) 185 186 ensure_all_auto_fields_in_pydantic( 187 model=model, auto_fields=auto_fields_set, cls_name=cls.__name__ 188 ) 189 190 wrapped = _wrap_dataclass(cls) 191 extra_strawberry_fields = _get_fields(wrapped) 192 extra_fields = cast(List[dataclasses.Field], extra_strawberry_fields) 193 private_fields = get_private_fields(wrapped) 194 195 extra_fields_dict = {field.name: field for field in extra_strawberry_fields} 196 197 all_model_fields: List[DataclassCreationFields] = [ 198 _build_dataclass_creation_fields( 199 field, is_input, extra_fields_dict, auto_fields_set, use_pydantic_alias 200 ) 201 for field_name, field in model_fields.items() 202 if field_name in fields_set 203 ] 204 205 all_model_fields.extend( 206 ( 207 DataclassCreationFields( 208 name=field.name, 209 type_annotation=field.type, 210 field=field, 211 ) 212 for field in extra_fields + private_fields 213 if field.name not in fields_set 214 ) 215 ) 216 217 # Sort fields so that fields with missing defaults go first 218 sorted_fields = sort_creation_fields(all_model_fields) 219 220 # Implicitly define `is_type_of` to support interfaces/unions that use 221 # pydantic objects (not the corresponding strawberry type) 222 @classmethod # type: ignore 223 def is_type_of(cls: Type, obj: Any, _info: GraphQLResolveInfo) -> bool: 224 return isinstance(obj, (cls, model)) 225 226 namespace = {"is_type_of": is_type_of} 227 # We need to tell the difference between a from_pydantic method that is 228 # inherited from a base class and one that is defined by the user in the 229 # decorated class. We want to override the method only if it is 230 # inherited. 
To tell the difference, we compare the class name to the 231 # fully qualified name of the method, which will end in <class>.from_pydantic 232 has_custom_from_pydantic = hasattr( 233 cls, "from_pydantic" 234 ) and cls.from_pydantic.__qualname__.endswith(f"{cls.__name__}.from_pydantic") 235 has_custom_to_pydantic = hasattr( 236 cls, "to_pydantic" 237 ) and cls.to_pydantic.__qualname__.endswith(f"{cls.__name__}.to_pydantic") 238 239 if has_custom_from_pydantic: 240 namespace["from_pydantic"] = cls.from_pydantic 241 if has_custom_to_pydantic: 242 namespace["to_pydantic"] = cls.to_pydantic 243 244 cls = dataclasses.make_dataclass( 245 cls.__name__, 246 [field.to_tuple() for field in sorted_fields], 247 bases=cls.__bases__, 248 namespace=namespace, 249 ) 250 251 _process_type( 252 cls, 253 name=name, 254 is_input=is_input, 255 is_interface=is_interface, 256 description=description, 257 directives=directives, 258 ) 259 260 if is_input: 261 model._strawberry_input_type = cls # type: ignore 262 else: 263 model._strawberry_type = cls # type: ignore 264 cls._pydantic_type = model 265 266 def from_pydantic_default( 267 instance: PydanticModel, extra: Dict[str, Any] = None 268 ) -> StrawberryTypeFromPydantic[PydanticModel]: 269 return convert_pydantic_model_to_strawberry_class( 270 cls=cls, model_instance=instance, extra=extra 271 ) 272 273 def to_pydantic_default(self) -> PydanticModel: 274 instance_kwargs = { 275 f.name: convert_strawberry_class_to_pydantic_model( 276 getattr(self, f.name) 277 ) 278 for f in dataclasses.fields(self) 279 } 280 return model(**instance_kwargs) 281 282 if not has_custom_from_pydantic: 283 cls.from_pydantic = staticmethod(from_pydantic_default) 284 if not has_custom_to_pydantic: 285 cls.to_pydantic = to_pydantic_default 286 287 return cls 288 289 return wrap 290 291 292 input = partial(type, is_input=True) 293 294 interface = partial(type, is_interface=True) 295 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py --- a/strawberry/experimental/pydantic/object_type.py +++ b/strawberry/experimental/pydantic/object_type.py @@ -110,6 +110,7 @@ strawberry_field = existing_fields[field.name] else: # otherwise we build an appropriate strawberry field that resolves it + existing_field = existing_fields.get(field.name) strawberry_field = StrawberryField( python_name=field.name, graphql_name=field.alias @@ -120,6 +121,13 @@ default_factory=get_default_factory_for_field(field), type_annotation=type_annotation, description=field.field_info.description, + deprecation_reason=( + existing_field.deprecation_reason if existing_field else None + ), + permission_classes=( + existing_field.permission_classes if existing_field else [] + ), + directives=existing_field.directives if existing_field else (), ) return DataclassCreationFields(
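The change above copies `deprecation_reason`, `permission_classes`, and `directives` from a field the user declared explicitly on the decorated class, so they are no longer lost when the strawberry field is rebuilt from the pydantic model. A hypothetical usage sketch of what this enables follows; the model, field names, and deprecation message are invented for illustration, and it assumes the decorator picks up an explicit `strawberry.field(...)` assigned to an `auto` field:

```python
import strawberry
from pydantic import BaseModel
from strawberry.experimental import pydantic


class UserModel(BaseModel):
    name: str
    age: int


@pydantic.type(model=UserModel)
class User:
    name: strawberry.auto
    # With the patch, the deprecation_reason declared here is carried over to
    # the strawberry field generated from UserModel.age instead of being
    # dropped when the field object is recreated.
    age: strawberry.auto = strawberry.field(deprecation_reason="age is deprecated")
```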
{"golden_diff": "diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py\n--- a/strawberry/experimental/pydantic/object_type.py\n+++ b/strawberry/experimental/pydantic/object_type.py\n@@ -110,6 +110,7 @@\n strawberry_field = existing_fields[field.name]\n else:\n # otherwise we build an appropriate strawberry field that resolves it\n+ existing_field = existing_fields.get(field.name)\n strawberry_field = StrawberryField(\n python_name=field.name,\n graphql_name=field.alias\n@@ -120,6 +121,13 @@\n default_factory=get_default_factory_for_field(field),\n type_annotation=type_annotation,\n description=field.field_info.description,\n+ deprecation_reason=(\n+ existing_field.deprecation_reason if existing_field else None\n+ ),\n+ permission_classes=(\n+ existing_field.permission_classes if existing_field else []\n+ ),\n+ directives=existing_field.directives if existing_field else (),\n )\n \n return DataclassCreationFields(\n", "issue": "Pydantic converted types have no way to deprecate fields\nSince we recreate the field object for the values that use simple fields, there isn't a clean way to add a deprecation_reason to a field from a pydantic class. The same is true for permission_classes and directives.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport builtins\nimport dataclasses\nimport warnings\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Set,\n Type,\n cast,\n)\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\nfrom typing_extensions import Literal\n\nfrom graphql import GraphQLResolveInfo\n\nimport strawberry\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n convert_strawberry_class_to_pydantic_model,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.experimental.pydantic.utils import (\n DataclassCreationFields,\n ensure_all_auto_fields_in_pydantic,\n get_default_factory_for_field,\n get_private_fields,\n sort_creation_fields,\n)\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type, _wrap_dataclass\nfrom strawberry.schema_directive import StrawberrySchemaDirective\nfrom strawberry.types.type_resolver import _get_fields\nfrom strawberry.types.types import TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n origin = getattr(type_, \"__origin__\", None)\n if origin is Literal:\n # Literal does not have types in its __args__ so we return early\n return type_\n if hasattr(type_, \"__args__\"):\n replaced_type = type_.copy_with(\n tuple(replace_pydantic_types(t, is_input) for t in type_.__args__)\n )\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. 
No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n\n if issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField, is_input: bool):\n outer_type = field.outer_type_\n basic_type = get_basic_type(outer_type)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n if not field.required:\n return Optional[replaced_type]\n else:\n return replaced_type\n\n\ndef _build_dataclass_creation_fields(\n field: ModelField,\n is_input: bool,\n existing_fields: Dict[str, StrawberryField],\n auto_fields_set: Set[str],\n use_pydantic_alias: bool,\n) -> DataclassCreationFields:\n type_annotation = (\n get_type_for_field(field, is_input)\n if field.name in auto_fields_set\n else existing_fields[field.name].type\n )\n\n if (\n field.name in existing_fields\n and existing_fields[field.name].base_resolver is not None\n ):\n # if the user has defined a resolver for this field, always use it\n strawberry_field = existing_fields[field.name]\n else:\n # otherwise we build an appropriate strawberry field that resolves it\n strawberry_field = StrawberryField(\n python_name=field.name,\n graphql_name=field.alias\n if field.has_alias and use_pydantic_alias\n else None,\n # always unset because we use default_factory instead\n default=UNSET,\n default_factory=get_default_factory_for_field(field),\n type_annotation=type_annotation,\n description=field.field_info.description,\n )\n\n return DataclassCreationFields(\n name=field.name,\n type_annotation=type_annotation,\n field=strawberry_field,\n )\n\n\nif TYPE_CHECKING:\n from strawberry.experimental.pydantic.conversion_types import (\n PydanticModel,\n StrawberryTypeFromPydantic,\n )\n\n\ndef type(\n model: Type[PydanticModel],\n *,\n fields: Optional[List[str]] = None,\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n directives: Optional[Sequence[StrawberrySchemaDirective]] = (),\n all_fields: bool = False,\n use_pydantic_alias: bool = True,\n) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]:\n def wrap(cls: Any) -> Type[StrawberryTypeFromPydantic[PydanticModel]]:\n model_fields = model.__fields__\n original_fields_set = set(fields) if fields else set([])\n\n if fields:\n warnings.warn(\n \"`fields` is deprecated, use `auto` type annotations instead\",\n DeprecationWarning,\n )\n\n existing_fields = getattr(cls, \"__annotations__\", {})\n # these are the fields that matched a field name in the pydantic model\n # and should copy their alias from the pydantic model\n fields_set = original_fields_set.union(\n set(name for name, _ in existing_fields.items() if name in model_fields)\n )\n # these are the fields that were marked with strawberry.auto and\n # should copy their type from the pydantic model\n auto_fields_set = original_fields_set.union(\n set(name for name, typ in existing_fields.items() if typ == strawberry.auto)\n )\n\n if all_fields:\n if fields_set:\n warnings.warn(\n \"Using all_fields overrides any explicitly defined fields \"\n \"in the model, using both is likely a bug\",\n stacklevel=2,\n )\n fields_set = set(model_fields.keys())\n auto_fields_set = set(model_fields.keys())\n\n if not 
fields_set:\n raise MissingFieldsListError(cls)\n\n ensure_all_auto_fields_in_pydantic(\n model=model, auto_fields=auto_fields_set, cls_name=cls.__name__\n )\n\n wrapped = _wrap_dataclass(cls)\n extra_strawberry_fields = _get_fields(wrapped)\n extra_fields = cast(List[dataclasses.Field], extra_strawberry_fields)\n private_fields = get_private_fields(wrapped)\n\n extra_fields_dict = {field.name: field for field in extra_strawberry_fields}\n\n all_model_fields: List[DataclassCreationFields] = [\n _build_dataclass_creation_fields(\n field, is_input, extra_fields_dict, auto_fields_set, use_pydantic_alias\n )\n for field_name, field in model_fields.items()\n if field_name in fields_set\n ]\n\n all_model_fields.extend(\n (\n DataclassCreationFields(\n name=field.name,\n type_annotation=field.type,\n field=field,\n )\n for field in extra_fields + private_fields\n if field.name not in fields_set\n )\n )\n\n # Sort fields so that fields with missing defaults go first\n sorted_fields = sort_creation_fields(all_model_fields)\n\n # Implicitly define `is_type_of` to support interfaces/unions that use\n # pydantic objects (not the corresponding strawberry type)\n @classmethod # type: ignore\n def is_type_of(cls: Type, obj: Any, _info: GraphQLResolveInfo) -> bool:\n return isinstance(obj, (cls, model))\n\n namespace = {\"is_type_of\": is_type_of}\n # We need to tell the difference between a from_pydantic method that is\n # inherited from a base class and one that is defined by the user in the\n # decorated class. We want to override the method only if it is\n # inherited. To tell the difference, we compare the class name to the\n # fully qualified name of the method, which will end in <class>.from_pydantic\n has_custom_from_pydantic = hasattr(\n cls, \"from_pydantic\"\n ) and cls.from_pydantic.__qualname__.endswith(f\"{cls.__name__}.from_pydantic\")\n has_custom_to_pydantic = hasattr(\n cls, \"to_pydantic\"\n ) and cls.to_pydantic.__qualname__.endswith(f\"{cls.__name__}.to_pydantic\")\n\n if has_custom_from_pydantic:\n namespace[\"from_pydantic\"] = cls.from_pydantic\n if has_custom_to_pydantic:\n namespace[\"to_pydantic\"] = cls.to_pydantic\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n [field.to_tuple() for field in sorted_fields],\n bases=cls.__bases__,\n namespace=namespace,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n directives=directives,\n )\n\n if is_input:\n model._strawberry_input_type = cls # type: ignore\n else:\n model._strawberry_type = cls # type: ignore\n cls._pydantic_type = model\n\n def from_pydantic_default(\n instance: PydanticModel, extra: Dict[str, Any] = None\n ) -> StrawberryTypeFromPydantic[PydanticModel]:\n return convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic_default(self) -> PydanticModel:\n instance_kwargs = {\n f.name: convert_strawberry_class_to_pydantic_model(\n getattr(self, f.name)\n )\n for f in dataclasses.fields(self)\n }\n return model(**instance_kwargs)\n\n if not has_custom_from_pydantic:\n cls.from_pydantic = staticmethod(from_pydantic_default)\n if not has_custom_to_pydantic:\n cls.to_pydantic = to_pydantic_default\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport builtins\nimport dataclasses\nimport 
warnings\nfrom functools import partial\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Set,\n Type,\n cast,\n)\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\nfrom typing_extensions import Literal\n\nfrom graphql import GraphQLResolveInfo\n\nimport strawberry\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n convert_strawberry_class_to_pydantic_model,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.experimental.pydantic.utils import (\n DataclassCreationFields,\n ensure_all_auto_fields_in_pydantic,\n get_default_factory_for_field,\n get_private_fields,\n sort_creation_fields,\n)\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type, _wrap_dataclass\nfrom strawberry.schema_directive import StrawberrySchemaDirective\nfrom strawberry.types.type_resolver import _get_fields\nfrom strawberry.types.types import TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n origin = getattr(type_, \"__origin__\", None)\n if origin is Literal:\n # Literal does not have types in its __args__ so we return early\n return type_\n if hasattr(type_, \"__args__\"):\n replaced_type = type_.copy_with(\n tuple(replace_pydantic_types(t, is_input) for t in type_.__args__)\n )\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n\n if issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField, is_input: bool):\n outer_type = field.outer_type_\n basic_type = get_basic_type(outer_type)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n if not field.required:\n return Optional[replaced_type]\n else:\n return replaced_type\n\n\ndef _build_dataclass_creation_fields(\n field: ModelField,\n is_input: bool,\n existing_fields: Dict[str, StrawberryField],\n auto_fields_set: Set[str],\n use_pydantic_alias: bool,\n) -> DataclassCreationFields:\n type_annotation = (\n get_type_for_field(field, is_input)\n if field.name in auto_fields_set\n else existing_fields[field.name].type\n )\n\n if (\n field.name in existing_fields\n and existing_fields[field.name].base_resolver is not None\n ):\n # if the user has defined a resolver for this field, always use it\n strawberry_field = existing_fields[field.name]\n else:\n # otherwise we build an appropriate strawberry field that resolves it\n existing_field = existing_fields.get(field.name)\n strawberry_field = StrawberryField(\n python_name=field.name,\n graphql_name=field.alias\n if field.has_alias and use_pydantic_alias\n else None,\n # always unset because we use default_factory instead\n default=UNSET,\n default_factory=get_default_factory_for_field(field),\n type_annotation=type_annotation,\n description=field.field_info.description,\n deprecation_reason=(\n existing_field.deprecation_reason if existing_field else None\n ),\n permission_classes=(\n 
existing_field.permission_classes if existing_field else []\n ),\n directives=existing_field.directives if existing_field else (),\n )\n\n return DataclassCreationFields(\n name=field.name,\n type_annotation=type_annotation,\n field=strawberry_field,\n )\n\n\nif TYPE_CHECKING:\n from strawberry.experimental.pydantic.conversion_types import (\n PydanticModel,\n StrawberryTypeFromPydantic,\n )\n\n\ndef type(\n model: Type[PydanticModel],\n *,\n fields: Optional[List[str]] = None,\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n directives: Optional[Sequence[StrawberrySchemaDirective]] = (),\n all_fields: bool = False,\n use_pydantic_alias: bool = True,\n) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]:\n def wrap(cls: Any) -> Type[StrawberryTypeFromPydantic[PydanticModel]]:\n model_fields = model.__fields__\n original_fields_set = set(fields) if fields else set([])\n\n if fields:\n warnings.warn(\n \"`fields` is deprecated, use `auto` type annotations instead\",\n DeprecationWarning,\n )\n\n existing_fields = getattr(cls, \"__annotations__\", {})\n # these are the fields that matched a field name in the pydantic model\n # and should copy their alias from the pydantic model\n fields_set = original_fields_set.union(\n set(name for name, _ in existing_fields.items() if name in model_fields)\n )\n # these are the fields that were marked with strawberry.auto and\n # should copy their type from the pydantic model\n auto_fields_set = original_fields_set.union(\n set(name for name, typ in existing_fields.items() if typ == strawberry.auto)\n )\n\n if all_fields:\n if fields_set:\n warnings.warn(\n \"Using all_fields overrides any explicitly defined fields \"\n \"in the model, using both is likely a bug\",\n stacklevel=2,\n )\n fields_set = set(model_fields.keys())\n auto_fields_set = set(model_fields.keys())\n\n if not fields_set:\n raise MissingFieldsListError(cls)\n\n ensure_all_auto_fields_in_pydantic(\n model=model, auto_fields=auto_fields_set, cls_name=cls.__name__\n )\n\n wrapped = _wrap_dataclass(cls)\n extra_strawberry_fields = _get_fields(wrapped)\n extra_fields = cast(List[dataclasses.Field], extra_strawberry_fields)\n private_fields = get_private_fields(wrapped)\n\n extra_fields_dict = {field.name: field for field in extra_strawberry_fields}\n\n all_model_fields: List[DataclassCreationFields] = [\n _build_dataclass_creation_fields(\n field, is_input, extra_fields_dict, auto_fields_set, use_pydantic_alias\n )\n for field_name, field in model_fields.items()\n if field_name in fields_set\n ]\n\n all_model_fields.extend(\n (\n DataclassCreationFields(\n name=field.name,\n type_annotation=field.type,\n field=field,\n )\n for field in extra_fields + private_fields\n if field.name not in fields_set\n )\n )\n\n # Sort fields so that fields with missing defaults go first\n sorted_fields = sort_creation_fields(all_model_fields)\n\n # Implicitly define `is_type_of` to support interfaces/unions that use\n # pydantic objects (not the corresponding strawberry type)\n @classmethod # type: ignore\n def is_type_of(cls: Type, obj: Any, _info: GraphQLResolveInfo) -> bool:\n return isinstance(obj, (cls, model))\n\n namespace = {\"is_type_of\": is_type_of}\n # We need to tell the difference between a from_pydantic method that is\n # inherited from a base class and one that is defined by the user in the\n # decorated class. We want to override the method only if it is\n # inherited. 
To tell the difference, we compare the class name to the\n # fully qualified name of the method, which will end in <class>.from_pydantic\n has_custom_from_pydantic = hasattr(\n cls, \"from_pydantic\"\n ) and cls.from_pydantic.__qualname__.endswith(f\"{cls.__name__}.from_pydantic\")\n has_custom_to_pydantic = hasattr(\n cls, \"to_pydantic\"\n ) and cls.to_pydantic.__qualname__.endswith(f\"{cls.__name__}.to_pydantic\")\n\n if has_custom_from_pydantic:\n namespace[\"from_pydantic\"] = cls.from_pydantic\n if has_custom_to_pydantic:\n namespace[\"to_pydantic\"] = cls.to_pydantic\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n [field.to_tuple() for field in sorted_fields],\n bases=cls.__bases__,\n namespace=namespace,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n directives=directives,\n )\n\n if is_input:\n model._strawberry_input_type = cls # type: ignore\n else:\n model._strawberry_type = cls # type: ignore\n cls._pydantic_type = model\n\n def from_pydantic_default(\n instance: PydanticModel, extra: Dict[str, Any] = None\n ) -> StrawberryTypeFromPydantic[PydanticModel]:\n return convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic_default(self) -> PydanticModel:\n instance_kwargs = {\n f.name: convert_strawberry_class_to_pydantic_model(\n getattr(self, f.name)\n )\n for f in dataclasses.fields(self)\n }\n return model(**instance_kwargs)\n\n if not has_custom_from_pydantic:\n cls.from_pydantic = staticmethod(from_pydantic_default)\n if not has_custom_to_pydantic:\n cls.to_pydantic = to_pydantic_default\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}]}
3,278
235
gh_patches_debug_25281
rasdani/github-patches
git_diff
crytic__slither-1470
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `naming-convention` does not flag `I` or `O` variable ### Describe the issue: Slither has a detection for usage of * l - Lowercase letter el * O - Uppercase letter oh * I - Uppercase letter eye https://docs.soliditylang.org/en/v0.8.17/style-guide.html#names-to-avoid However this detection fails on O and I, as the code considers them "uppercase with underscores" even though there are no underscores. Relevant code: * https://github.com/crytic/slither/blob/fba37f2c0c8196079719432d6324e42a1a974399/slither/detectors/naming_convention/naming_convention.py#L52-L54 (will match just uppercase letters as well) * https://github.com/crytic/slither/blob/fba37f2c0c8196079719432d6324e42a1a974399/slither/detectors/naming_convention/naming_convention.py#L120-L137 (detection code) ### Code example to reproduce the issue: ```solidity // SPDX-License-Identifier: UNLICENSED pragma solidity ^0.8.0; contract C { bool constant l = false; bool constant O = false; bool constant I = false; function f() public pure {} } ``` ### Version: 0.9.0 ### Relevant log output: _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/detectors/naming_convention/naming_convention.py` Content: ``` 1 import re 2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification 3 from slither.formatters.naming_convention.naming_convention import custom_format 4 5 6 class NamingConvention(AbstractDetector): 7 """ 8 Check if naming conventions are followed 9 https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions 10 11 Exceptions: 12 - Allow constant variables name/symbol/decimals to be lowercase (ERC20) 13 - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters 14 - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_' 15 """ 16 17 ARGUMENT = "naming-convention" 18 HELP = "Conformity to Solidity naming conventions" 19 IMPACT = DetectorClassification.INFORMATIONAL 20 CONFIDENCE = DetectorClassification.HIGH 21 22 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions" 23 24 WIKI_TITLE = "Conformance to Solidity naming conventions" 25 26 # region wiki_description 27 WIKI_DESCRIPTION = """ 28 Solidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed. 29 #### Rule exceptions 30 - Allow constant variable name/symbol/decimals to be lowercase (`ERC20`). 31 - Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.""" 32 # endregion wiki_description 33 34 WIKI_RECOMMENDATION = "Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions)." 
35 36 STANDARD_JSON = False 37 38 @staticmethod 39 def is_cap_words(name): 40 return re.search("^[A-Z]([A-Za-z0-9]+)?_?$", name) is not None 41 42 @staticmethod 43 def is_mixed_case(name): 44 return re.search("^[a-z]([A-Za-z0-9]+)?_?$", name) is not None 45 46 @staticmethod 47 def is_mixed_case_with_underscore(name): 48 # Allow _ at the beginning to represent private variable 49 # or unused parameters 50 return re.search("^[_]?[a-z]([A-Za-z0-9]+)?_?$", name) is not None 51 52 @staticmethod 53 def is_upper_case_with_underscores(name): 54 return re.search("^[A-Z0-9_]+_?$", name) is not None 55 56 @staticmethod 57 def should_avoid_name(name): 58 return re.search("^[lOI]$", name) is not None 59 60 def _detect(self): # pylint: disable=too-many-branches,too-many-statements 61 62 results = [] 63 for contract in self.contracts: 64 65 if not self.is_cap_words(contract.name): 66 info = ["Contract ", contract, " is not in CapWords\n"] 67 68 res = self.generate_result(info) 69 res.add(contract, {"target": "contract", "convention": "CapWords"}) 70 results.append(res) 71 72 for struct in contract.structures_declared: 73 if not self.is_cap_words(struct.name): 74 info = ["Struct ", struct, " is not in CapWords\n"] 75 76 res = self.generate_result(info) 77 res.add(struct, {"target": "structure", "convention": "CapWords"}) 78 results.append(res) 79 80 for event in contract.events_declared: 81 if not self.is_cap_words(event.name): 82 info = ["Event ", event, " is not in CapWords\n"] 83 84 res = self.generate_result(info) 85 res.add(event, {"target": "event", "convention": "CapWords"}) 86 results.append(res) 87 88 for func in contract.functions_declared: 89 if func.is_constructor: 90 continue 91 if not self.is_mixed_case(func.name): 92 if func.visibility in [ 93 "internal", 94 "private", 95 ] and self.is_mixed_case_with_underscore(func.name): 96 continue 97 if func.name.startswith(("echidna_", "crytic_")): 98 continue 99 info = ["Function ", func, " is not in mixedCase\n"] 100 101 res = self.generate_result(info) 102 res.add(func, {"target": "function", "convention": "mixedCase"}) 103 results.append(res) 104 105 for argument in func.parameters: 106 # Ignore parameter names that are not specified i.e. 
empty strings 107 if argument.name == "": 108 continue 109 if argument in func.variables_read_or_written: 110 correct_naming = self.is_mixed_case(argument.name) 111 else: 112 correct_naming = self.is_mixed_case_with_underscore(argument.name) 113 if not correct_naming: 114 info = ["Parameter ", argument, " is not in mixedCase\n"] 115 116 res = self.generate_result(info) 117 res.add(argument, {"target": "parameter", "convention": "mixedCase"}) 118 results.append(res) 119 120 for var in contract.state_variables_declared: 121 if self.should_avoid_name(var.name): 122 if not self.is_upper_case_with_underscores(var.name): 123 info = [ 124 "Variable ", 125 var, 126 " used l, O, I, which should not be used\n", 127 ] 128 129 res = self.generate_result(info) 130 res.add( 131 var, 132 { 133 "target": "variable", 134 "convention": "l_O_I_should_not_be_used", 135 }, 136 ) 137 results.append(res) 138 139 if var.is_constant is True: 140 # For ERC20 compatibility 141 if var.name in ["symbol", "name", "decimals"]: 142 continue 143 144 if not self.is_upper_case_with_underscores(var.name): 145 info = [ 146 "Constant ", 147 var, 148 " is not in UPPER_CASE_WITH_UNDERSCORES\n", 149 ] 150 151 res = self.generate_result(info) 152 res.add( 153 var, 154 { 155 "target": "variable_constant", 156 "convention": "UPPER_CASE_WITH_UNDERSCORES", 157 }, 158 ) 159 results.append(res) 160 161 else: 162 if var.visibility == "private": 163 correct_naming = self.is_mixed_case_with_underscore(var.name) 164 else: 165 correct_naming = self.is_mixed_case(var.name) 166 if not correct_naming: 167 info = ["Variable ", var, " is not in mixedCase\n"] 168 169 res = self.generate_result(info) 170 res.add(var, {"target": "variable", "convention": "mixedCase"}) 171 results.append(res) 172 173 for enum in contract.enums_declared: 174 if not self.is_cap_words(enum.name): 175 info = ["Enum ", enum, " is not in CapWords\n"] 176 177 res = self.generate_result(info) 178 res.add(enum, {"target": "enum", "convention": "CapWords"}) 179 results.append(res) 180 181 for modifier in contract.modifiers_declared: 182 if not self.is_mixed_case(modifier.name): 183 info = ["Modifier ", modifier, " is not in mixedCase\n"] 184 185 res = self.generate_result(info) 186 res.add(modifier, {"target": "modifier", "convention": "mixedCase"}) 187 results.append(res) 188 189 return results 190 191 @staticmethod 192 def _format(slither, result): 193 custom_format(slither, result) 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py --- a/slither/detectors/naming_convention/naming_convention.py +++ b/slither/detectors/naming_convention/naming_convention.py @@ -119,22 +119,21 @@ for var in contract.state_variables_declared: if self.should_avoid_name(var.name): - if not self.is_upper_case_with_underscores(var.name): - info = [ - "Variable ", - var, - " used l, O, I, which should not be used\n", - ] + info = [ + "Variable ", + var, + " is single letter l, O, or I, which should not be used\n", + ] - res = self.generate_result(info) - res.add( - var, - { - "target": "variable", - "convention": "l_O_I_should_not_be_used", - }, - ) - results.append(res) + res = self.generate_result(info) + res.add( + var, + { + "target": "variable", + "convention": "l_O_I_should_not_be_used", + }, + ) + results.append(res) if var.is_constant is True: # For ERC20 compatibility
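To see why the unpatched detector stays silent on `O` and `I`: its `is_upper_case_with_underscores` guard accepts any run of uppercase letters, digits, and underscores, including a bare uppercase letter, so only lowercase `l` ever fails the guard and gets reported. A minimal standalone check using plain Python `re` and the two regexes copied from the detector:

```python
import re

# Same patterns as the detector: the "avoid" check and the inner guard that
# suppresses the warning for anything that looks UPPER_CASE_WITH_UNDERSCORES.
AVOID = re.compile(r"^[lOI]$")
UPPER_WITH_UNDERSCORES = re.compile(r"^[A-Z0-9_]+_?$")

for name in ("l", "O", "I"):
    flagged = bool(AVOID.match(name)) and not UPPER_WITH_UNDERSCORES.match(name)
    print(name, "reported" if flagged else "silently accepted")
# Output: l reported / O silently accepted / I silently accepted
```

The golden diff above therefore drops the inner guard entirely, so a single-letter `l`, `O`, or `I` is reported regardless of case style.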
{"golden_diff": "diff --git a/slither/detectors/naming_convention/naming_convention.py b/slither/detectors/naming_convention/naming_convention.py\n--- a/slither/detectors/naming_convention/naming_convention.py\n+++ b/slither/detectors/naming_convention/naming_convention.py\n@@ -119,22 +119,21 @@\n \n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n- if not self.is_upper_case_with_underscores(var.name):\n- info = [\n- \"Variable \",\n- var,\n- \" used l, O, I, which should not be used\\n\",\n- ]\n+ info = [\n+ \"Variable \",\n+ var,\n+ \" is single letter l, O, or I, which should not be used\\n\",\n+ ]\n \n- res = self.generate_result(info)\n- res.add(\n- var,\n- {\n- \"target\": \"variable\",\n- \"convention\": \"l_O_I_should_not_be_used\",\n- },\n- )\n- results.append(res)\n+ res = self.generate_result(info)\n+ res.add(\n+ var,\n+ {\n+ \"target\": \"variable\",\n+ \"convention\": \"l_O_I_should_not_be_used\",\n+ },\n+ )\n+ results.append(res)\n \n if var.is_constant is True:\n # For ERC20 compatibility\n", "issue": "`naming-convention` does not flag `I` or `O` variable\n### Describe the issue:\n\nSlither has a detection for usage of\r\n\r\n* l - Lowercase letter el\r\n* O - Uppercase letter oh\r\n* I - Uppercase letter eye\r\n\r\nhttps://docs.soliditylang.org/en/v0.8.17/style-guide.html#names-to-avoid\r\n\r\nHowever this detection fails on O and I, as the code considers them \"uppercase with underscores\" even though there are no underscores.\r\n\r\nRelevant code:\r\n* https://github.com/crytic/slither/blob/fba37f2c0c8196079719432d6324e42a1a974399/slither/detectors/naming_convention/naming_convention.py#L52-L54 (will match just uppercase letters as well)\r\n* https://github.com/crytic/slither/blob/fba37f2c0c8196079719432d6324e42a1a974399/slither/detectors/naming_convention/naming_convention.py#L120-L137 (detection code)\n\n### Code example to reproduce the issue:\n\n```solidity\r\n// SPDX-License-Identifier: UNLICENSED\r\npragma solidity ^0.8.0;\r\n\r\ncontract C {\r\n bool constant l = false;\r\n bool constant O = false;\r\n bool constant I = false;\r\n function f() public pure {}\r\n}\r\n```\n\n### Version:\n\n0.9.0\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.naming_convention.naming_convention import custom_format\n\n\nclass NamingConvention(AbstractDetector):\n \"\"\"\n Check if naming conventions are followed\n https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions\n\n Exceptions:\n - Allow constant variables name/symbol/decimals to be lowercase (ERC20)\n - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters\n - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'\n \"\"\"\n\n ARGUMENT = \"naming-convention\"\n HELP = \"Conformity to Solidity naming conventions\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions\"\n\n WIKI_TITLE = \"Conformance to Solidity naming conventions\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"\nSolidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.\n#### Rule 
exceptions\n- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).\n- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.\"\"\"\n # endregion wiki_description\n\n WIKI_RECOMMENDATION = \"Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions).\"\n\n STANDARD_JSON = False\n\n @staticmethod\n def is_cap_words(name):\n return re.search(\"^[A-Z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case(name):\n return re.search(\"^[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case_with_underscore(name):\n # Allow _ at the beginning to represent private variable\n # or unused parameters\n return re.search(\"^[_]?[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_upper_case_with_underscores(name):\n return re.search(\"^[A-Z0-9_]+_?$\", name) is not None\n\n @staticmethod\n def should_avoid_name(name):\n return re.search(\"^[lOI]$\", name) is not None\n\n def _detect(self): # pylint: disable=too-many-branches,too-many-statements\n\n results = []\n for contract in self.contracts:\n\n if not self.is_cap_words(contract.name):\n info = [\"Contract \", contract, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(contract, {\"target\": \"contract\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for struct in contract.structures_declared:\n if not self.is_cap_words(struct.name):\n info = [\"Struct \", struct, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(struct, {\"target\": \"structure\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for event in contract.events_declared:\n if not self.is_cap_words(event.name):\n info = [\"Event \", event, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(event, {\"target\": \"event\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for func in contract.functions_declared:\n if func.is_constructor:\n continue\n if not self.is_mixed_case(func.name):\n if func.visibility in [\n \"internal\",\n \"private\",\n ] and self.is_mixed_case_with_underscore(func.name):\n continue\n if func.name.startswith((\"echidna_\", \"crytic_\")):\n continue\n info = [\"Function \", func, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(func, {\"target\": \"function\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for argument in func.parameters:\n # Ignore parameter names that are not specified i.e. 
empty strings\n if argument.name == \"\":\n continue\n if argument in func.variables_read_or_written:\n correct_naming = self.is_mixed_case(argument.name)\n else:\n correct_naming = self.is_mixed_case_with_underscore(argument.name)\n if not correct_naming:\n info = [\"Parameter \", argument, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(argument, {\"target\": \"parameter\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Variable \",\n var,\n \" used l, O, I, which should not be used\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable\",\n \"convention\": \"l_O_I_should_not_be_used\",\n },\n )\n results.append(res)\n\n if var.is_constant is True:\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n var,\n \" is not in UPPER_CASE_WITH_UNDERSCORES\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable_constant\",\n \"convention\": \"UPPER_CASE_WITH_UNDERSCORES\",\n },\n )\n results.append(res)\n\n else:\n if var.visibility == \"private\":\n correct_naming = self.is_mixed_case_with_underscore(var.name)\n else:\n correct_naming = self.is_mixed_case(var.name)\n if not correct_naming:\n info = [\"Variable \", var, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(var, {\"target\": \"variable\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for enum in contract.enums_declared:\n if not self.is_cap_words(enum.name):\n info = [\"Enum \", enum, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(enum, {\"target\": \"enum\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for modifier in contract.modifiers_declared:\n if not self.is_mixed_case(modifier.name):\n info = [\"Modifier \", modifier, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(modifier, {\"target\": \"modifier\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n custom_format(slither, result)\n", "path": "slither/detectors/naming_convention/naming_convention.py"}], "after_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.naming_convention.naming_convention import custom_format\n\n\nclass NamingConvention(AbstractDetector):\n \"\"\"\n Check if naming conventions are followed\n https://solidity.readthedocs.io/en/v0.4.25/style-guide.html?highlight=naming_convention%20convention#naming_convention-conventions\n\n Exceptions:\n - Allow constant variables name/symbol/decimals to be lowercase (ERC20)\n - Allow '_' at the beggining of the mixed_case match for private variables and unused parameters\n - Ignore echidna properties (functions with names starting 'echidna_' or 'crytic_'\n \"\"\"\n\n ARGUMENT = \"naming-convention\"\n HELP = \"Conformity to Solidity naming conventions\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#conformance-to-solidity-naming-conventions\"\n\n WIKI_TITLE = \"Conformance to Solidity naming conventions\"\n\n # region wiki_description\n 
WIKI_DESCRIPTION = \"\"\"\nSolidity defines a [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions) that should be followed.\n#### Rule exceptions\n- Allow constant variable name/symbol/decimals to be lowercase (`ERC20`).\n- Allow `_` at the beginning of the `mixed_case` match for private variables and unused parameters.\"\"\"\n # endregion wiki_description\n\n WIKI_RECOMMENDATION = \"Follow the Solidity [naming convention](https://solidity.readthedocs.io/en/v0.4.25/style-guide.html#naming-conventions).\"\n\n STANDARD_JSON = False\n\n @staticmethod\n def is_cap_words(name):\n return re.search(\"^[A-Z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case(name):\n return re.search(\"^[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_mixed_case_with_underscore(name):\n # Allow _ at the beginning to represent private variable\n # or unused parameters\n return re.search(\"^[_]?[a-z]([A-Za-z0-9]+)?_?$\", name) is not None\n\n @staticmethod\n def is_upper_case_with_underscores(name):\n return re.search(\"^[A-Z0-9_]+_?$\", name) is not None\n\n @staticmethod\n def should_avoid_name(name):\n return re.search(\"^[lOI]$\", name) is not None\n\n def _detect(self): # pylint: disable=too-many-branches,too-many-statements\n\n results = []\n for contract in self.contracts:\n\n if not self.is_cap_words(contract.name):\n info = [\"Contract \", contract, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(contract, {\"target\": \"contract\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for struct in contract.structures_declared:\n if not self.is_cap_words(struct.name):\n info = [\"Struct \", struct, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(struct, {\"target\": \"structure\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for event in contract.events_declared:\n if not self.is_cap_words(event.name):\n info = [\"Event \", event, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(event, {\"target\": \"event\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for func in contract.functions_declared:\n if func.is_constructor:\n continue\n if not self.is_mixed_case(func.name):\n if func.visibility in [\n \"internal\",\n \"private\",\n ] and self.is_mixed_case_with_underscore(func.name):\n continue\n if func.name.startswith((\"echidna_\", \"crytic_\")):\n continue\n info = [\"Function \", func, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(func, {\"target\": \"function\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for argument in func.parameters:\n # Ignore parameter names that are not specified i.e. 
empty strings\n if argument.name == \"\":\n continue\n if argument in func.variables_read_or_written:\n correct_naming = self.is_mixed_case(argument.name)\n else:\n correct_naming = self.is_mixed_case_with_underscore(argument.name)\n if not correct_naming:\n info = [\"Parameter \", argument, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(argument, {\"target\": \"parameter\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for var in contract.state_variables_declared:\n if self.should_avoid_name(var.name):\n info = [\n \"Variable \",\n var,\n \" is single letter l, O, or I, which should not be used\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable\",\n \"convention\": \"l_O_I_should_not_be_used\",\n },\n )\n results.append(res)\n\n if var.is_constant is True:\n # For ERC20 compatibility\n if var.name in [\"symbol\", \"name\", \"decimals\"]:\n continue\n\n if not self.is_upper_case_with_underscores(var.name):\n info = [\n \"Constant \",\n var,\n \" is not in UPPER_CASE_WITH_UNDERSCORES\\n\",\n ]\n\n res = self.generate_result(info)\n res.add(\n var,\n {\n \"target\": \"variable_constant\",\n \"convention\": \"UPPER_CASE_WITH_UNDERSCORES\",\n },\n )\n results.append(res)\n\n else:\n if var.visibility == \"private\":\n correct_naming = self.is_mixed_case_with_underscore(var.name)\n else:\n correct_naming = self.is_mixed_case(var.name)\n if not correct_naming:\n info = [\"Variable \", var, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(var, {\"target\": \"variable\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n for enum in contract.enums_declared:\n if not self.is_cap_words(enum.name):\n info = [\"Enum \", enum, \" is not in CapWords\\n\"]\n\n res = self.generate_result(info)\n res.add(enum, {\"target\": \"enum\", \"convention\": \"CapWords\"})\n results.append(res)\n\n for modifier in contract.modifiers_declared:\n if not self.is_mixed_case(modifier.name):\n info = [\"Modifier \", modifier, \" is not in mixedCase\\n\"]\n\n res = self.generate_result(info)\n res.add(modifier, {\"target\": \"modifier\", \"convention\": \"mixedCase\"})\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n custom_format(slither, result)\n", "path": "slither/detectors/naming_convention/naming_convention.py"}]}
2,714
321
gh_patches_debug_35598
rasdani/github-patches
git_diff
beetbox__beets-2917
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- mbsync: Tolerate MusicBrainz recording ID changes mbsync uses the musicbrainz recording id to match local track information against a certain track in a musicbrainz release. However, there is no fallback matching defined, so once the recording id changes in musicbrainz the local track can no longer be synced unless the recording id is changed manually. But this behavior is also not very visible (I stumbled upon it by accident), so most users would not even be aware that the recording id is outdated. mbsync: Tolerate MusicBrainz recording ID changes mbsync uses the musicbrainz recording id to match local track information against a certain track in a musicbrainz release. However, there is no fallback matching defined, so once the recording id changes in musicbrainz the local track can no longer be synced unless the recording id is changed manually. But this behavior is also not very visible (I stumbled upon it by accident), so most users would not even be aware that the recording id is outdated. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `beetsplug/mbsync.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This file is part of beets. 3 # Copyright 2016, Jakob Schnitzer. 4 # 5 # Permission is hereby granted, free of charge, to any person obtaining 6 # a copy of this software and associated documentation files (the 7 # "Software"), to deal in the Software without restriction, including 8 # without limitation the rights to use, copy, modify, merge, publish, 9 # distribute, sublicense, and/or sell copies of the Software, and to 10 # permit persons to whom the Software is furnished to do so, subject to 11 # the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be 14 # included in all copies or substantial portions of the Software. 15 16 """Update library's tags using MusicBrainz. 17 """ 18 from __future__ import division, absolute_import, print_function 19 20 from beets.plugins import BeetsPlugin 21 from beets import autotag, library, ui, util 22 from beets.autotag import hooks 23 from collections import defaultdict 24 25 26 def apply_item_changes(lib, item, move, pretend, write): 27 """Store, move and write the item according to the arguments. 28 """ 29 if not pretend: 30 # Move the item if it's in the library. 31 if move and lib.directory in util.ancestry(item.path): 32 item.move(with_album=False) 33 34 if write: 35 item.try_write() 36 item.store() 37 38 39 class MBSyncPlugin(BeetsPlugin): 40 def __init__(self): 41 super(MBSyncPlugin, self).__init__() 42 43 def commands(self): 44 cmd = ui.Subcommand('mbsync', 45 help=u'update metadata from musicbrainz') 46 cmd.parser.add_option( 47 u'-p', u'--pretend', action='store_true', 48 help=u'show all changes but do nothing') 49 cmd.parser.add_option( 50 u'-m', u'--move', action='store_true', dest='move', 51 help=u"move files in the library directory") 52 cmd.parser.add_option( 53 u'-M', u'--nomove', action='store_false', dest='move', 54 help=u"don't move files in library") 55 cmd.parser.add_option( 56 u'-W', u'--nowrite', action='store_false', 57 default=None, dest='write', 58 help=u"don't write updated metadata to files") 59 cmd.parser.add_format_option() 60 cmd.func = self.func 61 return [cmd] 62 63 def func(self, lib, opts, args): 64 """Command handler for the mbsync function. 
65 """ 66 move = ui.should_move(opts.move) 67 pretend = opts.pretend 68 write = ui.should_write(opts.write) 69 query = ui.decargs(args) 70 71 self.singletons(lib, query, move, pretend, write) 72 self.albums(lib, query, move, pretend, write) 73 74 def singletons(self, lib, query, move, pretend, write): 75 """Retrieve and apply info from the autotagger for items matched by 76 query. 77 """ 78 for item in lib.items(query + [u'singleton:true']): 79 item_formatted = format(item) 80 if not item.mb_trackid: 81 self._log.info(u'Skipping singleton with no mb_trackid: {0}', 82 item_formatted) 83 continue 84 85 # Get the MusicBrainz recording info. 86 track_info = hooks.track_for_mbid(item.mb_trackid) 87 if not track_info: 88 self._log.info(u'Recording ID not found: {0} for track {0}', 89 item.mb_trackid, 90 item_formatted) 91 continue 92 93 # Apply. 94 with lib.transaction(): 95 autotag.apply_item_metadata(item, track_info) 96 apply_item_changes(lib, item, move, pretend, write) 97 98 def albums(self, lib, query, move, pretend, write): 99 """Retrieve and apply info from the autotagger for albums matched by 100 query and their items. 101 """ 102 # Process matching albums. 103 for a in lib.albums(query): 104 album_formatted = format(a) 105 if not a.mb_albumid: 106 self._log.info(u'Skipping album with no mb_albumid: {0}', 107 album_formatted) 108 continue 109 110 items = list(a.items()) 111 112 # Get the MusicBrainz album information. 113 album_info = hooks.album_for_mbid(a.mb_albumid) 114 if not album_info: 115 self._log.info(u'Release ID {0} not found for album {1}', 116 a.mb_albumid, 117 album_formatted) 118 continue 119 120 # Map recording MBIDs to their information. Recordings can appear 121 # multiple times on a release, so each MBID maps to a list of 122 # TrackInfo objects. 123 track_index = defaultdict(list) 124 for track_info in album_info.tracks: 125 track_index[track_info.track_id].append(track_info) 126 127 # Construct a track mapping according to MBIDs. This should work 128 # for albums that have missing or extra tracks. If there are 129 # multiple copies of a recording, they are disambiguated using 130 # their disc and track number. 131 mapping = {} 132 for item in items: 133 candidates = track_index[item.mb_trackid] 134 if len(candidates) == 1: 135 mapping[item] = candidates[0] 136 else: 137 for c in candidates: 138 if (c.medium_index == item.track and 139 c.medium == item.disc): 140 mapping[item] = c 141 break 142 143 # Apply. 144 self._log.debug(u'applying changes to {}', album_formatted) 145 with lib.transaction(): 146 autotag.apply_metadata(album_info, mapping) 147 changed = False 148 for item in items: 149 item_changed = ui.show_model_changes(item) 150 changed |= item_changed 151 if item_changed: 152 apply_item_changes(lib, item, move, pretend, write) 153 154 if not changed: 155 # No change to any item. 156 continue 157 158 if not pretend: 159 # Update album structure to reflect an item in it. 160 for key in library.Album.item_keys: 161 a[key] = items[0][key] 162 a.store() 163 164 # Move album art (and any inconsistent items). 165 if move and lib.directory in util.ancestry(items[0].path): 166 self._log.debug(u'moving album {0}', album_formatted) 167 a.move() 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/beetsplug/mbsync.py b/beetsplug/mbsync.py --- a/beetsplug/mbsync.py +++ b/beetsplug/mbsync.py @@ -117,28 +117,35 @@ album_formatted) continue - # Map recording MBIDs to their information. Recordings can appear - # multiple times on a release, so each MBID maps to a list of - # TrackInfo objects. + # Map release track and recording MBIDs to their information. + # Recordings can appear multiple times on a release, so each MBID + # maps to a list of TrackInfo objects. + releasetrack_index = dict() track_index = defaultdict(list) for track_info in album_info.tracks: + releasetrack_index[track_info.release_track_id] = track_info track_index[track_info.track_id].append(track_info) - # Construct a track mapping according to MBIDs. This should work - # for albums that have missing or extra tracks. If there are - # multiple copies of a recording, they are disambiguated using - # their disc and track number. + # Construct a track mapping according to MBIDs (release track MBIDs + # first, if available, and recording MBIDs otherwise). This should + # work for albums that have missing or extra tracks. mapping = {} for item in items: - candidates = track_index[item.mb_trackid] - if len(candidates) == 1: - mapping[item] = candidates[0] + if item.mb_releasetrackid and \ + item.mb_releasetrackid in releasetrack_index: + mapping[item] = releasetrack_index[item.mb_releasetrackid] else: - for c in candidates: - if (c.medium_index == item.track and - c.medium == item.disc): - mapping[item] = c - break + candidates = track_index[item.mb_trackid] + if len(candidates) == 1: + mapping[item] = candidates[0] + else: + # If there are multiple copies of a recording, they are + # disambiguated using their disc and track number. + for c in candidates: + if (c.medium_index == item.track and + c.medium == item.disc): + mapping[item] = c + break # Apply. self._log.debug(u'applying changes to {}', album_formatted)
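The core of the patch above is a two-level lookup: match on the release track MBID first, since it survives recording merges and renames, and only fall back to the recording MBID, disambiguating duplicate recordings by disc and track position. A condensed sketch of that mapping logic, where `items` and `tracks` stand in for beets `Item` objects and MusicBrainz `TrackInfo` objects and the attribute names are taken from the diff rather than tested against a live library:

```python
from collections import defaultdict


def build_mapping(items, tracks):
    # Index the release tracks by the stable release-track MBID, and keep a
    # recording-MBID index as the fallback (a recording can repeat on a release).
    by_release_track = {t.release_track_id: t for t in tracks}
    by_recording = defaultdict(list)
    for t in tracks:
        by_recording[t.track_id].append(t)

    mapping = {}
    for item in items:
        if item.mb_releasetrackid and item.mb_releasetrackid in by_release_track:
            mapping[item] = by_release_track[item.mb_releasetrackid]
            continue
        candidates = by_recording[item.mb_trackid]
        if len(candidates) == 1:
            mapping[item] = candidates[0]
        else:
            # Multiple copies of the same recording: pick by disc/track number.
            for c in candidates:
                if c.medium_index == item.track and c.medium == item.disc:
                    mapping[item] = c
                    break
    return mapping
```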
{"golden_diff": "diff --git a/beetsplug/mbsync.py b/beetsplug/mbsync.py\n--- a/beetsplug/mbsync.py\n+++ b/beetsplug/mbsync.py\n@@ -117,28 +117,35 @@\n album_formatted)\n continue\n \n- # Map recording MBIDs to their information. Recordings can appear\n- # multiple times on a release, so each MBID maps to a list of\n- # TrackInfo objects.\n+ # Map release track and recording MBIDs to their information.\n+ # Recordings can appear multiple times on a release, so each MBID\n+ # maps to a list of TrackInfo objects.\n+ releasetrack_index = dict()\n track_index = defaultdict(list)\n for track_info in album_info.tracks:\n+ releasetrack_index[track_info.release_track_id] = track_info\n track_index[track_info.track_id].append(track_info)\n \n- # Construct a track mapping according to MBIDs. This should work\n- # for albums that have missing or extra tracks. If there are\n- # multiple copies of a recording, they are disambiguated using\n- # their disc and track number.\n+ # Construct a track mapping according to MBIDs (release track MBIDs\n+ # first, if available, and recording MBIDs otherwise). This should\n+ # work for albums that have missing or extra tracks.\n mapping = {}\n for item in items:\n- candidates = track_index[item.mb_trackid]\n- if len(candidates) == 1:\n- mapping[item] = candidates[0]\n+ if item.mb_releasetrackid and \\\n+ item.mb_releasetrackid in releasetrack_index:\n+ mapping[item] = releasetrack_index[item.mb_releasetrackid]\n else:\n- for c in candidates:\n- if (c.medium_index == item.track and\n- c.medium == item.disc):\n- mapping[item] = c\n- break\n+ candidates = track_index[item.mb_trackid]\n+ if len(candidates) == 1:\n+ mapping[item] = candidates[0]\n+ else:\n+ # If there are multiple copies of a recording, they are\n+ # disambiguated using their disc and track number.\n+ for c in candidates:\n+ if (c.medium_index == item.track and\n+ c.medium == item.disc):\n+ mapping[item] = c\n+ break\n \n # Apply.\n self._log.debug(u'applying changes to {}', album_formatted)\n", "issue": "mbsync: Tolerate MusicBrainz recording ID changes\nmbsync uses the musicbrainz recording id to match local track information against a certain track in a musicbrainz release. However, there is no fallback matching defined, so once the recording id changes in musicbrainz the local track can no longer be synced unless the recording id is changed manually. But this behavior is also not very visible (I stumbled upon it by accident), so most users would not even be aware that the recording id is outdated.\n\nmbsync: Tolerate MusicBrainz recording ID changes\nmbsync uses the musicbrainz recording id to match local track information against a certain track in a musicbrainz release. However, there is no fallback matching defined, so once the recording id changes in musicbrainz the local track can no longer be synced unless the recording id is changed manually. 
But this behavior is also not very visible (I stumbled upon it by accident), so most users would not even be aware that the recording id is outdated.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Jakob Schnitzer.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Update library's tags using MusicBrainz.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import autotag, library, ui, util\nfrom beets.autotag import hooks\nfrom collections import defaultdict\n\n\ndef apply_item_changes(lib, item, move, pretend, write):\n \"\"\"Store, move and write the item according to the arguments.\n \"\"\"\n if not pretend:\n # Move the item if it's in the library.\n if move and lib.directory in util.ancestry(item.path):\n item.move(with_album=False)\n\n if write:\n item.try_write()\n item.store()\n\n\nclass MBSyncPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSyncPlugin, self).__init__()\n\n def commands(self):\n cmd = ui.Subcommand('mbsync',\n help=u'update metadata from musicbrainz')\n cmd.parser.add_option(\n u'-p', u'--pretend', action='store_true',\n help=u'show all changes but do nothing')\n cmd.parser.add_option(\n u'-m', u'--move', action='store_true', dest='move',\n help=u\"move files in the library directory\")\n cmd.parser.add_option(\n u'-M', u'--nomove', action='store_false', dest='move',\n help=u\"don't move files in library\")\n cmd.parser.add_option(\n u'-W', u'--nowrite', action='store_false',\n default=None, dest='write',\n help=u\"don't write updated metadata to files\")\n cmd.parser.add_format_option()\n cmd.func = self.func\n return [cmd]\n\n def func(self, lib, opts, args):\n \"\"\"Command handler for the mbsync function.\n \"\"\"\n move = ui.should_move(opts.move)\n pretend = opts.pretend\n write = ui.should_write(opts.write)\n query = ui.decargs(args)\n\n self.singletons(lib, query, move, pretend, write)\n self.albums(lib, query, move, pretend, write)\n\n def singletons(self, lib, query, move, pretend, write):\n \"\"\"Retrieve and apply info from the autotagger for items matched by\n query.\n \"\"\"\n for item in lib.items(query + [u'singleton:true']):\n item_formatted = format(item)\n if not item.mb_trackid:\n self._log.info(u'Skipping singleton with no mb_trackid: {0}',\n item_formatted)\n continue\n\n # Get the MusicBrainz recording info.\n track_info = hooks.track_for_mbid(item.mb_trackid)\n if not track_info:\n self._log.info(u'Recording ID not found: {0} for track {0}',\n item.mb_trackid,\n item_formatted)\n continue\n\n # Apply.\n with lib.transaction():\n autotag.apply_item_metadata(item, track_info)\n apply_item_changes(lib, item, move, pretend, write)\n\n def albums(self, lib, query, move, pretend, write):\n \"\"\"Retrieve and apply info from the autotagger for albums matched by\n query and their items.\n \"\"\"\n # Process matching albums.\n for a in lib.albums(query):\n 
album_formatted = format(a)\n if not a.mb_albumid:\n self._log.info(u'Skipping album with no mb_albumid: {0}',\n album_formatted)\n continue\n\n items = list(a.items())\n\n # Get the MusicBrainz album information.\n album_info = hooks.album_for_mbid(a.mb_albumid)\n if not album_info:\n self._log.info(u'Release ID {0} not found for album {1}',\n a.mb_albumid,\n album_formatted)\n continue\n\n # Map recording MBIDs to their information. Recordings can appear\n # multiple times on a release, so each MBID maps to a list of\n # TrackInfo objects.\n track_index = defaultdict(list)\n for track_info in album_info.tracks:\n track_index[track_info.track_id].append(track_info)\n\n # Construct a track mapping according to MBIDs. This should work\n # for albums that have missing or extra tracks. If there are\n # multiple copies of a recording, they are disambiguated using\n # their disc and track number.\n mapping = {}\n for item in items:\n candidates = track_index[item.mb_trackid]\n if len(candidates) == 1:\n mapping[item] = candidates[0]\n else:\n for c in candidates:\n if (c.medium_index == item.track and\n c.medium == item.disc):\n mapping[item] = c\n break\n\n # Apply.\n self._log.debug(u'applying changes to {}', album_formatted)\n with lib.transaction():\n autotag.apply_metadata(album_info, mapping)\n changed = False\n for item in items:\n item_changed = ui.show_model_changes(item)\n changed |= item_changed\n if item_changed:\n apply_item_changes(lib, item, move, pretend, write)\n\n if not changed:\n # No change to any item.\n continue\n\n if not pretend:\n # Update album structure to reflect an item in it.\n for key in library.Album.item_keys:\n a[key] = items[0][key]\n a.store()\n\n # Move album art (and any inconsistent items).\n if move and lib.directory in util.ancestry(items[0].path):\n self._log.debug(u'moving album {0}', album_formatted)\n a.move()\n", "path": "beetsplug/mbsync.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Jakob Schnitzer.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Update library's tags using MusicBrainz.\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import autotag, library, ui, util\nfrom beets.autotag import hooks\nfrom collections import defaultdict\n\n\ndef apply_item_changes(lib, item, move, pretend, write):\n \"\"\"Store, move and write the item according to the arguments.\n \"\"\"\n if not pretend:\n # Move the item if it's in the library.\n if move and lib.directory in util.ancestry(item.path):\n item.move(with_album=False)\n\n if write:\n item.try_write()\n item.store()\n\n\nclass MBSyncPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSyncPlugin, self).__init__()\n\n def commands(self):\n cmd = ui.Subcommand('mbsync',\n help=u'update metadata from musicbrainz')\n cmd.parser.add_option(\n u'-p', u'--pretend', action='store_true',\n help=u'show all changes but do 
nothing')\n cmd.parser.add_option(\n u'-m', u'--move', action='store_true', dest='move',\n help=u\"move files in the library directory\")\n cmd.parser.add_option(\n u'-M', u'--nomove', action='store_false', dest='move',\n help=u\"don't move files in library\")\n cmd.parser.add_option(\n u'-W', u'--nowrite', action='store_false',\n default=None, dest='write',\n help=u\"don't write updated metadata to files\")\n cmd.parser.add_format_option()\n cmd.func = self.func\n return [cmd]\n\n def func(self, lib, opts, args):\n \"\"\"Command handler for the mbsync function.\n \"\"\"\n move = ui.should_move(opts.move)\n pretend = opts.pretend\n write = ui.should_write(opts.write)\n query = ui.decargs(args)\n\n self.singletons(lib, query, move, pretend, write)\n self.albums(lib, query, move, pretend, write)\n\n def singletons(self, lib, query, move, pretend, write):\n \"\"\"Retrieve and apply info from the autotagger for items matched by\n query.\n \"\"\"\n for item in lib.items(query + [u'singleton:true']):\n item_formatted = format(item)\n if not item.mb_trackid:\n self._log.info(u'Skipping singleton with no mb_trackid: {0}',\n item_formatted)\n continue\n\n # Get the MusicBrainz recording info.\n track_info = hooks.track_for_mbid(item.mb_trackid)\n if not track_info:\n self._log.info(u'Recording ID not found: {0} for track {0}',\n item.mb_trackid,\n item_formatted)\n continue\n\n # Apply.\n with lib.transaction():\n autotag.apply_item_metadata(item, track_info)\n apply_item_changes(lib, item, move, pretend, write)\n\n def albums(self, lib, query, move, pretend, write):\n \"\"\"Retrieve and apply info from the autotagger for albums matched by\n query and their items.\n \"\"\"\n # Process matching albums.\n for a in lib.albums(query):\n album_formatted = format(a)\n if not a.mb_albumid:\n self._log.info(u'Skipping album with no mb_albumid: {0}',\n album_formatted)\n continue\n\n items = list(a.items())\n\n # Get the MusicBrainz album information.\n album_info = hooks.album_for_mbid(a.mb_albumid)\n if not album_info:\n self._log.info(u'Release ID {0} not found for album {1}',\n a.mb_albumid,\n album_formatted)\n continue\n\n # Map release track and recording MBIDs to their information.\n # Recordings can appear multiple times on a release, so each MBID\n # maps to a list of TrackInfo objects.\n releasetrack_index = dict()\n track_index = defaultdict(list)\n for track_info in album_info.tracks:\n releasetrack_index[track_info.release_track_id] = track_info\n track_index[track_info.track_id].append(track_info)\n\n # Construct a track mapping according to MBIDs (release track MBIDs\n # first, if available, and recording MBIDs otherwise). 
This should\n # work for albums that have missing or extra tracks.\n mapping = {}\n for item in items:\n if item.mb_releasetrackid and \\\n item.mb_releasetrackid in releasetrack_index:\n mapping[item] = releasetrack_index[item.mb_releasetrackid]\n else:\n candidates = track_index[item.mb_trackid]\n if len(candidates) == 1:\n mapping[item] = candidates[0]\n else:\n # If there are multiple copies of a recording, they are\n # disambiguated using their disc and track number.\n for c in candidates:\n if (c.medium_index == item.track and\n c.medium == item.disc):\n mapping[item] = c\n break\n\n # Apply.\n self._log.debug(u'applying changes to {}', album_formatted)\n with lib.transaction():\n autotag.apply_metadata(album_info, mapping)\n changed = False\n for item in items:\n item_changed = ui.show_model_changes(item)\n changed |= item_changed\n if item_changed:\n apply_item_changes(lib, item, move, pretend, write)\n\n if not changed:\n # No change to any item.\n continue\n\n if not pretend:\n # Update album structure to reflect an item in it.\n for key in library.Album.item_keys:\n a[key] = items[0][key]\n a.store()\n\n # Move album art (and any inconsistent items).\n if move and lib.directory in util.ancestry(items[0].path):\n self._log.debug(u'moving album {0}', album_formatted)\n a.move()\n", "path": "beetsplug/mbsync.py"}]}
2,230
562
gh_patches_debug_28913
rasdani/github-patches
git_diff
PrefectHQ__prefect-2534
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Behaviour differs from documentation: state.result & flow.get_tasks Prefect Version: 0.10.4 ## Current behavior Minor issue - The following behaviour doesn't match up to that described by the documentation on [result handling](https://docs.prefect.io/core/concepts/results.html#results): ```python >>> task_ref = flow.get_tasks[0] >>> state = flow.run() >>> state.result[task_ref]._result # a Task State's Result contains the Task's return value <Result: 1> ``` Instead prefect requires that the task_ref be created via a name or tag call (`flow.get_tasks(name='<name>')` instead of `flow.get_tasks[0]`) and be extracted from its list (`state.result[task_ref[0]]` instead of state.result[task_ref]): ```python >>> task_ref = flow.get_tasks(name='<name>') >>> state = flow.run() >>> state.result[task_ref[0]]._result # a Task State's Result contains the Task's return value <Result: 1> ``` and also the documentation on [Local Debugging](https://docs.prefect.io/core/advanced_tutorials/local-debugging.html#local-debugging): ```python from prefect import Flow, task @task def gotcha(): tup = ('a', ['b']) try: tup[1] += ['c'] except TypeError: assert len(tup[1]) == 1 flow = Flow(name="tuples", tasks=[gotcha]) state = flow.run() state.result # {<Task: gotcha>: Failed("Unexpected error: AssertionError()")} failed_state = state.result[gotcha] raise failed_state.result ``` ## Examples ### `flow.get_tasks`: ```python from prefect import Flow, task import pandas as pd @task(name='load data') def _load_data() -> pd.DataFrame: return pd.DataFrame({'a': [1, 2, 3]}) with Flow('preprocess-VO') as flow: _load_data() state = flow.run() task_ref = flow.get_tasks[0] ``` Returns: ```python >>> TypeError: 'method' object is not subscriptable ``` ### `state.result[task_ref]` ```python from prefect import Flow, task import pandas as pd @task(name='load data') def _load_data() -> pd.DataFrame: return pd.DataFrame({'a': [1, 2, 3]}) with Flow('preprocess-VO') as flow: _load_data() state = flow.run() task_ref = flow.get_tasks(name='load data') state.result[task_ref] ``` Returns: ```python >>> TypeError: unhashable type: 'list' ``` ### `state.result[<function name>]` ```python from prefect import Flow, task import pandas as pd @task(name='load data') def _load_data() -> pd.DataFrame: return pd.DataFrame({'a': [1, 2, 3]}) with Flow('preprocess-VO') as flow: _load_data() state = flow.run() task_ref = flow.get_tasks(name='load data') state.result[_load_data] ``` ```python KeyError: <Task: load data> ``` ## Proposed behavior It may be a bit more readable to adapt the functionality here so that Prefect behaves as in the documentation so: - `state.result[task_ref]` can be used instead of `state.result[task_ref[0]]` - `state.result[<func name>]` works - `flow.get_tasks[0]` returns a task reference Additionally, it would be great if function names could be passed to either `state.result[<func name>]` (EDIT: I see now that this was functionality did exist) or `flow.get_tasks(<func name>)` as an alternative to adding tags and/or names to each task function to avoid having to double name all functions. --- Thanks for creating and maintaining prefect, really enjoying the library so far --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/prefect/engine/results/azure_result.py` Content: ``` 1 import os 2 from typing import TYPE_CHECKING, Any 3 4 from prefect.client import Secret 5 from prefect.engine.result import Result 6 7 if TYPE_CHECKING: 8 import azure.storage.blob 9 10 11 class AzureResult(Result): 12 """ 13 Result for writing to and reading from an Azure Blob storage. 14 15 Note that your flow's runtime environment must be able to authenticate with 16 Azure; there are currently two supported options: provide a connection string 17 either at initialization or at runtime through an environment variable, or 18 set your Azure connection string as a Prefect Secret. Using an environment 19 variable is the recommended approach. 20 21 Args: 22 - container (str): the name of the container to write to / read from 23 - connection_string (str, optional): an Azure connection string for communicating with 24 Blob storage. If not provided the value set in the environment as `AZURE_STORAGE_CONNECTION_STRING` 25 will be used 26 - connection_string_secret (str, optional): the name of a Prefect Secret 27 which stores your Azure connection tring 28 """ 29 30 def __init__( 31 self, 32 container: str, 33 connection_string: str = None, 34 connection_string_secret: str = None, 35 **kwargs: Any 36 ) -> None: 37 self.container = container 38 self.connection_string = connection_string or os.getenv( 39 "AZURE_STORAGE_CONNECTION_STRING" 40 ) 41 self.connection_string_secret = connection_string_secret 42 super().__init__(**kwargs) 43 44 def initialize_service(self) -> None: 45 """ 46 Initialize a Blob service. 47 """ 48 import azure.storage.blob 49 50 connection_string = self.connection_string 51 if not connection_string and self.connection_string_secret: 52 connection_string = Secret(self.connection_string_secret).get() 53 54 self._service = azure.storage.blob.BlobServiceClient.from_connection_string( 55 conn_str=connection_string 56 ) 57 58 @property 59 def service(self) -> "azure.storage.blob.BlobServiceClient": 60 if not hasattr(self, "_service"): 61 self.initialize_service() 62 return self._service 63 64 @service.setter 65 def service(self, val: Any) -> None: 66 self._service = val 67 68 def __getstate__(self) -> dict: 69 state = self.__dict__.copy() 70 if "_service" in state: 71 del state["_service"] 72 return state 73 74 def __setstate__(self, state: dict) -> None: 75 self.__dict__.update(state) 76 77 def write(self, value: Any, **kwargs: Any) -> Result: 78 """ 79 Writes the result value to a blob storage in Azure. 80 81 Args: 82 - value (Any): the value to write; will then be stored as the `value` attribute 83 of the returned `Result` instance 84 - **kwargs (optional): if provided, will be used to format the location template 85 to determine the location to write to 86 87 Returns: 88 - Result: a new Result instance with the appropriately formatted location 89 """ 90 new = self.format(**kwargs) 91 new.value = value 92 93 self.logger.debug("Starting to upload result to {}...".format(new.location)) 94 95 ## prepare data 96 binary_data = new.serialize_to_bytes(new.value).decode() 97 98 # initialize client and upload 99 client = self.service.get_blob_client( 100 container=self.container, blob=new.location 101 ) 102 client.upload_blob(binary_data) 103 104 self.logger.debug("Finished uploading result to {}.".format(new.location)) 105 106 return new 107 108 def read(self, location: str) -> Result: 109 """ 110 Reads a result from an Azure Blob container and returns a corresponding `Result` instance. 
111 112 Args: 113 - location (str): the Azure blob location to read from 114 115 Returns: 116 - Result: the read result 117 """ 118 new = self.copy() 119 new.location = location 120 121 try: 122 self.logger.debug("Starting to download result from {}...".format(location)) 123 124 # initialize client and download 125 client = self.service.get_blob_client( 126 container=self.container, blob=location 127 ) 128 content_string = client.download_blob() 129 130 try: 131 new.value = new.deserialize_from_bytes(content_string) 132 except EOFError: 133 new.value = None 134 self.logger.debug("Finished downloading result from {}.".format(location)) 135 except Exception as exc: 136 self.logger.exception( 137 "Unexpected error while reading from result handler: {}".format( 138 repr(exc) 139 ) 140 ) 141 raise exc 142 return new 143 144 def exists(self, location: str, **kwargs: Any) -> bool: 145 """ 146 Checks whether the target result exists. 147 148 Does not validate whether the result is `valid`, only that it is present. 149 150 Args: 151 - location (str): Location of the result in the specific result target. 152 Will check whether the provided location exists 153 - **kwargs (Any): string format arguments for `location` 154 155 Returns: 156 - bool: whether or not the target result exists. 157 """ 158 from azure.core.exceptions import ResourceNotFoundError 159 160 # initialize client and download 161 client = self.service.get_blob_client( 162 container=self.container, blob=location.format(**kwargs) 163 ) 164 165 # Catch exception because Azure python bindings do not yet have an exists method 166 # https://github.com/Azure/azure-sdk-for-python/issues/9507 167 try: 168 client.get_blob_properties() 169 return True 170 except ResourceNotFoundError: 171 return False 172 ``` Path: `src/prefect/engine/results/constant_result.py` Content: ``` 1 from typing import Any 2 from prefect.engine.result import Result 3 4 5 class ConstantResult(Result): 6 """ 7 Hook for storing and retrieving constant Python objects. Only intended to be used 8 internally. The "backend" in this instance is the class instance itself. 9 10 Args: 11 - **kwargs (Any, optional): any additional `Result` initialization options 12 """ 13 14 def __init__(self, **kwargs: Any) -> None: 15 super().__init__(**kwargs) 16 17 def read(self, location: str) -> Result: 18 """ 19 Returns the underlying value regardless of the argument passed. 20 21 Args: 22 - location (str): an unused argument 23 """ 24 return self 25 26 def write(self, value: Any, **kwargs: Any) -> Result: 27 """ 28 Returns the repr of the underlying value, purely for convenience. 29 30 Args: 31 - value (Any): unused, for interface compatibility 32 - **kwargs (optional): unused, for interface compatibility 33 34 Raises: 35 ValueError: ConstantResults cannot be written to 36 """ 37 raise ValueError("Cannot write values to `ConstantResult` types.") 38 39 def exists(self, location: str, **kwargs: Any) -> bool: 40 """ 41 As all Python objects are valid constants, always returns `True`. 42 43 Args: 44 - location (str): for interface compatibility 45 - **kwargs (Any): string format arguments for `location` 46 47 Returns: 48 - bool: True, confirming the constant exists. 49 """ 50 return True 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py --- a/src/prefect/engine/results/azure_result.py +++ b/src/prefect/engine/results/azure_result.py @@ -25,6 +25,7 @@ will be used - connection_string_secret (str, optional): the name of a Prefect Secret which stores your Azure connection tring + - **kwargs (Any, optional): any additional `Result` initialization options """ def __init__( diff --git a/src/prefect/engine/results/constant_result.py b/src/prefect/engine/results/constant_result.py --- a/src/prefect/engine/results/constant_result.py +++ b/src/prefect/engine/results/constant_result.py @@ -16,7 +16,7 @@ def read(self, location: str) -> Result: """ - Returns the underlying value regardless of the argument passed. + Will return the underlying value regardless of the argument passed. Args: - location (str): an unused argument @@ -25,14 +25,14 @@ def write(self, value: Any, **kwargs: Any) -> Result: """ - Returns the repr of the underlying value, purely for convenience. + Will return the repr of the underlying value, purely for convenience. Args: - value (Any): unused, for interface compatibility - **kwargs (optional): unused, for interface compatibility Raises: - ValueError: ConstantResults cannot be written to + - ValueError: ConstantResults cannot be written to """ raise ValueError("Cannot write values to `ConstantResult` types.")
{"golden_diff": "diff --git a/src/prefect/engine/results/azure_result.py b/src/prefect/engine/results/azure_result.py\n--- a/src/prefect/engine/results/azure_result.py\n+++ b/src/prefect/engine/results/azure_result.py\n@@ -25,6 +25,7 @@\n will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n+ - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n \n def __init__(\ndiff --git a/src/prefect/engine/results/constant_result.py b/src/prefect/engine/results/constant_result.py\n--- a/src/prefect/engine/results/constant_result.py\n+++ b/src/prefect/engine/results/constant_result.py\n@@ -16,7 +16,7 @@\n \n def read(self, location: str) -> Result:\n \"\"\"\n- Returns the underlying value regardless of the argument passed.\n+ Will return the underlying value regardless of the argument passed.\n \n Args:\n - location (str): an unused argument\n@@ -25,14 +25,14 @@\n \n def write(self, value: Any, **kwargs: Any) -> Result:\n \"\"\"\n- Returns the repr of the underlying value, purely for convenience.\n+ Will return the repr of the underlying value, purely for convenience.\n \n Args:\n - value (Any): unused, for interface compatibility\n - **kwargs (optional): unused, for interface compatibility\n \n Raises:\n- ValueError: ConstantResults cannot be written to\n+ - ValueError: ConstantResults cannot be written to\n \"\"\"\n raise ValueError(\"Cannot write values to `ConstantResult` types.\")\n", "issue": "Behaviour differs from documentation: state.result & flow.get_tasks \nPrefect Version: 0.10.4\r\n\r\n## Current behavior\r\nMinor issue - The following behaviour doesn't match up to that described by the documentation on [result handling](https://docs.prefect.io/core/concepts/results.html#results):\r\n\r\n```python\r\n>>> task_ref = flow.get_tasks[0]\r\n>>> state = flow.run()\r\n>>> state.result[task_ref]._result # a Task State's Result contains the Task's return value\r\n<Result: 1>\r\n```\r\n\r\nInstead prefect requires that the task_ref be created via a name or tag call (`flow.get_tasks(name='<name>')` instead of `flow.get_tasks[0]`) and be extracted from its list (`state.result[task_ref[0]]` instead of state.result[task_ref]):\r\n\r\n```python\r\n>>> task_ref = flow.get_tasks(name='<name>')\r\n>>> state = flow.run()\r\n>>> state.result[task_ref[0]]._result # a Task State's Result contains the Task's return value\r\n<Result: 1>\r\n```\r\n\r\nand also the documentation on [Local Debugging](https://docs.prefect.io/core/advanced_tutorials/local-debugging.html#local-debugging):\r\n\r\n```python\r\nfrom prefect import Flow, task\r\n\r\n@task\r\ndef gotcha():\r\n tup = ('a', ['b'])\r\n try:\r\n tup[1] += ['c']\r\n except TypeError:\r\n assert len(tup[1]) == 1\r\n\r\nflow = Flow(name=\"tuples\", tasks=[gotcha])\r\n\r\nstate = flow.run()\r\nstate.result # {<Task: gotcha>: Failed(\"Unexpected error: AssertionError()\")}\r\n\r\nfailed_state = state.result[gotcha]\r\nraise failed_state.result\r\n```\r\n\r\n## Examples\r\n\r\n### `flow.get_tasks`:\r\n\r\n```python\r\nfrom prefect import Flow, task\r\nimport pandas as pd\r\n\r\n@task(name='load data')\r\ndef _load_data() -> pd.DataFrame:\r\n return pd.DataFrame({'a': [1, 2, 3]})\r\n\r\n\r\nwith Flow('preprocess-VO') as flow:\r\n _load_data()\r\n\r\nstate = flow.run()\r\ntask_ref = flow.get_tasks[0]\r\n```\r\n\r\nReturns:\r\n```python\r\n>>> TypeError: 'method' object is not subscriptable\r\n```\r\n\r\n### `state.result[task_ref]`\r\n\r\n```python\r\nfrom 
prefect import Flow, task\r\nimport pandas as pd\r\n\r\n@task(name='load data')\r\ndef _load_data() -> pd.DataFrame:\r\n return pd.DataFrame({'a': [1, 2, 3]})\r\n\r\n\r\nwith Flow('preprocess-VO') as flow:\r\n _load_data()\r\n\r\nstate = flow.run()\r\ntask_ref = flow.get_tasks(name='load data')\r\nstate.result[task_ref]\r\n```\r\n\r\nReturns:\r\n```python\r\n>>> TypeError: unhashable type: 'list'\r\n```\r\n\r\n### `state.result[<function name>]`\r\n\r\n```python\r\nfrom prefect import Flow, task\r\nimport pandas as pd\r\n\r\n@task(name='load data')\r\ndef _load_data() -> pd.DataFrame:\r\n return pd.DataFrame({'a': [1, 2, 3]})\r\n\r\n\r\nwith Flow('preprocess-VO') as flow:\r\n _load_data()\r\n\r\nstate = flow.run()\r\ntask_ref = flow.get_tasks(name='load data')\r\nstate.result[_load_data]\r\n```\r\n\r\n```python\r\nKeyError: <Task: load data>\r\n```\r\n\r\n## Proposed behavior\r\nIt may be a bit more readable to adapt the functionality here so that Prefect behaves as in the documentation so:\r\n- `state.result[task_ref]` can be used instead of `state.result[task_ref[0]]` \r\n- `state.result[<func name>]` works\r\n- `flow.get_tasks[0]` returns a task reference\r\n\r\nAdditionally, it would be great if function names could be passed to either `state.result[<func name>]` (EDIT: I see now that this was functionality did exist) or `flow.get_tasks(<func name>)` as an alternative to adding tags and/or names to each task function to avoid having to double name all functions.\r\n\r\n---\r\n\r\nThanks for creating and maintaining prefect, really enjoying the library so far \r\n\n", "before_files": [{"content": "import os\nfrom typing import TYPE_CHECKING, Any\n\nfrom prefect.client import Secret\nfrom prefect.engine.result import Result\n\nif TYPE_CHECKING:\n import azure.storage.blob\n\n\nclass AzureResult(Result):\n \"\"\"\n Result for writing to and reading from an Azure Blob storage.\n\n Note that your flow's runtime environment must be able to authenticate with\n Azure; there are currently two supported options: provide a connection string\n either at initialization or at runtime through an environment variable, or\n set your Azure connection string as a Prefect Secret. Using an environment\n variable is the recommended approach.\n\n Args:\n - container (str): the name of the container to write to / read from\n - connection_string (str, optional): an Azure connection string for communicating with\n Blob storage. 
If not provided the value set in the environment as `AZURE_STORAGE_CONNECTION_STRING`\n will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n \"\"\"\n\n def __init__(\n self,\n container: str,\n connection_string: str = None,\n connection_string_secret: str = None,\n **kwargs: Any\n ) -> None:\n self.container = container\n self.connection_string = connection_string or os.getenv(\n \"AZURE_STORAGE_CONNECTION_STRING\"\n )\n self.connection_string_secret = connection_string_secret\n super().__init__(**kwargs)\n\n def initialize_service(self) -> None:\n \"\"\"\n Initialize a Blob service.\n \"\"\"\n import azure.storage.blob\n\n connection_string = self.connection_string\n if not connection_string and self.connection_string_secret:\n connection_string = Secret(self.connection_string_secret).get()\n\n self._service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=connection_string\n )\n\n @property\n def service(self) -> \"azure.storage.blob.BlobServiceClient\":\n if not hasattr(self, \"_service\"):\n self.initialize_service()\n return self._service\n\n @service.setter\n def service(self, val: Any) -> None:\n self._service = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_service\" in state:\n del state[\"_service\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, value: Any, **kwargs: Any) -> Result:\n \"\"\"\n Writes the result value to a blob storage in Azure.\n\n Args:\n - value (Any): the value to write; will then be stored as the `value` attribute\n of the returned `Result` instance\n - **kwargs (optional): if provided, will be used to format the location template\n to determine the location to write to\n\n Returns:\n - Result: a new Result instance with the appropriately formatted location\n \"\"\"\n new = self.format(**kwargs)\n new.value = value\n\n self.logger.debug(\"Starting to upload result to {}...\".format(new.location))\n\n ## prepare data\n binary_data = new.serialize_to_bytes(new.value).decode()\n\n # initialize client and upload\n client = self.service.get_blob_client(\n container=self.container, blob=new.location\n )\n client.upload_blob(binary_data)\n\n self.logger.debug(\"Finished uploading result to {}.\".format(new.location))\n\n return new\n\n def read(self, location: str) -> Result:\n \"\"\"\n Reads a result from an Azure Blob container and returns a corresponding `Result` instance.\n\n Args:\n - location (str): the Azure blob location to read from\n\n Returns:\n - Result: the read result\n \"\"\"\n new = self.copy()\n new.location = location\n\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(location))\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n content_string = client.download_blob()\n\n try:\n new.value = new.deserialize_from_bytes(content_string)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n raise exc\n return new\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n Checks whether the target result exists.\n\n Does not validate whether the result is `valid`, only that it is present.\n\n Args:\n - location (str): Location of the 
result in the specific result target.\n Will check whether the provided location exists\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: whether or not the target result exists.\n \"\"\"\n from azure.core.exceptions import ResourceNotFoundError\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location.format(**kwargs)\n )\n\n # Catch exception because Azure python bindings do not yet have an exists method\n # https://github.com/Azure/azure-sdk-for-python/issues/9507\n try:\n client.get_blob_properties()\n return True\n except ResourceNotFoundError:\n return False\n", "path": "src/prefect/engine/results/azure_result.py"}, {"content": "from typing import Any\nfrom prefect.engine.result import Result\n\n\nclass ConstantResult(Result):\n \"\"\"\n Hook for storing and retrieving constant Python objects. Only intended to be used\n internally. The \"backend\" in this instance is the class instance itself.\n\n Args:\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n\n def read(self, location: str) -> Result:\n \"\"\"\n Returns the underlying value regardless of the argument passed.\n\n Args:\n - location (str): an unused argument\n \"\"\"\n return self\n\n def write(self, value: Any, **kwargs: Any) -> Result:\n \"\"\"\n Returns the repr of the underlying value, purely for convenience.\n\n Args:\n - value (Any): unused, for interface compatibility\n - **kwargs (optional): unused, for interface compatibility\n\n Raises:\n ValueError: ConstantResults cannot be written to\n \"\"\"\n raise ValueError(\"Cannot write values to `ConstantResult` types.\")\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n As all Python objects are valid constants, always returns `True`.\n\n Args:\n - location (str): for interface compatibility\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: True, confirming the constant exists.\n \"\"\"\n return True\n", "path": "src/prefect/engine/results/constant_result.py"}], "after_files": [{"content": "import os\nfrom typing import TYPE_CHECKING, Any\n\nfrom prefect.client import Secret\nfrom prefect.engine.result import Result\n\nif TYPE_CHECKING:\n import azure.storage.blob\n\n\nclass AzureResult(Result):\n \"\"\"\n Result for writing to and reading from an Azure Blob storage.\n\n Note that your flow's runtime environment must be able to authenticate with\n Azure; there are currently two supported options: provide a connection string\n either at initialization or at runtime through an environment variable, or\n set your Azure connection string as a Prefect Secret. Using an environment\n variable is the recommended approach.\n\n Args:\n - container (str): the name of the container to write to / read from\n - connection_string (str, optional): an Azure connection string for communicating with\n Blob storage. 
If not provided the value set in the environment as `AZURE_STORAGE_CONNECTION_STRING`\n will be used\n - connection_string_secret (str, optional): the name of a Prefect Secret\n which stores your Azure connection tring\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(\n self,\n container: str,\n connection_string: str = None,\n connection_string_secret: str = None,\n **kwargs: Any\n ) -> None:\n self.container = container\n self.connection_string = connection_string or os.getenv(\n \"AZURE_STORAGE_CONNECTION_STRING\"\n )\n self.connection_string_secret = connection_string_secret\n super().__init__(**kwargs)\n\n def initialize_service(self) -> None:\n \"\"\"\n Initialize a Blob service.\n \"\"\"\n import azure.storage.blob\n\n connection_string = self.connection_string\n if not connection_string and self.connection_string_secret:\n connection_string = Secret(self.connection_string_secret).get()\n\n self._service = azure.storage.blob.BlobServiceClient.from_connection_string(\n conn_str=connection_string\n )\n\n @property\n def service(self) -> \"azure.storage.blob.BlobServiceClient\":\n if not hasattr(self, \"_service\"):\n self.initialize_service()\n return self._service\n\n @service.setter\n def service(self, val: Any) -> None:\n self._service = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_service\" in state:\n del state[\"_service\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, value: Any, **kwargs: Any) -> Result:\n \"\"\"\n Writes the result value to a blob storage in Azure.\n\n Args:\n - value (Any): the value to write; will then be stored as the `value` attribute\n of the returned `Result` instance\n - **kwargs (optional): if provided, will be used to format the location template\n to determine the location to write to\n\n Returns:\n - Result: a new Result instance with the appropriately formatted location\n \"\"\"\n new = self.format(**kwargs)\n new.value = value\n\n self.logger.debug(\"Starting to upload result to {}...\".format(new.location))\n\n ## prepare data\n binary_data = new.serialize_to_bytes(new.value).decode()\n\n # initialize client and upload\n client = self.service.get_blob_client(\n container=self.container, blob=new.location\n )\n client.upload_blob(binary_data)\n\n self.logger.debug(\"Finished uploading result to {}.\".format(new.location))\n\n return new\n\n def read(self, location: str) -> Result:\n \"\"\"\n Reads a result from an Azure Blob container and returns a corresponding `Result` instance.\n\n Args:\n - location (str): the Azure blob location to read from\n\n Returns:\n - Result: the read result\n \"\"\"\n new = self.copy()\n new.location = location\n\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(location))\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location\n )\n content_string = client.download_blob()\n\n try:\n new.value = new.deserialize_from_bytes(content_string)\n except EOFError:\n new.value = None\n self.logger.debug(\"Finished downloading result from {}.\".format(location))\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n raise exc\n return new\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n Checks whether the target result exists.\n\n Does not validate whether the result is 
`valid`, only that it is present.\n\n Args:\n - location (str): Location of the result in the specific result target.\n Will check whether the provided location exists\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: whether or not the target result exists.\n \"\"\"\n from azure.core.exceptions import ResourceNotFoundError\n\n # initialize client and download\n client = self.service.get_blob_client(\n container=self.container, blob=location.format(**kwargs)\n )\n\n # Catch exception because Azure python bindings do not yet have an exists method\n # https://github.com/Azure/azure-sdk-for-python/issues/9507\n try:\n client.get_blob_properties()\n return True\n except ResourceNotFoundError:\n return False\n", "path": "src/prefect/engine/results/azure_result.py"}, {"content": "from typing import Any\nfrom prefect.engine.result import Result\n\n\nclass ConstantResult(Result):\n \"\"\"\n Hook for storing and retrieving constant Python objects. Only intended to be used\n internally. The \"backend\" in this instance is the class instance itself.\n\n Args:\n - **kwargs (Any, optional): any additional `Result` initialization options\n \"\"\"\n\n def __init__(self, **kwargs: Any) -> None:\n super().__init__(**kwargs)\n\n def read(self, location: str) -> Result:\n \"\"\"\n Will return the underlying value regardless of the argument passed.\n\n Args:\n - location (str): an unused argument\n \"\"\"\n return self\n\n def write(self, value: Any, **kwargs: Any) -> Result:\n \"\"\"\n Will return the repr of the underlying value, purely for convenience.\n\n Args:\n - value (Any): unused, for interface compatibility\n - **kwargs (optional): unused, for interface compatibility\n\n Raises:\n - ValueError: ConstantResults cannot be written to\n \"\"\"\n raise ValueError(\"Cannot write values to `ConstantResult` types.\")\n\n def exists(self, location: str, **kwargs: Any) -> bool:\n \"\"\"\n As all Python objects are valid constants, always returns `True`.\n\n Args:\n - location (str): for interface compatibility\n - **kwargs (Any): string format arguments for `location`\n\n Returns:\n - bool: True, confirming the constant exists.\n \"\"\"\n return True\n", "path": "src/prefect/engine/results/constant_result.py"}]}
3,136
374
gh_patches_debug_6255
rasdani/github-patches
git_diff
qtile__qtile-180
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- using Qtile.cmd_{info, debug} doesn't switch logging level It seems that critical, warning, and error do all work, though. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libqtile/log_utils.py` Content: ``` 1 import logging 2 import os 3 import sys 4 from logging import getLogger, StreamHandler 5 6 7 class ColorFormatter(logging.Formatter): 8 """Logging formatter adding console colors to the output. 9 """ 10 black, red, green, yellow, blue, magenta, cyan, white = range(8) 11 colors = { 12 'WARNING': yellow, 13 'INFO': green, 14 'DEBUG': blue, 15 'CRITICAL': yellow, 16 'ERROR': red, 17 'RED': red, 18 'GREEN': green, 19 'YELLOW': yellow, 20 'BLUE': blue, 21 'MAGENTA': magenta, 22 'CYAN': cyan, 23 'WHITE': white} 24 reset_seq = '\033[0m' 25 color_seq = '\033[%dm' 26 bold_seq = '\033[1m' 27 28 def format(self, record): 29 """Format the record with colors.""" 30 color = self.color_seq % (30 + self.colors[record.levelname]) 31 message = logging.Formatter.format(self, record) 32 message = message.replace('$RESET', self.reset_seq)\ 33 .replace('$BOLD', self.bold_seq)\ 34 .replace('$COLOR', color) 35 for color, value in self.colors.items(): 36 message = message.replace( 37 '$' + color, self.color_seq % (value + 30))\ 38 .replace('$BG' + color, self.color_seq % (value + 40))\ 39 .replace('$BG-' + color, self.color_seq % (value + 40)) 40 return message + self.reset_seq 41 42 43 def init_log(log_level=logging.WARNING, logger='qtile'): 44 handler = logging.FileHandler( 45 os.path.expanduser('~/.%s.log' % logger)) 46 handler.setLevel(logging.WARNING) 47 handler.setFormatter( 48 logging.Formatter( 49 "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s")) 50 log = getLogger(logger) 51 log.setLevel(log_level) 52 log.addHandler(handler) 53 log.warning('Starting %s' % logger.title()) 54 handler = StreamHandler(sys.stderr) 55 handler.setFormatter( 56 ColorFormatter( 57 '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s' 58 ' %(funcName)s:%(lineno)d $RESET %(message)s')) 59 log.addHandler(handler) 60 return log 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libqtile/log_utils.py b/libqtile/log_utils.py --- a/libqtile/log_utils.py +++ b/libqtile/log_utils.py @@ -43,7 +43,6 @@ def init_log(log_level=logging.WARNING, logger='qtile'): handler = logging.FileHandler( os.path.expanduser('~/.%s.log' % logger)) - handler.setLevel(logging.WARNING) handler.setFormatter( logging.Formatter( "%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s"))
{"golden_diff": "diff --git a/libqtile/log_utils.py b/libqtile/log_utils.py\n--- a/libqtile/log_utils.py\n+++ b/libqtile/log_utils.py\n@@ -43,7 +43,6 @@\n def init_log(log_level=logging.WARNING, logger='qtile'):\n handler = logging.FileHandler(\n os.path.expanduser('~/.%s.log' % logger))\n- handler.setLevel(logging.WARNING)\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s\"))\n", "issue": "using Qtile.cmd_{info, debug} doesn't switch logging level\nIt seems that critical, warning, and error do all work, though.\n\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom logging import getLogger, StreamHandler\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Logging formatter adding console colors to the output.\n \"\"\"\n black, red, green, yellow, blue, magenta, cyan, white = range(8)\n colors = {\n 'WARNING': yellow,\n 'INFO': green,\n 'DEBUG': blue,\n 'CRITICAL': yellow,\n 'ERROR': red,\n 'RED': red,\n 'GREEN': green,\n 'YELLOW': yellow,\n 'BLUE': blue,\n 'MAGENTA': magenta,\n 'CYAN': cyan,\n 'WHITE': white}\n reset_seq = '\\033[0m'\n color_seq = '\\033[%dm'\n bold_seq = '\\033[1m'\n\n def format(self, record):\n \"\"\"Format the record with colors.\"\"\"\n color = self.color_seq % (30 + self.colors[record.levelname])\n message = logging.Formatter.format(self, record)\n message = message.replace('$RESET', self.reset_seq)\\\n .replace('$BOLD', self.bold_seq)\\\n .replace('$COLOR', color)\n for color, value in self.colors.items():\n message = message.replace(\n '$' + color, self.color_seq % (value + 30))\\\n .replace('$BG' + color, self.color_seq % (value + 40))\\\n .replace('$BG-' + color, self.color_seq % (value + 40))\n return message + self.reset_seq\n\n\ndef init_log(log_level=logging.WARNING, logger='qtile'):\n handler = logging.FileHandler(\n os.path.expanduser('~/.%s.log' % logger))\n handler.setLevel(logging.WARNING)\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s\"))\n log = getLogger(logger)\n log.setLevel(log_level)\n log.addHandler(handler)\n log.warning('Starting %s' % logger.title())\n handler = StreamHandler(sys.stderr)\n handler.setFormatter(\n ColorFormatter(\n '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s'\n ' %(funcName)s:%(lineno)d $RESET %(message)s'))\n log.addHandler(handler)\n return log\n", "path": "libqtile/log_utils.py"}], "after_files": [{"content": "import logging\nimport os\nimport sys\nfrom logging import getLogger, StreamHandler\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Logging formatter adding console colors to the output.\n \"\"\"\n black, red, green, yellow, blue, magenta, cyan, white = range(8)\n colors = {\n 'WARNING': yellow,\n 'INFO': green,\n 'DEBUG': blue,\n 'CRITICAL': yellow,\n 'ERROR': red,\n 'RED': red,\n 'GREEN': green,\n 'YELLOW': yellow,\n 'BLUE': blue,\n 'MAGENTA': magenta,\n 'CYAN': cyan,\n 'WHITE': white}\n reset_seq = '\\033[0m'\n color_seq = '\\033[%dm'\n bold_seq = '\\033[1m'\n\n def format(self, record):\n \"\"\"Format the record with colors.\"\"\"\n color = self.color_seq % (30 + self.colors[record.levelname])\n message = logging.Formatter.format(self, record)\n message = message.replace('$RESET', self.reset_seq)\\\n .replace('$BOLD', self.bold_seq)\\\n .replace('$COLOR', color)\n for color, value in self.colors.items():\n message = message.replace(\n '$' + color, self.color_seq % (value + 30))\\\n .replace('$BG' + color, self.color_seq % (value + 40))\\\n .replace('$BG-' + 
color, self.color_seq % (value + 40))\n return message + self.reset_seq\n\n\ndef init_log(log_level=logging.WARNING, logger='qtile'):\n handler = logging.FileHandler(\n os.path.expanduser('~/.%s.log' % logger))\n handler.setFormatter(\n logging.Formatter(\n \"%(asctime)s %(levelname)s %(funcName)s:%(lineno)d %(message)s\"))\n log = getLogger(logger)\n log.setLevel(log_level)\n log.addHandler(handler)\n log.warning('Starting %s' % logger.title())\n handler = StreamHandler(sys.stderr)\n handler.setFormatter(\n ColorFormatter(\n '$RESET$COLOR%(asctime)s $BOLD$COLOR%(name)s'\n ' %(funcName)s:%(lineno)d $RESET %(message)s'))\n log.addHandler(handler)\n return log\n", "path": "libqtile/log_utils.py"}]}
896
118
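Each completed record above follows the same pattern: identifier and source fields, the quoted issue plus file listing that form the prompt, a reference diff, a JSON payload whose keys ("golden_diff", "issue", "before_files", "after_files") bundle the issue text with per-file "path"/"content" entries before and after the fix, and two token counts. The sketch below is only a minimal illustration of how such a payload could be unpacked with the Python standard library; the `row_verification_info` string is a trimmed, hypothetical stand-in assembled from the qtile record above, not an additional dataset row, and a real consumer would read the payload from wherever the records are actually stored.

```python
import json

# Hypothetical, trimmed stand-in for one "verification_info" payload, built
# from the qtile record above; a real consumer would load this string from
# the stored records rather than hard-coding it.
row_verification_info = json.dumps({
    "golden_diff": "diff --git a/libqtile/log_utils.py b/libqtile/log_utils.py\n"
                   "--- a/libqtile/log_utils.py\n+++ b/libqtile/log_utils.py\n",
    "issue": "using Qtile.cmd_{info, debug} doesn't switch logging level\n"
             "It seems that critical, warning, and error do all work, though.\n",
    "before_files": [{"content": "import logging\n# ... truncated ...\n",
                      "path": "libqtile/log_utils.py"}],
    "after_files": [{"content": "import logging\n# ... truncated ...\n",
                     "path": "libqtile/log_utils.py"}],
})


def summarize(verification_info: str) -> None:
    """Decode one verification_info blob and report what it contains."""
    record = json.loads(verification_info)
    # First line of the quoted issue, used here as a short title.
    print("issue:", record["issue"].splitlines()[0])
    # Paths of the files shipped in their pre-fix and post-fix form.
    print("before_files:", [f["path"] for f in record["before_files"]])
    print("after_files: ", [f["path"] for f in record["after_files"]])
    # The reference patch is plain unified-diff text; its "diff --git"
    # headers should name the same paths as the file lists above.
    headers = [line for line in record["golden_diff"].splitlines()
               if line.startswith("diff --git")]
    print("diff headers: ", headers)


summarize(row_verification_info)
```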
gh_patches_debug_38317
rasdani/github-patches
git_diff
lutris__lutris-2235
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Lutris removes "DXVK" dlls even when they're not from DXVK **Describe the bug** It is currently not possible to use Gallium Nine, when DXVK is disabled in the settings (which would be what I thought would be necessary to run Gallium Nine). When I enable Gallium Nine and then run a game I get these messages in the console: ``` 2019-06-17 10:21:30,859: Removing DXVK dll /home/bjoern/Games/starcraft-ii/drive_c/windows/system32/d3d9 2019-06-17 10:21:30,859: Removing DXVK dll /home/bjoern/Games/starcraft-ii/drive_c/windows/syswow64/d3d9 ``` Afterwards the game complains about a missiing d3d9.dll Enabling DXVK and setting the version to manual makes it work. **Expected behavior** Game runs with Gallium Nine. **Current behavior** Game complains about a missiing d3d9.dll **Steps to reproduce** * Install Gallium Nine via winetricks. * Enable Gallium Nine by running drive_c/windows/syswo64/ninewinecfg.exe * Disable DXVK. * Run game. **Workarounds** Any of these should work: * Set DXVK-version to manual and enable DXVK. * Run the game manually from the console and not via lutris. * It would probably work to run the game exe manually via "Run EXE inside wine prefix". --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lutris/util/wine/dxvk.py` Content: ``` 1 """DXVK helper module""" 2 import os 3 import json 4 import time 5 import shutil 6 import urllib.request 7 8 from lutris.settings import RUNTIME_DIR 9 from lutris.util.log import logger 10 from lutris.util.extract import extract_archive 11 from lutris.util.downloader import Downloader 12 from lutris.util import system 13 14 CACHE_MAX_AGE = 86400 # Re-download DXVK versions every day 15 16 17 @system.run_once 18 def init_dxvk_versions(): 19 def get_dxvk_versions(base_name, tags_url): 20 """Get DXVK versions from GitHub""" 21 logger.info("Updating "+base_name.upper()+" versions") 22 dxvk_path = os.path.join(RUNTIME_DIR, base_name) 23 if not os.path.isdir(dxvk_path): 24 os.mkdir(dxvk_path) 25 versions_path = os.path.join(dxvk_path, base_name+"_versions.json") 26 27 urllib.request.urlretrieve(tags_url, versions_path) 28 29 with open(versions_path, "r") as dxvk_tags: 30 dxvk_json = json.load(dxvk_tags) 31 dxvk_versions = list() 32 for x in dxvk_json: 33 version_name = x["name"].replace("v", "") 34 if version_name.startswith('m'): # ignore master snapshots of d9vk 35 continue 36 dxvk_versions.append(version_name) 37 return dxvk_versions 38 39 def init_versions(manager): 40 try: 41 manager.DXVK_VERSIONS \ 42 = get_dxvk_versions(manager.base_name, manager.DXVK_TAGS_URL) 43 except Exception as ex: # pylint: disable= broad-except 44 logger.error(ex) 45 manager.DXVK_LATEST, manager.DXVK_PAST_RELEASES = manager.DXVK_VERSIONS[0], manager.DXVK_VERSIONS[1:9] 46 47 init_versions(DXVKManager) 48 init_versions(D9VKManager) 49 50 51 class UnavailableDXVKVersion(RuntimeError): 52 """Exception raised when a version of DXVK is not found""" 53 54 55 class DXVKManager: 56 """Utility class to install DXVK dlls to a Wine prefix""" 57 58 DXVK_TAGS_URL = "https://api.github.com/repos/doitsujin/dxvk/tags" 59 DXVK_VERSIONS = [ 60 "0.94", 61 ] 62 DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9] 63 64 base_url = "https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz" 65 base_name = "dxvk" 66 base_dir = os.path.join(RUNTIME_DIR, 
base_name) 67 dxvk_dlls = ("dxgi", "d3d11", "d3d10core", "d3d10_1", "d3d10", "d3d9") 68 latest_version = DXVK_LATEST 69 70 def __init__(self, prefix, arch="win64", version=None): 71 self.prefix = prefix 72 if not os.path.isdir(self.base_dir): 73 os.makedirs(self.base_dir) 74 self._version = version 75 self.wine_arch = arch 76 77 @property 78 def version(self): 79 """Return version of DXVK (latest known version if not provided)""" 80 if self._version: 81 return self._version 82 return self.latest_version 83 84 @property 85 def dxvk_path(self): 86 """Return path to DXVK local cache""" 87 return os.path.join(self.base_dir, self.version) 88 89 @staticmethod 90 def is_dxvk_dll(dll_path): 91 """Check if a given DLL path is provided by DXVK 92 93 Very basic check to see if a dll exists and is over 256K. If this is the 94 case, then consider the DLL to be from DXVK 95 """ 96 if system.path_exists(dll_path, check_symlinks=True): 97 dll_stats = os.stat(dll_path) 98 dll_size = dll_stats.st_size 99 else: 100 dll_size = 0 101 return dll_size > 1024 * 256 102 103 def is_available(self): 104 """Return whether DXVK is cached locally""" 105 return system.path_exists(self.dxvk_path) 106 107 def dxvk_dll_exists(self, dll_name): 108 """Check if the dll exists as a DXVK variant""" 109 return system.path_exists(os.path.join(self.dxvk_path, "x64", dll_name + ".dll")) \ 110 and system.path_exists(os.path.join(self.dxvk_path, "x32", dll_name + ".dll")) 111 112 def download(self): 113 """Download DXVK to the local cache""" 114 dxvk_url = self.base_url.format(self.version, self.version) 115 if self.is_available(): 116 logger.warning(self.base_name.upper()+" already available at %s", self.dxvk_path) 117 118 dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url)) 119 120 downloader = Downloader(dxvk_url, dxvk_archive_path) 121 downloader.start() 122 while downloader.check_progress() < 1 and downloader.state != downloader.ERROR: 123 time.sleep(0.3) 124 if not system.path_exists(dxvk_archive_path): 125 raise UnavailableDXVKVersion("Failed to download "+self.base_name.upper()+" %s" % self.version) 126 if os.stat(dxvk_archive_path).st_size: 127 extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True) 128 os.remove(dxvk_archive_path) 129 else: 130 os.remove(dxvk_archive_path) 131 raise UnavailableDXVKVersion("Failed to download "+self.base_name.upper()+" %s" % self.version) 132 133 def enable_dxvk_dll(self, system_dir, dxvk_arch, dll): 134 """Copies DXVK dlls to the appropriate destination""" 135 # Copying DXVK's version 136 dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, "%s.dll" % dll) 137 if system.path_exists(dxvk_dll_path): 138 wine_dll_path = os.path.join(system_dir, "%s.dll" % dll) 139 logger.info("Replacing %s/%s with "+self.base_name.upper()+" version", system_dir, dll) 140 if not self.is_dxvk_dll(wine_dll_path): 141 # Backing up original version (may not be needed) 142 if system.path_exists(wine_dll_path): 143 shutil.move(wine_dll_path, wine_dll_path + ".orig") 144 if system.path_exists(wine_dll_path): 145 os.remove(wine_dll_path) 146 os.symlink(dxvk_dll_path, wine_dll_path) 147 else: 148 self.disable_dxvk_dll(system_dir, dxvk_arch, dll) 149 150 def disable_dxvk_dll(self, system_dir, dxvk_arch, dll): 151 """Remove DXVK DLL from Wine prefix""" 152 wine_dll_path = os.path.join(system_dir, "%s.dll" % dll) 153 if self.is_dxvk_dll(wine_dll_path): 154 logger.info("Removing "+self.base_name.upper()+" dll %s/%s", system_dir, dll) 155 os.remove(wine_dll_path) 156 # Restoring 
original version (may not be needed) 157 if system.path_exists(wine_dll_path + ".orig"): 158 shutil.move(wine_dll_path + ".orig", wine_dll_path) 159 160 def _iter_dxvk_dlls(self): 161 windows_path = os.path.join(self.prefix, "drive_c/windows") 162 if self.wine_arch == "win64": 163 system_dirs = { 164 "x64": os.path.join(windows_path, "system32"), 165 "x32": os.path.join(windows_path, "syswow64"), 166 } 167 elif self.wine_arch == "win32": 168 system_dirs = {"x32": os.path.join(windows_path, "system32")} 169 170 for dxvk_arch, system_dir in system_dirs.items(): 171 for dll in self.dxvk_dlls: 172 yield system_dir, dxvk_arch, dll 173 174 def enable(self): 175 """Enable DXVK for the current prefix""" 176 if not system.path_exists(self.dxvk_path): 177 logger.error(self.base_name.upper()+" %s is not available locally", self.version) 178 return 179 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls(): 180 self.enable_dxvk_dll(system_dir, dxvk_arch, dll) 181 182 def disable(self): 183 """Disable DXVK for the current prefix""" 184 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls(): 185 self.disable_dxvk_dll(system_dir, dxvk_arch, dll) 186 187 188 class D9VKManager(DXVKManager): 189 DXVK_TAGS_URL = "https://api.github.com/repos/Joshua-Ashton/d9vk/tags" 190 DXVK_VERSIONS = [ 191 "0.10", 192 ] 193 DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9] 194 195 base_url = "https://github.com/Joshua-Ashton/d9vk/releases/download/{}/d9vk-{}.tar.gz" 196 base_name = "d9vk" 197 base_dir = os.path.join(RUNTIME_DIR, base_name) 198 dxvk_dlls = ("d3d9",) 199 latest_version = DXVK_LATEST 200 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/util/wine/dxvk.py b/lutris/util/wine/dxvk.py --- a/lutris/util/wine/dxvk.py +++ b/lutris/util/wine/dxvk.py @@ -23,25 +23,31 @@ if not os.path.isdir(dxvk_path): os.mkdir(dxvk_path) versions_path = os.path.join(dxvk_path, base_name+"_versions.json") - - urllib.request.urlretrieve(tags_url, versions_path) - + internet_available = True + try: + urllib.request.urlretrieve(tags_url, versions_path) + except Exception as ex: # pylint: disable= broad-except + logger.error(ex) + internet_available = False + dxvk_versions = list() with open(versions_path, "r") as dxvk_tags: dxvk_json = json.load(dxvk_tags) - dxvk_versions = list() for x in dxvk_json: version_name = x["name"].replace("v", "") if version_name.startswith('m'): # ignore master snapshots of d9vk continue - dxvk_versions.append(version_name) - return dxvk_versions + if internet_available or version_name in os.listdir(dxvk_path): + dxvk_versions.append(version_name) + if not dxvk_versions: # We don't want to set manager.DXVK_VERSIONS, if the list is empty + raise IndexError + return sorted(dxvk_versions, reverse=True) def init_versions(manager): try: manager.DXVK_VERSIONS \ = get_dxvk_versions(manager.base_name, manager.DXVK_TAGS_URL) - except Exception as ex: # pylint: disable= broad-except - logger.error(ex) + except (IndexError, FileNotFoundError): + pass manager.DXVK_LATEST, manager.DXVK_PAST_RELEASES = manager.DXVK_VERSIONS[0], manager.DXVK_VERSIONS[1:9] init_versions(DXVKManager) @@ -150,11 +156,9 @@ def disable_dxvk_dll(self, system_dir, dxvk_arch, dll): """Remove DXVK DLL from Wine prefix""" wine_dll_path = os.path.join(system_dir, "%s.dll" % dll) - if self.is_dxvk_dll(wine_dll_path): + if self.is_dxvk_dll(wine_dll_path) and system.path_exists(wine_dll_path + ".orig"): logger.info("Removing "+self.base_name.upper()+" dll %s/%s", system_dir, dll) os.remove(wine_dll_path) - # Restoring original version (may not be needed) - if system.path_exists(wine_dll_path + ".orig"): shutil.move(wine_dll_path + ".orig", wine_dll_path) def _iter_dxvk_dlls(self):
{"golden_diff": "diff --git a/lutris/util/wine/dxvk.py b/lutris/util/wine/dxvk.py\n--- a/lutris/util/wine/dxvk.py\n+++ b/lutris/util/wine/dxvk.py\n@@ -23,25 +23,31 @@\n if not os.path.isdir(dxvk_path):\n os.mkdir(dxvk_path)\n versions_path = os.path.join(dxvk_path, base_name+\"_versions.json\")\n-\n- urllib.request.urlretrieve(tags_url, versions_path)\n-\n+ internet_available = True\n+ try:\n+ urllib.request.urlretrieve(tags_url, versions_path)\n+ except Exception as ex: # pylint: disable= broad-except\n+ logger.error(ex)\n+ internet_available = False\n+ dxvk_versions = list()\n with open(versions_path, \"r\") as dxvk_tags:\n dxvk_json = json.load(dxvk_tags)\n- dxvk_versions = list()\n for x in dxvk_json:\n version_name = x[\"name\"].replace(\"v\", \"\")\n if version_name.startswith('m'): # ignore master snapshots of d9vk\n continue\n- dxvk_versions.append(version_name)\n- return dxvk_versions\n+ if internet_available or version_name in os.listdir(dxvk_path):\n+ dxvk_versions.append(version_name)\n+ if not dxvk_versions: # We don't want to set manager.DXVK_VERSIONS, if the list is empty\n+ raise IndexError\n+ return sorted(dxvk_versions, reverse=True)\n \n def init_versions(manager):\n try:\n manager.DXVK_VERSIONS \\\n = get_dxvk_versions(manager.base_name, manager.DXVK_TAGS_URL)\n- except Exception as ex: # pylint: disable= broad-except\n- logger.error(ex)\n+ except (IndexError, FileNotFoundError):\n+ pass\n manager.DXVK_LATEST, manager.DXVK_PAST_RELEASES = manager.DXVK_VERSIONS[0], manager.DXVK_VERSIONS[1:9]\n \n init_versions(DXVKManager)\n@@ -150,11 +156,9 @@\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, \"%s.dll\" % dll)\n- if self.is_dxvk_dll(wine_dll_path):\n+ if self.is_dxvk_dll(wine_dll_path) and system.path_exists(wine_dll_path + \".orig\"):\n logger.info(\"Removing \"+self.base_name.upper()+\" dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n- # Restoring original version (may not be needed)\n- if system.path_exists(wine_dll_path + \".orig\"):\n shutil.move(wine_dll_path + \".orig\", wine_dll_path)\n \n def _iter_dxvk_dlls(self):\n", "issue": "Lutris removes \"DXVK\" dlls even when they're not from DXVK\n**Describe the bug**\r\n\r\nIt is currently not possible to use Gallium Nine, when DXVK is disabled in the settings (which would be what I thought would be necessary to run Gallium Nine). 
When I enable Gallium Nine and then run a game I get these messages in the console:\r\n\r\n```\r\n2019-06-17 10:21:30,859: Removing DXVK dll /home/bjoern/Games/starcraft-ii/drive_c/windows/system32/d3d9\r\n2019-06-17 10:21:30,859: Removing DXVK dll /home/bjoern/Games/starcraft-ii/drive_c/windows/syswow64/d3d9\r\n```\r\n\r\nAfterwards the game complains about a missiing d3d9.dll\r\nEnabling DXVK and setting the version to manual makes it work.\r\n\r\n**Expected behavior**\r\n\r\nGame runs with Gallium Nine.\r\n\r\n**Current behavior**\r\n\r\nGame complains about a missiing d3d9.dll\r\n\r\n**Steps to reproduce**\r\n\r\n* Install Gallium Nine via winetricks.\r\n* Enable Gallium Nine by running drive_c/windows/syswo64/ninewinecfg.exe\r\n* Disable DXVK.\r\n* Run game.\r\n\r\n**Workarounds**\r\n\r\nAny of these should work:\r\n\r\n* Set DXVK-version to manual and enable DXVK.\r\n* Run the game manually from the console and not via lutris.\r\n* It would probably work to run the game exe manually via \"Run EXE inside wine prefix\".\n", "before_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport json\nimport time\nimport shutil\nimport urllib.request\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.downloader import Downloader\nfrom lutris.util import system\n\nCACHE_MAX_AGE = 86400 # Re-download DXVK versions every day\n\n\[email protected]_once\ndef init_dxvk_versions():\n def get_dxvk_versions(base_name, tags_url):\n \"\"\"Get DXVK versions from GitHub\"\"\"\n logger.info(\"Updating \"+base_name.upper()+\" versions\")\n dxvk_path = os.path.join(RUNTIME_DIR, base_name)\n if not os.path.isdir(dxvk_path):\n os.mkdir(dxvk_path)\n versions_path = os.path.join(dxvk_path, base_name+\"_versions.json\")\n\n urllib.request.urlretrieve(tags_url, versions_path)\n\n with open(versions_path, \"r\") as dxvk_tags:\n dxvk_json = json.load(dxvk_tags)\n dxvk_versions = list()\n for x in dxvk_json:\n version_name = x[\"name\"].replace(\"v\", \"\")\n if version_name.startswith('m'): # ignore master snapshots of d9vk\n continue\n dxvk_versions.append(version_name)\n return dxvk_versions\n\n def init_versions(manager):\n try:\n manager.DXVK_VERSIONS \\\n = get_dxvk_versions(manager.base_name, manager.DXVK_TAGS_URL)\n except Exception as ex: # pylint: disable= broad-except\n logger.error(ex)\n manager.DXVK_LATEST, manager.DXVK_PAST_RELEASES = manager.DXVK_VERSIONS[0], manager.DXVK_VERSIONS[1:9]\n\n init_versions(DXVKManager)\n init_versions(D9VKManager)\n\n\nclass UnavailableDXVKVersion(RuntimeError):\n \"\"\"Exception raised when a version of DXVK is not found\"\"\"\n\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n\n DXVK_TAGS_URL = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n DXVK_VERSIONS = [\n \"0.94\",\n ]\n DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9]\n\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_name = \"dxvk\"\n base_dir = os.path.join(RUNTIME_DIR, base_name)\n dxvk_dlls = (\"dxgi\", \"d3d11\", \"d3d10core\", \"d3d10_1\", \"d3d10\", \"d3d9\")\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch=\"win64\", version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not 
provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 256K. If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if system.path_exists(dll_path, check_symlinks=True):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 256\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return system.path_exists(self.dxvk_path)\n\n def dxvk_dll_exists(self, dll_name):\n \"\"\"Check if the dll exists as a DXVK variant\"\"\"\n return system.path_exists(os.path.join(self.dxvk_path, \"x64\", dll_name + \".dll\")) \\\n and system.path_exists(os.path.join(self.dxvk_path, \"x32\", dll_name + \".dll\"))\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n dxvk_url = self.base_url.format(self.version, self.version)\n if self.is_available():\n logger.warning(self.base_name.upper()+\" already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1 and downloader.state != downloader.ERROR:\n time.sleep(0.3)\n if not system.path_exists(dxvk_archive_path):\n raise UnavailableDXVKVersion(\"Failed to download \"+self.base_name.upper()+\" %s\" % self.version)\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n os.remove(dxvk_archive_path)\n else:\n os.remove(dxvk_archive_path)\n raise UnavailableDXVKVersion(\"Failed to download \"+self.base_name.upper()+\" %s\" % self.version)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n if system.path_exists(dxvk_dll_path):\n wine_dll_path = os.path.join(system_dir, \"%s.dll\" % dll)\n logger.info(\"Replacing %s/%s with \"+self.base_name.upper()+\" version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if system.path_exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n if system.path_exists(wine_dll_path):\n os.remove(wine_dll_path)\n os.symlink(dxvk_dll_path, wine_dll_path)\n else:\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, \"%s.dll\" % dll)\n if self.is_dxvk_dll(wine_dll_path):\n logger.info(\"Removing \"+self.base_name.upper()+\" dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n # Restoring original version (may not be needed)\n if system.path_exists(wine_dll_path + \".orig\"):\n shutil.move(wine_dll_path + \".orig\", wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, \"drive_c/windows\")\n if self.wine_arch == \"win64\":\n system_dirs = {\n \"x64\": os.path.join(windows_path, \"system32\"),\n \"x32\": os.path.join(windows_path, \"syswow64\"),\n }\n elif self.wine_arch == \"win32\":\n system_dirs = {\"x32\": 
os.path.join(windows_path, \"system32\")}\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not system.path_exists(self.dxvk_path):\n logger.error(self.base_name.upper()+\" %s is not available locally\", self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n\nclass D9VKManager(DXVKManager):\n DXVK_TAGS_URL = \"https://api.github.com/repos/Joshua-Ashton/d9vk/tags\"\n DXVK_VERSIONS = [\n \"0.10\",\n ]\n DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9]\n\n base_url = \"https://github.com/Joshua-Ashton/d9vk/releases/download/{}/d9vk-{}.tar.gz\"\n base_name = \"d9vk\"\n base_dir = os.path.join(RUNTIME_DIR, base_name)\n dxvk_dlls = (\"d3d9\",)\n latest_version = DXVK_LATEST\n", "path": "lutris/util/wine/dxvk.py"}], "after_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport json\nimport time\nimport shutil\nimport urllib.request\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.downloader import Downloader\nfrom lutris.util import system\n\nCACHE_MAX_AGE = 86400 # Re-download DXVK versions every day\n\n\[email protected]_once\ndef init_dxvk_versions():\n def get_dxvk_versions(base_name, tags_url):\n \"\"\"Get DXVK versions from GitHub\"\"\"\n logger.info(\"Updating \"+base_name.upper()+\" versions\")\n dxvk_path = os.path.join(RUNTIME_DIR, base_name)\n if not os.path.isdir(dxvk_path):\n os.mkdir(dxvk_path)\n versions_path = os.path.join(dxvk_path, base_name+\"_versions.json\")\n internet_available = True\n try:\n urllib.request.urlretrieve(tags_url, versions_path)\n except Exception as ex: # pylint: disable= broad-except\n logger.error(ex)\n internet_available = False\n dxvk_versions = list()\n with open(versions_path, \"r\") as dxvk_tags:\n dxvk_json = json.load(dxvk_tags)\n for x in dxvk_json:\n version_name = x[\"name\"].replace(\"v\", \"\")\n if version_name.startswith('m'): # ignore master snapshots of d9vk\n continue\n if internet_available or version_name in os.listdir(dxvk_path):\n dxvk_versions.append(version_name)\n if not dxvk_versions: # We don't want to set manager.DXVK_VERSIONS, if the list is empty\n raise IndexError\n return sorted(dxvk_versions, reverse=True)\n\n def init_versions(manager):\n try:\n manager.DXVK_VERSIONS \\\n = get_dxvk_versions(manager.base_name, manager.DXVK_TAGS_URL)\n except (IndexError, FileNotFoundError):\n pass\n manager.DXVK_LATEST, manager.DXVK_PAST_RELEASES = manager.DXVK_VERSIONS[0], manager.DXVK_VERSIONS[1:9]\n\n init_versions(DXVKManager)\n init_versions(D9VKManager)\n\n\nclass UnavailableDXVKVersion(RuntimeError):\n \"\"\"Exception raised when a version of DXVK is not found\"\"\"\n\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n\n DXVK_TAGS_URL = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n DXVK_VERSIONS = [\n \"0.94\",\n ]\n DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9]\n\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_name = \"dxvk\"\n base_dir = os.path.join(RUNTIME_DIR, base_name)\n 
dxvk_dlls = (\"dxgi\", \"d3d11\", \"d3d10core\", \"d3d10_1\", \"d3d10\", \"d3d9\")\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch=\"win64\", version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 256K. If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if system.path_exists(dll_path, check_symlinks=True):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 256\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return system.path_exists(self.dxvk_path)\n\n def dxvk_dll_exists(self, dll_name):\n \"\"\"Check if the dll exists as a DXVK variant\"\"\"\n return system.path_exists(os.path.join(self.dxvk_path, \"x64\", dll_name + \".dll\")) \\\n and system.path_exists(os.path.join(self.dxvk_path, \"x32\", dll_name + \".dll\"))\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n dxvk_url = self.base_url.format(self.version, self.version)\n if self.is_available():\n logger.warning(self.base_name.upper()+\" already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1 and downloader.state != downloader.ERROR:\n time.sleep(0.3)\n if not system.path_exists(dxvk_archive_path):\n raise UnavailableDXVKVersion(\"Failed to download \"+self.base_name.upper()+\" %s\" % self.version)\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n os.remove(dxvk_archive_path)\n else:\n os.remove(dxvk_archive_path)\n raise UnavailableDXVKVersion(\"Failed to download \"+self.base_name.upper()+\" %s\" % self.version)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n if system.path_exists(dxvk_dll_path):\n wine_dll_path = os.path.join(system_dir, \"%s.dll\" % dll)\n logger.info(\"Replacing %s/%s with \"+self.base_name.upper()+\" version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if system.path_exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n if system.path_exists(wine_dll_path):\n os.remove(wine_dll_path)\n os.symlink(dxvk_dll_path, wine_dll_path)\n else:\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, \"%s.dll\" % dll)\n if self.is_dxvk_dll(wine_dll_path) and system.path_exists(wine_dll_path + \".orig\"):\n logger.info(\"Removing \"+self.base_name.upper()+\" dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n shutil.move(wine_dll_path + 
\".orig\", wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, \"drive_c/windows\")\n if self.wine_arch == \"win64\":\n system_dirs = {\n \"x64\": os.path.join(windows_path, \"system32\"),\n \"x32\": os.path.join(windows_path, \"syswow64\"),\n }\n elif self.wine_arch == \"win32\":\n system_dirs = {\"x32\": os.path.join(windows_path, \"system32\")}\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not system.path_exists(self.dxvk_path):\n logger.error(self.base_name.upper()+\" %s is not available locally\", self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n\nclass D9VKManager(DXVKManager):\n DXVK_TAGS_URL = \"https://api.github.com/repos/Joshua-Ashton/d9vk/tags\"\n DXVK_VERSIONS = [\n \"0.10\",\n ]\n DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:9]\n\n base_url = \"https://github.com/Joshua-Ashton/d9vk/releases/download/{}/d9vk-{}.tar.gz\"\n base_name = \"d9vk\"\n base_dir = os.path.join(RUNTIME_DIR, base_name)\n dxvk_dlls = (\"d3d9\",)\n latest_version = DXVK_LATEST\n", "path": "lutris/util/wine/dxvk.py"}]}
3,099
634
gh_patches_debug_7605
rasdani/github-patches
git_diff
vacanza__python-holidays-376
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pop_named() sometimes generates an exception This simple code block generates an exception when I try to pop "Columbus Day" (last line): ``` from datetime import date import holidays us_holidays = holidays.CountryHoliday('US', prov=None, state='CA', years=2022) print(us_holidays.get('2022-10-10').format("%s")) us_holidays.pop_named("Columbus Day") ``` The exception is `dictionary changed size during iteration`, and it seems to be related to popping elements out of a dictionary while you're iterating through it: https://stackoverflow.com/questions/11941817/how-to-avoid-runtimeerror-dictionary-changed-size-during-iteration-error. The element to be popped isn't actually removed. I'm far from a competent Python programmer, but iterating over `list(self)` seems to solve the problem for me - no exception, and the item is removed properly: ``` def get_named(self, name): # find all dates matching provided name (accepting partial # strings too, case insensitive), returning them in a list return [key for key in list(self) if name.lower() in self[key].lower()] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `holidays/holiday_base.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # python-holidays 4 # --------------- 5 # A fast, efficient Python library for generating country, province and state 6 # specific sets of holidays on the fly. It aims to make determining whether a 7 # specific date is a holiday as fast and flexible as possible. 8 # 9 # Author: ryanss <[email protected]> (c) 2014-2017 10 # dr-prodigy <[email protected]> (c) 2017-2020 11 # Website: https://github.com/dr-prodigy/python-holidays 12 # License: MIT (see LICENSE file) 13 14 from datetime import timedelta, datetime, date 15 16 import six 17 from dateutil.parser import parse 18 19 20 class HolidayBase(dict): 21 PROVINCES = [] 22 23 def __init__(self, years=[], expand=True, observed=True, 24 prov=None, state=None): 25 self.observed = observed 26 self.expand = expand 27 if isinstance(years, int): 28 years = [years, ] 29 self.years = set(years) 30 if not getattr(self, 'prov', False): 31 self.prov = prov 32 self.state = state 33 for year in list(self.years): 34 self._populate(year) 35 36 def __setattr__(self, key, value): 37 if key == 'observed' and len(self) > 0: 38 dict.__setattr__(self, key, value) 39 if value is True: 40 # Add (Observed) dates 41 years = list(self.years) 42 self.years = set() 43 self.clear() 44 for year in years: 45 self._populate(year) 46 else: 47 # Remove (Observed) dates 48 for k, v in list(self.items()): 49 if v.find("Observed") >= 0: 50 del self[k] 51 else: 52 return dict.__setattr__(self, key, value) 53 54 def __keytransform__(self, key): 55 if isinstance(key, datetime): 56 key = key.date() 57 elif isinstance(key, date): 58 key = key 59 elif isinstance(key, int) or isinstance(key, float): 60 key = datetime.utcfromtimestamp(key).date() 61 elif isinstance(key, six.string_types): 62 try: 63 key = parse(key).date() 64 except (ValueError, OverflowError): 65 raise ValueError("Cannot parse date from string '%s'" % key) 66 else: 67 raise TypeError("Cannot convert type '%s' to date." 
% type(key)) 68 69 if self.expand and key.year not in self.years: 70 self.years.add(key.year) 71 self._populate(key.year) 72 return key 73 74 def __contains__(self, key): 75 return dict.__contains__(self, self.__keytransform__(key)) 76 77 def __getitem__(self, key): 78 if isinstance(key, slice): 79 if not key.start or not key.stop: 80 raise ValueError("Both start and stop must be given.") 81 82 start = self.__keytransform__(key.start) 83 stop = self.__keytransform__(key.stop) 84 85 if key.step is None: 86 step = 1 87 elif isinstance(key.step, timedelta): 88 step = key.step.days 89 elif isinstance(key.step, int): 90 step = key.step 91 else: 92 raise TypeError( 93 "Cannot convert type '%s' to int." % type(key.step) 94 ) 95 96 if step == 0: 97 raise ValueError('Step value must not be zero.') 98 99 date_diff = stop - start 100 if date_diff.days < 0 <= step or date_diff.days >= 0 > step: 101 step *= -1 102 103 days_in_range = [] 104 for delta_days in range(0, date_diff.days, step): 105 day = start + timedelta(days=delta_days) 106 try: 107 dict.__getitem__( 108 self, 109 day 110 ) 111 days_in_range.append(day) 112 except KeyError: 113 pass 114 return days_in_range 115 return dict.__getitem__(self, self.__keytransform__(key)) 116 117 def __setitem__(self, key, value): 118 if key in self: 119 if self.get(key).find(value) < 0 \ 120 and value.find(self.get(key)) < 0: 121 value = "%s, %s" % (value, self.get(key)) 122 else: 123 value = self.get(key) 124 return dict.__setitem__(self, self.__keytransform__(key), value) 125 126 def update(self, *args): 127 args = list(args) 128 for arg in args: 129 if isinstance(arg, dict): 130 for key, value in list(arg.items()): 131 self[key] = value 132 elif isinstance(arg, list): 133 for item in arg: 134 self[item] = "Holiday" 135 else: 136 self[arg] = "Holiday" 137 138 def append(self, *args): 139 return self.update(*args) 140 141 def get(self, key, default=None): 142 return dict.get(self, self.__keytransform__(key), default) 143 144 def get_list(self, key): 145 return [h for h in self.get(key, "").split(", ") if h] 146 147 def get_named(self, name): 148 # find all dates matching provided name (accepting partial 149 # strings too, case insensitive), returning them in a list 150 return [key for key in self if name.lower() in self[key].lower()] 151 152 def pop(self, key, default=None): 153 if default is None: 154 return dict.pop(self, self.__keytransform__(key)) 155 return dict.pop(self, self.__keytransform__(key), default) 156 157 def pop_named(self, name): 158 to_pop = self.get_named(name) 159 if not to_pop: 160 raise KeyError(name) 161 for key in to_pop: 162 self.pop(key) 163 return to_pop 164 165 def __eq__(self, other): 166 return dict.__eq__(self, other) and self.__dict__ == other.__dict__ 167 168 def __ne__(self, other): 169 return dict.__ne__(self, other) or self.__dict__ != other.__dict__ 170 171 def __add__(self, other): 172 if isinstance(other, int) and other == 0: 173 # Required to sum() list of holidays 174 # sum([h1, h2]) is equivalent to (0 + h1 + h2) 175 return self 176 elif not isinstance(other, HolidayBase): 177 raise TypeError() 178 HolidaySum = createHolidaySum(self, other) 179 country = (getattr(self, 'country', None) or 180 getattr(other, 'country', None)) 181 if self.country and other.country and self.country != other.country: 182 c1 = self.country 183 if not isinstance(c1, list): 184 c1 = [c1] 185 c2 = other.country 186 if not isinstance(c2, list): 187 c2 = [c2] 188 country = c1 + c2 189 prov = getattr(self, 'prov', None) or getattr(other, 
'prov', None) 190 if self.prov and other.prov and self.prov != other.prov: 191 p1 = self.prov if isinstance(self.prov, list) else [self.prov] 192 p2 = other.prov if isinstance(other.prov, list) else [other.prov] 193 prov = p1 + p2 194 return HolidaySum(years=(self.years | other.years), 195 expand=(self.expand or other.expand), 196 observed=(self.observed or other.observed), 197 country=country, prov=prov) 198 199 def __radd__(self, other): 200 return self.__add__(other) 201 202 def _populate(self, year): 203 pass 204 205 206 def createHolidaySum(h1, h2): 207 class HolidaySum(HolidayBase): 208 209 def __init__(self, country, **kwargs): 210 self.country = country 211 self.holidays = [] 212 if getattr(h1, 'holidays', False): 213 for h in h1.holidays: 214 self.holidays.append(h) 215 else: 216 self.holidays.append(h1) 217 if getattr(h2, 'holidays', False): 218 for h in h2.holidays: 219 self.holidays.append(h) 220 else: 221 self.holidays.append(h2) 222 HolidayBase.__init__(self, **kwargs) 223 224 def _populate(self, year): 225 for h in self.holidays[::-1]: 226 h._populate(year) 227 self.update(h) 228 229 return HolidaySum 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py --- a/holidays/holiday_base.py +++ b/holidays/holiday_base.py @@ -147,7 +147,8 @@ def get_named(self, name): # find all dates matching provided name (accepting partial # strings too, case insensitive), returning them in a list - return [key for key in self if name.lower() in self[key].lower()] + matches = [key for key in self if name.lower() in self[key].lower()] + return matches def pop(self, key, default=None): if default is None:
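The issue above is about Python's `RuntimeError: dictionary changed size during iteration`. A minimal standalone sketch of that failure mode and of the snapshot-the-keys workaround the reporter suggests — plain Python only, no python-holidays dependency:

```python
holidays = {"2022-10-10": "Columbus Day", "2022-07-04": "Independence Day"}

# Popping while iterating the live dict raises RuntimeError in Python 3:
# for key in holidays:
#     if "columbus" in holidays[key].lower():
#         holidays.pop(key)   # RuntimeError: dictionary changed size during iteration

# Materialising the keys first decouples iteration from mutation:
for key in list(holidays):
    if "columbus" in holidays[key].lower():
        holidays.pop(key)

print(holidays)  # {'2022-07-04': 'Independence Day'}
```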
{"golden_diff": "diff --git a/holidays/holiday_base.py b/holidays/holiday_base.py\n--- a/holidays/holiday_base.py\n+++ b/holidays/holiday_base.py\n@@ -147,7 +147,8 @@\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n- return [key for key in self if name.lower() in self[key].lower()]\n+ matches = [key for key in self if name.lower() in self[key].lower()]\n+ return matches\n \n def pop(self, key, default=None):\n if default is None:\n", "issue": "pop_named() sometimes generates an exception\nThis simple code block generates an exception when I try to pop \"Columbus Day\" (last line):\r\n\r\n```\r\nfrom datetime import date\r\n \r\nimport holidays\r\n\r\nus_holidays = holidays.CountryHoliday('US', prov=None, state='CA', years=2022)\r\n\r\nprint(us_holidays.get('2022-10-10').format(\"%s\"))\r\n\r\nus_holidays.pop_named(\"Columbus Day\")\r\n```\r\n\r\nThe exception is `dictionary changed size during iteration`, and it seems to be related to popping elements out of a dictionary while you're iterating through it: https://stackoverflow.com/questions/11941817/how-to-avoid-runtimeerror-dictionary-changed-size-during-iteration-error. The element to be popped isn't actually removed.\r\n\r\nI'm far from a competent Python programmer, but iterating over `list(self)` seems to solve the problem for me - no exception, and the item is removed properly:\r\n\r\n```\r\n def get_named(self, name):\r\n # find all dates matching provided name (accepting partial\r\n # strings too, case insensitive), returning them in a list\r\n return [key for key in list(self) if name.lower() in self[key].lower()]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta, datetime, date\n\nimport six\nfrom dateutil.parser import parse\n\n\nclass HolidayBase(dict):\n PROVINCES = []\n\n def __init__(self, years=[], expand=True, observed=True,\n prov=None, state=None):\n self.observed = observed\n self.expand = expand\n if isinstance(years, int):\n years = [years, ]\n self.years = set(years)\n if not getattr(self, 'prov', False):\n self.prov = prov\n self.state = state\n for year in list(self.years):\n self._populate(year)\n\n def __setattr__(self, key, value):\n if key == 'observed' and len(self) > 0:\n dict.__setattr__(self, key, value)\n if value is True:\n # Add (Observed) dates\n years = list(self.years)\n self.years = set()\n self.clear()\n for year in years:\n self._populate(year)\n else:\n # Remove (Observed) dates\n for k, v in list(self.items()):\n if v.find(\"Observed\") >= 0:\n del self[k]\n else:\n return dict.__setattr__(self, key, value)\n\n def __keytransform__(self, key):\n if isinstance(key, datetime):\n key = key.date()\n elif isinstance(key, date):\n key = key\n elif isinstance(key, int) or isinstance(key, float):\n key = datetime.utcfromtimestamp(key).date()\n elif isinstance(key, six.string_types):\n try:\n key = parse(key).date()\n except (ValueError, OverflowError):\n raise ValueError(\"Cannot parse date from string '%s'\" % key)\n else:\n raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\n\n if self.expand and key.year not in self.years:\n self.years.add(key.year)\n self._populate(key.year)\n return key\n\n def __contains__(self, key):\n return dict.__contains__(self, self.__keytransform__(key))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if not key.start or not key.stop:\n raise ValueError(\"Both start and stop must be given.\")\n\n start = self.__keytransform__(key.start)\n stop = self.__keytransform__(key.stop)\n\n if key.step is None:\n step = 1\n elif isinstance(key.step, timedelta):\n step = key.step.days\n elif isinstance(key.step, int):\n step = key.step\n else:\n raise TypeError(\n \"Cannot convert type '%s' to int.\" % type(key.step)\n )\n\n if step == 0:\n raise ValueError('Step value must not be zero.')\n\n date_diff = stop - start\n if date_diff.days < 0 <= step or date_diff.days >= 0 > step:\n step *= -1\n\n days_in_range = []\n for delta_days in range(0, date_diff.days, step):\n day = start + timedelta(days=delta_days)\n try:\n dict.__getitem__(\n self,\n day\n )\n days_in_range.append(day)\n except KeyError:\n pass\n return days_in_range\n return dict.__getitem__(self, self.__keytransform__(key))\n\n def __setitem__(self, key, value):\n if key in self:\n if self.get(key).find(value) < 0 \\\n and value.find(self.get(key)) < 0:\n value = \"%s, %s\" % (value, self.get(key))\n else:\n value = self.get(key)\n return dict.__setitem__(self, self.__keytransform__(key), value)\n\n def update(self, *args):\n args = list(args)\n for arg in args:\n if isinstance(arg, dict):\n for key, value in list(arg.items()):\n self[key] = value\n elif isinstance(arg, list):\n for item in arg:\n self[item] = \"Holiday\"\n else:\n self[arg] = \"Holiday\"\n\n def append(self, *args):\n return self.update(*args)\n\n def get(self, key, default=None):\n return dict.get(self, 
self.__keytransform__(key), default)\n\n def get_list(self, key):\n return [h for h in self.get(key, \"\").split(\", \") if h]\n\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n return [key for key in self if name.lower() in self[key].lower()]\n\n def pop(self, key, default=None):\n if default is None:\n return dict.pop(self, self.__keytransform__(key))\n return dict.pop(self, self.__keytransform__(key), default)\n\n def pop_named(self, name):\n to_pop = self.get_named(name)\n if not to_pop:\n raise KeyError(name)\n for key in to_pop:\n self.pop(key)\n return to_pop\n\n def __eq__(self, other):\n return dict.__eq__(self, other) and self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return dict.__ne__(self, other) or self.__dict__ != other.__dict__\n\n def __add__(self, other):\n if isinstance(other, int) and other == 0:\n # Required to sum() list of holidays\n # sum([h1, h2]) is equivalent to (0 + h1 + h2)\n return self\n elif not isinstance(other, HolidayBase):\n raise TypeError()\n HolidaySum = createHolidaySum(self, other)\n country = (getattr(self, 'country', None) or\n getattr(other, 'country', None))\n if self.country and other.country and self.country != other.country:\n c1 = self.country\n if not isinstance(c1, list):\n c1 = [c1]\n c2 = other.country\n if not isinstance(c2, list):\n c2 = [c2]\n country = c1 + c2\n prov = getattr(self, 'prov', None) or getattr(other, 'prov', None)\n if self.prov and other.prov and self.prov != other.prov:\n p1 = self.prov if isinstance(self.prov, list) else [self.prov]\n p2 = other.prov if isinstance(other.prov, list) else [other.prov]\n prov = p1 + p2\n return HolidaySum(years=(self.years | other.years),\n expand=(self.expand or other.expand),\n observed=(self.observed or other.observed),\n country=country, prov=prov)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def _populate(self, year):\n pass\n\n\ndef createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n\n def __init__(self, country, **kwargs):\n self.country = country\n self.holidays = []\n if getattr(h1, 'holidays', False):\n for h in h1.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h1)\n if getattr(h2, 'holidays', False):\n for h in h2.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h2)\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n for h in self.holidays[::-1]:\n h._populate(year)\n self.update(h)\n\n return HolidaySum\n", "path": "holidays/holiday_base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta, datetime, date\n\nimport six\nfrom dateutil.parser import parse\n\n\nclass HolidayBase(dict):\n PROVINCES = []\n\n def __init__(self, years=[], expand=True, observed=True,\n prov=None, state=None):\n self.observed = observed\n self.expand = expand\n if isinstance(years, int):\n years = [years, ]\n self.years = set(years)\n if not getattr(self, 'prov', False):\n self.prov = prov\n self.state = state\n for year in list(self.years):\n self._populate(year)\n\n def __setattr__(self, key, value):\n if key == 'observed' and len(self) > 0:\n dict.__setattr__(self, key, value)\n if value is True:\n # Add (Observed) dates\n years = list(self.years)\n self.years = set()\n self.clear()\n for year in years:\n self._populate(year)\n else:\n # Remove (Observed) dates\n for k, v in list(self.items()):\n if v.find(\"Observed\") >= 0:\n del self[k]\n else:\n return dict.__setattr__(self, key, value)\n\n def __keytransform__(self, key):\n if isinstance(key, datetime):\n key = key.date()\n elif isinstance(key, date):\n key = key\n elif isinstance(key, int) or isinstance(key, float):\n key = datetime.utcfromtimestamp(key).date()\n elif isinstance(key, six.string_types):\n try:\n key = parse(key).date()\n except (ValueError, OverflowError):\n raise ValueError(\"Cannot parse date from string '%s'\" % key)\n else:\n raise TypeError(\"Cannot convert type '%s' to date.\" % type(key))\n\n if self.expand and key.year not in self.years:\n self.years.add(key.year)\n self._populate(key.year)\n return key\n\n def __contains__(self, key):\n return dict.__contains__(self, self.__keytransform__(key))\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if not key.start or not key.stop:\n raise ValueError(\"Both start and stop must be given.\")\n\n start = self.__keytransform__(key.start)\n stop = self.__keytransform__(key.stop)\n\n if key.step is None:\n step = 1\n elif isinstance(key.step, timedelta):\n step = key.step.days\n elif isinstance(key.step, int):\n step = key.step\n else:\n raise TypeError(\n \"Cannot convert type '%s' to int.\" % type(key.step)\n )\n\n if step == 0:\n raise ValueError('Step value must not be zero.')\n\n date_diff = stop - start\n if date_diff.days < 0 <= step or date_diff.days >= 0 > step:\n step *= -1\n\n days_in_range = []\n for delta_days in range(0, date_diff.days, step):\n day = start + timedelta(days=delta_days)\n try:\n dict.__getitem__(\n self,\n day\n )\n days_in_range.append(day)\n except KeyError:\n pass\n return days_in_range\n return dict.__getitem__(self, self.__keytransform__(key))\n\n def __setitem__(self, key, value):\n if key in self:\n if self.get(key).find(value) < 0 \\\n and value.find(self.get(key)) < 0:\n value = \"%s, %s\" % (value, self.get(key))\n else:\n value = self.get(key)\n return dict.__setitem__(self, self.__keytransform__(key), value)\n\n def update(self, *args):\n args = list(args)\n for arg in args:\n if isinstance(arg, dict):\n for key, value in list(arg.items()):\n self[key] = value\n elif isinstance(arg, list):\n for item in arg:\n self[item] = \"Holiday\"\n else:\n self[arg] = \"Holiday\"\n\n def append(self, *args):\n return self.update(*args)\n\n def get(self, key, default=None):\n return dict.get(self, 
self.__keytransform__(key), default)\n\n def get_list(self, key):\n return [h for h in self.get(key, \"\").split(\", \") if h]\n\n def get_named(self, name):\n # find all dates matching provided name (accepting partial\n # strings too, case insensitive), returning them in a list\n matches = [key for key in self if name.lower() in self[key].lower()]\n return matches\n\n def pop(self, key, default=None):\n if default is None:\n return dict.pop(self, self.__keytransform__(key))\n return dict.pop(self, self.__keytransform__(key), default)\n\n def pop_named(self, name):\n to_pop = self.get_named(name)\n if not to_pop:\n raise KeyError(name)\n for key in to_pop:\n self.pop(key)\n return to_pop\n\n def __eq__(self, other):\n return dict.__eq__(self, other) and self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return dict.__ne__(self, other) or self.__dict__ != other.__dict__\n\n def __add__(self, other):\n if isinstance(other, int) and other == 0:\n # Required to sum() list of holidays\n # sum([h1, h2]) is equivalent to (0 + h1 + h2)\n return self\n elif not isinstance(other, HolidayBase):\n raise TypeError()\n HolidaySum = createHolidaySum(self, other)\n country = (getattr(self, 'country', None) or\n getattr(other, 'country', None))\n if self.country and other.country and self.country != other.country:\n c1 = self.country\n if not isinstance(c1, list):\n c1 = [c1]\n c2 = other.country\n if not isinstance(c2, list):\n c2 = [c2]\n country = c1 + c2\n prov = getattr(self, 'prov', None) or getattr(other, 'prov', None)\n if self.prov and other.prov and self.prov != other.prov:\n p1 = self.prov if isinstance(self.prov, list) else [self.prov]\n p2 = other.prov if isinstance(other.prov, list) else [other.prov]\n prov = p1 + p2\n return HolidaySum(years=(self.years | other.years),\n expand=(self.expand or other.expand),\n observed=(self.observed or other.observed),\n country=country, prov=prov)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def _populate(self, year):\n pass\n\n\ndef createHolidaySum(h1, h2):\n class HolidaySum(HolidayBase):\n\n def __init__(self, country, **kwargs):\n self.country = country\n self.holidays = []\n if getattr(h1, 'holidays', False):\n for h in h1.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h1)\n if getattr(h2, 'holidays', False):\n for h in h2.holidays:\n self.holidays.append(h)\n else:\n self.holidays.append(h2)\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n for h in self.holidays[::-1]:\n h._populate(year)\n self.update(h)\n\n return HolidaySum\n", "path": "holidays/holiday_base.py"}]}
2,937
145
gh_patches_debug_20514
rasdani/github-patches
git_diff
liqd__a4-product-149
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- tile images on partner page are not cut to same size ![screenshot from 2017-11-28 12-12-35](https://user-images.githubusercontent.com/8178179/33316792-7b8fc848-d435-11e7-8723-8849b5804bce.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `liqd_product/config/urls.py` Content: ``` 1 """Beteiligung.in URL Configuration.""" 2 3 from ckeditor_uploader import views as ck_views 4 from django.conf import settings 5 from django.conf.urls import include 6 from django.conf.urls import url 7 from django.contrib import admin 8 from django.views.decorators.cache import never_cache 9 from django.views.i18n import javascript_catalog 10 from rest_framework import routers 11 12 from adhocracy4.api import routers as a4routers 13 from adhocracy4.comments.api import CommentViewSet 14 from adhocracy4.follows.api import FollowViewSet 15 from adhocracy4.ratings.api import RatingViewSet 16 from adhocracy4.reports.api import ReportViewSet 17 from liqd_product.apps.partners.urlresolvers import partner_patterns 18 from liqd_product.apps.users.decorators import user_is_project_admin 19 from meinberlin.apps.documents.api import DocumentViewSet 20 from meinberlin.apps.polls.api import PollViewSet 21 from meinberlin.apps.polls.api import VoteViewSet 22 from meinberlin.apps.polls.routers import QuestionDefaultRouter 23 24 js_info_dict = { 25 'packages': ('adhocracy4.comments',), 26 } 27 28 router = routers.DefaultRouter() 29 router.register(r'follows', FollowViewSet, base_name='follows') 30 router.register(r'reports', ReportViewSet, base_name='reports') 31 router.register(r'polls', PollViewSet, base_name='polls') 32 33 module_router = a4routers.ModuleDefaultRouter() 34 # FIXME: rename to 'chapters' 35 module_router.register(r'documents', DocumentViewSet, base_name='chapters') 36 37 orga_router = a4routers.OrganisationDefaultRouter() 38 39 ct_router = a4routers.ContentTypeDefaultRouter() 40 ct_router.register(r'comments', CommentViewSet, base_name='comments') 41 ct_router.register(r'ratings', RatingViewSet, base_name='ratings') 42 43 question_router = QuestionDefaultRouter() 44 question_router.register(r'vote', VoteViewSet, base_name='vote') 45 46 47 urlpatterns = [ 48 # General platform urls 49 url(r'^django-admin/', include(admin.site.urls)), 50 url(r'^admin/', include('wagtail.wagtailadmin.urls')), 51 52 url(r'^accounts/', include('allauth.urls')), 53 url(r'^account/', include('liqd_product.apps.account.urls')), 54 url(r'^embed/', include('meinberlin.apps.embed.urls')), 55 url(r'^dashboard/', include('meinberlin.apps.dashboard2.urls')), 56 url(r'^profile/', include('liqd_product.apps.users.urls')), 57 58 # API urls 59 url(r'^api/', include(ct_router.urls)), 60 url(r'^api/', include(module_router.urls)), 61 url(r'^api/', include(orga_router.urls)), 62 url(r'^api/', include(question_router.urls)), 63 url(r'^api/', include(router.urls)), 64 65 url(r'^upload/', user_is_project_admin(ck_views.upload), 66 name='ckeditor_upload'), 67 url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)), 68 name='ckeditor_browse'), 69 70 url(r'^jsi18n/$', javascript_catalog, 71 js_info_dict, name='javascript-catalog'), 72 73 # Urls within the context of a partner 74 partner_patterns( 75 url(r'^modules/', include('adhocracy4.modules.urls')), 76 url(r'^projects/', include('adhocracy4.projects.urls')), 77 
url(r'^offlineevents/', include('meinberlin.apps.offlineevents.urls', 78 namespace='meinberlin_offlineevents')), 79 url(r'^ideas/', include(r'meinberlin.apps.ideas.urls', 80 namespace='meinberlin_ideas')), 81 url(r'^mapideas/', include('meinberlin.apps.mapideas.urls', 82 namespace='meinberlin_mapideas')), 83 url(r'^text/', include('meinberlin.apps.documents.urls', 84 namespace='meinberlin_documents')), 85 ), 86 87 url(r'', include('liqd_product.apps.partners.urls')), 88 url(r'', include('wagtail.wagtailcore.urls')) 89 ] 90 91 92 if settings.DEBUG: 93 from django.conf.urls.static import static 94 from django.contrib.staticfiles.urls import staticfiles_urlpatterns 95 96 # Serve static and media locally 97 urlpatterns += staticfiles_urlpatterns() 98 urlpatterns += static(settings.MEDIA_URL, 99 document_root=settings.MEDIA_ROOT) 100 try: 101 import debug_toolbar 102 except ImportError: 103 pass 104 else: 105 urlpatterns = [ 106 url(r'^__debug__/', include(debug_toolbar.urls)), 107 ] + urlpatterns 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/liqd_product/config/urls.py b/liqd_product/config/urls.py --- a/liqd_product/config/urls.py +++ b/liqd_product/config/urls.py @@ -16,6 +16,7 @@ from adhocracy4.reports.api import ReportViewSet from liqd_product.apps.partners.urlresolvers import partner_patterns from liqd_product.apps.users.decorators import user_is_project_admin +from meinberlin.apps.contrib import views as contrib_views from meinberlin.apps.documents.api import DocumentViewSet from meinberlin.apps.polls.api import PollViewSet from meinberlin.apps.polls.api import VoteViewSet @@ -67,6 +68,7 @@ url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)), name='ckeditor_browse'), + url(r'^components/$', contrib_views.ComponentLibraryView.as_view()), url(r'^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog'),
{"golden_diff": "diff --git a/liqd_product/config/urls.py b/liqd_product/config/urls.py\n--- a/liqd_product/config/urls.py\n+++ b/liqd_product/config/urls.py\n@@ -16,6 +16,7 @@\n from adhocracy4.reports.api import ReportViewSet\n from liqd_product.apps.partners.urlresolvers import partner_patterns\n from liqd_product.apps.users.decorators import user_is_project_admin\n+from meinberlin.apps.contrib import views as contrib_views\n from meinberlin.apps.documents.api import DocumentViewSet\n from meinberlin.apps.polls.api import PollViewSet\n from meinberlin.apps.polls.api import VoteViewSet\n@@ -67,6 +68,7 @@\n url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)),\n name='ckeditor_browse'),\n \n+ url(r'^components/$', contrib_views.ComponentLibraryView.as_view()),\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n", "issue": "tile images on partner page are not cut to same size\n![screenshot from 2017-11-28 12-12-35](https://user-images.githubusercontent.com/8178179/33316792-7b8fc848-d435-11e7-8723-8849b5804bce.png)\r\n\n", "before_files": [{"content": "\"\"\"Beteiligung.in URL Configuration.\"\"\"\n\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf import settings\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\n\nfrom adhocracy4.api import routers as a4routers\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.follows.api import FollowViewSet\nfrom adhocracy4.ratings.api import RatingViewSet\nfrom adhocracy4.reports.api import ReportViewSet\nfrom liqd_product.apps.partners.urlresolvers import partner_patterns\nfrom liqd_product.apps.users.decorators import user_is_project_admin\nfrom meinberlin.apps.documents.api import DocumentViewSet\nfrom meinberlin.apps.polls.api import PollViewSet\nfrom meinberlin.apps.polls.api import VoteViewSet\nfrom meinberlin.apps.polls.routers import QuestionDefaultRouter\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'follows', FollowViewSet, base_name='follows')\nrouter.register(r'reports', ReportViewSet, base_name='reports')\nrouter.register(r'polls', PollViewSet, base_name='polls')\n\nmodule_router = a4routers.ModuleDefaultRouter()\n# FIXME: rename to 'chapters'\nmodule_router.register(r'documents', DocumentViewSet, base_name='chapters')\n\norga_router = a4routers.OrganisationDefaultRouter()\n\nct_router = a4routers.ContentTypeDefaultRouter()\nct_router.register(r'comments', CommentViewSet, base_name='comments')\nct_router.register(r'ratings', RatingViewSet, base_name='ratings')\n\nquestion_router = QuestionDefaultRouter()\nquestion_router.register(r'vote', VoteViewSet, base_name='vote')\n\n\nurlpatterns = [\n # General platform urls\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^admin/', include('wagtail.wagtailadmin.urls')),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^account/', include('liqd_product.apps.account.urls')),\n url(r'^embed/', include('meinberlin.apps.embed.urls')),\n url(r'^dashboard/', include('meinberlin.apps.dashboard2.urls')),\n url(r'^profile/', include('liqd_product.apps.users.urls')),\n\n # API urls\n url(r'^api/', include(ct_router.urls)),\n url(r'^api/', include(module_router.urls)),\n url(r'^api/', include(orga_router.urls)),\n url(r'^api/', 
include(question_router.urls)),\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/', user_is_project_admin(ck_views.upload),\n name='ckeditor_upload'),\n url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)),\n name='ckeditor_browse'),\n\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n\n # Urls within the context of a partner\n partner_patterns(\n url(r'^modules/', include('adhocracy4.modules.urls')),\n url(r'^projects/', include('adhocracy4.projects.urls')),\n url(r'^offlineevents/', include('meinberlin.apps.offlineevents.urls',\n namespace='meinberlin_offlineevents')),\n url(r'^ideas/', include(r'meinberlin.apps.ideas.urls',\n namespace='meinberlin_ideas')),\n url(r'^mapideas/', include('meinberlin.apps.mapideas.urls',\n namespace='meinberlin_mapideas')),\n url(r'^text/', include('meinberlin.apps.documents.urls',\n namespace='meinberlin_documents')),\n ),\n\n url(r'', include('liqd_product.apps.partners.urls')),\n url(r'', include('wagtail.wagtailcore.urls'))\n]\n\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n # Serve static and media locally\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n try:\n import debug_toolbar\n except ImportError:\n pass\n else:\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n", "path": "liqd_product/config/urls.py"}], "after_files": [{"content": "\"\"\"Beteiligung.in URL Configuration.\"\"\"\n\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf import settings\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\n\nfrom adhocracy4.api import routers as a4routers\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.follows.api import FollowViewSet\nfrom adhocracy4.ratings.api import RatingViewSet\nfrom adhocracy4.reports.api import ReportViewSet\nfrom liqd_product.apps.partners.urlresolvers import partner_patterns\nfrom liqd_product.apps.users.decorators import user_is_project_admin\nfrom meinberlin.apps.contrib import views as contrib_views\nfrom meinberlin.apps.documents.api import DocumentViewSet\nfrom meinberlin.apps.polls.api import PollViewSet\nfrom meinberlin.apps.polls.api import VoteViewSet\nfrom meinberlin.apps.polls.routers import QuestionDefaultRouter\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'follows', FollowViewSet, base_name='follows')\nrouter.register(r'reports', ReportViewSet, base_name='reports')\nrouter.register(r'polls', PollViewSet, base_name='polls')\n\nmodule_router = a4routers.ModuleDefaultRouter()\n# FIXME: rename to 'chapters'\nmodule_router.register(r'documents', DocumentViewSet, base_name='chapters')\n\norga_router = a4routers.OrganisationDefaultRouter()\n\nct_router = a4routers.ContentTypeDefaultRouter()\nct_router.register(r'comments', CommentViewSet, base_name='comments')\nct_router.register(r'ratings', RatingViewSet, base_name='ratings')\n\nquestion_router = QuestionDefaultRouter()\nquestion_router.register(r'vote', VoteViewSet, base_name='vote')\n\n\nurlpatterns = [\n # General platform urls\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^admin/', 
include('wagtail.wagtailadmin.urls')),\n\n url(r'^accounts/', include('allauth.urls')),\n url(r'^account/', include('liqd_product.apps.account.urls')),\n url(r'^embed/', include('meinberlin.apps.embed.urls')),\n url(r'^dashboard/', include('meinberlin.apps.dashboard2.urls')),\n url(r'^profile/', include('liqd_product.apps.users.urls')),\n\n # API urls\n url(r'^api/', include(ct_router.urls)),\n url(r'^api/', include(module_router.urls)),\n url(r'^api/', include(orga_router.urls)),\n url(r'^api/', include(question_router.urls)),\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/', user_is_project_admin(ck_views.upload),\n name='ckeditor_upload'),\n url(r'^browse/', never_cache(user_is_project_admin(ck_views.browse)),\n name='ckeditor_browse'),\n\n url(r'^components/$', contrib_views.ComponentLibraryView.as_view()),\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n\n # Urls within the context of a partner\n partner_patterns(\n url(r'^modules/', include('adhocracy4.modules.urls')),\n url(r'^projects/', include('adhocracy4.projects.urls')),\n url(r'^offlineevents/', include('meinberlin.apps.offlineevents.urls',\n namespace='meinberlin_offlineevents')),\n url(r'^ideas/', include(r'meinberlin.apps.ideas.urls',\n namespace='meinberlin_ideas')),\n url(r'^mapideas/', include('meinberlin.apps.mapideas.urls',\n namespace='meinberlin_mapideas')),\n url(r'^text/', include('meinberlin.apps.documents.urls',\n namespace='meinberlin_documents')),\n ),\n\n url(r'', include('liqd_product.apps.partners.urls')),\n url(r'', include('wagtail.wagtailcore.urls'))\n]\n\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n # Serve static and media locally\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n try:\n import debug_toolbar\n except ImportError:\n pass\n else:\n urlpatterns = [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ] + urlpatterns\n", "path": "liqd_product/config/urls.py"}]}
1,511
216
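The golden diff in the record above reduces to one extra class-based-view route added to a Django 1.x style `urls.py`. A minimal, self-contained sketch of that registration pattern follows; the view class and template name here are illustrative stand-ins, not taken from the record's project:

```python
# Sketch of wiring a class-based view into Django 1.x style urlpatterns,
# mirroring the `url(r'^components/$', ...as_view())` line added by the diff.
from django.conf.urls import url
from django.views.generic import TemplateView


class ComponentLibraryView(TemplateView):
    # Stand-in for the project's real view; the template name is assumed.
    template_name = "components.html"


urlpatterns = [
    # `.as_view()` turns the class into the callable that url() expects.
    url(r'^components/$', ComponentLibraryView.as_view()),
]
```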
gh_patches_debug_30361
rasdani/github-patches
git_diff
pytorch__ignite-1771
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Patch MNIST dataset downloading to fix CI Currently, there is an issue with downloading MNIST dataset using torchvision. Let's introduce the following patch to our CI to fix it: - https://github.com/pytorch/vision/issues/3500#issuecomment-790491487 Where to put that: - create new step with the patch before [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.github/workflows/unit-tests.yml#L106) - add patch [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.circleci/config.yml#L147). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/mnist/mnist_patch.py` Content: ``` 1 """Patch to fix MNIST download issue as described here: 2 - https://github.com/pytorch/ignite/issues/1737 3 - https://github.com/pytorch/vision/issues/3500 4 """ 5 6 import os 7 import subprocess as sp 8 9 import torch 10 from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file 11 from torchvision.datasets.utils import extract_archive 12 13 14 def patched_download(self): 15 """wget patched download method. 16 """ 17 if self._check_exists(): 18 return 19 20 os.makedirs(self.raw_folder, exist_ok=True) 21 os.makedirs(self.processed_folder, exist_ok=True) 22 23 # download files 24 for url, md5 in self.resources: 25 filename = url.rpartition("/")[2] 26 download_root = os.path.expanduser(self.raw_folder) 27 extract_root = None 28 remove_finished = False 29 30 if extract_root is None: 31 extract_root = download_root 32 if not filename: 33 filename = os.path.basename(url) 34 35 # Use wget to download archives 36 sp.run(["wget", url, "-P", download_root]) 37 38 archive = os.path.join(download_root, filename) 39 print("Extracting {} to {}".format(archive, extract_root)) 40 extract_archive(archive, extract_root, remove_finished) 41 42 # process and save as torch files 43 print("Processing...") 44 45 training_set = ( 46 read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")), 47 read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")), 48 ) 49 test_set = ( 50 read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")), 51 read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")), 52 ) 53 with open(os.path.join(self.processed_folder, self.training_file), "wb") as f: 54 torch.save(training_set, f) 55 with open(os.path.join(self.processed_folder, self.test_file), "wb") as f: 56 torch.save(test_set, f) 57 58 print("Done!") 59 60 61 def main(): 62 # Patch download method 63 MNIST.download = patched_download 64 # Download MNIST 65 MNIST(".", download=True) 66 67 68 if __name__ == "__main__": 69 main() 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/mnist/mnist_patch.py b/examples/mnist/mnist_patch.py deleted file mode 100644 --- a/examples/mnist/mnist_patch.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Patch to fix MNIST download issue as described here: -- https://github.com/pytorch/ignite/issues/1737 -- https://github.com/pytorch/vision/issues/3500 -""" - -import os -import subprocess as sp - -import torch -from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file -from torchvision.datasets.utils import extract_archive - - -def patched_download(self): - """wget patched download method. - """ - if self._check_exists(): - return - - os.makedirs(self.raw_folder, exist_ok=True) - os.makedirs(self.processed_folder, exist_ok=True) - - # download files - for url, md5 in self.resources: - filename = url.rpartition("/")[2] - download_root = os.path.expanduser(self.raw_folder) - extract_root = None - remove_finished = False - - if extract_root is None: - extract_root = download_root - if not filename: - filename = os.path.basename(url) - - # Use wget to download archives - sp.run(["wget", url, "-P", download_root]) - - archive = os.path.join(download_root, filename) - print("Extracting {} to {}".format(archive, extract_root)) - extract_archive(archive, extract_root, remove_finished) - - # process and save as torch files - print("Processing...") - - training_set = ( - read_image_file(os.path.join(self.raw_folder, "train-images-idx3-ubyte")), - read_label_file(os.path.join(self.raw_folder, "train-labels-idx1-ubyte")), - ) - test_set = ( - read_image_file(os.path.join(self.raw_folder, "t10k-images-idx3-ubyte")), - read_label_file(os.path.join(self.raw_folder, "t10k-labels-idx1-ubyte")), - ) - with open(os.path.join(self.processed_folder, self.training_file), "wb") as f: - torch.save(training_set, f) - with open(os.path.join(self.processed_folder, self.test_file), "wb") as f: - torch.save(test_set, f) - - print("Done!") - - -def main(): - # Patch download method - MNIST.download = patched_download - # Download MNIST - MNIST(".", download=True) - - -if __name__ == "__main__": - main()
{"golden_diff": "diff --git a/examples/mnist/mnist_patch.py b/examples/mnist/mnist_patch.py\ndeleted file mode 100644\n--- a/examples/mnist/mnist_patch.py\n+++ /dev/null\n@@ -1,69 +0,0 @@\n-\"\"\"Patch to fix MNIST download issue as described here:\n-- https://github.com/pytorch/ignite/issues/1737\n-- https://github.com/pytorch/vision/issues/3500\n-\"\"\"\n-\n-import os\n-import subprocess as sp\n-\n-import torch\n-from torchvision.datasets.mnist import MNIST, read_image_file, read_label_file\n-from torchvision.datasets.utils import extract_archive\n-\n-\n-def patched_download(self):\n- \"\"\"wget patched download method.\n- \"\"\"\n- if self._check_exists():\n- return\n-\n- os.makedirs(self.raw_folder, exist_ok=True)\n- os.makedirs(self.processed_folder, exist_ok=True)\n-\n- # download files\n- for url, md5 in self.resources:\n- filename = url.rpartition(\"/\")[2]\n- download_root = os.path.expanduser(self.raw_folder)\n- extract_root = None\n- remove_finished = False\n-\n- if extract_root is None:\n- extract_root = download_root\n- if not filename:\n- filename = os.path.basename(url)\n-\n- # Use wget to download archives\n- sp.run([\"wget\", url, \"-P\", download_root])\n-\n- archive = os.path.join(download_root, filename)\n- print(\"Extracting {} to {}\".format(archive, extract_root))\n- extract_archive(archive, extract_root, remove_finished)\n-\n- # process and save as torch files\n- print(\"Processing...\")\n-\n- training_set = (\n- read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n- read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n- )\n- test_set = (\n- read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n- read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n- )\n- with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n- torch.save(training_set, f)\n- with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n- torch.save(test_set, f)\n-\n- print(\"Done!\")\n-\n-\n-def main():\n- # Patch download method\n- MNIST.download = patched_download\n- # Download MNIST\n- MNIST(\".\", download=True)\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Patch MNIST dataset downloading to fix CI\nCurrently, there is an issue with downloading MNIST dataset using torchvision. 
Let's introduce the following patch to our CI to fix it:\r\n- https://github.com/pytorch/vision/issues/3500#issuecomment-790491487\r\n\r\nWhere to put that:\r\n- create new step with the patch before [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.github/workflows/unit-tests.yml#L106)\r\n- add patch [here](https://github.com/pytorch/ignite/blob/700f0e1325efc5dc0dce88d26284e51bc2a7c87c/.circleci/config.yml#L147).\n", "before_files": [{"content": "\"\"\"Patch to fix MNIST download issue as described here:\n- https://github.com/pytorch/ignite/issues/1737\n- https://github.com/pytorch/vision/issues/3500\n\"\"\"\n\nimport os\nimport subprocess as sp\n\nimport torch\nfrom torchvision.datasets.mnist import MNIST, read_image_file, read_label_file\nfrom torchvision.datasets.utils import extract_archive\n\n\ndef patched_download(self):\n \"\"\"wget patched download method.\n \"\"\"\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url, md5 in self.resources:\n filename = url.rpartition(\"/\")[2]\n download_root = os.path.expanduser(self.raw_folder)\n extract_root = None\n remove_finished = False\n\n if extract_root is None:\n extract_root = download_root\n if not filename:\n filename = os.path.basename(url)\n\n # Use wget to download archives\n sp.run([\"wget\", url, \"-P\", download_root])\n\n archive = os.path.join(download_root, filename)\n print(\"Extracting {} to {}\".format(archive, extract_root))\n extract_archive(archive, extract_root, remove_finished)\n\n # process and save as torch files\n print(\"Processing...\")\n\n training_set = (\n read_image_file(os.path.join(self.raw_folder, \"train-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"train-labels-idx1-ubyte\")),\n )\n test_set = (\n read_image_file(os.path.join(self.raw_folder, \"t10k-images-idx3-ubyte\")),\n read_label_file(os.path.join(self.raw_folder, \"t10k-labels-idx1-ubyte\")),\n )\n with open(os.path.join(self.processed_folder, self.training_file), \"wb\") as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), \"wb\") as f:\n torch.save(test_set, f)\n\n print(\"Done!\")\n\n\ndef main():\n # Patch download method\n MNIST.download = patched_download\n # Download MNIST\n MNIST(\".\", download=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/mnist/mnist_patch.py"}], "after_files": [{"content": null, "path": "examples/mnist/mnist_patch.py"}]}
1,095
600
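For reference, the CI-side workaround referenced in the issue above (pytorch/vision#3500) amounts to repointing torchvision's MNIST download URLs at a mirror before the dataset is instantiated. A minimal sketch of that idea, assuming a torchvision version that still exposes the `MNIST.resources` list of `(url, md5)` pairs; the mirror URL is a commonly cited S3 host, not something guaranteed by this record:

```python
# Rewrite each (url, md5) entry in MNIST.resources so archives are fetched
# from a mirror instead of yann.lecun.com. The `resources` attribute and the
# mirror address are assumptions about the torchvision version in use.
from torchvision import datasets

NEW_MIRROR = "https://ossci-datasets.s3.amazonaws.com/mnist"

datasets.MNIST.resources = [
    (f"{NEW_MIRROR}/{url.rpartition('/')[2]}", md5)
    for url, md5 in datasets.MNIST.resources
]

# Subsequent downloads now hit the mirror instead of the original host.
train_set = datasets.MNIST(".", train=True, download=True)
```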
gh_patches_debug_18310
rasdani/github-patches
git_diff
huggingface__accelerate-695
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `notebook_launcher` cannot use MPS device on M1 or M2 Mac ### System Info ```Shell - `Accelerate` version: 0.13.0.dev0 - Platform: macOS-12.5.1-arm64-arm-64bit - Python version: 3.10.4 - Numpy version: 1.23.1 - PyTorch version (GPU?): 1.13.0.dev20220909 (False) - `Accelerate` default config: - compute_environment: LOCAL_MACHINE - distributed_type: MPS - mixed_precision: no - use_cpu: False - num_processes: 1 - machine_rank: 0 - num_machines: 1 - main_process_ip: None - main_process_port: None - rdzv_backend: static - same_network: False - main_training_function: main - deepspeed_config: {} - fsdp_config: {} - downcast_bf16: no ``` ### Information - [ ] The official example scripts - [X] My own modified scripts ### Tasks - [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`) - [X] My own task or dataset (give details below) ### Reproduction Steps to reproduce: 1. Stable Diffusion Textual Inversion Colab script: https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb 2. Run script on M1 Max, expecting `accelerate.notebook_launcher` to report running the training on *GPU (or MPS)* 3. Observe it runs the training on *CPU* instead ### Expected behavior ```Shell With a previous PR, `mps` M1 GPU support was introduced to Accelerate: https://github.com/huggingface/accelerate/pull/596 However `notebook_launcher` needs to be adapted to make use of this change as well. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/accelerate/launchers.py` Content: ``` 1 # Copyright 2021 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import sys 17 import tempfile 18 import warnings 19 20 import torch 21 22 from .state import AcceleratorState 23 from .utils import PrecisionType, PrepareForLaunch, patch_environment 24 25 26 def notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision="no", use_port="29500"): 27 """ 28 Launches a training function, using several processes if it's possible in the current environment (TPU with 29 multiple cores for instance). 30 31 Args: 32 function (`Callable`): 33 The training function to execute. If it accepts arguments, the first argument should be the index of the 34 process run. 35 args (`Tuple`): 36 Tuple of arguments to pass to the function (it will receive `*args`). 37 num_processes (`int`, *optional*): 38 The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to 39 the number of GPUs available otherwise. 
40 mixed_precision (`str`, *optional*, defaults to `"no"`): 41 If `fp16` or `bf16`, will use mixed precision training on multi-GPU. 42 use_port (`str`, *optional*, defaults to `"29500"`): 43 The port to use to communicate between processes when launching a multi-GPU training. 44 """ 45 # Are we in a google colab or a Kaggle Kernel? 46 if any(key.startswith("KAGGLE") for key in os.environ.keys()): 47 in_colab_or_kaggle = True 48 elif "IPython" in sys.modules: 49 in_colab_or_kaggle = "google.colab" in str(sys.modules["IPython"].get_ipython()) 50 else: 51 in_colab_or_kaggle = False 52 53 try: 54 mixed_precision = PrecisionType(mixed_precision.lower()) 55 except ValueError: 56 raise ValueError( 57 f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." 58 ) 59 60 if in_colab_or_kaggle: 61 if os.environ.get("TPU_NAME", None) is not None: 62 # TPU launch 63 import torch_xla.distributed.xla_multiprocessing as xmp 64 65 if len(AcceleratorState._shared_state) > 0: 66 raise ValueError( 67 "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " 68 "your training function. Restart your notebook and make sure no cells initializes an " 69 "`Accelerator`." 70 ) 71 if num_processes is None: 72 num_processes = 8 73 74 launcher = PrepareForLaunch(function, distributed_type="TPU") 75 print(f"Launching a training on {num_processes} TPU cores.") 76 xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork") 77 else: 78 # No need for a distributed launch otherwise as it's either CPU or one GPU. 79 if torch.cuda.is_available(): 80 print("Launching training on one GPU.") 81 else: 82 print("Launching training on one CPU.") 83 function(*args) 84 85 else: 86 if num_processes is None: 87 raise ValueError( 88 "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." 89 ) 90 91 if num_processes > 1: 92 # Multi-GPU launch 93 from torch.multiprocessing import start_processes 94 95 if len(AcceleratorState._shared_state) > 0: 96 raise ValueError( 97 "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " 98 "inside your training function. Restart your notebook and make sure no cells initializes an " 99 "`Accelerator`." 100 ) 101 102 if torch.cuda.is_initialized(): 103 raise ValueError( 104 "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " 105 "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " 106 "function." 107 ) 108 109 if use_fp16: 110 warnings.warn('use_fp16=True is deprecated. Use mixed_precision="fp16" instead.', DeprecationWarning) 111 mixed_precision = "fp16" 112 113 # torch.distributed will expect a few environment variable to be here. We set the ones common to each 114 # process here (the other ones will be set be the launcher). 115 with patch_environment( 116 world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision 117 ): 118 launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU") 119 120 print(f"Launching training on {num_processes} GPUs.") 121 start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") 122 123 else: 124 # No need for a distributed launch otherwise as it's either CPU or one GPU. 
125 if torch.cuda.is_available(): 126 print("Launching training on one GPU.") 127 else: 128 print("Launching training on CPU.") 129 function(*args) 130 131 132 def debug_launcher(function, args=(), num_processes=2): 133 """ 134 Launches a training function using several processes on CPU for debugging purposes. 135 136 <Tip warning={true}> 137 138 This function is provided for internal testing and debugging, but it's not intended for real trainings. It will 139 only use the CPU. 140 141 </Tip> 142 143 Args: 144 function (`Callable`): 145 The training function to execute. 146 args (`Tuple`): 147 Tuple of arguments to pass to the function (it will receive `*args`). 148 num_processes (`int`, *optional*, defaults to 2): 149 The number of processes to use for training. 150 """ 151 from torch.multiprocessing import start_processes 152 153 with tempfile.NamedTemporaryFile() as tmp_file: 154 # torch.distributed will expect a few environment variable to be here. We set the ones common to each 155 # process here (the other ones will be set be the launcher). 156 with patch_environment( 157 world_size=num_processes, 158 master_addr="127.0.01", 159 master_port="29500", 160 mixed_precision="no", 161 accelerate_debug_rdv_file=tmp_file.name, 162 use_cpu="yes", 163 ): 164 launcher = PrepareForLaunch(function, debug=True) 165 start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py --- a/src/accelerate/launchers.py +++ b/src/accelerate/launchers.py @@ -121,12 +121,17 @@ start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") else: - # No need for a distributed launch otherwise as it's either CPU or one GPU. - if torch.cuda.is_available(): + # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. + use_mps_device = "false" + if torch.backends.mps.is_available(): + print("Launching training on MPS.") + use_mps_device = "true" + elif torch.cuda.is_available(): print("Launching training on one GPU.") else: print("Launching training on CPU.") - function(*args) + with patch_environment(use_mps_device=use_mps_device): + function(*args) def debug_launcher(function, args=(), num_processes=2):
{"golden_diff": "diff --git a/src/accelerate/launchers.py b/src/accelerate/launchers.py\n--- a/src/accelerate/launchers.py\n+++ b/src/accelerate/launchers.py\n@@ -121,12 +121,17 @@\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n \n else:\n- # No need for a distributed launch otherwise as it's either CPU or one GPU.\n- if torch.cuda.is_available():\n+ # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.\n+ use_mps_device = \"false\"\n+ if torch.backends.mps.is_available():\n+ print(\"Launching training on MPS.\")\n+ use_mps_device = \"true\"\n+ elif torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on CPU.\")\n- function(*args)\n+ with patch_environment(use_mps_device=use_mps_device):\n+ function(*args)\n \n \n def debug_launcher(function, args=(), num_processes=2):\n", "issue": "`notebook_launcher` cannot use MPS device on M1 or M2 Mac\n### System Info\r\n\r\n```Shell\r\n- `Accelerate` version: 0.13.0.dev0\r\n- Platform: macOS-12.5.1-arm64-arm-64bit\r\n- Python version: 3.10.4\r\n- Numpy version: 1.23.1\r\n- PyTorch version (GPU?): 1.13.0.dev20220909 (False)\r\n- `Accelerate` default config:\r\n\t- compute_environment: LOCAL_MACHINE\r\n\t- distributed_type: MPS\r\n\t- mixed_precision: no\r\n\t- use_cpu: False\r\n\t- num_processes: 1\r\n\t- machine_rank: 0\r\n\t- num_machines: 1\r\n\t- main_process_ip: None\r\n\t- main_process_port: None\r\n\t- rdzv_backend: static\r\n\t- same_network: False\r\n\t- main_training_function: main\r\n\t- deepspeed_config: {}\r\n\t- fsdp_config: {}\r\n\t- downcast_bf16: no\r\n```\r\n\r\n\r\n### Information\r\n\r\n- [ ] The official example scripts\r\n- [X] My own modified scripts\r\n\r\n### Tasks\r\n\r\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\r\n- [X] My own task or dataset (give details below)\r\n\r\n### Reproduction\r\n\r\nSteps to reproduce:\r\n1. Stable Diffusion Textual Inversion Colab script: https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb\r\n2. Run script on M1 Max, expecting `accelerate.notebook_launcher` to report running the training on *GPU (or MPS)*\r\n3. Observe it runs the training on *CPU* instead\r\n\r\n### Expected behavior\r\n\r\n```Shell\r\nWith a previous PR, `mps` M1 GPU support was introduced to Accelerate: https://github.com/huggingface/accelerate/pull/596\r\n\r\nHowever `notebook_launcher` needs to be adapted to make use of this change as well.\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport tempfile\nimport warnings\n\nimport torch\n\nfrom .state import AcceleratorState\nfrom .utils import PrecisionType, PrepareForLaunch, patch_environment\n\n\ndef notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n \"\"\"\n Launches a training function, using several processes if it's possible in the current environment (TPU with\n multiple cores for instance).\n\n Args:\n function (`Callable`):\n The training function to execute. If it accepts arguments, the first argument should be the index of the\n process run.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*):\n The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to\n the number of GPUs available otherwise.\n mixed_precision (`str`, *optional*, defaults to `\"no\"`):\n If `fp16` or `bf16`, will use mixed precision training on multi-GPU.\n use_port (`str`, *optional*, defaults to `\"29500\"`):\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n # Are we in a google colab or a Kaggle Kernel?\n if any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n in_colab_or_kaggle = True\n elif \"IPython\" in sys.modules:\n in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n else:\n in_colab_or_kaggle = False\n\n try:\n mixed_precision = PrecisionType(mixed_precision.lower())\n except ValueError:\n raise ValueError(\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n )\n\n if in_colab_or_kaggle:\n if os.environ.get(\"TPU_NAME\", None) is not None:\n # TPU launch\n import torch_xla.distributed.xla_multiprocessing as xmp\n\n if len(AcceleratorState._shared_state) > 0:\n raise ValueError(\n \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\n \"your training function. 
Restart your notebook and make sure no cells initializes an \"\n \"`Accelerator`.\"\n )\n if num_processes is None:\n num_processes = 8\n\n launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n if torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on one CPU.\")\n function(*args)\n\n else:\n if num_processes is None:\n raise ValueError(\n \"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.\"\n )\n\n if num_processes > 1:\n # Multi-GPU launch\n from torch.multiprocessing import start_processes\n\n if len(AcceleratorState._shared_state) > 0:\n raise ValueError(\n \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n \"`Accelerator`.\"\n )\n\n if torch.cuda.is_initialized():\n raise ValueError(\n \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n \"function.\"\n )\n\n if use_fp16:\n warnings.warn('use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n\n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\n world_size=num_processes, master_addr=\"127.0.01\", master_port=use_port, mixed_precision=mixed_precision\n ):\n launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n\n print(f\"Launching training on {num_processes} GPUs.\")\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n if torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on CPU.\")\n function(*args)\n\n\ndef debug_launcher(function, args=(), num_processes=2):\n \"\"\"\n Launches a training function using several processes on CPU for debugging purposes.\n\n <Tip warning={true}>\n\n This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n only use the CPU.\n\n </Tip>\n\n Args:\n function (`Callable`):\n The training function to execute.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n \"\"\"\n from torch.multiprocessing import start_processes\n\n with tempfile.NamedTemporaryFile() as tmp_file:\n # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\n world_size=num_processes,\n master_addr=\"127.0.01\",\n master_port=\"29500\",\n mixed_precision=\"no\",\n accelerate_debug_rdv_file=tmp_file.name,\n use_cpu=\"yes\",\n ):\n launcher = PrepareForLaunch(function, debug=True)\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n", "path": "src/accelerate/launchers.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport tempfile\nimport warnings\n\nimport torch\n\nfrom .state import AcceleratorState\nfrom .utils import PrecisionType, PrepareForLaunch, patch_environment\n\n\ndef notebook_launcher(function, args=(), num_processes=None, use_fp16=False, mixed_precision=\"no\", use_port=\"29500\"):\n \"\"\"\n Launches a training function, using several processes if it's possible in the current environment (TPU with\n multiple cores for instance).\n\n Args:\n function (`Callable`):\n The training function to execute. If it accepts arguments, the first argument should be the index of the\n process run.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*):\n The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to\n the number of GPUs available otherwise.\n mixed_precision (`str`, *optional*, defaults to `\"no\"`):\n If `fp16` or `bf16`, will use mixed precision training on multi-GPU.\n use_port (`str`, *optional*, defaults to `\"29500\"`):\n The port to use to communicate between processes when launching a multi-GPU training.\n \"\"\"\n # Are we in a google colab or a Kaggle Kernel?\n if any(key.startswith(\"KAGGLE\") for key in os.environ.keys()):\n in_colab_or_kaggle = True\n elif \"IPython\" in sys.modules:\n in_colab_or_kaggle = \"google.colab\" in str(sys.modules[\"IPython\"].get_ipython())\n else:\n in_colab_or_kaggle = False\n\n try:\n mixed_precision = PrecisionType(mixed_precision.lower())\n except ValueError:\n raise ValueError(\n f\"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.\"\n )\n\n if in_colab_or_kaggle:\n if os.environ.get(\"TPU_NAME\", None) is not None:\n # TPU launch\n import torch_xla.distributed.xla_multiprocessing as xmp\n\n if len(AcceleratorState._shared_state) > 0:\n raise ValueError(\n \"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside \"\n \"your training function. 
Restart your notebook and make sure no cells initializes an \"\n \"`Accelerator`.\"\n )\n if num_processes is None:\n num_processes = 8\n\n launcher = PrepareForLaunch(function, distributed_type=\"TPU\")\n print(f\"Launching a training on {num_processes} TPU cores.\")\n xmp.spawn(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n else:\n # No need for a distributed launch otherwise as it's either CPU or one GPU.\n if torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on one CPU.\")\n function(*args)\n\n else:\n if num_processes is None:\n raise ValueError(\n \"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.\"\n )\n\n if num_processes > 1:\n # Multi-GPU launch\n from torch.multiprocessing import start_processes\n\n if len(AcceleratorState._shared_state) > 0:\n raise ValueError(\n \"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized \"\n \"inside your training function. Restart your notebook and make sure no cells initializes an \"\n \"`Accelerator`.\"\n )\n\n if torch.cuda.is_initialized():\n raise ValueError(\n \"To launch a multi-GPU training from your notebook, you need to avoid running any instruction \"\n \"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA \"\n \"function.\"\n )\n\n if use_fp16:\n warnings.warn('use_fp16=True is deprecated. Use mixed_precision=\"fp16\" instead.', DeprecationWarning)\n mixed_precision = \"fp16\"\n\n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\n world_size=num_processes, master_addr=\"127.0.01\", master_port=use_port, mixed_precision=mixed_precision\n ):\n launcher = PrepareForLaunch(function, distributed_type=\"MULTI_GPU\")\n\n print(f\"Launching training on {num_processes} GPUs.\")\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n\n else:\n # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.\n use_mps_device = \"false\"\n if torch.backends.mps.is_available():\n print(\"Launching training on MPS.\")\n use_mps_device = \"true\"\n elif torch.cuda.is_available():\n print(\"Launching training on one GPU.\")\n else:\n print(\"Launching training on CPU.\")\n with patch_environment(use_mps_device=use_mps_device):\n function(*args)\n\n\ndef debug_launcher(function, args=(), num_processes=2):\n \"\"\"\n Launches a training function using several processes on CPU for debugging purposes.\n\n <Tip warning={true}>\n\n This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n only use the CPU.\n\n </Tip>\n\n Args:\n function (`Callable`):\n The training function to execute.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n \"\"\"\n from torch.multiprocessing import start_processes\n\n with tempfile.NamedTemporaryFile() as tmp_file:\n # torch.distributed will expect a few environment variable to be here. 
We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\n world_size=num_processes,\n master_addr=\"127.0.01\",\n master_port=\"29500\",\n mixed_precision=\"no\",\n accelerate_debug_rdv_file=tmp_file.name,\n use_cpu=\"yes\",\n ):\n launcher = PrepareForLaunch(function, debug=True)\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n", "path": "src/accelerate/launchers.py"}]}
2,668
239
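The device probe at the heart of the golden diff above is easy to reproduce outside of Accelerate: prefer MPS, then CUDA, then fall back to CPU. A minimal standalone sketch under the assumption of PyTorch 1.12+ for the `mps` backend (the `getattr` guard covers older builds where that module is absent):

```python
import torch


def pick_device() -> torch.device:
    # Prefer Apple's Metal backend when available, mirroring the ordering
    # the patched notebook_launcher uses, then CUDA, then CPU.
    if getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
        return torch.device("mps")
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")


device = pick_device()
print(f"Launching training on {device.type.upper()}.")
model = torch.nn.Linear(4, 2).to(device)  # tiny smoke test that .to(device) works
```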
gh_patches_debug_10539
rasdani/github-patches
git_diff
pwndbg__pwndbg-216
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- IDA Pro Integration - decompile() segfault Calling `idaapi.decompile()` causes my IDA `6.9.151221 (December 21, 2015)` to segfault (often corrupting the database) when Hex-Rays is not installed. There needs to be some sanity checking in the [context](https://github.com/pwndbg/pwndbg/blob/master/pwndbg/commands/context.py#L209) command before calling `pwndbg.ida.decompile()` I'm not sure if this is fixed in the latest IDA `6.95.160808 (August 08, 2016)` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/ida.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Talks to an XMLRPC server running inside of an active IDA Pro instance, 5 in order to query it about the database. Allows symbol resolution and 6 interactive debugging. 7 """ 8 from __future__ import absolute_import 9 from __future__ import division 10 from __future__ import print_function 11 from __future__ import unicode_literals 12 13 import errno 14 import functools 15 import socket 16 import traceback 17 18 import gdb 19 20 import pwndbg.arch 21 import pwndbg.color 22 import pwndbg.compat 23 import pwndbg.config 24 import pwndbg.elf 25 import pwndbg.events 26 import pwndbg.memoize 27 import pwndbg.memory 28 import pwndbg.regs 29 30 try: 31 import xmlrpc.client as xmlrpclib 32 except: 33 import xmlrpclib 34 35 36 ida_rpc_host = pwndbg.config.Parameter('ida-rpc-host', '127.0.0.1', 'ida xmlrpc server address') 37 ida_rpc_port = pwndbg.config.Parameter('ida-rpc-port', 8888, 'ida xmlrpc server port') 38 39 xmlrpclib.Marshaller.dispatch[int] = lambda _, v, w: w("<value><i8>%d</i8></value>" % v) 40 41 if pwndbg.compat.python2: 42 xmlrpclib.Marshaller.dispatch[long] = lambda _, v, w: w("<value><i8>%d</i8></value>" % v) 43 44 xmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w("<value><i8>%d</i8></value>" % v) 45 46 _ida = None 47 48 49 @pwndbg.config.Trigger([ida_rpc_host, ida_rpc_port]) 50 def init_ida_rpc_client(): 51 global _ida 52 addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port) 53 54 _ida = xmlrpclib.ServerProxy(addr) 55 56 try: 57 _ida.here() 58 print(pwndbg.color.green("Pwndbg successfully connected to Ida Pro xmlrpc: %s" % addr)) 59 except socket.error as e: 60 if e.errno != errno.ECONNREFUSED: 61 traceback.print_exc() 62 _ida = None 63 64 65 class withIDA(object): 66 def __init__(self, fn): 67 self.fn = fn 68 functools.update_wrapper(self, fn) 69 70 def __call__(self, *args, **kwargs): 71 if _ida is None: 72 init_ida_rpc_client() 73 if _ida is not None: 74 return self.fn(*args, **kwargs) 75 return None 76 77 78 def takes_address(function): 79 @functools.wraps(function) 80 def wrapper(address, *args, **kwargs): 81 return function(l2r(address), *args, **kwargs) 82 83 return wrapper 84 85 86 def returns_address(function): 87 @functools.wraps(function) 88 def wrapper(*args, **kwargs): 89 return r2l(function(*args, **kwargs)) 90 91 return wrapper 92 93 94 @withIDA 95 def available(): 96 return True 97 98 99 def l2r(addr): 100 exe = pwndbg.elf.exe() 101 if not exe: 102 raise Exception("Can't find EXE base") 103 result = (addr - int(exe.address) + base()) & pwndbg.arch.ptrmask 104 return result 105 106 107 def r2l(addr): 108 exe = pwndbg.elf.exe() 109 if not exe: 110 raise Exception("Can't find EXE base") 111 result = (addr - base() + int(exe.address)) & 
pwndbg.arch.ptrmask 112 return result 113 114 115 def remote(function): 116 """Runs the provided function in IDA's interpreter. 117 118 The function must be self-contained and not reference any 119 global variables.""" 120 121 122 @pwndbg.memoize.reset_on_objfile 123 def base(): 124 segaddr = _ida.NextSeg(0) 125 base = _ida.get_fileregion_offset(segaddr) 126 127 return segaddr - base 128 129 130 @withIDA 131 @takes_address 132 def Comment(addr): 133 return _ida.GetCommentEx(addr, 0) or _ida.GetCommentEx(addr) 134 135 136 @withIDA 137 @takes_address 138 @pwndbg.memoize.reset_on_objfile 139 def Name(addr): 140 return _ida.Name(addr) 141 142 143 @withIDA 144 @takes_address 145 @pwndbg.memoize.reset_on_objfile 146 def GetFuncOffset(addr): 147 rv = _ida.GetFuncOffset(addr) 148 return rv 149 150 151 @withIDA 152 @takes_address 153 @pwndbg.memoize.reset_on_objfile 154 def GetType(addr): 155 rv = _ida.GetType(addr) 156 return rv 157 158 159 @withIDA 160 @returns_address 161 def here(): 162 return _ida.here() 163 164 165 @withIDA 166 @takes_address 167 def Jump(addr): 168 return _ida.Jump(addr) 169 170 171 @withIDA 172 @takes_address 173 @pwndbg.memoize.reset_on_objfile 174 def Anterior(addr): 175 hexrays_prefix = '\x01\x04; ' 176 lines = [] 177 for i in range(10): 178 r = _ida.LineA(addr, i) 179 if not r: break 180 if r.startswith(hexrays_prefix): 181 r = r[len(hexrays_prefix):] 182 lines.append(r) 183 return '\n'.join(lines) 184 185 186 @withIDA 187 def GetBreakpoints(): 188 for i in range(GetBptQty()): 189 yield GetBptEA(i) 190 191 192 @withIDA 193 def GetBptQty(): 194 return _ida.GetBptQty() 195 196 197 @withIDA 198 @returns_address 199 def GetBptEA(i): 200 return _ida.GetBptEA(i) 201 202 203 _breakpoints = [] 204 205 206 @pwndbg.events.cont 207 @pwndbg.events.stop 208 @withIDA 209 def UpdateBreakpoints(): 210 # XXX: Remove breakpoints from IDA when the user removes them. 
211 current = set(eval(b.location.lstrip('*')) for b in _breakpoints) 212 want = set(GetBreakpoints()) 213 214 # print(want) 215 216 for addr in current - want: 217 for bp in _breakpoints: 218 if int(bp.location.lstrip('*'), 0) == addr: 219 # print("delete", addr) 220 bp.delete() 221 break 222 _breakpoints.remove(bp) 223 224 for bp in want - current: 225 if not pwndbg.memory.peek(bp): 226 continue 227 228 bp = gdb.Breakpoint('*' + hex(int(bp))) 229 _breakpoints.append(bp) 230 # print(_breakpoints) 231 232 233 @withIDA 234 @takes_address 235 def SetColor(pc, color): 236 return _ida.SetColor(pc, 1, color) 237 238 239 colored_pc = None 240 241 242 @pwndbg.events.stop 243 @withIDA 244 def Auto_Color_PC(): 245 global colored_pc 246 colored_pc = pwndbg.regs.pc 247 SetColor(colored_pc, 0x7f7fff) 248 249 250 @pwndbg.events.cont 251 @withIDA 252 def Auto_UnColor_PC(): 253 global colored_pc 254 if colored_pc: 255 SetColor(colored_pc, 0xffffff) 256 colored_pc = None 257 258 259 @withIDA 260 @returns_address 261 @pwndbg.memoize.reset_on_objfile 262 def LocByName(name): 263 return _ida.LocByName(str(name)) 264 265 266 @withIDA 267 @takes_address 268 @returns_address 269 @pwndbg.memoize.reset_on_objfile 270 def PrevHead(addr): 271 return _ida.PrevHead(addr) 272 273 274 @withIDA 275 @takes_address 276 @returns_address 277 @pwndbg.memoize.reset_on_objfile 278 def NextHead(addr): 279 return _ida.NextHead(addr) 280 281 282 @withIDA 283 @takes_address 284 @pwndbg.memoize.reset_on_objfile 285 def GetFunctionName(addr): 286 return _ida.GetFunctionName(addr) 287 288 289 @withIDA 290 @takes_address 291 @pwndbg.memoize.reset_on_objfile 292 def GetFlags(addr): 293 return _ida.GetFlags(addr) 294 295 296 @withIDA 297 @pwndbg.memoize.reset_on_objfile 298 def isASCII(flags): 299 return _ida.isASCII(flags) 300 301 302 @withIDA 303 @takes_address 304 @pwndbg.memoize.reset_on_objfile 305 def ArgCount(address): 306 pass 307 308 309 @withIDA 310 def SaveBase(path): 311 return _ida.SaveBase(path) 312 313 314 @withIDA 315 def GetIdbPath(): 316 return _ida.GetIdbPath() 317 318 319 @takes_address 320 @pwndbg.memoize.reset_on_stop 321 def has_cached_cfunc(addr): 322 return _ida.has_cached_cfunc(addr) 323 324 325 @withIDA 326 @takes_address 327 @pwndbg.memoize.reset_on_stop 328 def decompile(addr): 329 return _ida.decompile(addr) 330 331 332 @withIDA 333 @pwndbg.memoize.reset_on_stop 334 def GetStrucQty(): 335 return _ida.GetStrucQty() 336 337 338 @withIDA 339 @pwndbg.memoize.reset_on_stop 340 def GetStrucId(idx): 341 return _ida.GetStrucId(idx) 342 343 344 @withIDA 345 @pwndbg.memoize.reset_on_stop 346 def GetStrucName(sid): 347 return _ida.GetStrucName(sid) 348 349 350 @withIDA 351 @pwndbg.memoize.reset_on_stop 352 def GetStrucSize(sid): 353 return _ida.GetStrucSize(sid) 354 355 356 @withIDA 357 @pwndbg.memoize.reset_on_stop 358 def GetMemberQty(sid): 359 return _ida.GetMemberQty(sid) 360 361 362 @withIDA 363 @pwndbg.memoize.reset_on_stop 364 def GetMemberSize(sid, offset): 365 return _ida.GetMemberSize(sid, offset) 366 367 368 @withIDA 369 @pwndbg.memoize.reset_on_stop 370 def GetMemberId(sid, offset): 371 return _ida.GetMemberId(sid, offset) 372 373 374 @withIDA 375 @pwndbg.memoize.reset_on_stop 376 def GetMemberName(sid, offset): 377 return _ida.GetMemberName(sid, offset) 378 379 380 @withIDA 381 @pwndbg.memoize.reset_on_stop 382 def GetMemberFlag(sid, offset): 383 return _ida.GetMemberFlag(sid, offset) 384 385 386 @withIDA 387 @pwndbg.memoize.reset_on_stop 388 def GetStrucNextOff(sid, offset): 389 return 
_ida.GetStrucNextOff(sid, offset) 390 391 392 class IDC(object): 393 query = "{k:v for k,v in globals()['idc'].__dict__.items() if type(v) in (int,long)}" 394 395 def __init__(self): 396 if available(): 397 data = _ida.eval(self.query) 398 self.__dict__.update(data) 399 400 401 idc = IDC() 402 403 404 def print_member(sid, offset): 405 mid = GetMemberId(sid, offset) 406 mname = GetMemberName(sid, offset) or '(no name)' 407 msize = GetMemberSize(sid, offset) or 0 408 mflag = GetMemberFlag(sid, offset) or 0 409 print(" +%#x - %s [%#x bytes]" % (offset, mname, msize)) 410 411 412 def print_structs(): 413 for i in range(GetStrucQty() or 0): 414 sid = GetStrucId(i) 415 416 name = GetStrucName(sid) 417 size = GetStrucSize(sid) 418 419 print("%s - %#x bytes" % (name, size)) 420 421 offset = 0 422 while offset < size: 423 print_member(sid, offset) 424 offset = GetStrucNextOff(sid, offset) 425 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/ida.py b/pwndbg/ida.py --- a/pwndbg/ida.py +++ b/pwndbg/ida.py @@ -74,6 +74,12 @@ return self.fn(*args, **kwargs) return None +def withHexrays(f): + @withIDA + @functools.wraps(f) + def wrapper(*a, **kw): + if _ida.init_hexrays_plugin(): + return f(*a, **kw) def takes_address(function): @functools.wraps(function) @@ -322,7 +328,7 @@ return _ida.has_cached_cfunc(addr) -@withIDA +@withHexrays @takes_address @pwndbg.memoize.reset_on_stop def decompile(addr):
{"golden_diff": "diff --git a/pwndbg/ida.py b/pwndbg/ida.py\n--- a/pwndbg/ida.py\n+++ b/pwndbg/ida.py\n@@ -74,6 +74,12 @@\n return self.fn(*args, **kwargs)\n return None\n \n+def withHexrays(f):\n+ @withIDA\n+ @functools.wraps(f)\n+ def wrapper(*a, **kw):\n+ if _ida.init_hexrays_plugin():\n+ return f(*a, **kw)\n \n def takes_address(function):\n @functools.wraps(function)\n@@ -322,7 +328,7 @@\n return _ida.has_cached_cfunc(addr)\n \n \n-@withIDA\n+@withHexrays\n @takes_address\n @pwndbg.memoize.reset_on_stop\n def decompile(addr):\n", "issue": "IDA Pro Integration - decompile() segfault\nCalling `idaapi.decompile()` causes my IDA `6.9.151221 (December 21, 2015)` to segfault (often corrupting the database) when Hex-Rays is not installed.\r\n\r\nThere needs to be some sanity checking in the [context](https://github.com/pwndbg/pwndbg/blob/master/pwndbg/commands/context.py#L209) command before calling `pwndbg.ida.decompile()`\r\n\r\nI'm not sure if this is fixed in the latest IDA `6.95.160808 (August 08, 2016)`\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTalks to an XMLRPC server running inside of an active IDA Pro instance,\nin order to query it about the database. Allows symbol resolution and\ninteractive debugging.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport errno\nimport functools\nimport socket\nimport traceback\n\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.color\nimport pwndbg.compat\nimport pwndbg.config\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.regs\n\ntry:\n import xmlrpc.client as xmlrpclib\nexcept:\n import xmlrpclib\n\n\nida_rpc_host = pwndbg.config.Parameter('ida-rpc-host', '127.0.0.1', 'ida xmlrpc server address')\nida_rpc_port = pwndbg.config.Parameter('ida-rpc-port', 8888, 'ida xmlrpc server port')\n\nxmlrpclib.Marshaller.dispatch[int] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nif pwndbg.compat.python2:\n xmlrpclib.Marshaller.dispatch[long] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nxmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\n_ida = None\n\n\[email protected]([ida_rpc_host, ida_rpc_port])\ndef init_ida_rpc_client():\n global _ida\n addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port)\n\n _ida = xmlrpclib.ServerProxy(addr)\n\n try:\n _ida.here()\n print(pwndbg.color.green(\"Pwndbg successfully connected to Ida Pro xmlrpc: %s\" % addr))\n except socket.error as e:\n if e.errno != errno.ECONNREFUSED:\n traceback.print_exc()\n _ida = None\n\n\nclass withIDA(object):\n def __init__(self, fn):\n self.fn = fn\n functools.update_wrapper(self, fn)\n\n def __call__(self, *args, **kwargs):\n if _ida is None:\n init_ida_rpc_client()\n if _ida is not None:\n return self.fn(*args, **kwargs)\n return None\n\n\ndef takes_address(function):\n @functools.wraps(function)\n def wrapper(address, *args, **kwargs):\n return function(l2r(address), *args, **kwargs)\n\n return wrapper\n\n\ndef returns_address(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n return r2l(function(*args, **kwargs))\n\n return wrapper\n\n\n@withIDA\ndef available():\n return True\n\n\ndef l2r(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - int(exe.address) + base()) & pwndbg.arch.ptrmask\n return 
result\n\n\ndef r2l(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - base() + int(exe.address)) & pwndbg.arch.ptrmask\n return result\n\n\ndef remote(function):\n \"\"\"Runs the provided function in IDA's interpreter.\n\n The function must be self-contained and not reference any\n global variables.\"\"\"\n\n\[email protected]_on_objfile\ndef base():\n segaddr = _ida.NextSeg(0)\n base = _ida.get_fileregion_offset(segaddr)\n\n return segaddr - base\n\n\n@withIDA\n@takes_address\ndef Comment(addr):\n return _ida.GetCommentEx(addr, 0) or _ida.GetCommentEx(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Name(addr):\n return _ida.Name(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFuncOffset(addr):\n rv = _ida.GetFuncOffset(addr)\n return rv\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetType(addr):\n rv = _ida.GetType(addr)\n return rv\n\n\n@withIDA\n@returns_address\ndef here():\n return _ida.here()\n\n\n@withIDA\n@takes_address\ndef Jump(addr):\n return _ida.Jump(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Anterior(addr):\n hexrays_prefix = '\\x01\\x04; '\n lines = []\n for i in range(10):\n r = _ida.LineA(addr, i)\n if not r: break\n if r.startswith(hexrays_prefix):\n r = r[len(hexrays_prefix):]\n lines.append(r)\n return '\\n'.join(lines)\n\n\n@withIDA\ndef GetBreakpoints():\n for i in range(GetBptQty()):\n yield GetBptEA(i)\n\n\n@withIDA\ndef GetBptQty():\n return _ida.GetBptQty()\n\n\n@withIDA\n@returns_address\ndef GetBptEA(i):\n return _ida.GetBptEA(i)\n\n\n_breakpoints = []\n\n\[email protected]\[email protected]\n@withIDA\ndef UpdateBreakpoints():\n # XXX: Remove breakpoints from IDA when the user removes them.\n current = set(eval(b.location.lstrip('*')) for b in _breakpoints)\n want = set(GetBreakpoints())\n\n # print(want)\n\n for addr in current - want:\n for bp in _breakpoints:\n if int(bp.location.lstrip('*'), 0) == addr:\n # print(\"delete\", addr)\n bp.delete()\n break\n _breakpoints.remove(bp)\n\n for bp in want - current:\n if not pwndbg.memory.peek(bp):\n continue\n\n bp = gdb.Breakpoint('*' + hex(int(bp)))\n _breakpoints.append(bp)\n # print(_breakpoints)\n\n\n@withIDA\n@takes_address\ndef SetColor(pc, color):\n return _ida.SetColor(pc, 1, color)\n\n\ncolored_pc = None\n\n\[email protected]\n@withIDA\ndef Auto_Color_PC():\n global colored_pc\n colored_pc = pwndbg.regs.pc\n SetColor(colored_pc, 0x7f7fff)\n\n\[email protected]\n@withIDA\ndef Auto_UnColor_PC():\n global colored_pc\n if colored_pc:\n SetColor(colored_pc, 0xffffff)\n colored_pc = None\n\n\n@withIDA\n@returns_address\[email protected]_on_objfile\ndef LocByName(name):\n return _ida.LocByName(str(name))\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef PrevHead(addr):\n return _ida.PrevHead(addr)\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef NextHead(addr):\n return _ida.NextHead(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFunctionName(addr):\n return _ida.GetFunctionName(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFlags(addr):\n return _ida.GetFlags(addr)\n\n\n@withIDA\[email protected]_on_objfile\ndef isASCII(flags):\n return _ida.isASCII(flags)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef ArgCount(address):\n pass\n\n\n@withIDA\ndef SaveBase(path):\n return _ida.SaveBase(path)\n\n\n@withIDA\ndef GetIdbPath():\n return 
_ida.GetIdbPath()\n\n\n@takes_address\[email protected]_on_stop\ndef has_cached_cfunc(addr):\n return _ida.has_cached_cfunc(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_stop\ndef decompile(addr):\n return _ida.decompile(addr)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucQty():\n return _ida.GetStrucQty()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucId(idx):\n return _ida.GetStrucId(idx)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucName(sid):\n return _ida.GetStrucName(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucSize(sid):\n return _ida.GetStrucSize(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberQty(sid):\n return _ida.GetMemberQty(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberSize(sid, offset):\n return _ida.GetMemberSize(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberId(sid, offset):\n return _ida.GetMemberId(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberName(sid, offset):\n return _ida.GetMemberName(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberFlag(sid, offset):\n return _ida.GetMemberFlag(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucNextOff(sid, offset):\n return _ida.GetStrucNextOff(sid, offset)\n\n\nclass IDC(object):\n query = \"{k:v for k,v in globals()['idc'].__dict__.items() if type(v) in (int,long)}\"\n\n def __init__(self):\n if available():\n data = _ida.eval(self.query)\n self.__dict__.update(data)\n\n\nidc = IDC()\n\n\ndef print_member(sid, offset):\n mid = GetMemberId(sid, offset)\n mname = GetMemberName(sid, offset) or '(no name)'\n msize = GetMemberSize(sid, offset) or 0\n mflag = GetMemberFlag(sid, offset) or 0\n print(\" +%#x - %s [%#x bytes]\" % (offset, mname, msize))\n\n\ndef print_structs():\n for i in range(GetStrucQty() or 0):\n sid = GetStrucId(i)\n\n name = GetStrucName(sid)\n size = GetStrucSize(sid)\n\n print(\"%s - %#x bytes\" % (name, size))\n\n offset = 0\n while offset < size:\n print_member(sid, offset)\n offset = GetStrucNextOff(sid, offset)\n", "path": "pwndbg/ida.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTalks to an XMLRPC server running inside of an active IDA Pro instance,\nin order to query it about the database. 
Allows symbol resolution and\ninteractive debugging.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport errno\nimport functools\nimport socket\nimport traceback\n\nimport gdb\n\nimport pwndbg.arch\nimport pwndbg.color\nimport pwndbg.compat\nimport pwndbg.config\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.memoize\nimport pwndbg.memory\nimport pwndbg.regs\n\ntry:\n import xmlrpc.client as xmlrpclib\nexcept:\n import xmlrpclib\n\n\nida_rpc_host = pwndbg.config.Parameter('ida-rpc-host', '127.0.0.1', 'ida xmlrpc server address')\nida_rpc_port = pwndbg.config.Parameter('ida-rpc-port', 8888, 'ida xmlrpc server port')\n\nxmlrpclib.Marshaller.dispatch[int] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nif pwndbg.compat.python2:\n xmlrpclib.Marshaller.dispatch[long] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\nxmlrpclib.Marshaller.dispatch[type(0)] = lambda _, v, w: w(\"<value><i8>%d</i8></value>\" % v)\n\n_ida = None\n\n\[email protected]([ida_rpc_host, ida_rpc_port])\ndef init_ida_rpc_client():\n global _ida\n addr = 'http://{host}:{port}'.format(host=ida_rpc_host, port=ida_rpc_port)\n\n _ida = xmlrpclib.ServerProxy(addr)\n\n try:\n _ida.here()\n print(pwndbg.color.green(\"Pwndbg successfully connected to Ida Pro xmlrpc: %s\" % addr))\n except socket.error as e:\n if e.errno != errno.ECONNREFUSED:\n traceback.print_exc()\n _ida = None\n\n\nclass withIDA(object):\n def __init__(self, fn):\n self.fn = fn\n functools.update_wrapper(self, fn)\n\n def __call__(self, *args, **kwargs):\n if _ida is None:\n init_ida_rpc_client()\n if _ida is not None:\n return self.fn(*args, **kwargs)\n return None\n\ndef withHexrays(f):\n @withIDA\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if _ida.init_hexrays_plugin():\n return f(*a, **kw)\n\ndef takes_address(function):\n @functools.wraps(function)\n def wrapper(address, *args, **kwargs):\n return function(l2r(address), *args, **kwargs)\n\n return wrapper\n\n\ndef returns_address(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n return r2l(function(*args, **kwargs))\n\n return wrapper\n\n\n@withIDA\ndef available():\n return True\n\n\ndef l2r(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - int(exe.address) + base()) & pwndbg.arch.ptrmask\n return result\n\n\ndef r2l(addr):\n exe = pwndbg.elf.exe()\n if not exe:\n raise Exception(\"Can't find EXE base\")\n result = (addr - base() + int(exe.address)) & pwndbg.arch.ptrmask\n return result\n\n\ndef remote(function):\n \"\"\"Runs the provided function in IDA's interpreter.\n\n The function must be self-contained and not reference any\n global variables.\"\"\"\n\n\[email protected]_on_objfile\ndef base():\n segaddr = _ida.NextSeg(0)\n base = _ida.get_fileregion_offset(segaddr)\n\n return segaddr - base\n\n\n@withIDA\n@takes_address\ndef Comment(addr):\n return _ida.GetCommentEx(addr, 0) or _ida.GetCommentEx(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Name(addr):\n return _ida.Name(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFuncOffset(addr):\n rv = _ida.GetFuncOffset(addr)\n return rv\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetType(addr):\n rv = _ida.GetType(addr)\n return rv\n\n\n@withIDA\n@returns_address\ndef here():\n return _ida.here()\n\n\n@withIDA\n@takes_address\ndef Jump(addr):\n return 
_ida.Jump(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef Anterior(addr):\n hexrays_prefix = '\\x01\\x04; '\n lines = []\n for i in range(10):\n r = _ida.LineA(addr, i)\n if not r: break\n if r.startswith(hexrays_prefix):\n r = r[len(hexrays_prefix):]\n lines.append(r)\n return '\\n'.join(lines)\n\n\n@withIDA\ndef GetBreakpoints():\n for i in range(GetBptQty()):\n yield GetBptEA(i)\n\n\n@withIDA\ndef GetBptQty():\n return _ida.GetBptQty()\n\n\n@withIDA\n@returns_address\ndef GetBptEA(i):\n return _ida.GetBptEA(i)\n\n\n_breakpoints = []\n\n\[email protected]\[email protected]\n@withIDA\ndef UpdateBreakpoints():\n # XXX: Remove breakpoints from IDA when the user removes them.\n current = set(eval(b.location.lstrip('*')) for b in _breakpoints)\n want = set(GetBreakpoints())\n\n # print(want)\n\n for addr in current - want:\n for bp in _breakpoints:\n if int(bp.location.lstrip('*'), 0) == addr:\n # print(\"delete\", addr)\n bp.delete()\n break\n _breakpoints.remove(bp)\n\n for bp in want - current:\n if not pwndbg.memory.peek(bp):\n continue\n\n bp = gdb.Breakpoint('*' + hex(int(bp)))\n _breakpoints.append(bp)\n # print(_breakpoints)\n\n\n@withIDA\n@takes_address\ndef SetColor(pc, color):\n return _ida.SetColor(pc, 1, color)\n\n\ncolored_pc = None\n\n\[email protected]\n@withIDA\ndef Auto_Color_PC():\n global colored_pc\n colored_pc = pwndbg.regs.pc\n SetColor(colored_pc, 0x7f7fff)\n\n\[email protected]\n@withIDA\ndef Auto_UnColor_PC():\n global colored_pc\n if colored_pc:\n SetColor(colored_pc, 0xffffff)\n colored_pc = None\n\n\n@withIDA\n@returns_address\[email protected]_on_objfile\ndef LocByName(name):\n return _ida.LocByName(str(name))\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef PrevHead(addr):\n return _ida.PrevHead(addr)\n\n\n@withIDA\n@takes_address\n@returns_address\[email protected]_on_objfile\ndef NextHead(addr):\n return _ida.NextHead(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFunctionName(addr):\n return _ida.GetFunctionName(addr)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef GetFlags(addr):\n return _ida.GetFlags(addr)\n\n\n@withIDA\[email protected]_on_objfile\ndef isASCII(flags):\n return _ida.isASCII(flags)\n\n\n@withIDA\n@takes_address\[email protected]_on_objfile\ndef ArgCount(address):\n pass\n\n\n@withIDA\ndef SaveBase(path):\n return _ida.SaveBase(path)\n\n\n@withIDA\ndef GetIdbPath():\n return _ida.GetIdbPath()\n\n\n@takes_address\[email protected]_on_stop\ndef has_cached_cfunc(addr):\n return _ida.has_cached_cfunc(addr)\n\n\n@withHexrays\n@takes_address\[email protected]_on_stop\ndef decompile(addr):\n return _ida.decompile(addr)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucQty():\n return _ida.GetStrucQty()\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucId(idx):\n return _ida.GetStrucId(idx)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucName(sid):\n return _ida.GetStrucName(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucSize(sid):\n return _ida.GetStrucSize(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberQty(sid):\n return _ida.GetMemberQty(sid)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberSize(sid, offset):\n return _ida.GetMemberSize(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberId(sid, offset):\n return _ida.GetMemberId(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetMemberName(sid, offset):\n return _ida.GetMemberName(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef 
GetMemberFlag(sid, offset):\n return _ida.GetMemberFlag(sid, offset)\n\n\n@withIDA\[email protected]_on_stop\ndef GetStrucNextOff(sid, offset):\n return _ida.GetStrucNextOff(sid, offset)\n\n\nclass IDC(object):\n query = \"{k:v for k,v in globals()['idc'].__dict__.items() if type(v) in (int,long)}\"\n\n def __init__(self):\n if available():\n data = _ida.eval(self.query)\n self.__dict__.update(data)\n\n\nidc = IDC()\n\n\ndef print_member(sid, offset):\n mid = GetMemberId(sid, offset)\n mname = GetMemberName(sid, offset) or '(no name)'\n msize = GetMemberSize(sid, offset) or 0\n mflag = GetMemberFlag(sid, offset) or 0\n print(\" +%#x - %s [%#x bytes]\" % (offset, mname, msize))\n\n\ndef print_structs():\n for i in range(GetStrucQty() or 0):\n sid = GetStrucId(i)\n\n name = GetStrucName(sid)\n size = GetStrucSize(sid)\n\n print(\"%s - %#x bytes\" % (name, size))\n\n offset = 0\n while offset < size:\n print_member(sid, offset)\n offset = GetStrucNextOff(sid, offset)\n", "path": "pwndbg/ida.py"}]}
num_tokens: 4,090
num_tokens_diff: 186
problem_id: gh_patches_debug_13350
source: rasdani/github-patches
task_type: git_diff
in_source_id: cisagov__manage.get.gov-282
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Build Domain Submission Landing Page **User Story** As a user submitting a domain request I want to know my domain was successfully submitted so I can prepare for future steps **Acceptance Criteria** - Domain Request form has a landing page that aligns with wireframes **Wireframe** https://www.figma.com/proto/ninNjo0bTg7XfB4mvTp4hG/GetGov%2FWireframes?page-id=1057%3A4846&node-id=1077%3A7610&viewport=-1004%2C292%2C0.15&scaling=min-zoom&starting-point-node-id=1057%3A5291 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/registrar/forms/application_wizard.py` Content: ``` 1 """Forms Wizard for creating a new domain application.""" 2 3 import logging 4 5 from django import forms 6 from django.shortcuts import redirect 7 8 from django.contrib.auth.mixins import LoginRequiredMixin 9 10 from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore 11 12 from registrar.models import DomainApplication, Website 13 14 15 logger = logging.getLogger(__name__) 16 17 18 # Subclass used to remove the default colon suffix from all fields 19 class RegistrarForm(forms.Form): 20 def __init__(self, *args, **kwargs): 21 kwargs.setdefault("label_suffix", "") 22 super(RegistrarForm, self).__init__(*args, **kwargs) 23 24 25 class OrganizationTypeForm(RegistrarForm): 26 organization_type = forms.ChoiceField( 27 required=True, 28 choices=[ 29 ("Federal", "Federal: a federal agency"), 30 ("Interstate", "Interstate: an organization of two or more states"), 31 ( 32 "State_or_Territory", 33 ( 34 "State or Territory: One of the 50 U.S. states, the District of " 35 "Columbia, American Samoa, Guam, Northern Mariana Islands, " 36 "Puerto Rico, or the U.S. 
Virgin Islands" 37 ), 38 ), 39 ( 40 "Tribal", 41 ( 42 "Tribal: a tribal government recognized by the federal or " 43 "state government" 44 ), 45 ), 46 ("County", "County: a county, parish, or borough"), 47 ("City", "City: a city, town, township, village, etc."), 48 ( 49 "Special_District", 50 "Special District: an independent organization within a single state", 51 ), 52 ], 53 widget=forms.RadioSelect, 54 ) 55 federal_type = forms.ChoiceField( 56 required=False, 57 choices=DomainApplication.BRANCH_CHOICES, 58 widget=forms.RadioSelect, 59 ) 60 is_election_board = forms.ChoiceField( 61 required=False, 62 choices=[ 63 ("Yes", "Yes"), 64 ("No", "No"), 65 ], 66 widget=forms.RadioSelect(attrs={"class": "usa-radio__input"}), 67 ) 68 69 70 class OrganizationFederalForm(RegistrarForm): 71 federal_type = forms.ChoiceField( 72 required=False, 73 choices=DomainApplication.BRANCH_CHOICES, 74 widget=forms.RadioSelect, 75 ) 76 77 78 class OrganizationElectionForm(RegistrarForm): 79 is_election_board = forms.BooleanField( 80 widget=forms.RadioSelect( 81 choices=[ 82 (True, "Yes"), 83 (False, "No"), 84 ], 85 ) 86 ) 87 88 89 class OrganizationContactForm(RegistrarForm): 90 organization_name = forms.CharField(label="Organization Name") 91 address_line1 = forms.CharField(label="Address line 1") 92 address_line2 = forms.CharField( 93 required=False, 94 label="Address line 2", 95 ) 96 us_state = forms.ChoiceField( 97 label="State", 98 choices=[ 99 ("AL", "Alabama"), 100 ("AK", "Alaska"), 101 ("AZ", "Arizona"), 102 ("AR", "Arkansas"), 103 ("CA", "California"), 104 ("CO", "Colorado"), 105 ("CT", "Connecticut"), 106 ("DE", "Delaware"), 107 ("DC", "District of Columbia"), 108 ("FL", "Florida"), 109 ("GA", "Georgia"), 110 ("HI", "Hawaii"), 111 ("ID", "Idaho"), 112 ("IL", "Illinois"), 113 ("IN", "Indiana"), 114 ("IA", "Iowa"), 115 ("KS", "Kansas"), 116 ("KY", "Kentucky"), 117 ("LA", "Louisiana"), 118 ("ME", "Maine"), 119 ("MD", "Maryland"), 120 ("MA", "Massachusetts"), 121 ("MI", "Michigan"), 122 ("MN", "Minnesota"), 123 ("MS", "Mississippi"), 124 ("MO", "Missouri"), 125 ("MT", "Montana"), 126 ("NE", "Nebraska"), 127 ("NV", "Nevada"), 128 ("NH", "New Hampshire"), 129 ("NJ", "New Jersey"), 130 ("NM", "New Mexico"), 131 ("NY", "New York"), 132 ("NC", "North Carolina"), 133 ("ND", "North Dakota"), 134 ("OH", "Ohio"), 135 ("OK", "Oklahoma"), 136 ("OR", "Oregon"), 137 ("PA", "Pennsylvania"), 138 ("RI", "Rhode Island"), 139 ("SC", "South Carolina"), 140 ("SD", "South Dakota"), 141 ("TN", "Tennessee"), 142 ("TX", "Texas"), 143 ("UT", "Utah"), 144 ("VT", "Vermont"), 145 ("VA", "Virginia"), 146 ("WA", "Washington"), 147 ("WV", "West Virginia"), 148 ("WI", "Wisconsin"), 149 ("WY", "Wyoming"), 150 ("AS", "American Samoa"), 151 ("GU", "Guam"), 152 ("MP", "Northern Mariana Islands"), 153 ("PR", "Puerto Rico"), 154 ("VI", "Virgin Islands"), 155 ], 156 ) 157 zipcode = forms.CharField(label="ZIP code") 158 159 160 class AuthorizingOfficialForm(RegistrarForm): 161 first_name = forms.CharField(label="First name/given name") 162 middle_name = forms.CharField( 163 required=False, 164 label="Middle name (optional)", 165 ) 166 last_name = forms.CharField(label="Last name/family name") 167 title = forms.CharField(label="Title or role in your organization") 168 email = forms.EmailField(label="Email") 169 phone = forms.CharField(label="Phone") 170 171 172 class CurrentSitesForm(RegistrarForm): 173 current_site = forms.CharField( 174 required=False, 175 label="Enter your organization’s public website, if you have one. 
For example, " 176 "www.city.com.", 177 ) 178 179 180 class DotGovDomainForm(RegistrarForm): 181 dotgov_domain = forms.CharField(label="What .gov domain do you want?") 182 alternative_domain = forms.CharField( 183 required=False, 184 label="Are there other domains you’d like if we can’t give you your first " 185 "choice? Entering alternative domains is optional.", 186 ) 187 188 189 class PurposeForm(RegistrarForm): 190 purpose_field = forms.CharField(label="Purpose", widget=forms.Textarea()) 191 192 193 class YourContactForm(RegistrarForm): 194 first_name = forms.CharField(label="First name/given name") 195 middle_name = forms.CharField( 196 required=False, 197 label="Middle name (optional)", 198 ) 199 last_name = forms.CharField(label="Last name/family name") 200 title = forms.CharField(label="Title or role in your organization") 201 email = forms.EmailField(label="Email") 202 phone = forms.CharField(label="Phone") 203 204 205 class OtherContactsForm(RegistrarForm): 206 first_name = forms.CharField(label="First name/given name") 207 middle_name = forms.CharField( 208 required=False, 209 label="Middle name (optional)", 210 ) 211 last_name = forms.CharField(label="Last name/family name") 212 title = forms.CharField(label="Title or role in your organization") 213 email = forms.EmailField(label="Email") 214 phone = forms.CharField(label="Phone") 215 216 217 class SecurityEmailForm(RegistrarForm): 218 email = forms.EmailField( 219 required=False, 220 label="Security email", 221 ) 222 223 224 class AnythingElseForm(RegistrarForm): 225 anything_else = forms.CharField( 226 required=False, label="Anything else we should know", widget=forms.Textarea() 227 ) 228 229 230 class RequirementsForm(RegistrarForm): 231 agree_check = forms.BooleanField( 232 label="I read and agree to the .gov domain requirements." 233 ) 234 235 236 # Empty class for the review page which gets included as part of the form, but does not 237 # have any form fields itself 238 class ReviewForm(RegistrarForm): 239 pass 240 241 242 # List of forms in our wizard. Each entry is a tuple of a name and a form 243 # subclass 244 FORMS = [ 245 ("organization_type", OrganizationTypeForm), 246 ("organization_federal", OrganizationFederalForm), 247 ("organization_election", OrganizationElectionForm), 248 ("organization_contact", OrganizationContactForm), 249 ("authorizing_official", AuthorizingOfficialForm), 250 ("current_sites", CurrentSitesForm), 251 ("dotgov_domain", DotGovDomainForm), 252 ("purpose", PurposeForm), 253 ("your_contact", YourContactForm), 254 ("other_contacts", OtherContactsForm), 255 ("security_email", SecurityEmailForm), 256 ("anything_else", AnythingElseForm), 257 ("requirements", RequirementsForm), 258 ("review", ReviewForm), 259 ] 260 261 # Dict to match up the right template with the right step. 
Keys here must 262 # match the first elements of the tuples in FORMS 263 TEMPLATES = { 264 "organization_type": "application_org_type.html", 265 "organization_federal": "application_org_federal.html", 266 "organization_election": "application_org_election.html", 267 "organization_contact": "application_org_contact.html", 268 "authorizing_official": "application_authorizing_official.html", 269 "current_sites": "application_current_sites.html", 270 "dotgov_domain": "application_dotgov_domain.html", 271 "purpose": "application_purpose.html", 272 "your_contact": "application_your_contact.html", 273 "other_contacts": "application_other_contacts.html", 274 "security_email": "application_security_email.html", 275 "anything_else": "application_anything_else.html", 276 "requirements": "application_requirements.html", 277 "review": "application_review.html", 278 } 279 280 # We need to pass our page titles as context to the templates, indexed 281 # by the step names 282 TITLES = { 283 "organization_type": "Type of organization", 284 "organization_federal": "Type of organization — Federal", 285 "organization_election": "Type of organization — Election board", 286 "organization_contact": "Organization name and mailing address", 287 "authorizing_official": "Authorizing official", 288 "current_sites": "Organization website", 289 "dotgov_domain": ".gov domain", 290 "purpose": "Purpose of your domain", 291 "your_contact": "Your contact information", 292 "other_contacts": "Other contacts for your domain", 293 "security_email": "Security email for public use", 294 "anything_else": "Anything else we should know?", 295 "requirements": "Requirements for registration and operation of .gov domains", 296 "review": "Review and submit your domain request", 297 } 298 299 300 class ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView): 301 302 """Multi-page form ("wizard") for new domain applications. 303 304 This sets up a sequence of forms that gather information for new 305 domain applications. Each form in the sequence has its own URL and 306 the progress through the form is stored in the Django session (thus 307 "NamedUrlSessionWizardView"). 308 """ 309 310 form_list = FORMS 311 312 def get_template_names(self): 313 """Template for the current step. 314 315 The return is a singleton list. 
316 """ 317 return [TEMPLATES[self.steps.current]] 318 319 def get_context_data(self, form, **kwargs): 320 """Add title information to the context for all steps.""" 321 context = super().get_context_data(form=form, **kwargs) 322 context["form_titles"] = TITLES 323 return context 324 325 def forms_to_object(self, form_dict: dict) -> DomainApplication: 326 """Unpack the form responses onto the model object properties.""" 327 application = DomainApplication.objects.create(creator=self.request.user) 328 329 # organization type information 330 organization_type_data = form_dict["organization_type"].cleaned_data 331 application.organization_type = organization_type_data["organization_type"] 332 333 # federal branch information 334 federal_branch_data = form_dict["organization_federal"].cleaned_data 335 application.federal_branch = federal_branch_data["federal_type"] 336 337 # election board information 338 election_board_data = form_dict["organization_election"].cleaned_data 339 application.is_election_office = election_board_data["is_election_board"] 340 341 # contact information 342 contact_data = form_dict["organization_contact"].cleaned_data 343 application.organization_name = contact_data["organization_name"] 344 application.street_address = contact_data["address_line1"] 345 # TODO: add the rest of these fields when they are created in the forms 346 347 # This isn't really the requested_domain field 348 # but we need something in this field to make the form submittable 349 requested_site, _ = Website.objects.get_or_create( 350 website=contact_data["organization_name"] + ".gov" 351 ) 352 application.requested_domain = requested_site 353 return application 354 355 def done(self, form_list, form_dict, **kwargs): 356 application = self.forms_to_object(form_dict) 357 application.submit() # change the status to submitted 358 application.save() 359 logger.debug("Application object saved: %s", application.id) 360 return redirect("home") 361 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py --- a/src/registrar/forms/application_wizard.py +++ b/src/registrar/forms/application_wizard.py @@ -3,7 +3,7 @@ import logging from django import forms -from django.shortcuts import redirect +from django.shortcuts import render from django.contrib.auth.mixins import LoginRequiredMixin @@ -357,4 +357,6 @@ application.submit() # change the status to submitted application.save() logger.debug("Application object saved: %s", application.id) - return redirect("home") + return render( + self.request, "application_done.html", {"application_id": application.id} + )
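For illustration, a rough sketch of how the landing-page behaviour introduced by the diff above could be exercised in a unit test follows. The test class, the mocked application object, and the direct call to `done()` are assumptions made for the sketch; it is not a test from the repository, and it presumes an `application_done.html` template exists as the diff implies.

```python
# Illustrative sketch only -- not a test from the cisagov repository.
from unittest import mock

from django.test import RequestFactory, TestCase

from registrar.forms.application_wizard import ApplicationWizard


class DoneStepRendersLandingPageTest(TestCase):
    def test_done_renders_application_done_template(self):
        # Build a minimal request with a mocked user for the wizard.
        request = RequestFactory().post("/")
        request.user = mock.Mock()

        wizard = ApplicationWizard()
        wizard.request = request

        # Avoid exercising the whole form pipeline: return a fake application
        # instead of building one from submitted wizard forms.
        fake_application = mock.Mock(id=123)
        with mock.patch.object(
            ApplicationWizard, "forms_to_object", return_value=fake_application
        ):
            # The patched ``done`` should render the new landing page template
            # rather than redirecting to "home".
            with self.assertTemplateUsed("application_done.html"):
                response = wizard.done(form_list=[], form_dict={})

        self.assertEqual(response.status_code, 200)
```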
{"golden_diff": "diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py\n--- a/src/registrar/forms/application_wizard.py\n+++ b/src/registrar/forms/application_wizard.py\n@@ -3,7 +3,7 @@\n import logging\n \n from django import forms\n-from django.shortcuts import redirect\n+from django.shortcuts import render\n \n from django.contrib.auth.mixins import LoginRequiredMixin\n \n@@ -357,4 +357,6 @@\n application.submit() # change the status to submitted\n application.save()\n logger.debug(\"Application object saved: %s\", application.id)\n- return redirect(\"home\")\n+ return render(\n+ self.request, \"application_done.html\", {\"application_id\": application.id}\n+ )\n", "issue": "Build Domain Submission Landing Page\n**User Story**\n\nAs a user submitting a domain request I want to know my domain was successfully submitted so I can prepare for future steps\n\n**Acceptance Criteria**\n\n- Domain Request form has a landing page that aligns with wireframes\n\n**Wireframe**\n\nhttps://www.figma.com/proto/ninNjo0bTg7XfB4mvTp4hG/GetGov%2FWireframes?page-id=1057%3A4846&node-id=1077%3A7610&viewport=-1004%2C292%2C0.15&scaling=min-zoom&starting-point-node-id=1057%3A5291\n", "before_files": [{"content": "\"\"\"Forms Wizard for creating a new domain application.\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.shortcuts import redirect\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n\nfrom registrar.models import DomainApplication, Website\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Subclass used to remove the default colon suffix from all fields\nclass RegistrarForm(forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n super(RegistrarForm, self).__init__(*args, **kwargs)\n\n\nclass OrganizationTypeForm(RegistrarForm):\n organization_type = forms.ChoiceField(\n required=True,\n choices=[\n (\"Federal\", \"Federal: a federal agency\"),\n (\"Interstate\", \"Interstate: an organization of two or more states\"),\n (\n \"State_or_Territory\",\n (\n \"State or Territory: One of the 50 U.S. states, the District of \"\n \"Columbia, American Samoa, Guam, Northern Mariana Islands, \"\n \"Puerto Rico, or the U.S. 
Virgin Islands\"\n ),\n ),\n (\n \"Tribal\",\n (\n \"Tribal: a tribal government recognized by the federal or \"\n \"state government\"\n ),\n ),\n (\"County\", \"County: a county, parish, or borough\"),\n (\"City\", \"City: a city, town, township, village, etc.\"),\n (\n \"Special_District\",\n \"Special District: an independent organization within a single state\",\n ),\n ],\n widget=forms.RadioSelect,\n )\n federal_type = forms.ChoiceField(\n required=False,\n choices=DomainApplication.BRANCH_CHOICES,\n widget=forms.RadioSelect,\n )\n is_election_board = forms.ChoiceField(\n required=False,\n choices=[\n (\"Yes\", \"Yes\"),\n (\"No\", \"No\"),\n ],\n widget=forms.RadioSelect(attrs={\"class\": \"usa-radio__input\"}),\n )\n\n\nclass OrganizationFederalForm(RegistrarForm):\n federal_type = forms.ChoiceField(\n required=False,\n choices=DomainApplication.BRANCH_CHOICES,\n widget=forms.RadioSelect,\n )\n\n\nclass OrganizationElectionForm(RegistrarForm):\n is_election_board = forms.BooleanField(\n widget=forms.RadioSelect(\n choices=[\n (True, \"Yes\"),\n (False, \"No\"),\n ],\n )\n )\n\n\nclass OrganizationContactForm(RegistrarForm):\n organization_name = forms.CharField(label=\"Organization Name\")\n address_line1 = forms.CharField(label=\"Address line 1\")\n address_line2 = forms.CharField(\n required=False,\n label=\"Address line 2\",\n )\n us_state = forms.ChoiceField(\n label=\"State\",\n choices=[\n (\"AL\", \"Alabama\"),\n (\"AK\", \"Alaska\"),\n (\"AZ\", \"Arizona\"),\n (\"AR\", \"Arkansas\"),\n (\"CA\", \"California\"),\n (\"CO\", \"Colorado\"),\n (\"CT\", \"Connecticut\"),\n (\"DE\", \"Delaware\"),\n (\"DC\", \"District of Columbia\"),\n (\"FL\", \"Florida\"),\n (\"GA\", \"Georgia\"),\n (\"HI\", \"Hawaii\"),\n (\"ID\", \"Idaho\"),\n (\"IL\", \"Illinois\"),\n (\"IN\", \"Indiana\"),\n (\"IA\", \"Iowa\"),\n (\"KS\", \"Kansas\"),\n (\"KY\", \"Kentucky\"),\n (\"LA\", \"Louisiana\"),\n (\"ME\", \"Maine\"),\n (\"MD\", \"Maryland\"),\n (\"MA\", \"Massachusetts\"),\n (\"MI\", \"Michigan\"),\n (\"MN\", \"Minnesota\"),\n (\"MS\", \"Mississippi\"),\n (\"MO\", \"Missouri\"),\n (\"MT\", \"Montana\"),\n (\"NE\", \"Nebraska\"),\n (\"NV\", \"Nevada\"),\n (\"NH\", \"New Hampshire\"),\n (\"NJ\", \"New Jersey\"),\n (\"NM\", \"New Mexico\"),\n (\"NY\", \"New York\"),\n (\"NC\", \"North Carolina\"),\n (\"ND\", \"North Dakota\"),\n (\"OH\", \"Ohio\"),\n (\"OK\", \"Oklahoma\"),\n (\"OR\", \"Oregon\"),\n (\"PA\", \"Pennsylvania\"),\n (\"RI\", \"Rhode Island\"),\n (\"SC\", \"South Carolina\"),\n (\"SD\", \"South Dakota\"),\n (\"TN\", \"Tennessee\"),\n (\"TX\", \"Texas\"),\n (\"UT\", \"Utah\"),\n (\"VT\", \"Vermont\"),\n (\"VA\", \"Virginia\"),\n (\"WA\", \"Washington\"),\n (\"WV\", \"West Virginia\"),\n (\"WI\", \"Wisconsin\"),\n (\"WY\", \"Wyoming\"),\n (\"AS\", \"American Samoa\"),\n (\"GU\", \"Guam\"),\n (\"MP\", \"Northern Mariana Islands\"),\n (\"PR\", \"Puerto Rico\"),\n (\"VI\", \"Virgin Islands\"),\n ],\n )\n zipcode = forms.CharField(label=\"ZIP code\")\n\n\nclass AuthorizingOfficialForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass CurrentSitesForm(RegistrarForm):\n current_site = forms.CharField(\n required=False,\n label=\"Enter your 
organization\u2019s public website, if you have one. For example, \"\n \"www.city.com.\",\n )\n\n\nclass DotGovDomainForm(RegistrarForm):\n dotgov_domain = forms.CharField(label=\"What .gov domain do you want?\")\n alternative_domain = forms.CharField(\n required=False,\n label=\"Are there other domains you\u2019d like if we can\u2019t give you your first \"\n \"choice? Entering alternative domains is optional.\",\n )\n\n\nclass PurposeForm(RegistrarForm):\n purpose_field = forms.CharField(label=\"Purpose\", widget=forms.Textarea())\n\n\nclass YourContactForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass OtherContactsForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass SecurityEmailForm(RegistrarForm):\n email = forms.EmailField(\n required=False,\n label=\"Security email\",\n )\n\n\nclass AnythingElseForm(RegistrarForm):\n anything_else = forms.CharField(\n required=False, label=\"Anything else we should know\", widget=forms.Textarea()\n )\n\n\nclass RequirementsForm(RegistrarForm):\n agree_check = forms.BooleanField(\n label=\"I read and agree to the .gov domain requirements.\"\n )\n\n\n# Empty class for the review page which gets included as part of the form, but does not\n# have any form fields itself\nclass ReviewForm(RegistrarForm):\n pass\n\n\n# List of forms in our wizard. Each entry is a tuple of a name and a form\n# subclass\nFORMS = [\n (\"organization_type\", OrganizationTypeForm),\n (\"organization_federal\", OrganizationFederalForm),\n (\"organization_election\", OrganizationElectionForm),\n (\"organization_contact\", OrganizationContactForm),\n (\"authorizing_official\", AuthorizingOfficialForm),\n (\"current_sites\", CurrentSitesForm),\n (\"dotgov_domain\", DotGovDomainForm),\n (\"purpose\", PurposeForm),\n (\"your_contact\", YourContactForm),\n (\"other_contacts\", OtherContactsForm),\n (\"security_email\", SecurityEmailForm),\n (\"anything_else\", AnythingElseForm),\n (\"requirements\", RequirementsForm),\n (\"review\", ReviewForm),\n]\n\n# Dict to match up the right template with the right step. 
Keys here must\n# match the first elements of the tuples in FORMS\nTEMPLATES = {\n \"organization_type\": \"application_org_type.html\",\n \"organization_federal\": \"application_org_federal.html\",\n \"organization_election\": \"application_org_election.html\",\n \"organization_contact\": \"application_org_contact.html\",\n \"authorizing_official\": \"application_authorizing_official.html\",\n \"current_sites\": \"application_current_sites.html\",\n \"dotgov_domain\": \"application_dotgov_domain.html\",\n \"purpose\": \"application_purpose.html\",\n \"your_contact\": \"application_your_contact.html\",\n \"other_contacts\": \"application_other_contacts.html\",\n \"security_email\": \"application_security_email.html\",\n \"anything_else\": \"application_anything_else.html\",\n \"requirements\": \"application_requirements.html\",\n \"review\": \"application_review.html\",\n}\n\n# We need to pass our page titles as context to the templates, indexed\n# by the step names\nTITLES = {\n \"organization_type\": \"Type of organization\",\n \"organization_federal\": \"Type of organization \u2014 Federal\",\n \"organization_election\": \"Type of organization \u2014 Election board\",\n \"organization_contact\": \"Organization name and mailing address\",\n \"authorizing_official\": \"Authorizing official\",\n \"current_sites\": \"Organization website\",\n \"dotgov_domain\": \".gov domain\",\n \"purpose\": \"Purpose of your domain\",\n \"your_contact\": \"Your contact information\",\n \"other_contacts\": \"Other contacts for your domain\",\n \"security_email\": \"Security email for public use\",\n \"anything_else\": \"Anything else we should know?\",\n \"requirements\": \"Requirements for registration and operation of .gov domains\",\n \"review\": \"Review and submit your domain request\",\n}\n\n\nclass ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView):\n\n \"\"\"Multi-page form (\"wizard\") for new domain applications.\n\n This sets up a sequence of forms that gather information for new\n domain applications. 
Each form in the sequence has its own URL and\n the progress through the form is stored in the Django session (thus\n \"NamedUrlSessionWizardView\").\n \"\"\"\n\n form_list = FORMS\n\n def get_template_names(self):\n \"\"\"Template for the current step.\n\n The return is a singleton list.\n \"\"\"\n return [TEMPLATES[self.steps.current]]\n\n def get_context_data(self, form, **kwargs):\n \"\"\"Add title information to the context for all steps.\"\"\"\n context = super().get_context_data(form=form, **kwargs)\n context[\"form_titles\"] = TITLES\n return context\n\n def forms_to_object(self, form_dict: dict) -> DomainApplication:\n \"\"\"Unpack the form responses onto the model object properties.\"\"\"\n application = DomainApplication.objects.create(creator=self.request.user)\n\n # organization type information\n organization_type_data = form_dict[\"organization_type\"].cleaned_data\n application.organization_type = organization_type_data[\"organization_type\"]\n\n # federal branch information\n federal_branch_data = form_dict[\"organization_federal\"].cleaned_data\n application.federal_branch = federal_branch_data[\"federal_type\"]\n\n # election board information\n election_board_data = form_dict[\"organization_election\"].cleaned_data\n application.is_election_office = election_board_data[\"is_election_board\"]\n\n # contact information\n contact_data = form_dict[\"organization_contact\"].cleaned_data\n application.organization_name = contact_data[\"organization_name\"]\n application.street_address = contact_data[\"address_line1\"]\n # TODO: add the rest of these fields when they are created in the forms\n\n # This isn't really the requested_domain field\n # but we need something in this field to make the form submittable\n requested_site, _ = Website.objects.get_or_create(\n website=contact_data[\"organization_name\"] + \".gov\"\n )\n application.requested_domain = requested_site\n return application\n\n def done(self, form_list, form_dict, **kwargs):\n application = self.forms_to_object(form_dict)\n application.submit() # change the status to submitted\n application.save()\n logger.debug(\"Application object saved: %s\", application.id)\n return redirect(\"home\")\n", "path": "src/registrar/forms/application_wizard.py"}], "after_files": [{"content": "\"\"\"Forms Wizard for creating a new domain application.\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.shortcuts import render\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n\nfrom registrar.models import DomainApplication, Website\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Subclass used to remove the default colon suffix from all fields\nclass RegistrarForm(forms.Form):\n def __init__(self, *args, **kwargs):\n kwargs.setdefault(\"label_suffix\", \"\")\n super(RegistrarForm, self).__init__(*args, **kwargs)\n\n\nclass OrganizationTypeForm(RegistrarForm):\n organization_type = forms.ChoiceField(\n required=True,\n choices=[\n (\"Federal\", \"Federal: a federal agency\"),\n (\"Interstate\", \"Interstate: an organization of two or more states\"),\n (\n \"State_or_Territory\",\n (\n \"State or Territory: One of the 50 U.S. states, the District of \"\n \"Columbia, American Samoa, Guam, Northern Mariana Islands, \"\n \"Puerto Rico, or the U.S. 
Virgin Islands\"\n ),\n ),\n (\n \"Tribal\",\n (\n \"Tribal: a tribal government recognized by the federal or \"\n \"state government\"\n ),\n ),\n (\"County\", \"County: a county, parish, or borough\"),\n (\"City\", \"City: a city, town, township, village, etc.\"),\n (\n \"Special_District\",\n \"Special District: an independent organization within a single state\",\n ),\n ],\n widget=forms.RadioSelect,\n )\n federal_type = forms.ChoiceField(\n required=False,\n choices=DomainApplication.BRANCH_CHOICES,\n widget=forms.RadioSelect,\n )\n is_election_board = forms.ChoiceField(\n required=False,\n choices=[\n (\"Yes\", \"Yes\"),\n (\"No\", \"No\"),\n ],\n widget=forms.RadioSelect(attrs={\"class\": \"usa-radio__input\"}),\n )\n\n\nclass OrganizationFederalForm(RegistrarForm):\n federal_type = forms.ChoiceField(\n required=False,\n choices=DomainApplication.BRANCH_CHOICES,\n widget=forms.RadioSelect,\n )\n\n\nclass OrganizationElectionForm(RegistrarForm):\n is_election_board = forms.BooleanField(\n widget=forms.RadioSelect(\n choices=[\n (True, \"Yes\"),\n (False, \"No\"),\n ],\n )\n )\n\n\nclass OrganizationContactForm(RegistrarForm):\n organization_name = forms.CharField(label=\"Organization Name\")\n address_line1 = forms.CharField(label=\"Address line 1\")\n address_line2 = forms.CharField(\n required=False,\n label=\"Address line 2\",\n )\n us_state = forms.ChoiceField(\n label=\"State\",\n choices=[\n (\"AL\", \"Alabama\"),\n (\"AK\", \"Alaska\"),\n (\"AZ\", \"Arizona\"),\n (\"AR\", \"Arkansas\"),\n (\"CA\", \"California\"),\n (\"CO\", \"Colorado\"),\n (\"CT\", \"Connecticut\"),\n (\"DE\", \"Delaware\"),\n (\"DC\", \"District of Columbia\"),\n (\"FL\", \"Florida\"),\n (\"GA\", \"Georgia\"),\n (\"HI\", \"Hawaii\"),\n (\"ID\", \"Idaho\"),\n (\"IL\", \"Illinois\"),\n (\"IN\", \"Indiana\"),\n (\"IA\", \"Iowa\"),\n (\"KS\", \"Kansas\"),\n (\"KY\", \"Kentucky\"),\n (\"LA\", \"Louisiana\"),\n (\"ME\", \"Maine\"),\n (\"MD\", \"Maryland\"),\n (\"MA\", \"Massachusetts\"),\n (\"MI\", \"Michigan\"),\n (\"MN\", \"Minnesota\"),\n (\"MS\", \"Mississippi\"),\n (\"MO\", \"Missouri\"),\n (\"MT\", \"Montana\"),\n (\"NE\", \"Nebraska\"),\n (\"NV\", \"Nevada\"),\n (\"NH\", \"New Hampshire\"),\n (\"NJ\", \"New Jersey\"),\n (\"NM\", \"New Mexico\"),\n (\"NY\", \"New York\"),\n (\"NC\", \"North Carolina\"),\n (\"ND\", \"North Dakota\"),\n (\"OH\", \"Ohio\"),\n (\"OK\", \"Oklahoma\"),\n (\"OR\", \"Oregon\"),\n (\"PA\", \"Pennsylvania\"),\n (\"RI\", \"Rhode Island\"),\n (\"SC\", \"South Carolina\"),\n (\"SD\", \"South Dakota\"),\n (\"TN\", \"Tennessee\"),\n (\"TX\", \"Texas\"),\n (\"UT\", \"Utah\"),\n (\"VT\", \"Vermont\"),\n (\"VA\", \"Virginia\"),\n (\"WA\", \"Washington\"),\n (\"WV\", \"West Virginia\"),\n (\"WI\", \"Wisconsin\"),\n (\"WY\", \"Wyoming\"),\n (\"AS\", \"American Samoa\"),\n (\"GU\", \"Guam\"),\n (\"MP\", \"Northern Mariana Islands\"),\n (\"PR\", \"Puerto Rico\"),\n (\"VI\", \"Virgin Islands\"),\n ],\n )\n zipcode = forms.CharField(label=\"ZIP code\")\n\n\nclass AuthorizingOfficialForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass CurrentSitesForm(RegistrarForm):\n current_site = forms.CharField(\n required=False,\n label=\"Enter your 
organization\u2019s public website, if you have one. For example, \"\n \"www.city.com.\",\n )\n\n\nclass DotGovDomainForm(RegistrarForm):\n dotgov_domain = forms.CharField(label=\"What .gov domain do you want?\")\n alternative_domain = forms.CharField(\n required=False,\n label=\"Are there other domains you\u2019d like if we can\u2019t give you your first \"\n \"choice? Entering alternative domains is optional.\",\n )\n\n\nclass PurposeForm(RegistrarForm):\n purpose_field = forms.CharField(label=\"Purpose\", widget=forms.Textarea())\n\n\nclass YourContactForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass OtherContactsForm(RegistrarForm):\n first_name = forms.CharField(label=\"First name/given name\")\n middle_name = forms.CharField(\n required=False,\n label=\"Middle name (optional)\",\n )\n last_name = forms.CharField(label=\"Last name/family name\")\n title = forms.CharField(label=\"Title or role in your organization\")\n email = forms.EmailField(label=\"Email\")\n phone = forms.CharField(label=\"Phone\")\n\n\nclass SecurityEmailForm(RegistrarForm):\n email = forms.EmailField(\n required=False,\n label=\"Security email\",\n )\n\n\nclass AnythingElseForm(RegistrarForm):\n anything_else = forms.CharField(\n required=False, label=\"Anything else we should know\", widget=forms.Textarea()\n )\n\n\nclass RequirementsForm(RegistrarForm):\n agree_check = forms.BooleanField(\n label=\"I read and agree to the .gov domain requirements.\"\n )\n\n\n# Empty class for the review page which gets included as part of the form, but does not\n# have any form fields itself\nclass ReviewForm(RegistrarForm):\n pass\n\n\n# List of forms in our wizard. Each entry is a tuple of a name and a form\n# subclass\nFORMS = [\n (\"organization_type\", OrganizationTypeForm),\n (\"organization_federal\", OrganizationFederalForm),\n (\"organization_election\", OrganizationElectionForm),\n (\"organization_contact\", OrganizationContactForm),\n (\"authorizing_official\", AuthorizingOfficialForm),\n (\"current_sites\", CurrentSitesForm),\n (\"dotgov_domain\", DotGovDomainForm),\n (\"purpose\", PurposeForm),\n (\"your_contact\", YourContactForm),\n (\"other_contacts\", OtherContactsForm),\n (\"security_email\", SecurityEmailForm),\n (\"anything_else\", AnythingElseForm),\n (\"requirements\", RequirementsForm),\n (\"review\", ReviewForm),\n]\n\n# Dict to match up the right template with the right step. 
Keys here must\n# match the first elements of the tuples in FORMS\nTEMPLATES = {\n \"organization_type\": \"application_org_type.html\",\n \"organization_federal\": \"application_org_federal.html\",\n \"organization_election\": \"application_org_election.html\",\n \"organization_contact\": \"application_org_contact.html\",\n \"authorizing_official\": \"application_authorizing_official.html\",\n \"current_sites\": \"application_current_sites.html\",\n \"dotgov_domain\": \"application_dotgov_domain.html\",\n \"purpose\": \"application_purpose.html\",\n \"your_contact\": \"application_your_contact.html\",\n \"other_contacts\": \"application_other_contacts.html\",\n \"security_email\": \"application_security_email.html\",\n \"anything_else\": \"application_anything_else.html\",\n \"requirements\": \"application_requirements.html\",\n \"review\": \"application_review.html\",\n}\n\n# We need to pass our page titles as context to the templates, indexed\n# by the step names\nTITLES = {\n \"organization_type\": \"Type of organization\",\n \"organization_federal\": \"Type of organization \u2014 Federal\",\n \"organization_election\": \"Type of organization \u2014 Election board\",\n \"organization_contact\": \"Organization name and mailing address\",\n \"authorizing_official\": \"Authorizing official\",\n \"current_sites\": \"Organization website\",\n \"dotgov_domain\": \".gov domain\",\n \"purpose\": \"Purpose of your domain\",\n \"your_contact\": \"Your contact information\",\n \"other_contacts\": \"Other contacts for your domain\",\n \"security_email\": \"Security email for public use\",\n \"anything_else\": \"Anything else we should know?\",\n \"requirements\": \"Requirements for registration and operation of .gov domains\",\n \"review\": \"Review and submit your domain request\",\n}\n\n\nclass ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView):\n\n \"\"\"Multi-page form (\"wizard\") for new domain applications.\n\n This sets up a sequence of forms that gather information for new\n domain applications. 
Each form in the sequence has its own URL and\n the progress through the form is stored in the Django session (thus\n \"NamedUrlSessionWizardView\").\n \"\"\"\n\n form_list = FORMS\n\n def get_template_names(self):\n \"\"\"Template for the current step.\n\n The return is a singleton list.\n \"\"\"\n return [TEMPLATES[self.steps.current]]\n\n def get_context_data(self, form, **kwargs):\n \"\"\"Add title information to the context for all steps.\"\"\"\n context = super().get_context_data(form=form, **kwargs)\n context[\"form_titles\"] = TITLES\n return context\n\n def forms_to_object(self, form_dict: dict) -> DomainApplication:\n \"\"\"Unpack the form responses onto the model object properties.\"\"\"\n application = DomainApplication.objects.create(creator=self.request.user)\n\n # organization type information\n organization_type_data = form_dict[\"organization_type\"].cleaned_data\n application.organization_type = organization_type_data[\"organization_type\"]\n\n # federal branch information\n federal_branch_data = form_dict[\"organization_federal\"].cleaned_data\n application.federal_branch = federal_branch_data[\"federal_type\"]\n\n # election board information\n election_board_data = form_dict[\"organization_election\"].cleaned_data\n application.is_election_office = election_board_data[\"is_election_board\"]\n\n # contact information\n contact_data = form_dict[\"organization_contact\"].cleaned_data\n application.organization_name = contact_data[\"organization_name\"]\n application.street_address = contact_data[\"address_line1\"]\n # TODO: add the rest of these fields when they are created in the forms\n\n # This isn't really the requested_domain field\n # but we need something in this field to make the form submittable\n requested_site, _ = Website.objects.get_or_create(\n website=contact_data[\"organization_name\"] + \".gov\"\n )\n application.requested_domain = requested_site\n return application\n\n def done(self, form_list, form_dict, **kwargs):\n application = self.forms_to_object(form_dict)\n application.submit() # change the status to submitted\n application.save()\n logger.debug(\"Application object saved: %s\", application.id)\n return render(\n self.request, \"application_done.html\", {\"application_id\": application.id}\n )\n", "path": "src/registrar/forms/application_wizard.py"}]}
num_tokens: 4,091
num_tokens_diff: 161
problem_id: gh_patches_debug_8885
source: rasdani/github-patches
task_type: git_diff
in_source_id: certbot__certbot-5861
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [1] Standardize how you’ll run mypy From http://blog.zulip.org/2016/10/13/static-types-in-python-oh-mypy/: Write tooling to [install](https://github.com/zulip/zulip/blob/master/tools/install-mypy) and [run](https://github.com/zulip/zulip/blob/master/tools/run-mypy) `mypy` against your codebase, so that everyone using the project can run the type checker the same way. Two features are important in how you run mypy: - Support for determining which files should be checked (a whitelist/exclude list is useful!). - Specifying the correct flags for your project at this time. For a Python 2 project, I recommend starting with `mypy --py2 --silent-imports --fast-parser -i <paths>`. You should be able to do this using a [mypy.ini](http://mypy.readthedocs.io/en/latest/config_file.html) file. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import codecs 2 import os 3 import re 4 import sys 5 6 from setuptools import setup 7 from setuptools import find_packages 8 9 # Workaround for http://bugs.python.org/issue8876, see 10 # http://bugs.python.org/issue8876#msg208792 11 # This can be removed when using Python 2.7.9 or later: 12 # https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS 13 if os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant': 14 del os.link 15 16 17 def read_file(filename, encoding='utf8'): 18 """Read unicode from given file.""" 19 with codecs.open(filename, encoding=encoding) as fd: 20 return fd.read() 21 22 23 here = os.path.abspath(os.path.dirname(__file__)) 24 25 # read version number (and other metadata) from package init 26 init_fn = os.path.join(here, 'certbot', '__init__.py') 27 meta = dict(re.findall(r"""__([a-z]+)__ = '([^']+)""", read_file(init_fn))) 28 29 readme = read_file(os.path.join(here, 'README.rst')) 30 changes = read_file(os.path.join(here, 'CHANGES.rst')) 31 version = meta['version'] 32 33 # This package relies on PyOpenSSL, requests, and six, however, it isn't 34 # specified here to avoid masking the more specific request requirements in 35 # acme. See https://github.com/pypa/pip/issues/988 for more info. 36 install_requires = [ 37 'acme>=0.22.1', 38 # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but 39 # saying so here causes a runtime error against our temporary fork of 0.9.3 40 # in which we added 2.6 support (see #2243), so we relax the requirement. 
41 'ConfigArgParse>=0.9.3', 42 'configobj', 43 'cryptography>=1.2', # load_pem_x509_certificate 44 'josepy', 45 'mock', 46 'parsedatetime>=1.3', # Calendar.parseDT 47 'pyrfc3339', 48 'pytz', 49 'setuptools', 50 'zope.component', 51 'zope.interface', 52 ] 53 54 dev_extras = [ 55 # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289 56 'astroid==1.3.5', 57 'coverage', 58 'ipdb', 59 'pytest', 60 'pytest-cov', 61 'pytest-xdist', 62 'pylint==1.4.2', # upstream #248 63 'tox', 64 'twine', 65 'wheel', 66 ] 67 68 docs_extras = [ 69 'repoze.sphinx.autointerface', 70 # autodoc_member_order = 'bysource', autodoc_default_flags, and #4686 71 'Sphinx >=1.0,<=1.5.6', 72 'sphinx_rtd_theme', 73 ] 74 75 setup( 76 name='certbot', 77 version=version, 78 description="ACME client", 79 long_description=readme, # later: + '\n\n' + changes 80 url='https://github.com/letsencrypt/letsencrypt', 81 author="Certbot Project", 82 author_email='[email protected]', 83 license='Apache License 2.0', 84 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', 85 classifiers=[ 86 'Development Status :: 3 - Alpha', 87 'Environment :: Console', 88 'Environment :: Console :: Curses', 89 'Intended Audience :: System Administrators', 90 'License :: OSI Approved :: Apache Software License', 91 'Operating System :: POSIX :: Linux', 92 'Programming Language :: Python', 93 'Programming Language :: Python :: 2', 94 'Programming Language :: Python :: 2.7', 95 'Programming Language :: Python :: 3', 96 'Programming Language :: Python :: 3.4', 97 'Programming Language :: Python :: 3.5', 98 'Programming Language :: Python :: 3.6', 99 'Topic :: Internet :: WWW/HTTP', 100 'Topic :: Security', 101 'Topic :: System :: Installation/Setup', 102 'Topic :: System :: Networking', 103 'Topic :: System :: Systems Administration', 104 'Topic :: Utilities', 105 ], 106 107 packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']), 108 include_package_data=True, 109 110 install_requires=install_requires, 111 extras_require={ 112 'dev': dev_extras, 113 'docs': docs_extras, 114 }, 115 116 # to test all packages run "python setup.py test -s 117 # {acme,certbot_apache,certbot_nginx}" 118 test_suite='certbot', 119 120 entry_points={ 121 'console_scripts': [ 122 'certbot = certbot.main:main', 123 ], 124 'certbot.plugins': [ 125 'manual = certbot.plugins.manual:Authenticator', 126 'null = certbot.plugins.null:Installer', 127 'standalone = certbot.plugins.standalone:Authenticator', 128 'webroot = certbot.plugins.webroot:Authenticator', 129 ], 130 }, 131 ) 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -65,6 +65,10 @@ 'wheel', ] +dev3_extras = [ + 'mypy', +] + docs_extras = [ 'repoze.sphinx.autointerface', # autodoc_member_order = 'bysource', autodoc_default_flags, and #4686 @@ -110,6 +114,7 @@ install_requires=install_requires, extras_require={ 'dev': dev_extras, + 'dev3': dev3_extras, 'docs': docs_extras, },
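As a usage note on the new extra: the issue above recommends project tooling for installing and running mypy consistently. With the `dev3` extra from the diff, mypy could be installed via `pip install -e ".[dev3]"`, and a wrapper in the spirit of the referenced `run-mypy` tool might look roughly like the sketch below. The script name, the checked paths, and the flags (copied from the issue's Python 2 recommendation) are illustrative assumptions, not part of certbot's actual tooling or of the diff.

```python
#!/usr/bin/env python
"""Illustrative run-mypy wrapper; a sketch only, assuming mypy is installed."""
import subprocess
import sys

# Example whitelist of packages to type-check; adjust to the project layout.
CHECKED_PATHS = [
    "certbot",
]

# Flags suggested in the issue text for a Python 2 codebase.
MYPY_FLAGS = [
    "--py2",
    "--silent-imports",
    "--fast-parser",
    "-i",
]


def main():
    command = ["mypy"] + MYPY_FLAGS + CHECKED_PATHS
    # Propagate mypy's exit code so CI can fail when type errors are found.
    return subprocess.call(command)


if __name__ == "__main__":
    sys.exit(main())
```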
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,6 +65,10 @@\n 'wheel',\n ]\n \n+dev3_extras = [\n+ 'mypy',\n+]\n+\n docs_extras = [\n 'repoze.sphinx.autointerface',\n # autodoc_member_order = 'bysource', autodoc_default_flags, and #4686\n@@ -110,6 +114,7 @@\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n+ 'dev3': dev3_extras,\n 'docs': docs_extras,\n },\n", "issue": "[1] Standardize how you\u2019ll run mypy\nFrom http://blog.zulip.org/2016/10/13/static-types-in-python-oh-mypy/:\r\n\r\nWrite tooling to [install](https://github.com/zulip/zulip/blob/master/tools/install-mypy) and [run](https://github.com/zulip/zulip/blob/master/tools/run-mypy) `mypy` against your codebase, so that everyone using the project can run the type checker the same way. Two features are important in how you run mypy:\r\n\r\n- Support for determining which files should be checked (a whitelist/exclude list is useful!).\r\n- Specifying the correct flags for your project at this time. For a Python 2 project, I recommend starting with `mypy --py2 --silent-imports --fast-parser -i <paths>`. You should be able to do this using a [mypy.ini](http://mypy.readthedocs.io/en/latest/config_file.html) file.\n", "before_files": [{"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n# Workaround for http://bugs.python.org/issue8876, see\n# http://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', '__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nchanges = read_file(os.path.join(here, 'CHANGES.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL, requests, and six, however, it isn't\n# specified here to avoid masking the more specific request requirements in\n# acme. 
See https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n 'acme>=0.22.1',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj',\n 'cryptography>=1.2', # load_pem_x509_certificate\n 'josepy',\n 'mock',\n 'parsedatetime>=1.3', # Calendar.parseDT\n 'pyrfc3339',\n 'pytz',\n 'setuptools',\n 'zope.component',\n 'zope.interface',\n]\n\ndev_extras = [\n # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289\n 'astroid==1.3.5',\n 'coverage',\n 'ipdb',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'pylint==1.4.2', # upstream #248\n 'tox',\n 'twine',\n 'wheel',\n]\n\ndocs_extras = [\n 'repoze.sphinx.autointerface',\n # autodoc_member_order = 'bysource', autodoc_default_flags, and #4686\n 'Sphinx >=1.0,<=1.5.6',\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme, # later: + '\\n\\n' + changes\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'docs': docs_extras,\n },\n\n # to test all packages run \"python setup.py test -s\n # {acme,certbot_apache,certbot_nginx}\"\n test_suite='certbot',\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot.plugins.manual:Authenticator',\n 'null = certbot.plugins.null:Installer',\n 'standalone = certbot.plugins.standalone:Authenticator',\n 'webroot = certbot.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\n# Workaround for http://bugs.python.org/issue8876, see\n# http://bugs.python.org/issue8876#msg208792\n# This can be removed when using Python 2.7.9 or later:\n# https://hg.python.org/cpython/raw-file/v2.7.9/Misc/NEWS\nif os.path.abspath(__file__).split(os.path.sep)[1] == 'vagrant':\n del os.link\n\n\ndef read_file(filename, encoding='utf8'):\n \"\"\"Read unicode from given file.\"\"\"\n with codecs.open(filename, encoding=encoding) as fd:\n return fd.read()\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# read version number (and other metadata) from package init\ninit_fn = os.path.join(here, 'certbot', 
'__init__.py')\nmeta = dict(re.findall(r\"\"\"__([a-z]+)__ = '([^']+)\"\"\", read_file(init_fn)))\n\nreadme = read_file(os.path.join(here, 'README.rst'))\nchanges = read_file(os.path.join(here, 'CHANGES.rst'))\nversion = meta['version']\n\n# This package relies on PyOpenSSL, requests, and six, however, it isn't\n# specified here to avoid masking the more specific request requirements in\n# acme. See https://github.com/pypa/pip/issues/988 for more info.\ninstall_requires = [\n 'acme>=0.22.1',\n # We technically need ConfigArgParse 0.10.0 for Python 2.6 support, but\n # saying so here causes a runtime error against our temporary fork of 0.9.3\n # in which we added 2.6 support (see #2243), so we relax the requirement.\n 'ConfigArgParse>=0.9.3',\n 'configobj',\n 'cryptography>=1.2', # load_pem_x509_certificate\n 'josepy',\n 'mock',\n 'parsedatetime>=1.3', # Calendar.parseDT\n 'pyrfc3339',\n 'pytz',\n 'setuptools',\n 'zope.component',\n 'zope.interface',\n]\n\ndev_extras = [\n # Pin astroid==1.3.5, pylint==1.4.2 as a workaround for #289\n 'astroid==1.3.5',\n 'coverage',\n 'ipdb',\n 'pytest',\n 'pytest-cov',\n 'pytest-xdist',\n 'pylint==1.4.2', # upstream #248\n 'tox',\n 'twine',\n 'wheel',\n]\n\ndev3_extras = [\n 'mypy',\n]\n\ndocs_extras = [\n 'repoze.sphinx.autointerface',\n # autodoc_member_order = 'bysource', autodoc_default_flags, and #4686\n 'Sphinx >=1.0,<=1.5.6',\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot',\n version=version,\n description=\"ACME client\",\n long_description=readme, # later: + '\\n\\n' + changes\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(exclude=['docs', 'examples', 'tests', 'venv']),\n include_package_data=True,\n\n install_requires=install_requires,\n extras_require={\n 'dev': dev_extras,\n 'dev3': dev3_extras,\n 'docs': docs_extras,\n },\n\n # to test all packages run \"python setup.py test -s\n # {acme,certbot_apache,certbot_nginx}\"\n test_suite='certbot',\n\n entry_points={\n 'console_scripts': [\n 'certbot = certbot.main:main',\n ],\n 'certbot.plugins': [\n 'manual = certbot.plugins.manual:Authenticator',\n 'null = certbot.plugins.null:Installer',\n 'standalone = certbot.plugins.standalone:Authenticator',\n 'webroot = certbot.plugins.webroot:Authenticator',\n ],\n },\n)\n", "path": "setup.py"}]}
1,917
151
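The golden diff for the certbot record above introduces a `dev3` extras group carrying `mypy` and registers it under `extras_require`. The sketch below shows that setuptools mechanism in isolation; everything except the `dev3`/`mypy` names is a placeholder, not certbot's real packaging metadata.

```python
# Minimal sketch of declaring an extras_require group like the "dev3" group the
# record's golden diff adds. Package name and version are placeholders.
from setuptools import setup, find_packages

dev3_extras = [
    "mypy",  # the only dependency the golden diff places in this group
]

setup(
    name="example-package",      # placeholder, not certbot's real name
    version="0.0.0",
    packages=find_packages(),
    extras_require={
        # enables: pip install "example-package[dev3]"
        "dev3": dev3_extras,
    },
)
```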
gh_patches_debug_42707
rasdani/github-patches
git_diff
secondmind-labs__trieste-441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Active learning acquisition functions are missing integration tests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/notebooks/active_learning.pct.py` Content: ``` 1 # %% [markdown] 2 # # Active Learning 3 4 # %% [markdown] 5 # Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste. 6 7 # %% 8 # %matplotlib inline 9 import numpy as np 10 import tensorflow as tf 11 12 np.random.seed(1793) 13 tf.random.set_seed(1793) 14 15 # %% [markdown] 16 # ## Describe the problem 17 # 18 # In this example, we will perform active learning for the scaled Branin function. 19 20 21 # %% 22 from trieste.objectives import scaled_branin 23 from util.plotting_plotly import plot_function_plotly 24 from trieste.space import Box 25 26 search_space = Box([0, 0], [1, 1]) 27 28 fig = plot_function_plotly( 29 scaled_branin, search_space.lower, search_space.upper, grid_density=20 30 ) 31 fig.update_layout(height=400, width=400) 32 fig.show() 33 34 # %% [markdown] 35 # We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence. 36 37 # %% 38 import trieste 39 40 observer = trieste.objectives.utils.mk_observer(scaled_branin) 41 42 num_initial_points = 4 43 initial_query_points = search_space.sample_halton(num_initial_points) 44 initial_data = observer(initial_query_points) 45 46 47 # %% [markdown] 48 # ## Surrogate model 49 # 50 # Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. 51 52 # %% 53 import gpflow 54 from trieste.models.gpflow.models import GaussianProcessRegression 55 56 57 def build_model(data): 58 variance = tf.math.reduce_variance(data.observations) 59 kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2]) 60 gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5) 61 gpflow.set_trainable(gpr.likelihood, False) 62 63 return GaussianProcessRegression(gpr) 64 65 66 model = build_model(initial_data) 67 68 # %% [markdown] 69 # ## Active learning using predictive variance 70 # 71 # For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance` which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs to have `predict_joint` method to be able to return the full covariance, and it's likely to be expensive to compute. 72 # 73 # We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps. 
74 # 75 76 # %% 77 from trieste.acquisition.function import PredictiveVariance 78 from trieste.acquisition.optimizer import generate_continuous_optimizer 79 from trieste.acquisition.rule import EfficientGlobalOptimization 80 81 acq = PredictiveVariance() 82 rule = EfficientGlobalOptimization( 83 builder=acq, optimizer=generate_continuous_optimizer() 84 ) 85 bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space) 86 87 # %% [markdown] 88 # To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`, this will make Trieste record our model at each iteration. 89 90 # %% 91 bo_iter = 5 92 result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True) 93 94 # %% [markdown] 95 # Then we can retrieve our final dataset from the active learning steps. 96 97 # %% 98 dataset = result.try_get_final_dataset() 99 query_points = dataset.query_points.numpy() 100 observations = dataset.observations.numpy() 101 102 # %% [markdown] 103 # Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain. 104 105 # %% 106 from util.plotting import plot_bo_points, plot_function_2d 107 108 109 def plot_active_learning_query( 110 result, bo_iter, num_initial_points, query_points, num_query=1 111 ): 112 113 for i in range(bo_iter): 114 115 def pred_var(x): 116 _, var = result.history[i].models["OBJECTIVE"].model.predict_f(x) 117 return var 118 119 _, ax = plot_function_2d( 120 pred_var, 121 search_space.lower - 0.01, 122 search_space.upper + 0.01, 123 grid_density=100, 124 contour=True, 125 colorbar=True, 126 figsize=(10, 6), 127 title=[ 128 "Variance contour with queried points at iter:" + str(i + 1) 129 ], 130 xlabel="$X_1$", 131 ylabel="$X_2$", 132 ) 133 134 plot_bo_points( 135 query_points[: num_initial_points + (i * num_query)], 136 ax[0, 0], 137 num_initial_points, 138 ) 139 140 141 plot_active_learning_query(result, bo_iter, num_initial_points, query_points) 142 143 144 # %% [markdown] 145 # ## Batch active learning using predictive variance 146 # 147 # For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time. 148 149 # %% 150 bo_iter = 5 151 num_query = 3 152 model = build_model(initial_data) 153 acq = PredictiveVariance() 154 rule = EfficientGlobalOptimization( 155 num_query_points=num_query, 156 builder=acq, 157 optimizer=generate_continuous_optimizer(num_optimization_runs=1), 158 ) 159 bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space) 160 161 result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True) 162 163 164 # %% [markdown] 165 # After that, we can retrieve our final dataset. 166 167 # %% 168 dataset = result.try_get_final_dataset() 169 query_points = dataset.query_points.numpy() 170 observations = dataset.observations.numpy() 171 172 173 # %% [markdown] 174 # Now we can visualize the batch predictive variance using our plotting function. 
175 176 # %% 177 from util.plotting import plot_bo_points, plot_function_2d 178 179 plot_active_learning_query( 180 result, bo_iter, num_initial_points, query_points, num_query 181 ) 182 183 # %% [markdown] 184 # ## LICENSE 185 # 186 # [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE) 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/notebooks/active_learning.pct.py b/docs/notebooks/active_learning.pct.py --- a/docs/notebooks/active_learning.pct.py +++ b/docs/notebooks/active_learning.pct.py @@ -19,11 +19,11 @@ # %% -from trieste.objectives import scaled_branin +from trieste.objectives import BRANIN_SEARCH_SPACE, scaled_branin from util.plotting_plotly import plot_function_plotly from trieste.space import Box -search_space = Box([0, 0], [1, 1]) +search_space = BRANIN_SEARCH_SPACE fig = plot_function_plotly( scaled_branin, search_space.lower, search_space.upper, grid_density=20 @@ -32,7 +32,7 @@ fig.show() # %% [markdown] -# We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence. +# We begin our Bayesian active learning from a small initial design built from a space-filling Halton sequence. # %% import trieste @@ -47,16 +47,24 @@ # %% [markdown] # ## Surrogate model # -# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. +# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. As a good practice, we use priors for the kernel hyperparameters. # %% import gpflow from trieste.models.gpflow.models import GaussianProcessRegression +import tensorflow_probability as tfp def build_model(data): variance = tf.math.reduce_variance(data.observations) - kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2]) + kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2]) + prior_scale = tf.cast(1.0, dtype=tf.float64) + kernel.variance.prior = tfp.distributions.LogNormal( + tf.cast(-2.0, dtype=tf.float64), prior_scale + ) + kernel.lengthscales.prior = tfp.distributions.LogNormal( + tf.math.log(kernel.lengthscales), prior_scale + ) gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5) gpflow.set_trainable(gpr.likelihood, False) @@ -79,9 +87,7 @@ from trieste.acquisition.rule import EfficientGlobalOptimization acq = PredictiveVariance() -rule = EfficientGlobalOptimization( - builder=acq, optimizer=generate_continuous_optimizer() -) +rule = EfficientGlobalOptimization(builder=acq) # type: ignore bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space) # %% [markdown] @@ -144,12 +150,14 @@ # %% [markdown] # ## Batch active learning using predictive variance # -# For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time. +# In cases when we can evaluate the black-box function in parallel, it would be useful to produce a batch of points rather than a single point. `PredictiveVariance` acquisition function can also perform batch active learning. We must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. 
The drawback of the batch predictive variance is that it tends to query in high variance area less accurately, compared to sequentially drawing one point at a time. # %% bo_iter = 5 num_query = 3 + model = build_model(initial_data) + acq = PredictiveVariance() rule = EfficientGlobalOptimization( num_query_points=num_query,
{"golden_diff": "diff --git a/docs/notebooks/active_learning.pct.py b/docs/notebooks/active_learning.pct.py\n--- a/docs/notebooks/active_learning.pct.py\n+++ b/docs/notebooks/active_learning.pct.py\n@@ -19,11 +19,11 @@\n \n \n # %%\n-from trieste.objectives import scaled_branin\n+from trieste.objectives import BRANIN_SEARCH_SPACE, scaled_branin\n from util.plotting_plotly import plot_function_plotly\n from trieste.space import Box\n \n-search_space = Box([0, 0], [1, 1])\n+search_space = BRANIN_SEARCH_SPACE\n \n fig = plot_function_plotly(\n scaled_branin, search_space.lower, search_space.upper, grid_density=20\n@@ -32,7 +32,7 @@\n fig.show()\n \n # %% [markdown]\n-# We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence.\n+# We begin our Bayesian active learning from a small initial design built from a space-filling Halton sequence.\n \n # %%\n import trieste\n@@ -47,16 +47,24 @@\n # %% [markdown]\n # ## Surrogate model\n #\n-# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper.\n+# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. As a good practice, we use priors for the kernel hyperparameters.\n \n # %%\n import gpflow\n from trieste.models.gpflow.models import GaussianProcessRegression\n+import tensorflow_probability as tfp\n \n \n def build_model(data):\n variance = tf.math.reduce_variance(data.observations)\n- kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2])\n+ kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])\n+ prior_scale = tf.cast(1.0, dtype=tf.float64)\n+ kernel.variance.prior = tfp.distributions.LogNormal(\n+ tf.cast(-2.0, dtype=tf.float64), prior_scale\n+ )\n+ kernel.lengthscales.prior = tfp.distributions.LogNormal(\n+ tf.math.log(kernel.lengthscales), prior_scale\n+ )\n gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)\n gpflow.set_trainable(gpr.likelihood, False)\n \n@@ -79,9 +87,7 @@\n from trieste.acquisition.rule import EfficientGlobalOptimization\n \n acq = PredictiveVariance()\n-rule = EfficientGlobalOptimization(\n- builder=acq, optimizer=generate_continuous_optimizer()\n-)\n+rule = EfficientGlobalOptimization(builder=acq) # type: ignore\n bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n \n # %% [markdown]\n@@ -144,12 +150,14 @@\n # %% [markdown]\n # ## Batch active learning using predictive variance\n #\n-# For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time.\n+# In cases when we can evaluate the black-box function in parallel, it would be useful to produce a batch of points rather than a single point. `PredictiveVariance` acquisition function can also perform batch active learning. 
We must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is that it tends to query in high variance area less accurately, compared to sequentially drawing one point at a time.\n \n # %%\n bo_iter = 5\n num_query = 3\n+\n model = build_model(initial_data)\n+\n acq = PredictiveVariance()\n rule = EfficientGlobalOptimization(\n num_query_points=num_query,\n", "issue": "Active learning acquisition functions are missing integration tests\n\n", "before_files": [{"content": "# %% [markdown]\n# # Active Learning\n\n# %% [markdown]\n# Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.\n\n# %%\n# %matplotlib inline\nimport numpy as np\nimport tensorflow as tf\n\nnp.random.seed(1793)\ntf.random.set_seed(1793)\n\n# %% [markdown]\n# ## Describe the problem\n#\n# In this example, we will perform active learning for the scaled Branin function.\n\n\n# %%\nfrom trieste.objectives import scaled_branin\nfrom util.plotting_plotly import plot_function_plotly\nfrom trieste.space import Box\n\nsearch_space = Box([0, 0], [1, 1])\n\nfig = plot_function_plotly(\n scaled_branin, search_space.lower, search_space.upper, grid_density=20\n)\nfig.update_layout(height=400, width=400)\nfig.show()\n\n# %% [markdown]\n# We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence.\n\n# %%\nimport trieste\n\nobserver = trieste.objectives.utils.mk_observer(scaled_branin)\n\nnum_initial_points = 4\ninitial_query_points = search_space.sample_halton(num_initial_points)\ninitial_data = observer(initial_query_points)\n\n\n# %% [markdown]\n# ## Surrogate model\n#\n# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper.\n\n# %%\nimport gpflow\nfrom trieste.models.gpflow.models import GaussianProcessRegression\n\n\ndef build_model(data):\n variance = tf.math.reduce_variance(data.observations)\n kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2])\n gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)\n gpflow.set_trainable(gpr.likelihood, False)\n\n return GaussianProcessRegression(gpr)\n\n\nmodel = build_model(initial_data)\n\n# %% [markdown]\n# ## Active learning using predictive variance\n#\n# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance` which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has large determinant), as discussed in <cite data-cite=\"MacKay1992\"/>. Note that this also implies that our model needs to have `predict_joint` method to be able to return the full covariance, and it's likely to be expensive to compute.\n#\n# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. 
For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps.\n#\n\n# %%\nfrom trieste.acquisition.function import PredictiveVariance\nfrom trieste.acquisition.optimizer import generate_continuous_optimizer\nfrom trieste.acquisition.rule import EfficientGlobalOptimization\n\nacq = PredictiveVariance()\nrule = EfficientGlobalOptimization(\n builder=acq, optimizer=generate_continuous_optimizer()\n)\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\n# %% [markdown]\n# To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`, this will make Trieste record our model at each iteration.\n\n# %%\nbo_iter = 5\nresult = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)\n\n# %% [markdown]\n# Then we can retrieve our final dataset from the active learning steps.\n\n# %%\ndataset = result.try_get_final_dataset()\nquery_points = dataset.query_points.numpy()\nobservations = dataset.observations.numpy()\n\n# %% [markdown]\n# Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain.\n\n# %%\nfrom util.plotting import plot_bo_points, plot_function_2d\n\n\ndef plot_active_learning_query(\n result, bo_iter, num_initial_points, query_points, num_query=1\n):\n\n for i in range(bo_iter):\n\n def pred_var(x):\n _, var = result.history[i].models[\"OBJECTIVE\"].model.predict_f(x)\n return var\n\n _, ax = plot_function_2d(\n pred_var,\n search_space.lower - 0.01,\n search_space.upper + 0.01,\n grid_density=100,\n contour=True,\n colorbar=True,\n figsize=(10, 6),\n title=[\n \"Variance contour with queried points at iter:\" + str(i + 1)\n ],\n xlabel=\"$X_1$\",\n ylabel=\"$X_2$\",\n )\n\n plot_bo_points(\n query_points[: num_initial_points + (i * num_query)],\n ax[0, 0],\n num_initial_points,\n )\n\n\nplot_active_learning_query(result, bo_iter, num_initial_points, query_points)\n\n\n# %% [markdown]\n# ## Batch active learning using predictive variance\n#\n# For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. 
The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time.\n\n# %%\nbo_iter = 5\nnum_query = 3\nmodel = build_model(initial_data)\nacq = PredictiveVariance()\nrule = EfficientGlobalOptimization(\n num_query_points=num_query,\n builder=acq,\n optimizer=generate_continuous_optimizer(num_optimization_runs=1),\n)\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\nresult = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)\n\n\n# %% [markdown]\n# After that, we can retrieve our final dataset.\n\n# %%\ndataset = result.try_get_final_dataset()\nquery_points = dataset.query_points.numpy()\nobservations = dataset.observations.numpy()\n\n\n# %% [markdown]\n# Now we can visualize the batch predictive variance using our plotting function.\n\n# %%\nfrom util.plotting import plot_bo_points, plot_function_2d\n\nplot_active_learning_query(\n result, bo_iter, num_initial_points, query_points, num_query\n)\n\n# %% [markdown]\n# ## LICENSE\n#\n# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)\n", "path": "docs/notebooks/active_learning.pct.py"}], "after_files": [{"content": "# %% [markdown]\n# # Active Learning\n\n# %% [markdown]\n# Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.\n\n# %%\n# %matplotlib inline\nimport numpy as np\nimport tensorflow as tf\n\nnp.random.seed(1793)\ntf.random.set_seed(1793)\n\n# %% [markdown]\n# ## Describe the problem\n#\n# In this example, we will perform active learning for the scaled Branin function.\n\n\n# %%\nfrom trieste.objectives import BRANIN_SEARCH_SPACE, scaled_branin\nfrom util.plotting_plotly import plot_function_plotly\nfrom trieste.space import Box\n\nsearch_space = BRANIN_SEARCH_SPACE\n\nfig = plot_function_plotly(\n scaled_branin, search_space.lower, search_space.upper, grid_density=20\n)\nfig.update_layout(height=400, width=400)\nfig.show()\n\n# %% [markdown]\n# We begin our Bayesian active learning from a small initial design built from a space-filling Halton sequence.\n\n# %%\nimport trieste\n\nobserver = trieste.objectives.utils.mk_observer(scaled_branin)\n\nnum_initial_points = 4\ninitial_query_points = search_space.sample_halton(num_initial_points)\ninitial_data = observer(initial_query_points)\n\n\n# %% [markdown]\n# ## Surrogate model\n#\n# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. 
As a good practice, we use priors for the kernel hyperparameters.\n\n# %%\nimport gpflow\nfrom trieste.models.gpflow.models import GaussianProcessRegression\nimport tensorflow_probability as tfp\n\n\ndef build_model(data):\n variance = tf.math.reduce_variance(data.observations)\n kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])\n prior_scale = tf.cast(1.0, dtype=tf.float64)\n kernel.variance.prior = tfp.distributions.LogNormal(\n tf.cast(-2.0, dtype=tf.float64), prior_scale\n )\n kernel.lengthscales.prior = tfp.distributions.LogNormal(\n tf.math.log(kernel.lengthscales), prior_scale\n )\n gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)\n gpflow.set_trainable(gpr.likelihood, False)\n\n return GaussianProcessRegression(gpr)\n\n\nmodel = build_model(initial_data)\n\n# %% [markdown]\n# ## Active learning using predictive variance\n#\n# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance` which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has large determinant), as discussed in <cite data-cite=\"MacKay1992\"/>. Note that this also implies that our model needs to have `predict_joint` method to be able to return the full covariance, and it's likely to be expensive to compute.\n#\n# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving onto batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps.\n#\n\n# %%\nfrom trieste.acquisition.function import PredictiveVariance\nfrom trieste.acquisition.optimizer import generate_continuous_optimizer\nfrom trieste.acquisition.rule import EfficientGlobalOptimization\n\nacq = PredictiveVariance()\nrule = EfficientGlobalOptimization(builder=acq) # type: ignore\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\n# %% [markdown]\n# To plot the contour of variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`, this will make Trieste record our model at each iteration.\n\n# %%\nbo_iter = 5\nresult = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)\n\n# %% [markdown]\n# Then we can retrieve our final dataset from the active learning steps.\n\n# %%\ndataset = result.try_get_final_dataset()\nquery_points = dataset.query_points.numpy()\nobservations = dataset.observations.numpy()\n\n# %% [markdown]\n# Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. 
We can see how it samples regions for which our model is highly uncertain.\n\n# %%\nfrom util.plotting import plot_bo_points, plot_function_2d\n\n\ndef plot_active_learning_query(\n result, bo_iter, num_initial_points, query_points, num_query=1\n):\n\n for i in range(bo_iter):\n\n def pred_var(x):\n _, var = result.history[i].models[\"OBJECTIVE\"].model.predict_f(x)\n return var\n\n _, ax = plot_function_2d(\n pred_var,\n search_space.lower - 0.01,\n search_space.upper + 0.01,\n grid_density=100,\n contour=True,\n colorbar=True,\n figsize=(10, 6),\n title=[\n \"Variance contour with queried points at iter:\" + str(i + 1)\n ],\n xlabel=\"$X_1$\",\n ylabel=\"$X_2$\",\n )\n\n plot_bo_points(\n query_points[: num_initial_points + (i * num_query)],\n ax[0, 0],\n num_initial_points,\n )\n\n\nplot_active_learning_query(result, bo_iter, num_initial_points, query_points)\n\n\n# %% [markdown]\n# ## Batch active learning using predictive variance\n#\n# In cases when we can evaluate the black-box function in parallel, it would be useful to produce a batch of points rather than a single point. `PredictiveVariance` acquisition function can also perform batch active learning. We must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is that it tends to query in high variance area less accurately, compared to sequentially drawing one point at a time.\n\n# %%\nbo_iter = 5\nnum_query = 3\n\nmodel = build_model(initial_data)\n\nacq = PredictiveVariance()\nrule = EfficientGlobalOptimization(\n num_query_points=num_query,\n builder=acq,\n optimizer=generate_continuous_optimizer(num_optimization_runs=1),\n)\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\nresult = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)\n\n\n# %% [markdown]\n# After that, we can retrieve our final dataset.\n\n# %%\ndataset = result.try_get_final_dataset()\nquery_points = dataset.query_points.numpy()\nobservations = dataset.observations.numpy()\n\n\n# %% [markdown]\n# Now we can visualize the batch predictive variance using our plotting function.\n\n# %%\nfrom util.plotting import plot_bo_points, plot_function_2d\n\nplot_active_learning_query(\n result, bo_iter, num_initial_points, query_points, num_query\n)\n\n# %% [markdown]\n# ## LICENSE\n#\n# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)\n", "path": "docs/notebooks/active_learning.pct.py"}]}
2,232
977
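The golden diff in the record above swaps the notebook's RBF kernel for a Matern52 kernel with LogNormal priors on its hyperparameters. The sketch below isolates that kernel construction so it can run on its own; the toy observations are invented for illustration, and only the gpflow/tensorflow_probability calls mirror the diff.

```python
# Sketch of the prior-equipped kernel that the golden diff builds inside
# build_model(). The stand-in observations are not part of the trieste notebook.
import numpy as np
import tensorflow as tf
import gpflow
import tensorflow_probability as tfp

observations = np.random.rand(4, 1)  # stand-in for initial_data.observations

variance = tf.math.reduce_variance(observations)
kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])

# LogNormal priors keep the hyperparameter estimates in a sensible range when
# only a handful of active-learning points are available.
prior_scale = tf.cast(1.0, dtype=tf.float64)
kernel.variance.prior = tfp.distributions.LogNormal(
    tf.cast(-2.0, dtype=tf.float64), prior_scale
)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
    tf.math.log(kernel.lengthscales), prior_scale
)
```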
gh_patches_debug_14621
rasdani/github-patches
git_diff
talonhub__community-378
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- the command key should probably be made Mac specific --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `code/keys.py` Content: ``` 1 from typing import Set 2 3 from talon import Module, Context, actions, app 4 import sys 5 6 default_alphabet = "air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip".split( 7 " " 8 ) 9 letters_string = "abcdefghijklmnopqrstuvwxyz" 10 11 default_digits = "zero one two three four five six seven eight nine".split(" ") 12 numbers = [str(i) for i in range(10)] 13 default_f_digits = "one two three four five six seven eight nine ten eleven twelve".split( 14 " " 15 ) 16 17 mod = Module() 18 mod.list("letter", desc="The spoken phonetic alphabet") 19 mod.list("symbol_key", desc="All symbols from the keyboard") 20 mod.list("arrow_key", desc="All arrow keys") 21 mod.list("number_key", desc="All number keys") 22 mod.list("modifier_key", desc="All modifier keys") 23 mod.list("function_key", desc="All function keys") 24 mod.list("special_key", desc="All special keys") 25 mod.list("punctuation", desc="words for inserting punctuation into text") 26 27 28 @mod.capture(rule="{self.modifier_key}+") 29 def modifiers(m) -> str: 30 "One or more modifier keys" 31 return "-".join(m.modifier_key_list) 32 33 34 @mod.capture(rule="{self.arrow_key}") 35 def arrow_key(m) -> str: 36 "One directional arrow key" 37 return m.arrow_key 38 39 40 @mod.capture(rule="<self.arrow_key>+") 41 def arrow_keys(m) -> str: 42 "One or more arrow keys separated by a space" 43 return str(m) 44 45 46 @mod.capture(rule="{self.number_key}") 47 def number_key(m) -> str: 48 "One number key" 49 return m.number_key 50 51 52 @mod.capture(rule="{self.letter}") 53 def letter(m) -> str: 54 "One letter key" 55 return m.letter 56 57 58 @mod.capture(rule="{self.special_key}") 59 def special_key(m) -> str: 60 "One special key" 61 return m.special_key 62 63 64 @mod.capture(rule="{self.symbol_key}") 65 def symbol_key(m) -> str: 66 "One symbol key" 67 return m.symbol_key 68 69 70 @mod.capture(rule="{self.function_key}") 71 def function_key(m) -> str: 72 "One function key" 73 return m.function_key 74 75 76 @mod.capture(rule="( <self.letter> | <self.number_key> | <self.symbol_key> )") 77 def any_alphanumeric_key(m) -> str: 78 "any alphanumeric key" 79 return str(m) 80 81 82 @mod.capture( 83 rule="( <self.letter> | <self.number_key> | <self.symbol_key> " 84 "| <self.arrow_key> | <self.function_key> | <self.special_key> )" 85 ) 86 def unmodified_key(m) -> str: 87 "A single key with no modifiers" 88 return str(m) 89 90 91 @mod.capture(rule="{self.modifier_key}* <self.unmodified_key>") 92 def key(m) -> str: 93 "A single key with optional modifiers" 94 try: 95 mods = m.modifier_key_list 96 except AttributeError: 97 mods = [] 98 return "-".join(mods + [m.unmodified_key]) 99 100 101 @mod.capture(rule="<self.key>+") 102 def keys(m) -> str: 103 "A sequence of one or more keys with optional modifiers" 104 return " ".join(m.key_list) 105 106 107 @mod.capture(rule="{self.letter}+") 108 def letters(m) -> str: 109 "Multiple letter keys" 110 return "".join(m.letter_list) 111 112 113 ctx = Context() 114 ctx.lists["self.modifier_key"] = { 115 # If you find 'alt' is often misrecognized, try using 'alter'. 
116 "alt": "alt", #'alter': 'alt', 117 "command": "cmd", 118 "control": "ctrl", #'troll': 'ctrl', 119 "option": "alt", 120 "shift": "shift", #'sky': 'shift', 121 "super": "super", 122 } 123 alphabet = dict(zip(default_alphabet, letters_string)) 124 ctx.lists["self.letter"] = alphabet 125 126 # `punctuation_words` is for words you want available BOTH in dictation and as 127 # key names in command mode. `symbol_key_words` is for key names that should be 128 # available in command mode, but NOT during dictation. 129 punctuation_words = { 130 # TODO: I'm not sure why we need these, I think it has something to do with 131 # Dragon. Possibly it has been fixed by later improvements to talon? -rntz 132 "`": "`", 133 ",": ",", # <== these things 134 "back tick": "`", 135 "comma": ",", 136 "period": ".", 137 "semicolon": ";", 138 "colon": ":", 139 "forward slash": "/", 140 "question mark": "?", 141 "exclamation mark": "!", 142 "exclamation point": "!", 143 "dollar sign": "$", 144 "asterisk": "*", 145 "hash sign": "#", 146 "number sign": "#", 147 "percent sign": "%", 148 "at sign": "@", 149 "and sign": "&", 150 "ampersand": "&", 151 } 152 symbol_key_words = { 153 "dot": ".", 154 "quote": "'", 155 "L square": "[", 156 "left square": "[", 157 "square": "[", 158 "R square": "]", 159 "right square": "]", 160 "slash": "/", 161 "backslash": "\\", 162 "minus": "-", 163 "dash": "-", 164 "equals": "=", 165 "plus": "+", 166 "tilde": "~", 167 "bang": "!", 168 "dollar": "$", 169 "down score": "_", 170 "under score": "_", 171 "paren": "(", 172 "L paren": "(", 173 "left paren": "(", 174 "R paren": ")", 175 "right paren": ")", 176 "brace": "{", 177 "left brace": "{", 178 "R brace": "}", 179 "right brace": "}", 180 "angle": "<", 181 "left angle": "<", 182 "less than": "<", 183 "rangle": ">", 184 "R angle": ">", 185 "right angle": ">", 186 "greater than": ">", 187 "star": "*", 188 "pound": "#", 189 "hash": "#", 190 "percent": "%", 191 "caret": "^", 192 "amper": "&", 193 "pipe": "|", 194 "dubquote": '"', 195 "double quote": '"', 196 } 197 198 # make punctuation words also included in {user.symbol_keys} 199 symbol_key_words.update(punctuation_words) 200 ctx.lists["self.punctuation"] = punctuation_words 201 ctx.lists["self.symbol_key"] = symbol_key_words 202 ctx.lists["self.number_key"] = dict(zip(default_digits, numbers)) 203 ctx.lists["self.arrow_key"] = { 204 "down": "down", 205 "left": "left", 206 "right": "right", 207 "up": "up", 208 } 209 210 simple_keys = [ 211 "end", 212 "enter", 213 "escape", 214 "home", 215 "insert", 216 "pagedown", 217 "pageup", 218 "space", 219 "tab", 220 ] 221 222 alternate_keys = { 223 "delete": "backspace", 224 "forward delete": "delete", 225 #'junk': 'backspace', 226 } 227 # mac apparently doesn't have the menu key. 228 if app.platform in ("windows", "linux"): 229 alternate_keys["menu key"] = "menu" 230 231 keys = {k: k for k in simple_keys} 232 keys.update(alternate_keys) 233 ctx.lists["self.special_key"] = keys 234 ctx.lists["self.function_key"] = { 235 f"F {default_f_digits[i]}": f"f{i + 1}" for i in range(12) 236 } 237 238 239 @mod.action_class 240 class Actions: 241 def get_alphabet() -> dict: 242 """Provides the alphabet dictionary""" 243 return alphabet 244 245 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/code/keys.py b/code/keys.py --- a/code/keys.py +++ b/code/keys.py @@ -111,15 +111,17 @@ ctx = Context() -ctx.lists["self.modifier_key"] = { +modifier_keys = { # If you find 'alt' is often misrecognized, try using 'alter'. "alt": "alt", #'alter': 'alt', - "command": "cmd", "control": "ctrl", #'troll': 'ctrl', - "option": "alt", "shift": "shift", #'sky': 'shift', "super": "super", } +if app.platform == "mac": + modifier_keys["command"] = "cmd" + modifier_keys["option"] = "alt" +ctx.lists["self.modifier_key"] = modifier_keys alphabet = dict(zip(default_alphabet, letters_string)) ctx.lists["self.letter"] = alphabet
{"golden_diff": "diff --git a/code/keys.py b/code/keys.py\n--- a/code/keys.py\n+++ b/code/keys.py\n@@ -111,15 +111,17 @@\n \n \n ctx = Context()\n-ctx.lists[\"self.modifier_key\"] = {\n+modifier_keys = {\n # If you find 'alt' is often misrecognized, try using 'alter'.\n \"alt\": \"alt\", #'alter': 'alt',\n- \"command\": \"cmd\",\n \"control\": \"ctrl\", #'troll': 'ctrl',\n- \"option\": \"alt\",\n \"shift\": \"shift\", #'sky': 'shift',\n \"super\": \"super\",\n }\n+if app.platform == \"mac\":\n+ modifier_keys[\"command\"] = \"cmd\"\n+ modifier_keys[\"option\"] = \"alt\"\n+ctx.lists[\"self.modifier_key\"] = modifier_keys\n alphabet = dict(zip(default_alphabet, letters_string))\n ctx.lists[\"self.letter\"] = alphabet\n", "issue": "the command key should probably be made Mac specific\n\n", "before_files": [{"content": "from typing import Set\n\nfrom talon import Module, Context, actions, app\nimport sys\n\ndefault_alphabet = \"air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip\".split(\n \" \"\n)\nletters_string = \"abcdefghijklmnopqrstuvwxyz\"\n\ndefault_digits = \"zero one two three four five six seven eight nine\".split(\" \")\nnumbers = [str(i) for i in range(10)]\ndefault_f_digits = \"one two three four five six seven eight nine ten eleven twelve\".split(\n \" \"\n)\n\nmod = Module()\nmod.list(\"letter\", desc=\"The spoken phonetic alphabet\")\nmod.list(\"symbol_key\", desc=\"All symbols from the keyboard\")\nmod.list(\"arrow_key\", desc=\"All arrow keys\")\nmod.list(\"number_key\", desc=\"All number keys\")\nmod.list(\"modifier_key\", desc=\"All modifier keys\")\nmod.list(\"function_key\", desc=\"All function keys\")\nmod.list(\"special_key\", desc=\"All special keys\")\nmod.list(\"punctuation\", desc=\"words for inserting punctuation into text\")\n\n\[email protected](rule=\"{self.modifier_key}+\")\ndef modifiers(m) -> str:\n \"One or more modifier keys\"\n return \"-\".join(m.modifier_key_list)\n\n\[email protected](rule=\"{self.arrow_key}\")\ndef arrow_key(m) -> str:\n \"One directional arrow key\"\n return m.arrow_key\n\n\[email protected](rule=\"<self.arrow_key>+\")\ndef arrow_keys(m) -> str:\n \"One or more arrow keys separated by a space\"\n return str(m)\n\n\[email protected](rule=\"{self.number_key}\")\ndef number_key(m) -> str:\n \"One number key\"\n return m.number_key\n\n\[email protected](rule=\"{self.letter}\")\ndef letter(m) -> str:\n \"One letter key\"\n return m.letter\n\n\[email protected](rule=\"{self.special_key}\")\ndef special_key(m) -> str:\n \"One special key\"\n return m.special_key\n\n\[email protected](rule=\"{self.symbol_key}\")\ndef symbol_key(m) -> str:\n \"One symbol key\"\n return m.symbol_key\n\n\[email protected](rule=\"{self.function_key}\")\ndef function_key(m) -> str:\n \"One function key\"\n return m.function_key\n\n\[email protected](rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> )\")\ndef any_alphanumeric_key(m) -> str:\n \"any alphanumeric key\"\n return str(m)\n\n\[email protected](\n rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> \"\n \"| <self.arrow_key> | <self.function_key> | <self.special_key> )\"\n)\ndef unmodified_key(m) -> str:\n \"A single key with no modifiers\"\n return str(m)\n\n\[email protected](rule=\"{self.modifier_key}* <self.unmodified_key>\")\ndef key(m) -> str:\n \"A single key with optional modifiers\"\n try:\n mods = m.modifier_key_list\n except AttributeError:\n mods = []\n return \"-\".join(mods + 
[m.unmodified_key])\n\n\[email protected](rule=\"<self.key>+\")\ndef keys(m) -> str:\n \"A sequence of one or more keys with optional modifiers\"\n return \" \".join(m.key_list)\n\n\[email protected](rule=\"{self.letter}+\")\ndef letters(m) -> str:\n \"Multiple letter keys\"\n return \"\".join(m.letter_list)\n\n\nctx = Context()\nctx.lists[\"self.modifier_key\"] = {\n # If you find 'alt' is often misrecognized, try using 'alter'.\n \"alt\": \"alt\", #'alter': 'alt',\n \"command\": \"cmd\",\n \"control\": \"ctrl\", #'troll': 'ctrl',\n \"option\": \"alt\",\n \"shift\": \"shift\", #'sky': 'shift',\n \"super\": \"super\",\n}\nalphabet = dict(zip(default_alphabet, letters_string))\nctx.lists[\"self.letter\"] = alphabet\n\n# `punctuation_words` is for words you want available BOTH in dictation and as\n# key names in command mode. `symbol_key_words` is for key names that should be\n# available in command mode, but NOT during dictation.\npunctuation_words = {\n # TODO: I'm not sure why we need these, I think it has something to do with\n # Dragon. Possibly it has been fixed by later improvements to talon? -rntz\n \"`\": \"`\",\n \",\": \",\", # <== these things\n \"back tick\": \"`\",\n \"comma\": \",\",\n \"period\": \".\",\n \"semicolon\": \";\",\n \"colon\": \":\",\n \"forward slash\": \"/\",\n \"question mark\": \"?\",\n \"exclamation mark\": \"!\",\n \"exclamation point\": \"!\",\n \"dollar sign\": \"$\",\n \"asterisk\": \"*\",\n \"hash sign\": \"#\",\n \"number sign\": \"#\",\n \"percent sign\": \"%\",\n \"at sign\": \"@\",\n \"and sign\": \"&\",\n \"ampersand\": \"&\",\n}\nsymbol_key_words = {\n \"dot\": \".\",\n \"quote\": \"'\",\n \"L square\": \"[\",\n \"left square\": \"[\",\n \"square\": \"[\",\n \"R square\": \"]\",\n \"right square\": \"]\",\n \"slash\": \"/\",\n \"backslash\": \"\\\\\",\n \"minus\": \"-\",\n \"dash\": \"-\",\n \"equals\": \"=\",\n \"plus\": \"+\",\n \"tilde\": \"~\",\n \"bang\": \"!\",\n \"dollar\": \"$\",\n \"down score\": \"_\",\n \"under score\": \"_\",\n \"paren\": \"(\",\n \"L paren\": \"(\",\n \"left paren\": \"(\",\n \"R paren\": \")\",\n \"right paren\": \")\",\n \"brace\": \"{\",\n \"left brace\": \"{\",\n \"R brace\": \"}\",\n \"right brace\": \"}\",\n \"angle\": \"<\",\n \"left angle\": \"<\",\n \"less than\": \"<\",\n \"rangle\": \">\",\n \"R angle\": \">\",\n \"right angle\": \">\",\n \"greater than\": \">\",\n \"star\": \"*\",\n \"pound\": \"#\",\n \"hash\": \"#\",\n \"percent\": \"%\",\n \"caret\": \"^\",\n \"amper\": \"&\",\n \"pipe\": \"|\",\n \"dubquote\": '\"',\n \"double quote\": '\"',\n}\n\n# make punctuation words also included in {user.symbol_keys}\nsymbol_key_words.update(punctuation_words)\nctx.lists[\"self.punctuation\"] = punctuation_words\nctx.lists[\"self.symbol_key\"] = symbol_key_words\nctx.lists[\"self.number_key\"] = dict(zip(default_digits, numbers))\nctx.lists[\"self.arrow_key\"] = {\n \"down\": \"down\",\n \"left\": \"left\",\n \"right\": \"right\",\n \"up\": \"up\",\n}\n\nsimple_keys = [\n \"end\",\n \"enter\",\n \"escape\",\n \"home\",\n \"insert\",\n \"pagedown\",\n \"pageup\",\n \"space\",\n \"tab\",\n]\n\nalternate_keys = {\n \"delete\": \"backspace\",\n \"forward delete\": \"delete\",\n #'junk': 'backspace',\n}\n# mac apparently doesn't have the menu key.\nif app.platform in (\"windows\", \"linux\"):\n alternate_keys[\"menu key\"] = \"menu\"\n\nkeys = {k: k for k in simple_keys}\nkeys.update(alternate_keys)\nctx.lists[\"self.special_key\"] = keys\nctx.lists[\"self.function_key\"] = {\n f\"F {default_f_digits[i]}\": f\"f{i + 
1}\" for i in range(12)\n}\n\n\[email protected]_class\nclass Actions:\n def get_alphabet() -> dict:\n \"\"\"Provides the alphabet dictionary\"\"\"\n return alphabet\n\n", "path": "code/keys.py"}], "after_files": [{"content": "from typing import Set\n\nfrom talon import Module, Context, actions, app\nimport sys\n\ndefault_alphabet = \"air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip\".split(\n \" \"\n)\nletters_string = \"abcdefghijklmnopqrstuvwxyz\"\n\ndefault_digits = \"zero one two three four five six seven eight nine\".split(\" \")\nnumbers = [str(i) for i in range(10)]\ndefault_f_digits = \"one two three four five six seven eight nine ten eleven twelve\".split(\n \" \"\n)\n\nmod = Module()\nmod.list(\"letter\", desc=\"The spoken phonetic alphabet\")\nmod.list(\"symbol_key\", desc=\"All symbols from the keyboard\")\nmod.list(\"arrow_key\", desc=\"All arrow keys\")\nmod.list(\"number_key\", desc=\"All number keys\")\nmod.list(\"modifier_key\", desc=\"All modifier keys\")\nmod.list(\"function_key\", desc=\"All function keys\")\nmod.list(\"special_key\", desc=\"All special keys\")\nmod.list(\"punctuation\", desc=\"words for inserting punctuation into text\")\n\n\[email protected](rule=\"{self.modifier_key}+\")\ndef modifiers(m) -> str:\n \"One or more modifier keys\"\n return \"-\".join(m.modifier_key_list)\n\n\[email protected](rule=\"{self.arrow_key}\")\ndef arrow_key(m) -> str:\n \"One directional arrow key\"\n return m.arrow_key\n\n\[email protected](rule=\"<self.arrow_key>+\")\ndef arrow_keys(m) -> str:\n \"One or more arrow keys separated by a space\"\n return str(m)\n\n\[email protected](rule=\"{self.number_key}\")\ndef number_key(m) -> str:\n \"One number key\"\n return m.number_key\n\n\[email protected](rule=\"{self.letter}\")\ndef letter(m) -> str:\n \"One letter key\"\n return m.letter\n\n\[email protected](rule=\"{self.special_key}\")\ndef special_key(m) -> str:\n \"One special key\"\n return m.special_key\n\n\[email protected](rule=\"{self.symbol_key}\")\ndef symbol_key(m) -> str:\n \"One symbol key\"\n return m.symbol_key\n\n\[email protected](rule=\"{self.function_key}\")\ndef function_key(m) -> str:\n \"One function key\"\n return m.function_key\n\n\[email protected](rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> )\")\ndef any_alphanumeric_key(m) -> str:\n \"any alphanumeric key\"\n return str(m)\n\n\[email protected](\n rule=\"( <self.letter> | <self.number_key> | <self.symbol_key> \"\n \"| <self.arrow_key> | <self.function_key> | <self.special_key> )\"\n)\ndef unmodified_key(m) -> str:\n \"A single key with no modifiers\"\n return str(m)\n\n\[email protected](rule=\"{self.modifier_key}* <self.unmodified_key>\")\ndef key(m) -> str:\n \"A single key with optional modifiers\"\n try:\n mods = m.modifier_key_list\n except AttributeError:\n mods = []\n return \"-\".join(mods + [m.unmodified_key])\n\n\[email protected](rule=\"<self.key>+\")\ndef keys(m) -> str:\n \"A sequence of one or more keys with optional modifiers\"\n return \" \".join(m.key_list)\n\n\[email protected](rule=\"{self.letter}+\")\ndef letters(m) -> str:\n \"Multiple letter keys\"\n return \"\".join(m.letter_list)\n\n\nctx = Context()\nmodifier_keys = {\n # If you find 'alt' is often misrecognized, try using 'alter'.\n \"alt\": \"alt\", #'alter': 'alt',\n \"control\": \"ctrl\", #'troll': 'ctrl',\n \"shift\": \"shift\", #'sky': 'shift',\n \"super\": \"super\",\n}\nif app.platform == \"mac\":\n modifier_keys[\"command\"] 
= \"cmd\"\n modifier_keys[\"option\"] = \"alt\"\nctx.lists[\"self.modifier_key\"] = modifier_keys\nalphabet = dict(zip(default_alphabet, letters_string))\nctx.lists[\"self.letter\"] = alphabet\n\n# `punctuation_words` is for words you want available BOTH in dictation and as\n# key names in command mode. `symbol_key_words` is for key names that should be\n# available in command mode, but NOT during dictation.\npunctuation_words = {\n # TODO: I'm not sure why we need these, I think it has something to do with\n # Dragon. Possibly it has been fixed by later improvements to talon? -rntz\n \"`\": \"`\",\n \",\": \",\", # <== these things\n \"back tick\": \"`\",\n \"comma\": \",\",\n \"period\": \".\",\n \"semicolon\": \";\",\n \"colon\": \":\",\n \"forward slash\": \"/\",\n \"question mark\": \"?\",\n \"exclamation mark\": \"!\",\n \"exclamation point\": \"!\",\n \"dollar sign\": \"$\",\n \"asterisk\": \"*\",\n \"hash sign\": \"#\",\n \"number sign\": \"#\",\n \"percent sign\": \"%\",\n \"at sign\": \"@\",\n \"and sign\": \"&\",\n \"ampersand\": \"&\",\n}\nsymbol_key_words = {\n \"dot\": \".\",\n \"quote\": \"'\",\n \"L square\": \"[\",\n \"left square\": \"[\",\n \"square\": \"[\",\n \"R square\": \"]\",\n \"right square\": \"]\",\n \"slash\": \"/\",\n \"backslash\": \"\\\\\",\n \"minus\": \"-\",\n \"dash\": \"-\",\n \"equals\": \"=\",\n \"plus\": \"+\",\n \"tilde\": \"~\",\n \"bang\": \"!\",\n \"dollar\": \"$\",\n \"down score\": \"_\",\n \"under score\": \"_\",\n \"paren\": \"(\",\n \"L paren\": \"(\",\n \"left paren\": \"(\",\n \"R paren\": \")\",\n \"right paren\": \")\",\n \"brace\": \"{\",\n \"left brace\": \"{\",\n \"R brace\": \"}\",\n \"right brace\": \"}\",\n \"angle\": \"<\",\n \"left angle\": \"<\",\n \"less than\": \"<\",\n \"rangle\": \">\",\n \"R angle\": \">\",\n \"right angle\": \">\",\n \"greater than\": \">\",\n \"star\": \"*\",\n \"pound\": \"#\",\n \"hash\": \"#\",\n \"percent\": \"%\",\n \"caret\": \"^\",\n \"amper\": \"&\",\n \"pipe\": \"|\",\n \"dubquote\": '\"',\n \"double quote\": '\"',\n}\n\n# make punctuation words also included in {user.symbol_keys}\nsymbol_key_words.update(punctuation_words)\nctx.lists[\"self.punctuation\"] = punctuation_words\nctx.lists[\"self.symbol_key\"] = symbol_key_words\nctx.lists[\"self.number_key\"] = dict(zip(default_digits, numbers))\nctx.lists[\"self.arrow_key\"] = {\n \"down\": \"down\",\n \"left\": \"left\",\n \"right\": \"right\",\n \"up\": \"up\",\n}\n\nsimple_keys = [\n \"end\",\n \"enter\",\n \"escape\",\n \"home\",\n \"insert\",\n \"pagedown\",\n \"pageup\",\n \"space\",\n \"tab\",\n]\n\nalternate_keys = {\n \"delete\": \"backspace\",\n \"forward delete\": \"delete\",\n #'junk': 'backspace',\n}\n# mac apparently doesn't have the menu key.\nif app.platform in (\"windows\", \"linux\"):\n alternate_keys[\"menu key\"] = \"menu\"\n\nkeys = {k: k for k in simple_keys}\nkeys.update(alternate_keys)\nctx.lists[\"self.special_key\"] = keys\nctx.lists[\"self.function_key\"] = {\n f\"F {default_f_digits[i]}\": f\"f{i + 1}\" for i in range(12)\n}\n\n\[email protected]_class\nclass Actions:\n def get_alphabet() -> dict:\n \"\"\"Provides the alphabet dictionary\"\"\"\n return alphabet\n\n", "path": "code/keys.py"}]}
2,600
217
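The golden diff in the talon record above registers the "command" and "option" modifiers only when `app.platform == "mac"`. The sketch below reproduces that selection as plain Python so it runs without Talon installed; the `platform` argument stands in for Talon's `app.platform` value.

```python
# Plain-Python sketch of the platform-conditional modifier map from the golden
# diff above. Talon itself is not imported.
from typing import Dict


def build_modifier_keys(platform: str) -> Dict[str, str]:
    modifier_keys = {
        "alt": "alt",
        "control": "ctrl",
        "shift": "shift",
        "super": "super",
    }
    # Only macOS keyboards have a command key, and "option" is the macOS name
    # for alt, so both spoken forms are added only on that platform.
    if platform == "mac":
        modifier_keys["command"] = "cmd"
        modifier_keys["option"] = "alt"
    return modifier_keys


if __name__ == "__main__":
    print(build_modifier_keys("mac"))    # includes command/option
    print(build_modifier_keys("linux"))  # omits them
```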
gh_patches_debug_24197
rasdani/github-patches
git_diff
kserve__kserve-3226
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- KServe lgbserver runtime error input for v1 endpoint /kind bug **What steps did you take and what happened:** lgbserver used to support both of following inputs formats prior to 0.11 release. ```python request = {'inputs' : [{'sepal_width_(cm)': {0: 3.5}, 'petal_length_(cm)': {0: 1.4}, 'petal_width_(cm)': {0: 0.2}, 'sepal_length_(cm)': {0: 5.1} }]} ``` ```python request2 = {'inputs': [ [{'sepal_width_(cm)': 3.5}, {'petal_length_(cm)': 1.4}, {'petal_width_(cm)': 0.2}, {'sepal_length_(cm)': 5.1}] ] } ``` KServe only documented the first input format https://kserve.github.io/website/0.11/modelserving/v1beta1/lightgbm and in 0.11 the second input format stop working with following error ```bash 2023-11-03 09:06:02.099 32367 kserve ERROR [inference_error_handler():89] Exception: Traceback (most recent call last): File "/Users/dsun20/kserve/python/lgbserver/lgbserver/model.py", line 62, in predict result = self._booster.predict(instances) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 4220, in predict return predictor.predict( ^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1047, in predict preds, nrow = self.__pred_for_np2d( ^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1187, in __pred_for_np2d return self.__inner_predict_np2d( ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py", line 1127, in __inner_predict_np2d data = np.array(mat.reshape(mat.size), dtype=np.float32) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ TypeError: float() argument must be a string or a real number, not 'dict' ``` **What did you expect to happen:** **What's the InferenceService yaml:** [To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output] **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] **Environment:** - Istio Version: - Knative Version: - KServe Version: - Kubeflow version: - Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm] - Minikube/Kind version: - Kubernetes version: (use `kubectl version`): - OS (e.g. from `/etc/os-release`): --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/kserve/kserve/utils/utils.py` Content: ``` 1 # Copyright 2021 The KServe Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import os 16 import sys 17 import uuid 18 from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter 19 from typing import Dict, Union 20 21 from kserve.utils.numpy_codec import from_np_dtype 22 import pandas as pd 23 import numpy as np 24 import psutil 25 from cloudevents.conversion import to_binary, to_structured 26 from cloudevents.http import CloudEvent 27 from grpc import ServicerContext 28 from kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse 29 30 31 def is_running_in_k8s(): 32 return os.path.isdir('/var/run/secrets/kubernetes.io/') 33 34 35 def get_current_k8s_namespace(): 36 with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f: 37 return f.readline() 38 39 40 def get_default_target_namespace(): 41 if not is_running_in_k8s(): 42 return 'default' 43 return get_current_k8s_namespace() 44 45 46 def get_isvc_namespace(inferenceservice): 47 return inferenceservice.metadata.namespace or get_default_target_namespace() 48 49 50 def get_ig_namespace(inferencegraph): 51 return inferencegraph.metadata.namespace or get_default_target_namespace() 52 53 54 def cpu_count(): 55 """Get the available CPU count for this system. 56 Takes the minimum value from the following locations: 57 - Total system cpus available on the host. 58 - CPU Affinity (if set) 59 - Cgroups limit (if set) 60 """ 61 count = os.cpu_count() 62 63 # Check CPU affinity if available 64 try: 65 affinity_count = len(psutil.Process().cpu_affinity()) 66 if affinity_count > 0: 67 count = min(count, affinity_count) 68 except Exception: 69 pass 70 71 # Check cgroups if available 72 if sys.platform == "linux": 73 try: 74 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us") as f: 75 quota = int(f.read()) 76 with open("/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us") as f: 77 period = int(f.read()) 78 cgroups_count = int(quota / period) 79 if cgroups_count > 0: 80 count = min(count, cgroups_count) 81 except Exception: 82 pass 83 84 return count 85 86 87 def is_structured_cloudevent(body: Dict) -> bool: 88 """Returns True if the JSON request body resembles a structured CloudEvent""" 89 return "time" in body \ 90 and "type" in body \ 91 and "source" in body \ 92 and "id" in body \ 93 and "specversion" in body \ 94 and "data" in body 95 96 97 def create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict, 98 binary_event=False) -> tuple: 99 ce_attributes = {} 100 101 if os.getenv("CE_MERGE", "false").lower() == "true": 102 if binary_event: 103 ce_attributes = req_attributes 104 if "datacontenttype" in ce_attributes: # Optional field so must check 105 del ce_attributes["datacontenttype"] 106 else: 107 ce_attributes = req_attributes 108 109 # Remove these fields so we generate new ones 110 del ce_attributes["id"] 111 del ce_attributes["time"] 112 113 ce_attributes["type"] = os.getenv("CE_TYPE", "io.kserve.inference.response") 114 ce_attributes["source"] = os.getenv("CE_SOURCE", f"io.kserve.inference.{model_name}") 115 116 event = CloudEvent(ce_attributes, response) 117 118 if binary_event: 119 event_headers, event_body = to_binary(event) 120 else: 121 event_headers, event_body = to_structured(event) 122 123 return event_headers, event_body 124 125 126 def generate_uuid() -> str: 127 return str(uuid.uuid4()) 128 129 130 def to_headers(context: ServicerContext) -> Dict[str, str]: 131 metadata = context.invocation_metadata() 132 if hasattr(context, "trailing_metadata"): 133 metadata += context.trailing_metadata() 134 headers = {} 135 for metadatum in 
metadata: 136 headers[metadatum.key] = metadatum.value 137 138 return headers 139 140 141 def get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]: 142 if isinstance(payload, Dict): 143 instances = payload["inputs"] if "inputs" in payload else payload["instances"] 144 if len(instances) == 0: 145 return np.array(instances) 146 if isinstance(instances[0], Dict): 147 dfs = [] 148 for input in instances: 149 dfs.append(pd.DataFrame(input)) 150 inputs = pd.concat(dfs, axis=0) 151 return inputs 152 else: 153 return np.array(instances) 154 155 elif isinstance(payload, InferRequest): 156 content_type = '' 157 parameters = payload.parameters 158 if parameters: 159 if isinstance(parameters.get("content_type"), InferParameter): 160 # for v2 grpc, we get InferParameter obj eg: {"content_type": string_param: "pd"} 161 content_type = str(parameters.get("content_type").string_param) 162 else: 163 # for v2 http, we get string eg: {"content_type": "pd"} 164 content_type = parameters.get("content_type") 165 166 if content_type == "pd": 167 return payload.as_dataframe() 168 else: 169 input = payload.inputs[0] 170 return input.as_numpy() 171 172 173 def get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame], 174 model_name: str) -> Union[Dict, InferResponse]: 175 if isinstance(payload, Dict): 176 infer_outputs = result 177 if isinstance(result, pd.DataFrame): 178 infer_outputs = [] 179 for label, row in result.iterrows(): 180 infer_outputs.append(row.to_dict()) 181 elif isinstance(result, np.ndarray): 182 infer_outputs = result.tolist() 183 return {"predictions": infer_outputs} 184 elif isinstance(payload, InferRequest): 185 infer_outputs = [] 186 if isinstance(result, pd.DataFrame): 187 for col in result.columns: 188 infer_output = InferOutput( 189 name=col, 190 shape=list(result[col].shape), 191 datatype=from_np_dtype(result[col].dtype), 192 data=result[col].tolist() 193 ) 194 infer_outputs.append(infer_output) 195 else: 196 infer_output = InferOutput( 197 name="output-0", 198 shape=list(result.shape), 199 datatype=from_np_dtype(result.dtype), 200 data=result.flatten().tolist() 201 ) 202 infer_outputs.append(infer_output) 203 return InferResponse( 204 model_name=model_name, 205 infer_outputs=infer_outputs, 206 response_id=payload.id if payload.id else generate_uuid() 207 ) 208 209 210 def strtobool(val: str) -> bool: 211 """Convert a string representation of truth to True or False. 212 213 True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values 214 are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 215 'val' is anything else. 216 217 Adapted from deprecated `distutils` 218 https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py 219 """ 220 val = val.lower() 221 if val in ('y', 'yes', 't', 'true', 'on', '1'): 222 return True 223 elif val in ('n', 'no', 'f', 'false', 'off', '0'): 224 return False 225 else: 226 raise ValueError("invalid truth value %r" % (val,)) 227 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py --- a/python/kserve/kserve/utils/utils.py +++ b/python/kserve/kserve/utils/utils.py @@ -16,7 +16,7 @@ import sys import uuid from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter -from typing import Dict, Union +from typing import Dict, Union, List from kserve.utils.numpy_codec import from_np_dtype import pandas as pd @@ -149,6 +149,24 @@ dfs.append(pd.DataFrame(input)) inputs = pd.concat(dfs, axis=0) return inputs + + # Handles the following input format + # {'inputs': [ + # [{'sepal_width_(cm)': 3.5}, + # {'petal_length_(cm)': 1.4}, + # {'petal_width_(cm)': 0.2}, + # {'sepal_length_(cm)': 5.1}] + # ]} + elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict): + data: Dict[str, List] = {} + for instance in instances: + for item in instance: + for key, val in item.items(): + if key in data: + data[key].append(val) + else: + data[key] = [val] + return pd.DataFrame(data) else: return np.array(instances)
{"golden_diff": "diff --git a/python/kserve/kserve/utils/utils.py b/python/kserve/kserve/utils/utils.py\n--- a/python/kserve/kserve/utils/utils.py\n+++ b/python/kserve/kserve/utils/utils.py\n@@ -16,7 +16,7 @@\n import sys\n import uuid\n from kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\n-from typing import Dict, Union\n+from typing import Dict, Union, List\n \n from kserve.utils.numpy_codec import from_np_dtype\n import pandas as pd\n@@ -149,6 +149,24 @@\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n+\n+ # Handles the following input format\n+ # {'inputs': [\n+ # [{'sepal_width_(cm)': 3.5},\n+ # {'petal_length_(cm)': 1.4},\n+ # {'petal_width_(cm)': 0.2},\n+ # {'sepal_length_(cm)': 5.1}]\n+ # ]}\n+ elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n+ data: Dict[str, List] = {}\n+ for instance in instances:\n+ for item in instance:\n+ for key, val in item.items():\n+ if key in data:\n+ data[key].append(val)\n+ else:\n+ data[key] = [val]\n+ return pd.DataFrame(data)\n else:\n return np.array(instances)\n", "issue": "KServe lgbserver runtime error input for v1 endpoint\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nlgbserver used to support both of following inputs formats prior to 0.11 release.\r\n\r\n```python\r\nrequest = {'inputs' : [{'sepal_width_(cm)': {0: 3.5}, \r\n'petal_length_(cm)': {0: 1.4}, \r\n'petal_width_(cm)': {0: 0.2},\r\n'sepal_length_(cm)': {0: 5.1} }]}\r\n```\r\n\r\n```python\r\nrequest2 = {'inputs': [\r\n[{'sepal_width_(cm)': 3.5}, \r\n {'petal_length_(cm)': 1.4}, \r\n {'petal_width_(cm)': 0.2}, \r\n {'sepal_length_(cm)': 5.1}]\r\n ] }\r\n```\r\n\r\nKServe only documented the first input format https://kserve.github.io/website/0.11/modelserving/v1beta1/lightgbm\r\n\r\nand in 0.11 the second input format stop working with following error\r\n\r\n```bash\r\n2023-11-03 09:06:02.099 32367 kserve ERROR [inference_error_handler():89] Exception:\r\nTraceback (most recent call last):\r\n File \"/Users/dsun20/kserve/python/lgbserver/lgbserver/model.py\", line 62, in predict\r\n result = self._booster.predict(instances)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 4220, in predict\r\n return predictor.predict(\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1047, in predict\r\n preds, nrow = self.__pred_for_np2d(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1187, in __pred_for_np2d\r\n return self.__inner_predict_np2d(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/lib/python3.11/site-packages/lightgbm/basic.py\", line 1127, in __inner_predict_np2d\r\n data = np.array(mat.reshape(mat.size), dtype=np.float32)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nTypeError: float() argument must be a string or a real number, not 'dict'\r\n```\r\n\r\n\r\n**What did you expect to happen:**\r\n\r\n\r\n**What's the InferenceService yaml:**\r\n[To help us debug please run `kubectl get isvc $name -n $namespace -oyaml` and paste the output]\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KServe Version:\r\n- Kubeflow version:\r\n- Cloud Environment:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- 
Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if \"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", 
\"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}], "after_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport uuid\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import InferParameter\nfrom typing import Dict, Union, List\n\nfrom kserve.utils.numpy_codec import from_np_dtype\nimport pandas as pd\nimport numpy as np\nimport psutil\nfrom cloudevents.conversion import to_binary, to_structured\nfrom cloudevents.http import CloudEvent\nfrom grpc import ServicerContext\nfrom kserve.protocol.infer_type import InferOutput, InferRequest, InferResponse\n\n\ndef is_running_in_k8s():\n return os.path.isdir('/var/run/secrets/kubernetes.io/')\n\n\ndef get_current_k8s_namespace():\n with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:\n return f.readline()\n\n\ndef get_default_target_namespace():\n if not is_running_in_k8s():\n return 'default'\n return get_current_k8s_namespace()\n\n\ndef get_isvc_namespace(inferenceservice):\n return inferenceservice.metadata.namespace or get_default_target_namespace()\n\n\ndef get_ig_namespace(inferencegraph):\n return inferencegraph.metadata.namespace or get_default_target_namespace()\n\n\ndef cpu_count():\n \"\"\"Get the available CPU count for this system.\n Takes the minimum value from the following locations:\n - Total system cpus available on the host.\n - CPU Affinity (if set)\n - Cgroups limit (if set)\n \"\"\"\n count = os.cpu_count()\n\n # Check CPU affinity if available\n try:\n affinity_count = len(psutil.Process().cpu_affinity())\n if affinity_count > 0:\n count = min(count, affinity_count)\n except Exception:\n pass\n\n # Check cgroups if available\n if sys.platform == \"linux\":\n try:\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_quota_us\") as f:\n quota = int(f.read())\n with open(\"/sys/fs/cgroup/cpu,cpuacct/cpu.cfs_period_us\") as f:\n period = int(f.read())\n cgroups_count = int(quota / period)\n if cgroups_count > 0:\n count = min(count, cgroups_count)\n except Exception:\n pass\n\n return count\n\n\ndef is_structured_cloudevent(body: Dict) -> bool:\n \"\"\"Returns True if the JSON request body resembles a structured CloudEvent\"\"\"\n return \"time\" in body \\\n and \"type\" in body \\\n and \"source\" in body \\\n and \"id\" in body \\\n and \"specversion\" in body \\\n and \"data\" in body\n\n\ndef create_response_cloudevent(model_name: str, response: Dict, req_attributes: Dict,\n binary_event=False) -> tuple:\n ce_attributes = {}\n\n if os.getenv(\"CE_MERGE\", \"false\").lower() == \"true\":\n if binary_event:\n ce_attributes = req_attributes\n if 
\"datacontenttype\" in ce_attributes: # Optional field so must check\n del ce_attributes[\"datacontenttype\"]\n else:\n ce_attributes = req_attributes\n\n # Remove these fields so we generate new ones\n del ce_attributes[\"id\"]\n del ce_attributes[\"time\"]\n\n ce_attributes[\"type\"] = os.getenv(\"CE_TYPE\", \"io.kserve.inference.response\")\n ce_attributes[\"source\"] = os.getenv(\"CE_SOURCE\", f\"io.kserve.inference.{model_name}\")\n\n event = CloudEvent(ce_attributes, response)\n\n if binary_event:\n event_headers, event_body = to_binary(event)\n else:\n event_headers, event_body = to_structured(event)\n\n return event_headers, event_body\n\n\ndef generate_uuid() -> str:\n return str(uuid.uuid4())\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef get_predict_input(payload: Union[Dict, InferRequest]) -> Union[np.ndarray, pd.DataFrame]:\n if isinstance(payload, Dict):\n instances = payload[\"inputs\"] if \"inputs\" in payload else payload[\"instances\"]\n if len(instances) == 0:\n return np.array(instances)\n if isinstance(instances[0], Dict):\n dfs = []\n for input in instances:\n dfs.append(pd.DataFrame(input))\n inputs = pd.concat(dfs, axis=0)\n return inputs\n\n # Handles the following input format\n # {'inputs': [\n # [{'sepal_width_(cm)': 3.5},\n # {'petal_length_(cm)': 1.4},\n # {'petal_width_(cm)': 0.2},\n # {'sepal_length_(cm)': 5.1}]\n # ]}\n elif isinstance(instances[0], List) and len(instances[0]) != 0 and isinstance(instances[0][0], Dict):\n data: Dict[str, List] = {}\n for instance in instances:\n for item in instance:\n for key, val in item.items():\n if key in data:\n data[key].append(val)\n else:\n data[key] = [val]\n return pd.DataFrame(data)\n else:\n return np.array(instances)\n\n elif isinstance(payload, InferRequest):\n content_type = ''\n parameters = payload.parameters\n if parameters:\n if isinstance(parameters.get(\"content_type\"), InferParameter):\n # for v2 grpc, we get InferParameter obj eg: {\"content_type\": string_param: \"pd\"}\n content_type = str(parameters.get(\"content_type\").string_param)\n else:\n # for v2 http, we get string eg: {\"content_type\": \"pd\"}\n content_type = parameters.get(\"content_type\")\n\n if content_type == \"pd\":\n return payload.as_dataframe()\n else:\n input = payload.inputs[0]\n return input.as_numpy()\n\n\ndef get_predict_response(payload: Union[Dict, InferRequest], result: Union[np.ndarray, pd.DataFrame],\n model_name: str) -> Union[Dict, InferResponse]:\n if isinstance(payload, Dict):\n infer_outputs = result\n if isinstance(result, pd.DataFrame):\n infer_outputs = []\n for label, row in result.iterrows():\n infer_outputs.append(row.to_dict())\n elif isinstance(result, np.ndarray):\n infer_outputs = result.tolist()\n return {\"predictions\": infer_outputs}\n elif isinstance(payload, InferRequest):\n infer_outputs = []\n if isinstance(result, pd.DataFrame):\n for col in result.columns:\n infer_output = InferOutput(\n name=col,\n shape=list(result[col].shape),\n datatype=from_np_dtype(result[col].dtype),\n data=result[col].tolist()\n )\n infer_outputs.append(infer_output)\n else:\n infer_output = InferOutput(\n name=\"output-0\",\n shape=list(result.shape),\n datatype=from_np_dtype(result.dtype),\n data=result.flatten().tolist()\n )\n 
infer_outputs.append(infer_output)\n return InferResponse(\n model_name=model_name,\n infer_outputs=infer_outputs,\n response_id=payload.id if payload.id else generate_uuid()\n )\n\n\ndef strtobool(val: str) -> bool:\n \"\"\"Convert a string representation of truth to True or False.\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n\n Adapted from deprecated `distutils`\n https://github.com/python/cpython/blob/3.11/Lib/distutils/util.py\n \"\"\"\n val = val.lower()\n if val in ('y', 'yes', 't', 'true', 'on', '1'):\n return True\n elif val in ('n', 'no', 'f', 'false', 'off', '0'):\n return False\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))\n", "path": "python/kserve/kserve/utils/utils.py"}]}
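A minimal standalone sketch of the branch this patch adds, showing how the second (previously failing) payload shape from the issue is folded into a single pandas DataFrame; pandas is assumed available, as in the module above.

```python
from typing import Dict, List
import pandas as pd

def rows_of_single_key_dicts_to_df(instances: List[List[Dict]]) -> pd.DataFrame:
    """Mirror of the new elif branch in get_predict_input: each instance is a list of
    one-key dicts; values are collected per column and merged into one DataFrame."""
    data: Dict[str, List] = {}
    for instance in instances:
        for item in instance:
            for key, val in item.items():
                data.setdefault(key, []).append(val)  # same effect as the patch's if/else
    return pd.DataFrame(data)

# The payload shape that previously raised TypeError inside LightGBM:
payload = {"inputs": [[{"sepal_width_(cm)": 3.5}, {"petal_length_(cm)": 1.4},
                       {"petal_width_(cm)": 0.2}, {"sepal_length_(cm)": 5.1}]]}
df = rows_of_single_key_dicts_to_df(payload["inputs"])
# -> one row with columns sepal_width_(cm), petal_length_(cm), petal_width_(cm), sepal_length_(cm)
print(df)
```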
num_tokens: 3,295
num_tokens_diff: 345

problem_id: gh_patches_debug_15872
source: rasdani/github-patches
task_type: git_diff
in_source_id: azavea__raster-vision-427
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fill out model-defaults for tensorflow-od Currently we only have 1 model in model_defaults.json: https://github.com/azavea/raster-vision/blob/feature/api-refactor/src/rastervision/backend/model_defaults.json#L2 We need to fill it out to include each of these configurations, matched up with each of the pretrained weights from the model zoo: #### Configs https://github.com/azavea/models/tree/master/research/object_detection/samples/configs #### Weights https://github.com/azavea/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/rastervision/backend/api.py` Content: ``` 1 # flake8: noqa 2 3 # Registry keys 4 5 BACKEND = 'BACKEND' 6 7 ## Backend Keys 8 9 TF_OBJECT_DETECTION = 'TF_OBJECT_DETECTION' 10 KERAS_CLASSIFICATION = 'KERAS_CLASSIFICATION' 11 12 ## Model keys 13 14 ### TF Object Detection 15 SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO' 16 17 ## Keras Classificaiton 18 RESNET50_IMAGENET = 'RESNET50_IMAGENET' 19 20 from .backend_config import BackendConfig 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/rastervision/backend/api.py b/src/rastervision/backend/api.py --- a/src/rastervision/backend/api.py +++ b/src/rastervision/backend/api.py @@ -13,6 +13,20 @@ ### TF Object Detection SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO' +SSD_MOBILENET_V2_COCO = 'SSD_MOBILENET_V2_COCO' +SSDLITE_MOBILENET_V2_COCO = 'SSDLITE_MOBILENET_V2_COCO' +SSD_INCEPTION_V2_COCO = 'SSD_INCEPTION_V2_COCO' +FASTER_RCNN_INCEPTION_V2_COCO = 'FASTER_RCNN_INCEPTION_V2_COCO' +FASTER_RCNN_RESNET50_COCO = 'FASTER_RCNN_RESNET50_COCO' +RFCN_RESNET101_COCO = 'RFCN_RESNET101_COCO' +FASTER_RCNN_RESNET101_COCO = 'FASTER_RCNN_RESNET101_COCO' +FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \ +'FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO' +MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \ +'MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO' +MASK_RCNN_INCEPTION_V2_COCO = 'MASK_RCNN_INCEPTION_V2_COCO' +MASK_RCNN_RESNET101_ATROUS_COCO = 'MASK_RCNN_RESNET101_ATROUS_COCO' +MASK_RCNN_RESNET50_ATROUS_COCO = 'MASK_RCNN_RESNET50_ATROUS_COCO' ## Keras Classificaiton RESNET50_IMAGENET = 'RESNET50_IMAGENET'
{"golden_diff": "diff --git a/src/rastervision/backend/api.py b/src/rastervision/backend/api.py\n--- a/src/rastervision/backend/api.py\n+++ b/src/rastervision/backend/api.py\n@@ -13,6 +13,20 @@\n \n ### TF Object Detection\n SSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO'\n+SSD_MOBILENET_V2_COCO = 'SSD_MOBILENET_V2_COCO'\n+SSDLITE_MOBILENET_V2_COCO = 'SSDLITE_MOBILENET_V2_COCO'\n+SSD_INCEPTION_V2_COCO = 'SSD_INCEPTION_V2_COCO'\n+FASTER_RCNN_INCEPTION_V2_COCO = 'FASTER_RCNN_INCEPTION_V2_COCO'\n+FASTER_RCNN_RESNET50_COCO = 'FASTER_RCNN_RESNET50_COCO'\n+RFCN_RESNET101_COCO = 'RFCN_RESNET101_COCO'\n+FASTER_RCNN_RESNET101_COCO = 'FASTER_RCNN_RESNET101_COCO'\n+FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n+'FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\n+MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n+'MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\n+MASK_RCNN_INCEPTION_V2_COCO = 'MASK_RCNN_INCEPTION_V2_COCO'\n+MASK_RCNN_RESNET101_ATROUS_COCO = 'MASK_RCNN_RESNET101_ATROUS_COCO'\n+MASK_RCNN_RESNET50_ATROUS_COCO = 'MASK_RCNN_RESNET50_ATROUS_COCO'\n \n ## Keras Classificaiton\n RESNET50_IMAGENET = 'RESNET50_IMAGENET'\n", "issue": "Fill out model-defaults for tensorflow-od\nCurrently we only have 1 model in model_defaults.json: \r\n\r\nhttps://github.com/azavea/raster-vision/blob/feature/api-refactor/src/rastervision/backend/model_defaults.json#L2\r\n\r\nWe need to fill it out to include each of these configurations, matched up with each of the pretrained weights from the model zoo:\r\n\r\n#### Configs\r\nhttps://github.com/azavea/models/tree/master/research/object_detection/samples/configs\r\n\r\n#### Weights\r\nhttps://github.com/azavea/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md\n", "before_files": [{"content": "# flake8: noqa\n\n# Registry keys\n\nBACKEND = 'BACKEND'\n\n## Backend Keys\n\nTF_OBJECT_DETECTION = 'TF_OBJECT_DETECTION'\nKERAS_CLASSIFICATION = 'KERAS_CLASSIFICATION'\n\n## Model keys\n\n### TF Object Detection\nSSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO'\n\n## Keras Classificaiton\nRESNET50_IMAGENET = 'RESNET50_IMAGENET'\n\nfrom .backend_config import BackendConfig\n", "path": "src/rastervision/backend/api.py"}], "after_files": [{"content": "# flake8: noqa\n\n# Registry keys\n\nBACKEND = 'BACKEND'\n\n## Backend Keys\n\nTF_OBJECT_DETECTION = 'TF_OBJECT_DETECTION'\nKERAS_CLASSIFICATION = 'KERAS_CLASSIFICATION'\n\n## Model keys\n\n### TF Object Detection\nSSD_MOBILENET_V1_COCO = 'SSD_MOBILENET_V1_COCO'\nSSD_MOBILENET_V2_COCO = 'SSD_MOBILENET_V2_COCO'\nSSDLITE_MOBILENET_V2_COCO = 'SSDLITE_MOBILENET_V2_COCO'\nSSD_INCEPTION_V2_COCO = 'SSD_INCEPTION_V2_COCO'\nFASTER_RCNN_INCEPTION_V2_COCO = 'FASTER_RCNN_INCEPTION_V2_COCO'\nFASTER_RCNN_RESNET50_COCO = 'FASTER_RCNN_RESNET50_COCO'\nRFCN_RESNET101_COCO = 'RFCN_RESNET101_COCO'\nFASTER_RCNN_RESNET101_COCO = 'FASTER_RCNN_RESNET101_COCO'\nFASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n'FASTER_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\nMASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO = \\\n'MASK_RCNN_INCEPTION_RESNET_V2_ATROUS_COCO'\nMASK_RCNN_INCEPTION_V2_COCO = 'MASK_RCNN_INCEPTION_V2_COCO'\nMASK_RCNN_RESNET101_ATROUS_COCO = 'MASK_RCNN_RESNET101_ATROUS_COCO'\nMASK_RCNN_RESNET50_ATROUS_COCO = 'MASK_RCNN_RESNET50_ATROUS_COCO'\n\n## Keras Classificaiton\nRESNET50_IMAGENET = 'RESNET50_IMAGENET'\n\nfrom .backend_config import BackendConfig\n", "path": "src/rastervision/backend/api.py"}]}
num_tokens: 545
num_tokens_diff: 441

problem_id: gh_patches_debug_37445
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-4815
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `op_builder/gptq.py` Content: ``` 1 import os 2 import torch 3 import re 4 5 from .builder import Builder 6 from .utils import append_nvcc_threads, get_cuda_cc_flag 7 8 class GPTQBuilder(Builder): 9 10 NAME = "cu_gptq" 11 PREBUILT_IMPORT_PATH = "colossalai._C.cu_gptq" 12 13 def __init__(self): 14 super().__init__(name=GPTQBuilder.NAME, 15 prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) 16 17 18 def include_dirs(self): 19 ret = [self.csrc_abs_path("gptq"), self.get_cuda_home_include()] 20 return ret 21 22 def sources_files(self): 23 ret = [ 24 self.csrc_abs_path(fname) for fname in [ 25 'gptq/linear_gptq.cpp', 26 'gptq/column_remap.cu', 27 'gptq/cuda_buffers.cu', 28 'gptq/q4_matmul.cu', 29 'gptq/q4_matrix.cu' 30 ] 31 ] 32 return ret 33 34 def cxx_flags(self): 35 return ['-O3'] + self.version_dependent_macros 36 37 def nvcc_flags(self): 38 extra_cuda_flags = ['-v', 39 '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', 40 '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', "-lcublas", "-std=c++17" 41 ] 42 43 44 for arch in torch.cuda.get_arch_list(): 45 res = re.search(r'sm_(\d+)', arch) 46 if res: 47 arch_cap = res[1] 48 if int(arch_cap) >= 80: 49 extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}']) 50 51 ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags 52 return append_nvcc_threads(ret) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/op_builder/gptq.py b/op_builder/gptq.py --- a/op_builder/gptq.py +++ b/op_builder/gptq.py @@ -1,19 +1,17 @@ -import os -import torch import re +import torch + from .builder import Builder -from .utils import append_nvcc_threads, get_cuda_cc_flag +from .utils import append_nvcc_threads -class GPTQBuilder(Builder): +class GPTQBuilder(Builder): NAME = "cu_gptq" PREBUILT_IMPORT_PATH = "colossalai._C.cu_gptq" def __init__(self): - super().__init__(name=GPTQBuilder.NAME, - prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) - + super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH) def include_dirs(self): ret = [self.csrc_abs_path("gptq"), self.get_cuda_home_include()] @@ -21,32 +19,38 @@ def sources_files(self): ret = [ - self.csrc_abs_path(fname) for fname in [ - 'gptq/linear_gptq.cpp', - 'gptq/column_remap.cu', - 'gptq/cuda_buffers.cu', - 'gptq/q4_matmul.cu', - 'gptq/q4_matrix.cu' + self.csrc_abs_path(fname) + for fname in [ + "gptq/linear_gptq.cpp", + "gptq/column_remap.cu", + "gptq/cuda_buffers.cu", + "gptq/q4_matmul.cu", + "gptq/q4_matrix.cu", ] ] return ret def cxx_flags(self): - return ['-O3'] + self.version_dependent_macros + return ["-O3"] + self.version_dependent_macros def nvcc_flags(self): - extra_cuda_flags = ['-v', - '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__', - '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', "-lcublas", "-std=c++17" + extra_cuda_flags = [ + "-v", + "-std=c++14", + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "-U__CUDA_NO_HALF2_OPERATORS__", + "-DTHRUST_IGNORE_CUB_VERSION_CHECK", + "-lcublas", + "-std=c++17", ] - for arch in torch.cuda.get_arch_list(): - res = re.search(r'sm_(\d+)', arch) + res = re.search(r"sm_(\d+)", arch) if res: arch_cap = res[1] if int(arch_cap) >= 80: - extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}']) + extra_cuda_flags.extend(["-gencode", f"arch=compute_{arch_cap},code={arch}"]) - ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags - return append_nvcc_threads(ret) \ No newline at end of file + ret = ["-O3", "--use_fast_math"] + self.version_dependent_macros + extra_cuda_flags + return append_nvcc_threads(ret)
{"golden_diff": "diff --git a/op_builder/gptq.py b/op_builder/gptq.py\n--- a/op_builder/gptq.py\n+++ b/op_builder/gptq.py\n@@ -1,19 +1,17 @@\n-import os\n-import torch\n import re\n \n+import torch\n+\n from .builder import Builder\n-from .utils import append_nvcc_threads, get_cuda_cc_flag\n+from .utils import append_nvcc_threads\n \n-class GPTQBuilder(Builder):\n \n+class GPTQBuilder(Builder):\n NAME = \"cu_gptq\"\n PREBUILT_IMPORT_PATH = \"colossalai._C.cu_gptq\"\n \n def __init__(self):\n- super().__init__(name=GPTQBuilder.NAME,\n- prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n-\n+ super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n \n def include_dirs(self):\n ret = [self.csrc_abs_path(\"gptq\"), self.get_cuda_home_include()]\n@@ -21,32 +19,38 @@\n \n def sources_files(self):\n ret = [\n- self.csrc_abs_path(fname) for fname in [\n- 'gptq/linear_gptq.cpp',\n- 'gptq/column_remap.cu',\n- 'gptq/cuda_buffers.cu',\n- 'gptq/q4_matmul.cu',\n- 'gptq/q4_matrix.cu'\n+ self.csrc_abs_path(fname)\n+ for fname in [\n+ \"gptq/linear_gptq.cpp\",\n+ \"gptq/column_remap.cu\",\n+ \"gptq/cuda_buffers.cu\",\n+ \"gptq/q4_matmul.cu\",\n+ \"gptq/q4_matrix.cu\",\n ]\n ]\n return ret\n \n def cxx_flags(self):\n- return ['-O3'] + self.version_dependent_macros\n+ return [\"-O3\"] + self.version_dependent_macros\n \n def nvcc_flags(self):\n- extra_cuda_flags = ['-v',\n- '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',\n- '-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', \"-lcublas\", \"-std=c++17\"\n+ extra_cuda_flags = [\n+ \"-v\",\n+ \"-std=c++14\",\n+ \"-U__CUDA_NO_HALF_OPERATORS__\",\n+ \"-U__CUDA_NO_HALF_CONVERSIONS__\",\n+ \"-U__CUDA_NO_HALF2_OPERATORS__\",\n+ \"-DTHRUST_IGNORE_CUB_VERSION_CHECK\",\n+ \"-lcublas\",\n+ \"-std=c++17\",\n ]\n \n-\n for arch in torch.cuda.get_arch_list():\n- res = re.search(r'sm_(\\d+)', arch)\n+ res = re.search(r\"sm_(\\d+)\", arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 80:\n- extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])\n+ extra_cuda_flags.extend([\"-gencode\", f\"arch=compute_{arch_cap},code={arch}\"])\n \n- ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags\n- return append_nvcc_threads(ret)\n\\ No newline at end of file\n+ ret = [\"-O3\", \"--use_fast_math\"] + self.version_dependent_macros + extra_cuda_flags\n+ return append_nvcc_threads(ret)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import os\nimport torch\nimport re\n\nfrom .builder import Builder\nfrom .utils import append_nvcc_threads, get_cuda_cc_flag\n\nclass GPTQBuilder(Builder):\n\n NAME = \"cu_gptq\"\n PREBUILT_IMPORT_PATH = \"colossalai._C.cu_gptq\"\n\n def __init__(self):\n super().__init__(name=GPTQBuilder.NAME,\n prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n\n\n def include_dirs(self):\n ret = [self.csrc_abs_path(\"gptq\"), self.get_cuda_home_include()]\n return ret\n\n def sources_files(self):\n ret = [\n self.csrc_abs_path(fname) for fname in [\n 'gptq/linear_gptq.cpp',\n 'gptq/column_remap.cu',\n 'gptq/cuda_buffers.cu',\n 'gptq/q4_matmul.cu',\n 'gptq/q4_matrix.cu'\n ]\n ]\n return ret\n\n def cxx_flags(self):\n return ['-O3'] + self.version_dependent_macros\n\n def nvcc_flags(self):\n extra_cuda_flags = ['-v',\n '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', '-U__CUDA_NO_HALF_CONVERSIONS__',\n 
'-U__CUDA_NO_HALF2_OPERATORS__', '-DTHRUST_IGNORE_CUB_VERSION_CHECK', \"-lcublas\", \"-std=c++17\"\n ]\n\n\n for arch in torch.cuda.get_arch_list():\n res = re.search(r'sm_(\\d+)', arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 80:\n extra_cuda_flags.extend(['-gencode', f'arch=compute_{arch_cap},code={arch}'])\n\n ret = ['-O3', '--use_fast_math'] + self.version_dependent_macros + extra_cuda_flags\n return append_nvcc_threads(ret)", "path": "op_builder/gptq.py"}], "after_files": [{"content": "import re\n\nimport torch\n\nfrom .builder import Builder\nfrom .utils import append_nvcc_threads\n\n\nclass GPTQBuilder(Builder):\n NAME = \"cu_gptq\"\n PREBUILT_IMPORT_PATH = \"colossalai._C.cu_gptq\"\n\n def __init__(self):\n super().__init__(name=GPTQBuilder.NAME, prebuilt_import_path=GPTQBuilder.PREBUILT_IMPORT_PATH)\n\n def include_dirs(self):\n ret = [self.csrc_abs_path(\"gptq\"), self.get_cuda_home_include()]\n return ret\n\n def sources_files(self):\n ret = [\n self.csrc_abs_path(fname)\n for fname in [\n \"gptq/linear_gptq.cpp\",\n \"gptq/column_remap.cu\",\n \"gptq/cuda_buffers.cu\",\n \"gptq/q4_matmul.cu\",\n \"gptq/q4_matrix.cu\",\n ]\n ]\n return ret\n\n def cxx_flags(self):\n return [\"-O3\"] + self.version_dependent_macros\n\n def nvcc_flags(self):\n extra_cuda_flags = [\n \"-v\",\n \"-std=c++14\",\n \"-U__CUDA_NO_HALF_OPERATORS__\",\n \"-U__CUDA_NO_HALF_CONVERSIONS__\",\n \"-U__CUDA_NO_HALF2_OPERATORS__\",\n \"-DTHRUST_IGNORE_CUB_VERSION_CHECK\",\n \"-lcublas\",\n \"-std=c++17\",\n ]\n\n for arch in torch.cuda.get_arch_list():\n res = re.search(r\"sm_(\\d+)\", arch)\n if res:\n arch_cap = res[1]\n if int(arch_cap) >= 80:\n extra_cuda_flags.extend([\"-gencode\", f\"arch=compute_{arch_cap},code={arch}\"])\n\n ret = [\"-O3\", \"--use_fast_math\"] + self.version_dependent_macros + extra_cuda_flags\n return append_nvcc_threads(ret)\n", "path": "op_builder/gptq.py"}]}
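The gptq builder's only real logic is the per-architecture flag selection; below is a standalone sketch of that loop using the same regex and compute-capability threshold as the builder above (illustrative, not part of the patch).

```python
import re
import torch

def ampere_plus_gencode_flags():
    """Return -gencode flags for every arch in torch.cuda.get_arch_list() with
    compute capability >= 8.0, mirroring GPTQBuilder.nvcc_flags."""
    flags = []
    for arch in torch.cuda.get_arch_list():  # e.g. ["sm_70", "sm_80", "sm_86"]
        m = re.search(r"sm_(\d+)", arch)
        if m and int(m.group(1)) >= 80:
            flags.extend(["-gencode", f"arch=compute_{m.group(1)},code={arch}"])
    return flags

# Example: a PyTorch build compiled for sm_80 and sm_86 yields
# ["-gencode", "arch=compute_80,code=sm_80", "-gencode", "arch=compute_86,code=sm_86"]
```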
num_tokens: 809
num_tokens_diff: 815

problem_id: gh_patches_debug_5850
source: rasdani/github-patches
task_type: git_diff
in_source_id: web2py__web2py-544
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- db.export_to_csv_file() error with python 3.7 **Describe the bug** db.export_to_csv_file() error with python 3.7 ``` TypeError: a bytes-like object is required, not 'str' ``` in ``` web2py/gluon/packages/dal/pydal/base.py", line 844, in export_to_csv_file ofile.write('TABLE %s\r\n' % table) ``` **To Reproduce** ``` Bens-iMac:web2py ben$ pipenv run python web2py.py -M -S pacificW /Users/ben/.local/share/virtualenvs/web2py-6f1t1lX6/lib/python3.7/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use "pip install psycopg2-binary" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>. """) web2py Web Framework Created by Massimo Di Pierro, Copyright 2007-2018 Version 2.17.1-stable+timestamp.2018.08.05.17.57.00 Database drivers available: sqlite3, psycopg2, imaplib, pymysql, pyodbc WARNING:web2py:import IPython error; use default python shell Python 3.7.0 (default, Jul 23 2018, 20:22:55) [Clang 9.1.0 (clang-902.0.39.2)] on darwin Type "help", "copyright", "credits" or "license" for more information. (InteractiveConsole) >>> db.export_to_csv_file(open('somefile.csv', 'wb')) Traceback (most recent call last): File "<console>", line 1, in <module> File "/Users/ben/Documents/programs/web2py/gluon/packages/dal/pydal/base.py", line 844, in export_to_csv_file ofile.write('TABLE %s\r\n' % table) TypeError: a bytes-like object is required, not 'str' >>> ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gluon/dal/adapters/oracle.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import base64 3 import datetime 4 import re 5 6 from .._globals import IDENTITY 7 from .._load import cx_Oracle 8 from .base import BaseAdapter 9 10 class OracleAdapter(BaseAdapter): 11 drivers = ('cx_Oracle',) 12 13 commit_on_alter_table = False 14 types = { 15 'boolean': 'CHAR(1)', 16 'string': 'VARCHAR2(%(length)s)', 17 'text': 'CLOB', 18 'json': 'CLOB', 19 'password': 'VARCHAR2(%(length)s)', 20 'blob': 'CLOB', 21 'upload': 'VARCHAR2(%(length)s)', 22 'integer': 'INT', 23 'bigint': 'NUMBER', 24 'float': 'FLOAT', 25 'double': 'BINARY_DOUBLE', 26 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 27 'date': 'DATE', 28 'time': 'CHAR(8)', 29 'datetime': 'DATE', 30 'id': 'NUMBER PRIMARY KEY', 31 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 32 'list:integer': 'CLOB', 33 'list:string': 'CLOB', 34 'list:reference': 'CLOB', 35 'big-id': 'NUMBER PRIMARY KEY', 36 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 37 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 38 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', 39 } 40 41 42 def trigger_name(self,tablename): 43 return '%s_trigger' % tablename 44 45 def LEFT_JOIN(self): 46 return 'LEFT OUTER JOIN' 47 48 def RANDOM(self): 49 return 'dbms_random.value' 50 51 def NOT_NULL(self,default,field_type): 52 return 'DEFAULT %s NOT NULL' % 
self.represent(default,field_type) 53 54 def REGEXP(self, first, second): 55 return 'REGEXP_LIKE(%s, %s)' % (self.expand(first), 56 self.expand(second, 'string')) 57 58 def _drop(self,table,mode): 59 sequence_name = table._sequence_name 60 return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name] 61 62 def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): 63 if limitby: 64 (lmin, lmax) = limitby 65 if len(sql_w) > 1: 66 sql_w_row = sql_w + ' AND w_row > %i' % lmin 67 else: 68 sql_w_row = 'WHERE w_row > %i' % lmin 69 return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) 70 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) 71 72 def constraint_name(self, tablename, fieldname): 73 constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) 74 if len(constraint_name)>30: 75 constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) 76 return constraint_name 77 78 def represent_exceptions(self, obj, fieldtype): 79 if fieldtype == 'blob': 80 obj = base64.b64encode(str(obj)) 81 return ":CLOB('%s')" % obj 82 elif fieldtype == 'date': 83 if isinstance(obj, (datetime.date, datetime.datetime)): 84 obj = obj.isoformat()[:10] 85 else: 86 obj = str(obj) 87 return "to_date('%s','yyyy-mm-dd')" % obj 88 elif fieldtype == 'datetime': 89 if isinstance(obj, datetime.datetime): 90 obj = obj.isoformat()[:19].replace('T',' ') 91 elif isinstance(obj, datetime.date): 92 obj = obj.isoformat()[:10]+' 00:00:00' 93 else: 94 obj = str(obj) 95 return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj 96 return None 97 98 def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', 99 credential_decoder=IDENTITY, driver_args={}, 100 adapter_args={}, do_connect=True, after_connection=None): 101 self.db = db 102 self.dbengine = "oracle" 103 self.uri = uri 104 if do_connect: self.find_driver(adapter_args,uri) 105 self.pool_size = pool_size 106 self.folder = folder 107 self.db_codec = db_codec 108 self._after_connection = after_connection 109 self.find_or_make_work_folder() 110 ruri = uri.split('://',1)[1] 111 if not 'threaded' in driver_args: 112 driver_args['threaded']=True 113 def connector(uri=ruri,driver_args=driver_args): 114 return self.driver.connect(uri,**driver_args) 115 self.connector = connector 116 if do_connect: self.reconnect() 117 118 def after_connection(self): 119 self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 120 self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") 121 122 oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") 123 124 def execute(self, command, args=None): 125 args = args or [] 126 i = 1 127 while True: 128 m = self.oracle_fix.match(command) 129 if not m: 130 break 131 command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] 132 args.append(m.group('clob')[6:-2].replace("''", "'")) 133 i += 1 134 if command[-1:]==';': 135 command = command[:-1] 136 return self.log_execute(command, args) 137 138 def create_sequence_and_triggers(self, query, table, **args): 139 tablename = table._tablename 140 id_name = table._id.name 141 sequence_name = table._sequence_name 142 trigger_name = table._trigger_name 143 self.execute(query) 144 self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name) 145 self.execute(""" 146 CREATE 
OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW 147 DECLARE 148 curr_val NUMBER; 149 diff_val NUMBER; 150 PRAGMA autonomous_transaction; 151 BEGIN 152 IF :NEW.%(id)s IS NOT NULL THEN 153 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 154 diff_val := :NEW.%(id)s - curr_val - 1; 155 IF diff_val != 0 THEN 156 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val; 157 EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val; 158 EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1'; 159 END IF; 160 END IF; 161 SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL; 162 END; 163 """ % dict(trigger_name=trigger_name, tablename=tablename, 164 sequence_name=sequence_name,id=id_name)) 165 166 def lastrowid(self,table): 167 sequence_name = table._sequence_name 168 self.execute('SELECT %s.currval FROM dual;' % sequence_name) 169 return long(self.cursor.fetchone()[0]) 170 171 #def parse_value(self, value, field_type, blob_decode=True): 172 # if blob_decode and isinstance(value, cx_Oracle.LOB): 173 # try: 174 # value = value.read() 175 # except self.driver.ProgrammingError: 176 # # After a subsequent fetch the LOB value is not valid anymore 177 # pass 178 # return BaseAdapter.parse_value(self, value, field_type, blob_decode) 179 180 def _fetchall(self): 181 if any(x[1]==cx_Oracle.LOB for x in self.cursor.description): 182 return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ 183 for c in r]) for r in self.cursor] 184 else: 185 return self.cursor.fetchall() 186 187 def sqlsafe_table(self, tablename, ot=None): 188 if ot is not None: 189 return (self.QUOTE_TEMPLATE + ' ' \ 190 + self.QUOTE_TEMPLATE) % (ot, tablename) 191 return self.QUOTE_TEMPLATE % tablename 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gluon/dal/adapters/oracle.py b/gluon/dal/adapters/oracle.py --- a/gluon/dal/adapters/oracle.py +++ b/gluon/dal/adapters/oracle.py @@ -136,7 +136,7 @@ return self.log_execute(command, args) def create_sequence_and_triggers(self, query, table, **args): - tablename = table._tablename + tablename = table._rname or table._tablename id_name = table._id.name sequence_name = table._sequence_name trigger_name = table._trigger_name
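The golden diff above changes the Oracle adapter's sequence/trigger creation, not the CSV export path from the traceback. For the user-facing TypeError itself, a common Python 3 workaround is to pass a text-mode file object, since export_to_csv_file writes str; the exact newline and encoding handling may vary with the pydal version, so treat this as a sketch.

```python
# Sketch of a Python 3 friendly call; 'db' is the DAL instance from the shell session above.
# Text mode avoids the "a bytes-like object is required, not 'str'" TypeError.
with open('somefile.csv', 'w', newline='', encoding='utf-8') as ofile:
    db.export_to_csv_file(ofile)
```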
{"golden_diff": "diff --git a/gluon/dal/adapters/oracle.py b/gluon/dal/adapters/oracle.py\n--- a/gluon/dal/adapters/oracle.py\n+++ b/gluon/dal/adapters/oracle.py\n@@ -136,7 +136,7 @@\n return self.log_execute(command, args)\n \n def create_sequence_and_triggers(self, query, table, **args):\n- tablename = table._tablename\n+ tablename = table._rname or table._tablename\n id_name = table._id.name\n sequence_name = table._sequence_name\n trigger_name = table._trigger_name\n", "issue": "db.export_to_csv_file() error with python 3.7\n**Describe the bug**\r\ndb.export_to_csv_file() error with python 3.7\r\n```\r\nTypeError: a bytes-like object is required, not 'str'\r\n```\r\nin\r\n```\r\nweb2py/gluon/packages/dal/pydal/base.py\", line 844, in export_to_csv_file\r\n ofile.write('TABLE %s\\r\\n' % table)\r\n```\r\n\r\n**To Reproduce**\r\n```\r\nBens-iMac:web2py ben$ pipenv run python web2py.py -M -S pacificW\r\n/Users/ben/.local/share/virtualenvs/web2py-6f1t1lX6/lib/python3.7/site-packages/psycopg2/__init__.py:144: UserWarning: The psycopg2 wheel package will be renamed from release 2.8; in order to keep installing from binary please use \"pip install psycopg2-binary\" instead. For details see: <http://initd.org/psycopg/docs/install.html#binary-install-from-pypi>.\r\n \"\"\")\r\nweb2py Web Framework\r\nCreated by Massimo Di Pierro, Copyright 2007-2018\r\nVersion 2.17.1-stable+timestamp.2018.08.05.17.57.00\r\nDatabase drivers available: sqlite3, psycopg2, imaplib, pymysql, pyodbc\r\nWARNING:web2py:import IPython error; use default python shell\r\nPython 3.7.0 (default, Jul 23 2018, 20:22:55) \r\n[Clang 9.1.0 (clang-902.0.39.2)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n(InteractiveConsole)\r\n>>> db.export_to_csv_file(open('somefile.csv', 'wb'))\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/Users/ben/Documents/programs/web2py/gluon/packages/dal/pydal/base.py\", line 844, in export_to_csv_file\r\n ofile.write('TABLE %s\\r\\n' % table)\r\nTypeError: a bytes-like object is required, not 'str'\r\n>>> \r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport base64\nimport datetime\nimport re\n\nfrom .._globals import IDENTITY\nfrom .._load import cx_Oracle\nfrom .base import BaseAdapter\n\nclass OracleAdapter(BaseAdapter):\n drivers = ('cx_Oracle',)\n\n commit_on_alter_table = False\n types = {\n 'boolean': 'CHAR(1)',\n 'string': 'VARCHAR2(%(length)s)',\n 'text': 'CLOB',\n 'json': 'CLOB',\n 'password': 'VARCHAR2(%(length)s)',\n 'blob': 'CLOB',\n 'upload': 'VARCHAR2(%(length)s)',\n 'integer': 'INT',\n 'bigint': 'NUMBER',\n 'float': 'FLOAT',\n 'double': 'BINARY_DOUBLE',\n 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',\n 'date': 'DATE',\n 'time': 'CHAR(8)',\n 'datetime': 'DATE',\n 'id': 'NUMBER PRIMARY KEY',\n 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'list:integer': 'CLOB',\n 'list:string': 'CLOB',\n 'list:reference': 'CLOB',\n 'big-id': 'NUMBER PRIMARY KEY',\n 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE 
%(on_delete_action)s',\n }\n\n\n def trigger_name(self,tablename):\n return '%s_trigger' % tablename\n\n def LEFT_JOIN(self):\n return 'LEFT OUTER JOIN'\n\n def RANDOM(self):\n return 'dbms_random.value'\n\n def NOT_NULL(self,default,field_type):\n return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)\n\n def REGEXP(self, first, second):\n return 'REGEXP_LIKE(%s, %s)' % (self.expand(first),\n self.expand(second, 'string'))\n\n def _drop(self,table,mode):\n sequence_name = table._sequence_name\n return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name]\n\n def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):\n if limitby:\n (lmin, lmax) = limitby\n if len(sql_w) > 1:\n sql_w_row = sql_w + ' AND w_row > %i' % lmin\n else:\n sql_w_row = 'WHERE w_row > %i' % lmin\n return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)\n return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)\n\n def constraint_name(self, tablename, fieldname):\n constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)\n if len(constraint_name)>30:\n constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])\n return constraint_name\n\n def represent_exceptions(self, obj, fieldtype):\n if fieldtype == 'blob':\n obj = base64.b64encode(str(obj))\n return \":CLOB('%s')\" % obj\n elif fieldtype == 'date':\n if isinstance(obj, (datetime.date, datetime.datetime)):\n obj = obj.isoformat()[:10]\n else:\n obj = str(obj)\n return \"to_date('%s','yyyy-mm-dd')\" % obj\n elif fieldtype == 'datetime':\n if isinstance(obj, datetime.datetime):\n obj = obj.isoformat()[:19].replace('T',' ')\n elif isinstance(obj, datetime.date):\n obj = obj.isoformat()[:10]+' 00:00:00'\n else:\n obj = str(obj)\n return \"to_date('%s','yyyy-mm-dd hh24:mi:ss')\" % obj\n return None\n\n def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',\n credential_decoder=IDENTITY, driver_args={},\n adapter_args={}, do_connect=True, after_connection=None):\n self.db = db\n self.dbengine = \"oracle\"\n self.uri = uri\n if do_connect: self.find_driver(adapter_args,uri)\n self.pool_size = pool_size\n self.folder = folder\n self.db_codec = db_codec\n self._after_connection = after_connection\n self.find_or_make_work_folder()\n ruri = uri.split('://',1)[1]\n if not 'threaded' in driver_args:\n driver_args['threaded']=True\n def connector(uri=ruri,driver_args=driver_args):\n return self.driver.connect(uri,**driver_args)\n self.connector = connector\n if do_connect: self.reconnect()\n\n def after_connection(self):\n self.execute(\"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\")\n self.execute(\"ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\")\n\n oracle_fix = re.compile(\"[^']*('[^']*'[^']*)*\\:(?P<clob>CLOB\\('([^']+|'')*'\\))\")\n\n def execute(self, command, args=None):\n args = args or []\n i = 1\n while True:\n m = self.oracle_fix.match(command)\n if not m:\n break\n command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]\n args.append(m.group('clob')[6:-2].replace(\"''\", \"'\"))\n i += 1\n if command[-1:]==';':\n command = command[:-1]\n return self.log_execute(command, args)\n\n def create_sequence_and_triggers(self, query, table, **args):\n tablename = table._tablename\n id_name = table._id.name\n sequence_name = table._sequence_name\n trigger_name = 
table._trigger_name\n self.execute(query)\n self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)\n self.execute(\"\"\"\n CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW\n DECLARE\n curr_val NUMBER;\n diff_val NUMBER;\n PRAGMA autonomous_transaction;\n BEGIN\n IF :NEW.%(id)s IS NOT NULL THEN\n EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;\n diff_val := :NEW.%(id)s - curr_val - 1;\n IF diff_val != 0 THEN\n EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;\n EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;\n EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';\n END IF;\n END IF;\n SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;\n END;\n \"\"\" % dict(trigger_name=trigger_name, tablename=tablename,\n sequence_name=sequence_name,id=id_name))\n\n def lastrowid(self,table):\n sequence_name = table._sequence_name\n self.execute('SELECT %s.currval FROM dual;' % sequence_name)\n return long(self.cursor.fetchone()[0])\n\n #def parse_value(self, value, field_type, blob_decode=True):\n # if blob_decode and isinstance(value, cx_Oracle.LOB):\n # try:\n # value = value.read()\n # except self.driver.ProgrammingError:\n # # After a subsequent fetch the LOB value is not valid anymore\n # pass\n # return BaseAdapter.parse_value(self, value, field_type, blob_decode)\n\n def _fetchall(self):\n if any(x[1]==cx_Oracle.LOB for x in self.cursor.description):\n return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \\\n for c in r]) for r in self.cursor]\n else:\n return self.cursor.fetchall()\n\n def sqlsafe_table(self, tablename, ot=None):\n if ot is not None:\n return (self.QUOTE_TEMPLATE + ' ' \\\n + self.QUOTE_TEMPLATE) % (ot, tablename)\n return self.QUOTE_TEMPLATE % tablename\n", "path": "gluon/dal/adapters/oracle.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport base64\nimport datetime\nimport re\n\nfrom .._globals import IDENTITY\nfrom .._load import cx_Oracle\nfrom .base import BaseAdapter\n\nclass OracleAdapter(BaseAdapter):\n drivers = ('cx_Oracle',)\n\n commit_on_alter_table = False\n types = {\n 'boolean': 'CHAR(1)',\n 'string': 'VARCHAR2(%(length)s)',\n 'text': 'CLOB',\n 'json': 'CLOB',\n 'password': 'VARCHAR2(%(length)s)',\n 'blob': 'CLOB',\n 'upload': 'VARCHAR2(%(length)s)',\n 'integer': 'INT',\n 'bigint': 'NUMBER',\n 'float': 'FLOAT',\n 'double': 'BINARY_DOUBLE',\n 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',\n 'date': 'DATE',\n 'time': 'CHAR(8)',\n 'datetime': 'DATE',\n 'id': 'NUMBER PRIMARY KEY',\n 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'list:integer': 'CLOB',\n 'list:string': 'CLOB',\n 'list:reference': 'CLOB',\n 'big-id': 'NUMBER PRIMARY KEY',\n 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',\n 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',\n }\n\n\n def trigger_name(self,tablename):\n return '%s_trigger' % tablename\n\n def LEFT_JOIN(self):\n return 'LEFT OUTER JOIN'\n\n def RANDOM(self):\n return 
'dbms_random.value'\n\n def NOT_NULL(self,default,field_type):\n return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)\n\n def REGEXP(self, first, second):\n return 'REGEXP_LIKE(%s, %s)' % (self.expand(first),\n self.expand(second, 'string'))\n\n def _drop(self,table,mode):\n sequence_name = table._sequence_name\n return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name]\n\n def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):\n if limitby:\n (lmin, lmax) = limitby\n if len(sql_w) > 1:\n sql_w_row = sql_w + ' AND w_row > %i' % lmin\n else:\n sql_w_row = 'WHERE w_row > %i' % lmin\n return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)\n return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)\n\n def constraint_name(self, tablename, fieldname):\n constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)\n if len(constraint_name)>30:\n constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])\n return constraint_name\n\n def represent_exceptions(self, obj, fieldtype):\n if fieldtype == 'blob':\n obj = base64.b64encode(str(obj))\n return \":CLOB('%s')\" % obj\n elif fieldtype == 'date':\n if isinstance(obj, (datetime.date, datetime.datetime)):\n obj = obj.isoformat()[:10]\n else:\n obj = str(obj)\n return \"to_date('%s','yyyy-mm-dd')\" % obj\n elif fieldtype == 'datetime':\n if isinstance(obj, datetime.datetime):\n obj = obj.isoformat()[:19].replace('T',' ')\n elif isinstance(obj, datetime.date):\n obj = obj.isoformat()[:10]+' 00:00:00'\n else:\n obj = str(obj)\n return \"to_date('%s','yyyy-mm-dd hh24:mi:ss')\" % obj\n return None\n\n def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',\n credential_decoder=IDENTITY, driver_args={},\n adapter_args={}, do_connect=True, after_connection=None):\n self.db = db\n self.dbengine = \"oracle\"\n self.uri = uri\n if do_connect: self.find_driver(adapter_args,uri)\n self.pool_size = pool_size\n self.folder = folder\n self.db_codec = db_codec\n self._after_connection = after_connection\n self.find_or_make_work_folder()\n ruri = uri.split('://',1)[1]\n if not 'threaded' in driver_args:\n driver_args['threaded']=True\n def connector(uri=ruri,driver_args=driver_args):\n return self.driver.connect(uri,**driver_args)\n self.connector = connector\n if do_connect: self.reconnect()\n\n def after_connection(self):\n self.execute(\"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\")\n self.execute(\"ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';\")\n\n oracle_fix = re.compile(\"[^']*('[^']*'[^']*)*\\:(?P<clob>CLOB\\('([^']+|'')*'\\))\")\n\n def execute(self, command, args=None):\n args = args or []\n i = 1\n while True:\n m = self.oracle_fix.match(command)\n if not m:\n break\n command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]\n args.append(m.group('clob')[6:-2].replace(\"''\", \"'\"))\n i += 1\n if command[-1:]==';':\n command = command[:-1]\n return self.log_execute(command, args)\n\n def create_sequence_and_triggers(self, query, table, **args):\n tablename = table._rname or table._tablename\n id_name = table._id.name\n sequence_name = table._sequence_name\n trigger_name = table._trigger_name\n self.execute(query)\n self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)\n self.execute(\"\"\"\n CREATE OR 
REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW\n DECLARE\n curr_val NUMBER;\n diff_val NUMBER;\n PRAGMA autonomous_transaction;\n BEGIN\n IF :NEW.%(id)s IS NOT NULL THEN\n EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;\n diff_val := :NEW.%(id)s - curr_val - 1;\n IF diff_val != 0 THEN\n EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;\n EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;\n EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';\n END IF;\n END IF;\n SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;\n END;\n \"\"\" % dict(trigger_name=trigger_name, tablename=tablename,\n sequence_name=sequence_name,id=id_name))\n\n def lastrowid(self,table):\n sequence_name = table._sequence_name\n self.execute('SELECT %s.currval FROM dual;' % sequence_name)\n return long(self.cursor.fetchone()[0])\n\n #def parse_value(self, value, field_type, blob_decode=True):\n # if blob_decode and isinstance(value, cx_Oracle.LOB):\n # try:\n # value = value.read()\n # except self.driver.ProgrammingError:\n # # After a subsequent fetch the LOB value is not valid anymore\n # pass\n # return BaseAdapter.parse_value(self, value, field_type, blob_decode)\n\n def _fetchall(self):\n if any(x[1]==cx_Oracle.LOB for x in self.cursor.description):\n return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \\\n for c in r]) for r in self.cursor]\n else:\n return self.cursor.fetchall()\n\n def sqlsafe_table(self, tablename, ot=None):\n if ot is not None:\n return (self.QUOTE_TEMPLATE + ' ' \\\n + self.QUOTE_TEMPLATE) % (ot, tablename)\n return self.QUOTE_TEMPLATE % tablename\n", "path": "gluon/dal/adapters/oracle.py"}]}
3,240
140
gh_patches_debug_35667
rasdani/github-patches
git_diff
scrapy__scrapy-4799
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Usage of HttpCompressionMiddleware needs to be reflected in Scrapy stats ## Summary Usage of `HttpCompressionMiddleware` needs to be relfected in Scrapy stats. ## Motivation In order to estimate scrapy memory usage efficiency and prevent.. memory leaks like [this](https://stackoverflow.com/q/63936759/10884791). I will need to know: 1. number of request/response objects that can be active (can be achieved by using [`trackref`](https://docs.scrapy.org/en/latest/topics/leaks.html#debugging-memory-leaks-with-trackref) ) 2. size of memory required to store that number of request/response objects. A lot of websites use compression to reduce traffic. In this case I would like to calculate average size of **decompressed** responses to estimate p.2. Decompression process means that at some point application will require to allocate memory to store both compressed and decompressed response body and I will need to know this sizes to have more complete vision of scrapy memory usage. Also size of decompressed body will be several times more than size of compressed response and it will affect scrapy memory usage. ## Describe alternatives you've considered The easiest one - is to change priority of `DownloaderStats` middleware and check difference in `downloader/response_bytes` stats parameter. ``` custom_settings = {"DOWNLOAD_DELAY":1, "DOWNLOADER_MIDDLEWARES":{ 'scrapy.downloadermiddlewares.stats.DownloaderStats':50 } ``` Stats from quotes.toscrape.com spider (it uses `gzip` compression) with default settings: ``` {'downloader/request_bytes': 2642, 'downloader/request_count': 10, 'downloader/request_method_count/GET': 10, 'downloader/response_bytes': 24534, ``` And with changed priority of `DownloaderStats` middleware: ``` {'downloader/request_bytes': 912, # size reduced as it didn't count size of request headers populated by downloader middlewares 'downloader/request_count': 10, 'downloader/request_method_count/GET': 10, 'downloader/response_bytes': 110191, # it counted size of decompressed data ``` Average size of compressed response (by default) - 2453 bytes. Average size of decompressed response - 11019 bytes (~4.5 times more). ## Additional context Potential solution is to add something like this: ` self.stats.inc_value('decompressed_bytes', spider=spider)` into `process_response` method of `HttpCompressionMiddleware` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scrapy/downloadermiddlewares/httpcompression.py` Content: ``` 1 import io 2 import zlib 3 4 from scrapy.utils.gz import gunzip 5 from scrapy.http import Response, TextResponse 6 from scrapy.responsetypes import responsetypes 7 from scrapy.exceptions import NotConfigured 8 9 10 ACCEPTED_ENCODINGS = [b'gzip', b'deflate'] 11 12 try: 13 import brotli 14 ACCEPTED_ENCODINGS.append(b'br') 15 except ImportError: 16 pass 17 18 try: 19 import zstandard 20 ACCEPTED_ENCODINGS.append(b'zstd') 21 except ImportError: 22 pass 23 24 25 class HttpCompressionMiddleware: 26 """This middleware allows compressed (gzip, deflate) traffic to be 27 sent/received from web sites""" 28 @classmethod 29 def from_crawler(cls, crawler): 30 if not crawler.settings.getbool('COMPRESSION_ENABLED'): 31 raise NotConfigured 32 return cls() 33 34 def process_request(self, request, spider): 35 request.headers.setdefault('Accept-Encoding', 36 b", ".join(ACCEPTED_ENCODINGS)) 37 38 def process_response(self, request, response, spider): 39 40 if request.method == 'HEAD': 41 return response 42 if isinstance(response, Response): 43 content_encoding = response.headers.getlist('Content-Encoding') 44 if content_encoding: 45 encoding = content_encoding.pop() 46 decoded_body = self._decode(response.body, encoding.lower()) 47 respcls = responsetypes.from_args( 48 headers=response.headers, url=response.url, body=decoded_body 49 ) 50 kwargs = dict(cls=respcls, body=decoded_body) 51 if issubclass(respcls, TextResponse): 52 # force recalculating the encoding until we make sure the 53 # responsetypes guessing is reliable 54 kwargs['encoding'] = None 55 response = response.replace(**kwargs) 56 if not content_encoding: 57 del response.headers['Content-Encoding'] 58 59 return response 60 61 def _decode(self, body, encoding): 62 if encoding == b'gzip' or encoding == b'x-gzip': 63 body = gunzip(body) 64 65 if encoding == b'deflate': 66 try: 67 body = zlib.decompress(body) 68 except zlib.error: 69 # ugly hack to work with raw deflate content that may 70 # be sent by microsoft servers. For more information, see: 71 # http://carsten.codimi.de/gzip.yaws/ 72 # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx 73 # http://www.gzip.org/zlib/zlib_faq.html#faq38 74 body = zlib.decompress(body, -15) 75 if encoding == b'br' and b'br' in ACCEPTED_ENCODINGS: 76 body = brotli.decompress(body) 77 if encoding == b'zstd' and b'zstd' in ACCEPTED_ENCODINGS: 78 # Using its streaming API since its simple API could handle only cases 79 # where there is content size data embedded in the frame 80 reader = zstandard.ZstdDecompressor().stream_reader(io.BytesIO(body)) 81 body = reader.read() 82 return body 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py --- a/scrapy/downloadermiddlewares/httpcompression.py +++ b/scrapy/downloadermiddlewares/httpcompression.py @@ -1,10 +1,12 @@ import io +import warnings import zlib -from scrapy.utils.gz import gunzip +from scrapy.exceptions import NotConfigured from scrapy.http import Response, TextResponse from scrapy.responsetypes import responsetypes -from scrapy.exceptions import NotConfigured +from scrapy.utils.deprecate import ScrapyDeprecationWarning +from scrapy.utils.gz import gunzip ACCEPTED_ENCODINGS = [b'gzip', b'deflate'] @@ -25,11 +27,25 @@ class HttpCompressionMiddleware: """This middleware allows compressed (gzip, deflate) traffic to be sent/received from web sites""" + def __init__(self, stats=None): + self.stats = stats + @classmethod def from_crawler(cls, crawler): if not crawler.settings.getbool('COMPRESSION_ENABLED'): raise NotConfigured - return cls() + try: + return cls(stats=crawler.stats) + except TypeError: + warnings.warn( + "HttpCompressionMiddleware subclasses must either modify " + "their '__init__' method to support a 'stats' parameter or " + "reimplement the 'from_crawler' method.", + ScrapyDeprecationWarning, + ) + result = cls() + result.stats = crawler.stats + return result def process_request(self, request, spider): request.headers.setdefault('Accept-Encoding', @@ -44,6 +60,9 @@ if content_encoding: encoding = content_encoding.pop() decoded_body = self._decode(response.body, encoding.lower()) + if self.stats: + self.stats.inc_value('httpcompression/response_bytes', len(decoded_body), spider=spider) + self.stats.inc_value('httpcompression/response_count', spider=spider) respcls = responsetypes.from_args( headers=response.headers, url=response.url, body=decoded_body )
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py\n--- a/scrapy/downloadermiddlewares/httpcompression.py\n+++ b/scrapy/downloadermiddlewares/httpcompression.py\n@@ -1,10 +1,12 @@\n import io\n+import warnings\n import zlib\n \n-from scrapy.utils.gz import gunzip\n+from scrapy.exceptions import NotConfigured\n from scrapy.http import Response, TextResponse\n from scrapy.responsetypes import responsetypes\n-from scrapy.exceptions import NotConfigured\n+from scrapy.utils.deprecate import ScrapyDeprecationWarning\n+from scrapy.utils.gz import gunzip\n \n \n ACCEPTED_ENCODINGS = [b'gzip', b'deflate']\n@@ -25,11 +27,25 @@\n class HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n+ def __init__(self, stats=None):\n+ self.stats = stats\n+\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COMPRESSION_ENABLED'):\n raise NotConfigured\n- return cls()\n+ try:\n+ return cls(stats=crawler.stats)\n+ except TypeError:\n+ warnings.warn(\n+ \"HttpCompressionMiddleware subclasses must either modify \"\n+ \"their '__init__' method to support a 'stats' parameter or \"\n+ \"reimplement the 'from_crawler' method.\",\n+ ScrapyDeprecationWarning,\n+ )\n+ result = cls()\n+ result.stats = crawler.stats\n+ return result\n \n def process_request(self, request, spider):\n request.headers.setdefault('Accept-Encoding',\n@@ -44,6 +60,9 @@\n if content_encoding:\n encoding = content_encoding.pop()\n decoded_body = self._decode(response.body, encoding.lower())\n+ if self.stats:\n+ self.stats.inc_value('httpcompression/response_bytes', len(decoded_body), spider=spider)\n+ self.stats.inc_value('httpcompression/response_count', spider=spider)\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n", "issue": "Usage of HttpCompressionMiddleware needs to be reflected in Scrapy stats\n## Summary\r\nUsage of `HttpCompressionMiddleware` needs to be relfected in Scrapy stats.\r\n## Motivation\r\nIn order to estimate scrapy memory usage efficiency and prevent.. memory leaks like [this](https://stackoverflow.com/q/63936759/10884791).\r\nI will need to know:\r\n1. number of request/response objects that can be active (can be achieved by using [`trackref`](https://docs.scrapy.org/en/latest/topics/leaks.html#debugging-memory-leaks-with-trackref) )\r\n2. size of memory required to store that number of request/response objects. \r\n\r\nA lot of websites use compression to reduce traffic. 
In this case I would like to calculate average size of **decompressed** responses to estimate p.2.\r\n\r\nDecompression process means that at some point application will require to allocate memory to store both compressed and decompressed response body and I will need to know this sizes to have more complete vision of scrapy memory usage.\r\n\r\nAlso size of decompressed body will be several times more than size of compressed response and it will affect scrapy memory usage.\r\n\r\n## Describe alternatives you've considered\r\nThe easiest one - is to change priority of `DownloaderStats` middleware and check difference in `downloader/response_bytes` stats parameter.\r\n```\r\n custom_settings = {\"DOWNLOAD_DELAY\":1,\r\n \"DOWNLOADER_MIDDLEWARES\":{\r\n 'scrapy.downloadermiddlewares.stats.DownloaderStats':50\r\n }\r\n```\r\nStats from quotes.toscrape.com spider (it uses `gzip` compression) with default settings:\r\n\r\n```\r\n{'downloader/request_bytes': 2642,\r\n 'downloader/request_count': 10,\r\n 'downloader/request_method_count/GET': 10,\r\n 'downloader/response_bytes': 24534,\r\n```\r\n \r\nAnd with changed priority of `DownloaderStats` middleware:\r\n\r\n```\r\n{'downloader/request_bytes': 912, # size reduced as it didn't count size of request headers populated by downloader middlewares\r\n 'downloader/request_count': 10,\r\n 'downloader/request_method_count/GET': 10,\r\n 'downloader/response_bytes': 110191, # it counted size of decompressed data \r\n```\r\n\r\nAverage size of compressed response (by default) - 2453 bytes.\r\nAverage size of decompressed response - 11019 bytes (~4.5 times more).\r\n\r\n## Additional context\r\nPotential solution is to add something like this:\r\n` self.stats.inc_value('decompressed_bytes', spider=spider)`\r\ninto `process_response` method of `HttpCompressionMiddleware`\n", "before_files": [{"content": "import io\nimport zlib\n\nfrom scrapy.utils.gz import gunzip\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.exceptions import NotConfigured\n\n\nACCEPTED_ENCODINGS = [b'gzip', b'deflate']\n\ntry:\n import brotli\n ACCEPTED_ENCODINGS.append(b'br')\nexcept ImportError:\n pass\n\ntry:\n import zstandard\n ACCEPTED_ENCODINGS.append(b'zstd')\nexcept ImportError:\n pass\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COMPRESSION_ENABLED'):\n raise NotConfigured\n return cls()\n\n def process_request(self, request, spider):\n request.headers.setdefault('Accept-Encoding',\n b\", \".join(ACCEPTED_ENCODINGS))\n\n def process_response(self, request, response, spider):\n\n if request.method == 'HEAD':\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist('Content-Encoding')\n if content_encoding:\n encoding = content_encoding.pop()\n decoded_body = self._decode(response.body, encoding.lower())\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs['encoding'] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers['Content-Encoding']\n\n return response\n\n def _decode(self, body, encoding):\n if 
encoding == b'gzip' or encoding == b'x-gzip':\n body = gunzip(body)\n\n if encoding == b'deflate':\n try:\n body = zlib.decompress(body)\n except zlib.error:\n # ugly hack to work with raw deflate content that may\n # be sent by microsoft servers. For more information, see:\n # http://carsten.codimi.de/gzip.yaws/\n # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx\n # http://www.gzip.org/zlib/zlib_faq.html#faq38\n body = zlib.decompress(body, -15)\n if encoding == b'br' and b'br' in ACCEPTED_ENCODINGS:\n body = brotli.decompress(body)\n if encoding == b'zstd' and b'zstd' in ACCEPTED_ENCODINGS:\n # Using its streaming API since its simple API could handle only cases\n # where there is content size data embedded in the frame\n reader = zstandard.ZstdDecompressor().stream_reader(io.BytesIO(body))\n body = reader.read()\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}], "after_files": [{"content": "import io\nimport warnings\nimport zlib\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\nfrom scrapy.utils.gz import gunzip\n\n\nACCEPTED_ENCODINGS = [b'gzip', b'deflate']\n\ntry:\n import brotli\n ACCEPTED_ENCODINGS.append(b'br')\nexcept ImportError:\n pass\n\ntry:\n import zstandard\n ACCEPTED_ENCODINGS.append(b'zstd')\nexcept ImportError:\n pass\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n def __init__(self, stats=None):\n self.stats = stats\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COMPRESSION_ENABLED'):\n raise NotConfigured\n try:\n return cls(stats=crawler.stats)\n except TypeError:\n warnings.warn(\n \"HttpCompressionMiddleware subclasses must either modify \"\n \"their '__init__' method to support a 'stats' parameter or \"\n \"reimplement the 'from_crawler' method.\",\n ScrapyDeprecationWarning,\n )\n result = cls()\n result.stats = crawler.stats\n return result\n\n def process_request(self, request, spider):\n request.headers.setdefault('Accept-Encoding',\n b\", \".join(ACCEPTED_ENCODINGS))\n\n def process_response(self, request, response, spider):\n\n if request.method == 'HEAD':\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist('Content-Encoding')\n if content_encoding:\n encoding = content_encoding.pop()\n decoded_body = self._decode(response.body, encoding.lower())\n if self.stats:\n self.stats.inc_value('httpcompression/response_bytes', len(decoded_body), spider=spider)\n self.stats.inc_value('httpcompression/response_count', spider=spider)\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = dict(cls=respcls, body=decoded_body)\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs['encoding'] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers['Content-Encoding']\n\n return response\n\n def _decode(self, body, encoding):\n if encoding == b'gzip' or encoding == b'x-gzip':\n body = gunzip(body)\n\n if encoding == b'deflate':\n try:\n body = zlib.decompress(body)\n except zlib.error:\n # ugly hack to work with raw deflate content that may\n # be sent by microsoft servers. 
For more information, see:\n # http://carsten.codimi.de/gzip.yaws/\n # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx\n # http://www.gzip.org/zlib/zlib_faq.html#faq38\n body = zlib.decompress(body, -15)\n if encoding == b'br' and b'br' in ACCEPTED_ENCODINGS:\n body = brotli.decompress(body)\n if encoding == b'zstd' and b'zstd' in ACCEPTED_ENCODINGS:\n # Using its streaming API since its simple API could handle only cases\n # where there is content size data embedded in the frame\n reader = zstandard.ZstdDecompressor().stream_reader(io.BytesIO(body))\n body = reader.read()\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}]}
1,644
471
gh_patches_debug_9872
rasdani/github-patches
git_diff
google__flax-3344
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ResNetV1 implementation is actually ResNetV1.5 [Downsampling is performed by the 3x3 convolution](https://github.com/google/flax/blob/3ea6381139a340fa7b29c74a84205d8bad18b489/examples/imagenet/models.py#L74), which means it is actually [ResNetV1.5](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/resnet_50_v1_5_for_pytorch), not ResNetV1 as claimed. (In the original version the first convolution in the block has stride 2: see [original paper](https://arxiv.org/pdf/1512.03385.pdf), the caption to Table 1.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/imagenet/models.py` Content: ``` 1 # Copyright 2023 The Flax Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Flax implementation of ResNet V1.""" 16 17 # See issue #620. 18 # pytype: disable=wrong-arg-count 19 20 from functools import partial 21 from typing import Any, Callable, Sequence, Tuple 22 23 from flax import linen as nn 24 import jax.numpy as jnp 25 26 ModuleDef = Any 27 28 29 class ResNetBlock(nn.Module): 30 """ResNet block.""" 31 32 filters: int 33 conv: ModuleDef 34 norm: ModuleDef 35 act: Callable 36 strides: Tuple[int, int] = (1, 1) 37 38 @nn.compact 39 def __call__( 40 self, 41 x, 42 ): 43 residual = x 44 y = self.conv(self.filters, (3, 3), self.strides)(x) 45 y = self.norm()(y) 46 y = self.act(y) 47 y = self.conv(self.filters, (3, 3))(y) 48 y = self.norm(scale_init=nn.initializers.zeros_init())(y) 49 50 if residual.shape != y.shape: 51 residual = self.conv( 52 self.filters, (1, 1), self.strides, name='conv_proj' 53 )(residual) 54 residual = self.norm(name='norm_proj')(residual) 55 56 return self.act(residual + y) 57 58 59 class BottleneckResNetBlock(nn.Module): 60 """Bottleneck ResNet block.""" 61 62 filters: int 63 conv: ModuleDef 64 norm: ModuleDef 65 act: Callable 66 strides: Tuple[int, int] = (1, 1) 67 68 @nn.compact 69 def __call__(self, x): 70 residual = x 71 y = self.conv(self.filters, (1, 1))(x) 72 y = self.norm()(y) 73 y = self.act(y) 74 y = self.conv(self.filters, (3, 3), self.strides)(y) 75 y = self.norm()(y) 76 y = self.act(y) 77 y = self.conv(self.filters * 4, (1, 1))(y) 78 y = self.norm(scale_init=nn.initializers.zeros_init())(y) 79 80 if residual.shape != y.shape: 81 residual = self.conv( 82 self.filters * 4, (1, 1), self.strides, name='conv_proj' 83 )(residual) 84 residual = self.norm(name='norm_proj')(residual) 85 86 return self.act(residual + y) 87 88 89 class ResNet(nn.Module): 90 """ResNetV1.""" 91 92 stage_sizes: Sequence[int] 93 block_cls: ModuleDef 94 num_classes: int 95 num_filters: int = 64 96 dtype: Any = jnp.float32 97 act: Callable = nn.relu 98 conv: ModuleDef = nn.Conv 99 100 @nn.compact 101 def __call__(self, x, train: bool = True): 102 conv = partial(self.conv, use_bias=False, dtype=self.dtype) 103 norm = partial( 104 nn.BatchNorm, 105 
use_running_average=not train, 106 momentum=0.9, 107 epsilon=1e-5, 108 dtype=self.dtype, 109 axis_name='batch', 110 ) 111 112 x = conv( 113 self.num_filters, 114 (7, 7), 115 (2, 2), 116 padding=[(3, 3), (3, 3)], 117 name='conv_init', 118 )(x) 119 x = norm(name='bn_init')(x) 120 x = nn.relu(x) 121 x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME') 122 for i, block_size in enumerate(self.stage_sizes): 123 for j in range(block_size): 124 strides = (2, 2) if i > 0 and j == 0 else (1, 1) 125 x = self.block_cls( 126 self.num_filters * 2**i, 127 strides=strides, 128 conv=conv, 129 norm=norm, 130 act=self.act, 131 )(x) 132 x = jnp.mean(x, axis=(1, 2)) 133 x = nn.Dense(self.num_classes, dtype=self.dtype)(x) 134 x = jnp.asarray(x, self.dtype) 135 return x 136 137 138 ResNet18 = partial(ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock) 139 ResNet34 = partial(ResNet, stage_sizes=[3, 4, 6, 3], block_cls=ResNetBlock) 140 ResNet50 = partial( 141 ResNet, stage_sizes=[3, 4, 6, 3], block_cls=BottleneckResNetBlock 142 ) 143 ResNet101 = partial( 144 ResNet, stage_sizes=[3, 4, 23, 3], block_cls=BottleneckResNetBlock 145 ) 146 ResNet152 = partial( 147 ResNet, stage_sizes=[3, 8, 36, 3], block_cls=BottleneckResNetBlock 148 ) 149 ResNet200 = partial( 150 ResNet, stage_sizes=[3, 24, 36, 3], block_cls=BottleneckResNetBlock 151 ) 152 153 154 ResNet18Local = partial( 155 ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock, conv=nn.ConvLocal 156 ) 157 158 159 # Used for testing only. 160 _ResNet1 = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock) 161 _ResNet1Local = partial( 162 ResNet, stage_sizes=[1], block_cls=ResNetBlock, conv=nn.ConvLocal 163 ) 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/imagenet/models.py b/examples/imagenet/models.py --- a/examples/imagenet/models.py +++ b/examples/imagenet/models.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Flax implementation of ResNet V1.""" +"""Flax implementation of ResNet V1.5.""" # See issue #620. # pytype: disable=wrong-arg-count @@ -87,7 +87,7 @@ class ResNet(nn.Module): - """ResNetV1.""" + """ResNetV1.5.""" stage_sizes: Sequence[int] block_cls: ModuleDef
{"golden_diff": "diff --git a/examples/imagenet/models.py b/examples/imagenet/models.py\n--- a/examples/imagenet/models.py\n+++ b/examples/imagenet/models.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\"\"\"Flax implementation of ResNet V1.\"\"\"\n+\"\"\"Flax implementation of ResNet V1.5.\"\"\"\n \n # See issue #620.\n # pytype: disable=wrong-arg-count\n@@ -87,7 +87,7 @@\n \n \n class ResNet(nn.Module):\n- \"\"\"ResNetV1.\"\"\"\n+ \"\"\"ResNetV1.5.\"\"\"\n \n stage_sizes: Sequence[int]\n block_cls: ModuleDef\n", "issue": "ResNetV1 implementation is actually ResNetV1.5\n[Downsampling is performed by the 3x3 convolution](https://github.com/google/flax/blob/3ea6381139a340fa7b29c74a84205d8bad18b489/examples/imagenet/models.py#L74), which means it is actually [ResNetV1.5](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/resnet_50_v1_5_for_pytorch), not ResNetV1 as claimed. (In the original version the first convolution in the block has stride 2: see [original paper](https://arxiv.org/pdf/1512.03385.pdf), the caption to Table 1.)\n", "before_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Flax implementation of ResNet V1.\"\"\"\n\n# See issue #620.\n# pytype: disable=wrong-arg-count\n\nfrom functools import partial\nfrom typing import Any, Callable, Sequence, Tuple\n\nfrom flax import linen as nn\nimport jax.numpy as jnp\n\nModuleDef = Any\n\n\nclass ResNetBlock(nn.Module):\n \"\"\"ResNet block.\"\"\"\n\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n act: Callable\n strides: Tuple[int, int] = (1, 1)\n\n @nn.compact\n def __call__(\n self,\n x,\n ):\n residual = x\n y = self.conv(self.filters, (3, 3), self.strides)(x)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3))(y)\n y = self.norm(scale_init=nn.initializers.zeros_init())(y)\n\n if residual.shape != y.shape:\n residual = self.conv(\n self.filters, (1, 1), self.strides, name='conv_proj'\n )(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass BottleneckResNetBlock(nn.Module):\n \"\"\"Bottleneck ResNet block.\"\"\"\n\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n act: Callable\n strides: Tuple[int, int] = (1, 1)\n\n @nn.compact\n def __call__(self, x):\n residual = x\n y = self.conv(self.filters, (1, 1))(x)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3), self.strides)(y)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters * 4, (1, 1))(y)\n y = self.norm(scale_init=nn.initializers.zeros_init())(y)\n\n if residual.shape != y.shape:\n residual = self.conv(\n self.filters * 4, (1, 1), self.strides, name='conv_proj'\n )(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNetV1.\"\"\"\n\n stage_sizes: Sequence[int]\n block_cls: ModuleDef\n num_classes: int\n num_filters: int = 
64\n dtype: Any = jnp.float32\n act: Callable = nn.relu\n conv: ModuleDef = nn.Conv\n\n @nn.compact\n def __call__(self, x, train: bool = True):\n conv = partial(self.conv, use_bias=False, dtype=self.dtype)\n norm = partial(\n nn.BatchNorm,\n use_running_average=not train,\n momentum=0.9,\n epsilon=1e-5,\n dtype=self.dtype,\n axis_name='batch',\n )\n\n x = conv(\n self.num_filters,\n (7, 7),\n (2, 2),\n padding=[(3, 3), (3, 3)],\n name='conv_init',\n )(x)\n x = norm(name='bn_init')(x)\n x = nn.relu(x)\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')\n for i, block_size in enumerate(self.stage_sizes):\n for j in range(block_size):\n strides = (2, 2) if i > 0 and j == 0 else (1, 1)\n x = self.block_cls(\n self.num_filters * 2**i,\n strides=strides,\n conv=conv,\n norm=norm,\n act=self.act,\n )(x)\n x = jnp.mean(x, axis=(1, 2))\n x = nn.Dense(self.num_classes, dtype=self.dtype)(x)\n x = jnp.asarray(x, self.dtype)\n return x\n\n\nResNet18 = partial(ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock)\nResNet34 = partial(ResNet, stage_sizes=[3, 4, 6, 3], block_cls=ResNetBlock)\nResNet50 = partial(\n ResNet, stage_sizes=[3, 4, 6, 3], block_cls=BottleneckResNetBlock\n)\nResNet101 = partial(\n ResNet, stage_sizes=[3, 4, 23, 3], block_cls=BottleneckResNetBlock\n)\nResNet152 = partial(\n ResNet, stage_sizes=[3, 8, 36, 3], block_cls=BottleneckResNetBlock\n)\nResNet200 = partial(\n ResNet, stage_sizes=[3, 24, 36, 3], block_cls=BottleneckResNetBlock\n)\n\n\nResNet18Local = partial(\n ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock, conv=nn.ConvLocal\n)\n\n\n# Used for testing only.\n_ResNet1 = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock)\n_ResNet1Local = partial(\n ResNet, stage_sizes=[1], block_cls=ResNetBlock, conv=nn.ConvLocal\n)\n", "path": "examples/imagenet/models.py"}], "after_files": [{"content": "# Copyright 2023 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Flax implementation of ResNet V1.5.\"\"\"\n\n# See issue #620.\n# pytype: disable=wrong-arg-count\n\nfrom functools import partial\nfrom typing import Any, Callable, Sequence, Tuple\n\nfrom flax import linen as nn\nimport jax.numpy as jnp\n\nModuleDef = Any\n\n\nclass ResNetBlock(nn.Module):\n \"\"\"ResNet block.\"\"\"\n\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n act: Callable\n strides: Tuple[int, int] = (1, 1)\n\n @nn.compact\n def __call__(\n self,\n x,\n ):\n residual = x\n y = self.conv(self.filters, (3, 3), self.strides)(x)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3))(y)\n y = self.norm(scale_init=nn.initializers.zeros_init())(y)\n\n if residual.shape != y.shape:\n residual = self.conv(\n self.filters, (1, 1), self.strides, name='conv_proj'\n )(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass BottleneckResNetBlock(nn.Module):\n \"\"\"Bottleneck ResNet block.\"\"\"\n\n filters: int\n conv: ModuleDef\n norm: ModuleDef\n act: Callable\n strides: Tuple[int, int] = (1, 1)\n\n 
@nn.compact\n def __call__(self, x):\n residual = x\n y = self.conv(self.filters, (1, 1))(x)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters, (3, 3), self.strides)(y)\n y = self.norm()(y)\n y = self.act(y)\n y = self.conv(self.filters * 4, (1, 1))(y)\n y = self.norm(scale_init=nn.initializers.zeros_init())(y)\n\n if residual.shape != y.shape:\n residual = self.conv(\n self.filters * 4, (1, 1), self.strides, name='conv_proj'\n )(residual)\n residual = self.norm(name='norm_proj')(residual)\n\n return self.act(residual + y)\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNetV1.5.\"\"\"\n\n stage_sizes: Sequence[int]\n block_cls: ModuleDef\n num_classes: int\n num_filters: int = 64\n dtype: Any = jnp.float32\n act: Callable = nn.relu\n conv: ModuleDef = nn.Conv\n\n @nn.compact\n def __call__(self, x, train: bool = True):\n conv = partial(self.conv, use_bias=False, dtype=self.dtype)\n norm = partial(\n nn.BatchNorm,\n use_running_average=not train,\n momentum=0.9,\n epsilon=1e-5,\n dtype=self.dtype,\n axis_name='batch',\n )\n\n x = conv(\n self.num_filters,\n (7, 7),\n (2, 2),\n padding=[(3, 3), (3, 3)],\n name='conv_init',\n )(x)\n x = norm(name='bn_init')(x)\n x = nn.relu(x)\n x = nn.max_pool(x, (3, 3), strides=(2, 2), padding='SAME')\n for i, block_size in enumerate(self.stage_sizes):\n for j in range(block_size):\n strides = (2, 2) if i > 0 and j == 0 else (1, 1)\n x = self.block_cls(\n self.num_filters * 2**i,\n strides=strides,\n conv=conv,\n norm=norm,\n act=self.act,\n )(x)\n x = jnp.mean(x, axis=(1, 2))\n x = nn.Dense(self.num_classes, dtype=self.dtype)(x)\n x = jnp.asarray(x, self.dtype)\n return x\n\n\nResNet18 = partial(ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock)\nResNet34 = partial(ResNet, stage_sizes=[3, 4, 6, 3], block_cls=ResNetBlock)\nResNet50 = partial(\n ResNet, stage_sizes=[3, 4, 6, 3], block_cls=BottleneckResNetBlock\n)\nResNet101 = partial(\n ResNet, stage_sizes=[3, 4, 23, 3], block_cls=BottleneckResNetBlock\n)\nResNet152 = partial(\n ResNet, stage_sizes=[3, 8, 36, 3], block_cls=BottleneckResNetBlock\n)\nResNet200 = partial(\n ResNet, stage_sizes=[3, 24, 36, 3], block_cls=BottleneckResNetBlock\n)\n\n\nResNet18Local = partial(\n ResNet, stage_sizes=[2, 2, 2, 2], block_cls=ResNetBlock, conv=nn.ConvLocal\n)\n\n\n# Used for testing only.\n_ResNet1 = partial(ResNet, stage_sizes=[1], block_cls=ResNetBlock)\n_ResNet1Local = partial(\n ResNet, stage_sizes=[1], block_cls=ResNetBlock, conv=nn.ConvLocal\n)\n", "path": "examples/imagenet/models.py"}]}
2,173
163
gh_patches_debug_2301
rasdani/github-patches
git_diff
encode__httpx-1034
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aiter_lines() doesn't return full lines that span multiple chunks <https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as server that just outputs two lines - the json is big enough to be sent in two chunks; httpx aiter_lines() gets confused and sends data from the middle of the json line - seems to skip the starting part - which was most likely sent in a chunk without a newline ### test-httpx.py ```python import asyncio import json import httpx class TestHttpx: def __init__(self): pass async def __call__(self): http_client = httpx.AsyncClient() async with http_client.stream(method="GET", url='http://localhost:8080/lines') as response: is_message = True async for line in response.aiter_lines(): is_message = not is_message if is_message: message = json.loads(line) print(message) def main(): test_httpx = TestHttpx() asyncio.run(test_httpx()) if __name__ == '__main__': main() ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `httpx/_decoders.py` Content: ``` 1 """ 2 Handlers for Content-Encoding. 3 4 See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding 5 """ 6 import codecs 7 import typing 8 import zlib 9 10 import chardet 11 12 from ._exceptions import DecodingError 13 14 try: 15 import brotli 16 except ImportError: # pragma: nocover 17 brotli = None 18 19 20 class Decoder: 21 def decode(self, data: bytes) -> bytes: 22 raise NotImplementedError() # pragma: nocover 23 24 def flush(self) -> bytes: 25 raise NotImplementedError() # pragma: nocover 26 27 28 class IdentityDecoder(Decoder): 29 """ 30 Handle unencoded data. 31 """ 32 33 def decode(self, data: bytes) -> bytes: 34 return data 35 36 def flush(self) -> bytes: 37 return b"" 38 39 40 class DeflateDecoder(Decoder): 41 """ 42 Handle 'deflate' decoding. 43 44 See: https://stackoverflow.com/questions/1838699 45 """ 46 47 def __init__(self) -> None: 48 self.first_attempt = True 49 self.decompressor = zlib.decompressobj() 50 51 def decode(self, data: bytes) -> bytes: 52 was_first_attempt = self.first_attempt 53 self.first_attempt = False 54 try: 55 return self.decompressor.decompress(data) 56 except zlib.error as exc: 57 if was_first_attempt: 58 self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS) 59 return self.decode(data) 60 raise DecodingError from exc 61 62 def flush(self) -> bytes: 63 try: 64 return self.decompressor.flush() 65 except zlib.error as exc: # pragma: nocover 66 raise DecodingError from exc 67 68 69 class GZipDecoder(Decoder): 70 """ 71 Handle 'gzip' decoding. 72 73 See: https://stackoverflow.com/questions/1838699 74 """ 75 76 def __init__(self) -> None: 77 self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16) 78 79 def decode(self, data: bytes) -> bytes: 80 try: 81 return self.decompressor.decompress(data) 82 except zlib.error as exc: 83 raise DecodingError from exc 84 85 def flush(self) -> bytes: 86 try: 87 return self.decompressor.flush() 88 except zlib.error as exc: # pragma: nocover 89 raise DecodingError from exc 90 91 92 class BrotliDecoder(Decoder): 93 """ 94 Handle 'brotli' decoding. 95 96 Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/ 97 or `pip install brotli`. See https://github.com/google/brotli 98 Supports both 'brotlipy' and 'Brotli' packages since they share an import 99 name. 
The top branches are for 'brotlipy' and bottom branches for 'Brotli' 100 """ 101 102 def __init__(self) -> None: 103 assert ( 104 brotli is not None 105 ), "The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'" 106 self.decompressor = brotli.Decompressor() 107 self.seen_data = False 108 if hasattr(self.decompressor, "decompress"): 109 self._decompress = self.decompressor.decompress 110 else: 111 self._decompress = self.decompressor.process # pragma: nocover 112 113 def decode(self, data: bytes) -> bytes: 114 if not data: 115 return b"" 116 self.seen_data = True 117 try: 118 return self._decompress(data) 119 except brotli.error as exc: 120 raise DecodingError from exc 121 122 def flush(self) -> bytes: 123 if not self.seen_data: 124 return b"" 125 try: 126 if hasattr(self.decompressor, "finish"): 127 self.decompressor.finish() 128 return b"" 129 except brotli.error as exc: # pragma: nocover 130 raise DecodingError from exc 131 132 133 class MultiDecoder(Decoder): 134 """ 135 Handle the case where multiple encodings have been applied. 136 """ 137 138 def __init__(self, children: typing.Sequence[Decoder]) -> None: 139 """ 140 'children' should be a sequence of decoders in the order in which 141 each was applied. 142 """ 143 # Note that we reverse the order for decoding. 144 self.children = list(reversed(children)) 145 146 def decode(self, data: bytes) -> bytes: 147 for child in self.children: 148 data = child.decode(data) 149 return data 150 151 def flush(self) -> bytes: 152 data = b"" 153 for child in self.children: 154 data = child.decode(data) + child.flush() 155 return data 156 157 158 class TextDecoder: 159 """ 160 Handles incrementally decoding bytes into text 161 """ 162 163 def __init__(self, encoding: typing.Optional[str] = None): 164 self.decoder: typing.Optional[codecs.IncrementalDecoder] = ( 165 None if encoding is None else codecs.getincrementaldecoder(encoding)() 166 ) 167 self.detector = chardet.universaldetector.UniversalDetector() 168 169 # This buffer is only needed if 'decoder' is 'None' 170 # we want to trigger errors if data is getting added to 171 # our internal buffer for some silly reason while 172 # a decoder is discovered. 173 self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray() 174 175 def decode(self, data: bytes) -> str: 176 try: 177 if self.decoder is not None: 178 text = self.decoder.decode(data) 179 else: 180 assert self.buffer is not None 181 text = "" 182 self.detector.feed(data) 183 self.buffer += data 184 185 # Should be more than enough data to process, we don't 186 # want to buffer too long as chardet will wait until 187 # detector.close() is used to give back common 188 # encodings like 'utf-8'. 189 if len(self.buffer) >= 4096: 190 self.decoder = codecs.getincrementaldecoder( 191 self._detector_result() 192 )() 193 text = self.decoder.decode(bytes(self.buffer), False) 194 self.buffer = None 195 196 return text 197 except UnicodeDecodeError: # pragma: nocover 198 raise DecodingError() from None 199 200 def flush(self) -> str: 201 try: 202 if self.decoder is None: 203 # Empty string case as chardet is guaranteed to not have a guess. 
204 assert self.buffer is not None 205 if len(self.buffer) == 0: 206 return "" 207 return bytes(self.buffer).decode(self._detector_result()) 208 209 return self.decoder.decode(b"", True) 210 except UnicodeDecodeError: # pragma: nocover 211 raise DecodingError() from None 212 213 def _detector_result(self) -> str: 214 self.detector.close() 215 result = self.detector.result["encoding"] 216 if not result: # pragma: nocover 217 raise DecodingError("Unable to determine encoding of content") 218 219 return result 220 221 222 class LineDecoder: 223 """ 224 Handles incrementally reading lines from text. 225 226 Uses universal line decoding, supporting any of `\n`, `\r`, or `\r\n` 227 as line endings, normalizing to `\n`. 228 """ 229 230 def __init__(self) -> None: 231 self.buffer = "" 232 233 def decode(self, text: str) -> typing.List[str]: 234 lines = [] 235 236 if text.startswith("\n") and self.buffer and self.buffer[-1] == "\r": 237 # Handle the case where we have an "\r\n" split across 238 # our previous input, and our new chunk. 239 lines.append(self.buffer[:-1] + "\n") 240 self.buffer = "" 241 text = text[1:] 242 243 while text: 244 num_chars = len(text) 245 for idx in range(num_chars): 246 char = text[idx] 247 next_char = None if idx + 1 == num_chars else text[idx + 1] 248 if char == "\n": 249 lines.append(self.buffer + text[: idx + 1]) 250 self.buffer = "" 251 text = text[idx + 1 :] 252 break 253 elif char == "\r" and next_char == "\n": 254 lines.append(self.buffer + text[:idx] + "\n") 255 self.buffer = "" 256 text = text[idx + 2 :] 257 break 258 elif char == "\r" and next_char is not None: 259 lines.append(self.buffer + text[:idx] + "\n") 260 self.buffer = "" 261 text = text[idx + 1 :] 262 break 263 elif next_char is None: 264 self.buffer = text 265 text = "" 266 break 267 268 return lines 269 270 def flush(self) -> typing.List[str]: 271 if self.buffer.endswith("\r"): 272 # Handle the case where we had a trailing '\r', which could have 273 # been a '\r\n' pair. 274 lines = [self.buffer[:-1] + "\n"] 275 elif self.buffer: 276 lines = [self.buffer] 277 else: 278 lines = [] 279 self.buffer = "" 280 return lines 281 282 283 SUPPORTED_DECODERS = { 284 "identity": IdentityDecoder, 285 "gzip": GZipDecoder, 286 "deflate": DeflateDecoder, 287 "br": BrotliDecoder, 288 } 289 290 291 if brotli is None: 292 SUPPORTED_DECODERS.pop("br") # pragma: nocover 293 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/httpx/_decoders.py b/httpx/_decoders.py --- a/httpx/_decoders.py +++ b/httpx/_decoders.py @@ -261,7 +261,7 @@ text = text[idx + 1 :] break elif next_char is None: - self.buffer = text + self.buffer += text text = "" break
{"golden_diff": "diff --git a/httpx/_decoders.py b/httpx/_decoders.py\n--- a/httpx/_decoders.py\n+++ b/httpx/_decoders.py\n@@ -261,7 +261,7 @@\n text = text[idx + 1 :]\n break\n elif next_char is None:\n- self.buffer = text\n+ self.buffer += text\n text = \"\"\n break\n", "issue": "aiter_lines() doesn't return full lines that span multiple chunks\n<https://gist.github.com/scr-oath/aa76d200222a0409d09a0d6feb1a13e2> shows an example setup using cherry.py as server that just outputs two lines - the json is big enough to be sent in two chunks; httpx aiter_lines() gets confused and sends data from the middle of the json line - seems to skip the starting part - which was most likely sent in a chunk without a newline\r\n\r\n### test-httpx.py\r\n```python\r\nimport asyncio\r\nimport json\r\n\r\nimport httpx\r\n\r\n\r\nclass TestHttpx:\r\n def __init__(self):\r\n pass\r\n\r\n async def __call__(self):\r\n http_client = httpx.AsyncClient()\r\n async with http_client.stream(method=\"GET\", url='http://localhost:8080/lines') as response:\r\n is_message = True\r\n async for line in response.aiter_lines():\r\n is_message = not is_message\r\n if is_message:\r\n message = json.loads(line)\r\n print(message)\r\n\r\n\r\ndef main():\r\n test_httpx = TestHttpx()\r\n asyncio.run(test_httpx())\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\n", "before_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom ._exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.first_attempt = True\n self.decompressor = zlib.decompressobj()\n\n def decode(self, data: bytes) -> bytes:\n was_first_attempt = self.first_attempt\n self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n if was_first_attempt:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n return self.decode(data)\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. 
See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n if hasattr(self.decompressor, \"decompress\"):\n self._decompress = self.decompressor.decompress\n else:\n self._decompress = self.decompressor.process # pragma: nocover\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n return self._decompress(data)\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable 
to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading lines from text.\n\n Uses universal line decoding, supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer = text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/_decoders.py"}], "after_files": [{"content": "\"\"\"\nHandlers for Content-Encoding.\n\nSee: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding\n\"\"\"\nimport codecs\nimport typing\nimport zlib\n\nimport chardet\n\nfrom ._exceptions import DecodingError\n\ntry:\n import brotli\nexcept ImportError: # pragma: nocover\n brotli = None\n\n\nclass Decoder:\n def decode(self, data: bytes) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n def flush(self) -> bytes:\n raise NotImplementedError() # pragma: nocover\n\n\nclass IdentityDecoder(Decoder):\n \"\"\"\n Handle unencoded data.\n \"\"\"\n\n def decode(self, data: bytes) -> bytes:\n return data\n\n def flush(self) -> bytes:\n return b\"\"\n\n\nclass DeflateDecoder(Decoder):\n \"\"\"\n Handle 'deflate' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.first_attempt = True\n self.decompressor = zlib.decompressobj()\n\n def decode(self, data: bytes) -> bytes:\n was_first_attempt = self.first_attempt\n self.first_attempt = False\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n if was_first_attempt:\n self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)\n return self.decode(data)\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass GZipDecoder(Decoder):\n \"\"\"\n Handle 'gzip' decoding.\n\n See: https://stackoverflow.com/questions/1838699\n \"\"\"\n\n def __init__(self) -> None:\n self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)\n\n def 
decode(self, data: bytes) -> bytes:\n try:\n return self.decompressor.decompress(data)\n except zlib.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n try:\n return self.decompressor.flush()\n except zlib.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass BrotliDecoder(Decoder):\n \"\"\"\n Handle 'brotli' decoding.\n\n Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/\n or `pip install brotli`. See https://github.com/google/brotli\n Supports both 'brotlipy' and 'Brotli' packages since they share an import\n name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'\n \"\"\"\n\n def __init__(self) -> None:\n assert (\n brotli is not None\n ), \"The 'brotlipy' or 'brotli' library must be installed to use 'BrotliDecoder'\"\n self.decompressor = brotli.Decompressor()\n self.seen_data = False\n if hasattr(self.decompressor, \"decompress\"):\n self._decompress = self.decompressor.decompress\n else:\n self._decompress = self.decompressor.process # pragma: nocover\n\n def decode(self, data: bytes) -> bytes:\n if not data:\n return b\"\"\n self.seen_data = True\n try:\n return self._decompress(data)\n except brotli.error as exc:\n raise DecodingError from exc\n\n def flush(self) -> bytes:\n if not self.seen_data:\n return b\"\"\n try:\n if hasattr(self.decompressor, \"finish\"):\n self.decompressor.finish()\n return b\"\"\n except brotli.error as exc: # pragma: nocover\n raise DecodingError from exc\n\n\nclass MultiDecoder(Decoder):\n \"\"\"\n Handle the case where multiple encodings have been applied.\n \"\"\"\n\n def __init__(self, children: typing.Sequence[Decoder]) -> None:\n \"\"\"\n 'children' should be a sequence of decoders in the order in which\n each was applied.\n \"\"\"\n # Note that we reverse the order for decoding.\n self.children = list(reversed(children))\n\n def decode(self, data: bytes) -> bytes:\n for child in self.children:\n data = child.decode(data)\n return data\n\n def flush(self) -> bytes:\n data = b\"\"\n for child in self.children:\n data = child.decode(data) + child.flush()\n return data\n\n\nclass TextDecoder:\n \"\"\"\n Handles incrementally decoding bytes into text\n \"\"\"\n\n def __init__(self, encoding: typing.Optional[str] = None):\n self.decoder: typing.Optional[codecs.IncrementalDecoder] = (\n None if encoding is None else codecs.getincrementaldecoder(encoding)()\n )\n self.detector = chardet.universaldetector.UniversalDetector()\n\n # This buffer is only needed if 'decoder' is 'None'\n # we want to trigger errors if data is getting added to\n # our internal buffer for some silly reason while\n # a decoder is discovered.\n self.buffer: typing.Optional[bytearray] = None if self.decoder else bytearray()\n\n def decode(self, data: bytes) -> str:\n try:\n if self.decoder is not None:\n text = self.decoder.decode(data)\n else:\n assert self.buffer is not None\n text = \"\"\n self.detector.feed(data)\n self.buffer += data\n\n # Should be more than enough data to process, we don't\n # want to buffer too long as chardet will wait until\n # detector.close() is used to give back common\n # encodings like 'utf-8'.\n if len(self.buffer) >= 4096:\n self.decoder = codecs.getincrementaldecoder(\n self._detector_result()\n )()\n text = self.decoder.decode(bytes(self.buffer), False)\n self.buffer = None\n\n return text\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def flush(self) -> str:\n try:\n if self.decoder is None:\n # Empty string case as chardet 
is guaranteed to not have a guess.\n assert self.buffer is not None\n if len(self.buffer) == 0:\n return \"\"\n return bytes(self.buffer).decode(self._detector_result())\n\n return self.decoder.decode(b\"\", True)\n except UnicodeDecodeError: # pragma: nocover\n raise DecodingError() from None\n\n def _detector_result(self) -> str:\n self.detector.close()\n result = self.detector.result[\"encoding\"]\n if not result: # pragma: nocover\n raise DecodingError(\"Unable to determine encoding of content\")\n\n return result\n\n\nclass LineDecoder:\n \"\"\"\n Handles incrementally reading lines from text.\n\n Uses universal line decoding, supporting any of `\\n`, `\\r`, or `\\r\\n`\n as line endings, normalizing to `\\n`.\n \"\"\"\n\n def __init__(self) -> None:\n self.buffer = \"\"\n\n def decode(self, text: str) -> typing.List[str]:\n lines = []\n\n if text.startswith(\"\\n\") and self.buffer and self.buffer[-1] == \"\\r\":\n # Handle the case where we have an \"\\r\\n\" split across\n # our previous input, and our new chunk.\n lines.append(self.buffer[:-1] + \"\\n\")\n self.buffer = \"\"\n text = text[1:]\n\n while text:\n num_chars = len(text)\n for idx in range(num_chars):\n char = text[idx]\n next_char = None if idx + 1 == num_chars else text[idx + 1]\n if char == \"\\n\":\n lines.append(self.buffer + text[: idx + 1])\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif char == \"\\r\" and next_char == \"\\n\":\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 2 :]\n break\n elif char == \"\\r\" and next_char is not None:\n lines.append(self.buffer + text[:idx] + \"\\n\")\n self.buffer = \"\"\n text = text[idx + 1 :]\n break\n elif next_char is None:\n self.buffer += text\n text = \"\"\n break\n\n return lines\n\n def flush(self) -> typing.List[str]:\n if self.buffer.endswith(\"\\r\"):\n # Handle the case where we had a trailing '\\r', which could have\n # been a '\\r\\n' pair.\n lines = [self.buffer[:-1] + \"\\n\"]\n elif self.buffer:\n lines = [self.buffer]\n else:\n lines = []\n self.buffer = \"\"\n return lines\n\n\nSUPPORTED_DECODERS = {\n \"identity\": IdentityDecoder,\n \"gzip\": GZipDecoder,\n \"deflate\": DeflateDecoder,\n \"br\": BrotliDecoder,\n}\n\n\nif brotli is None:\n SUPPORTED_DECODERS.pop(\"br\") # pragma: nocover\n", "path": "httpx/_decoders.py"}]}
3,345
90