| Column | Type | Range |
| --- | --- | --- |
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-25.4k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 582-39.1k |
| num_tokens | int64 | 271-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
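A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library. The repository id and split name below are assumptions taken from the source column, not confirmed here; adjust them if they differ.

```python
# Sketch: load the dataset and inspect one row's fields.
# Assumes the data is published under the id shown in the "source" column
# and exposes a "train" split (both are assumptions).
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
print(row["golden_diff"][:200])  # first part of the reference patch
```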
problem_id: gh_patches_debug_22014
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__text-361
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MosesTokenizer has been moved out of NLTK due to licensing issues @jekbradbury great work here! Due to https://github.com/nltk/nltk/issues/2000, we had to remove MosesTokenizer out of NLTK but now it's hosted on https://github.com/alvations/sacremoses ``` pip install sacremoses ``` The silver lining is that the package comes with the data needed for tokenization so there's no need to keep the `nltk_data` directory =) ---- I would propose adding `sacremoses` on top of `nltk` because NLTK has another port of a nice tokenizer (by @jonsafari) that people overlook, https://github.com/nltk/nltk/blob/develop/nltk/tokenize/toktok.py (I think it's fast too) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchtext/data/utils.py` Content: ``` 1 import random 2 from contextlib import contextmanager 3 from copy import deepcopy 4 5 6 def get_tokenizer(tokenizer): 7 if callable(tokenizer): 8 return tokenizer 9 if tokenizer == "spacy": 10 try: 11 import spacy 12 spacy_en = spacy.load('en') 13 return lambda s: [tok.text for tok in spacy_en.tokenizer(s)] 14 except ImportError: 15 print("Please install SpaCy and the SpaCy English tokenizer. " 16 "See the docs at https://spacy.io for more information.") 17 raise 18 except AttributeError: 19 print("Please install SpaCy and the SpaCy English tokenizer. " 20 "See the docs at https://spacy.io for more information.") 21 raise 22 elif tokenizer == "moses": 23 try: 24 from nltk.tokenize.moses import MosesTokenizer 25 moses_tokenizer = MosesTokenizer() 26 return moses_tokenizer.tokenize 27 except ImportError: 28 print("Please install NLTK. " 29 "See the docs at http://nltk.org for more information.") 30 raise 31 except LookupError: 32 print("Please install the necessary NLTK corpora. " 33 "See the docs at http://nltk.org for more information.") 34 raise 35 elif tokenizer == 'revtok': 36 try: 37 import revtok 38 return revtok.tokenize 39 except ImportError: 40 print("Please install revtok.") 41 raise 42 elif tokenizer == 'subword': 43 try: 44 import revtok 45 return lambda x: revtok.tokenize(x, decap=True) 46 except ImportError: 47 print("Please install revtok.") 48 raise 49 raise ValueError("Requested tokenizer {}, valid choices are a " 50 "callable that takes a single string as input, " 51 "\"revtok\" for the revtok reversible tokenizer, " 52 "\"subword\" for the revtok caps-aware tokenizer, " 53 "\"spacy\" for the SpaCy English tokenizer, or " 54 "\"moses\" for the NLTK port of the Moses tokenization " 55 "script.".format(tokenizer)) 56 57 58 def interleave_keys(a, b): 59 """Interleave bits from two sort keys to form a joint sort key. 60 61 Examples that are similar in both of the provided keys will have similar 62 values for the key defined by this function. Useful for tasks with two 63 text fields like machine translation or natural language inference. 
64 """ 65 def interleave(args): 66 return ''.join([x for t in zip(*args) for x in t]) 67 return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2) 68 69 70 def get_torch_version(): 71 import torch 72 v = torch.__version__ 73 version_substrings = v.split('.') 74 major, minor = version_substrings[0], version_substrings[1] 75 return int(major), int(minor) 76 77 78 class RandomShuffler(object): 79 """Use random functions while keeping track of the random state to make it 80 reproducible and deterministic.""" 81 82 def __init__(self, random_state=None): 83 self._random_state = random_state 84 if self._random_state is None: 85 self._random_state = random.getstate() 86 87 @contextmanager 88 def use_internal_state(self): 89 """Use a specific RNG state.""" 90 old_state = random.getstate() 91 random.setstate(self._random_state) 92 yield 93 self._random_state = random.getstate() 94 random.setstate(old_state) 95 96 @property 97 def random_state(self): 98 return deepcopy(self._random_state) 99 100 @random_state.setter 101 def random_state(self, s): 102 self._random_state = s 103 104 def __call__(self, data): 105 """Shuffle and return a new list.""" 106 with self.use_internal_state(): 107 return random.sample(data, len(data)) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchtext/data/utils.py b/torchtext/data/utils.py --- a/torchtext/data/utils.py +++ b/torchtext/data/utils.py @@ -21,16 +21,22 @@ raise elif tokenizer == "moses": try: - from nltk.tokenize.moses import MosesTokenizer + from sacremoses import MosesTokenizer moses_tokenizer = MosesTokenizer() return moses_tokenizer.tokenize except ImportError: - print("Please install NLTK. " - "See the docs at http://nltk.org for more information.") + print("Please install SacreMoses. " + "See the docs at https://github.com/alvations/sacremoses " + "for more information.") raise - except LookupError: - print("Please install the necessary NLTK corpora. " - "See the docs at http://nltk.org for more information.") + elif tokenizer == "toktok": + try: + from nltk.tokenize.toktok import ToktokTokenizer + toktok = ToktokTokenizer() + return toktok.tokenize + except ImportError: + print("Please install NLTK. " + "See the docs at https://nltk.org for more information.") raise elif tokenizer == 'revtok': try:
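For context, a small runnable sketch of the two tokenizers this patch switches to; it assumes the `sacremoses` and `nltk` packages are installed, and the sample sentence is made up.

```python
# Sketch: the tokenizers selected by the patched get_tokenizer("moses")
# and get_tokenizer("toktok") branches, used directly.
from sacremoses import MosesTokenizer
from nltk.tokenize.toktok import ToktokTokenizer

sentence = "The quick brown fox doesn't jump over the lazy dog."

moses = MosesTokenizer()
print(moses.tokenize(sentence))   # list of Moses-style tokens

toktok = ToktokTokenizer()
print(toktok.tokenize(sentence))  # list of tok-tok tokens
```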
{"golden_diff": "diff --git a/torchtext/data/utils.py b/torchtext/data/utils.py\n--- a/torchtext/data/utils.py\n+++ b/torchtext/data/utils.py\n@@ -21,16 +21,22 @@\n raise\n elif tokenizer == \"moses\":\n try:\n- from nltk.tokenize.moses import MosesTokenizer\n+ from sacremoses import MosesTokenizer\n moses_tokenizer = MosesTokenizer()\n return moses_tokenizer.tokenize\n except ImportError:\n- print(\"Please install NLTK. \"\n- \"See the docs at http://nltk.org for more information.\")\n+ print(\"Please install SacreMoses. \"\n+ \"See the docs at https://github.com/alvations/sacremoses \"\n+ \"for more information.\")\n raise\n- except LookupError:\n- print(\"Please install the necessary NLTK corpora. \"\n- \"See the docs at http://nltk.org for more information.\")\n+ elif tokenizer == \"toktok\":\n+ try:\n+ from nltk.tokenize.toktok import ToktokTokenizer\n+ toktok = ToktokTokenizer()\n+ return toktok.tokenize\n+ except ImportError:\n+ print(\"Please install NLTK. \"\n+ \"See the docs at https://nltk.org for more information.\")\n raise\n elif tokenizer == 'revtok':\n try:\n", "issue": "MosesTokenizer has been moved out of NLTK due to licensing issues\n@jekbradbury great work here!\r\n\r\nDue to https://github.com/nltk/nltk/issues/2000, we had to remove MosesTokenizer out of NLTK but now it's hosted on https://github.com/alvations/sacremoses \r\n\r\n```\r\npip install sacremoses\r\n```\r\n\r\nThe silver lining is that the package comes with the data needed for tokenization so there's no need to keep the `nltk_data` directory =)\r\n\r\n----\r\n\r\nI would propose adding `sacremoses` on top of `nltk` because NLTK has another port of a nice tokenizer (by @jonsafari) that people overlook, https://github.com/nltk/nltk/blob/develop/nltk/tokenize/toktok.py (I think it's fast too)\n", "before_files": [{"content": "import random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\n\ndef get_tokenizer(tokenizer):\n if callable(tokenizer):\n return tokenizer\n if tokenizer == \"spacy\":\n try:\n import spacy\n spacy_en = spacy.load('en')\n return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]\n except ImportError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n except AttributeError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n elif tokenizer == \"moses\":\n try:\n from nltk.tokenize.moses import MosesTokenizer\n moses_tokenizer = MosesTokenizer()\n return moses_tokenizer.tokenize\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n except LookupError:\n print(\"Please install the necessary NLTK corpora. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n elif tokenizer == 'revtok':\n try:\n import revtok\n return revtok.tokenize\n except ImportError:\n print(\"Please install revtok.\")\n raise\n elif tokenizer == 'subword':\n try:\n import revtok\n return lambda x: revtok.tokenize(x, decap=True)\n except ImportError:\n print(\"Please install revtok.\")\n raise\n raise ValueError(\"Requested tokenizer {}, valid choices are a \"\n \"callable that takes a single string as input, \"\n \"\\\"revtok\\\" for the revtok reversible tokenizer, \"\n \"\\\"subword\\\" for the revtok caps-aware tokenizer, \"\n \"\\\"spacy\\\" for the SpaCy English tokenizer, or \"\n \"\\\"moses\\\" for the NLTK port of the Moses tokenization \"\n \"script.\".format(tokenizer))\n\n\ndef interleave_keys(a, b):\n \"\"\"Interleave bits from two sort keys to form a joint sort key.\n\n Examples that are similar in both of the provided keys will have similar\n values for the key defined by this function. Useful for tasks with two\n text fields like machine translation or natural language inference.\n \"\"\"\n def interleave(args):\n return ''.join([x for t in zip(*args) for x in t])\n return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)\n\n\ndef get_torch_version():\n import torch\n v = torch.__version__\n version_substrings = v.split('.')\n major, minor = version_substrings[0], version_substrings[1]\n return int(major), int(minor)\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n", "path": "torchtext/data/utils.py"}], "after_files": [{"content": "import random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\n\ndef get_tokenizer(tokenizer):\n if callable(tokenizer):\n return tokenizer\n if tokenizer == \"spacy\":\n try:\n import spacy\n spacy_en = spacy.load('en')\n return lambda s: [tok.text for tok in spacy_en.tokenizer(s)]\n except ImportError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n except AttributeError:\n print(\"Please install SpaCy and the SpaCy English tokenizer. \"\n \"See the docs at https://spacy.io for more information.\")\n raise\n elif tokenizer == \"moses\":\n try:\n from sacremoses import MosesTokenizer\n moses_tokenizer = MosesTokenizer()\n return moses_tokenizer.tokenize\n except ImportError:\n print(\"Please install SacreMoses. \"\n \"See the docs at https://github.com/alvations/sacremoses \"\n \"for more information.\")\n raise\n elif tokenizer == \"toktok\":\n try:\n from nltk.tokenize.toktok import ToktokTokenizer\n toktok = ToktokTokenizer()\n return toktok.tokenize\n except ImportError:\n print(\"Please install NLTK. 
\"\n \"See the docs at https://nltk.org for more information.\")\n raise\n elif tokenizer == 'revtok':\n try:\n import revtok\n return revtok.tokenize\n except ImportError:\n print(\"Please install revtok.\")\n raise\n elif tokenizer == 'subword':\n try:\n import revtok\n return lambda x: revtok.tokenize(x, decap=True)\n except ImportError:\n print(\"Please install revtok.\")\n raise\n raise ValueError(\"Requested tokenizer {}, valid choices are a \"\n \"callable that takes a single string as input, \"\n \"\\\"revtok\\\" for the revtok reversible tokenizer, \"\n \"\\\"subword\\\" for the revtok caps-aware tokenizer, \"\n \"\\\"spacy\\\" for the SpaCy English tokenizer, or \"\n \"\\\"moses\\\" for the NLTK port of the Moses tokenization \"\n \"script.\".format(tokenizer))\n\n\ndef interleave_keys(a, b):\n \"\"\"Interleave bits from two sort keys to form a joint sort key.\n\n Examples that are similar in both of the provided keys will have similar\n values for the key defined by this function. Useful for tasks with two\n text fields like machine translation or natural language inference.\n \"\"\"\n def interleave(args):\n return ''.join([x for t in zip(*args) for x in t])\n return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)\n\n\ndef get_torch_version():\n import torch\n v = torch.__version__\n version_substrings = v.split('.')\n major, minor = version_substrings[0], version_substrings[1]\n return int(major), int(minor)\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n", "path": "torchtext/data/utils.py"}]}
num_tokens: 1,468
num_tokens_diff: 299
problem_id: gh_patches_debug_43415
source: rasdani/github-patches
task_type: git_diff
in_source_id: kserve__kserve-2817
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Knative KafaSource detects wrong URL to serve events /kind bug **What steps did you take and what happened:** [A clear and concise description of what the bug is.] - Deployed knative-eventing and kafkasource. - Added https://github.com/kserve/kserve/blob/master/docs/samples/kafka/addressable-resolver.yaml - knative eventing is able to read the kafka source. - The service name created by inferenceservice is \<isvc-name>-predictor-default. However the kafkasource sends the events to http://\<isvc-name>.\<namespace>.svc.cluster.local **What did you expect to happen:** - I expected the requests to be sent to http://\<isvc-name>-predictor-default.\<namespace>.svc.cluster.local **What's the InferenceService yaml:** ``` apiVersion: "serving.kserve.io/v1beta1" kind: "InferenceService" metadata: name: "<inference-name>" namespace: "\<namespace>" annotations: prometheus.io/scrape: 'true' prometheus.io/port: '8082' labels: name: "\<label>" spec: predictor: minReplicas: 3 maxReplicas: 100 pytorch: name: \<name> storageUri: gs://<storage> resources: limits: cpu: 3000m memory: 3Gi requests: cpu: 2000m memory: 3Gi ``` **Anything else you would like to add:** [Miscellaneous information that will assist in solving the issue.] ``` ingress: |- { "ingressGateway" : "knative-serving/knative-ingress-gateway", "ingressService" : "istio-ingressgateway.istio-system.svc.cluster.local", "localGateway" : "knative-serving/knative-local-gateway", "localGatewayService" : "knative-local-gateway.istio-system.svc.cluster.local", "ingressDomain" : "example.com", "ingressClassName" : "kong", "domainTemplate": "{{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }}", "urlScheme": "http" } ``` **Environment:** Using Kong RawDeployment Torchserve version : 0.6.1 - Istio Version: Istio not installed - Knative Version: knative serving not installled. knative eventing v1.8.2 - KServe Version: 0.9.0 - Kubeflow version: N/A - Cloud Environment: GKE - Minikube/Kind version: - Kubernetes version: (use `kubectl version`): 1.23 - OS (e.g. from `/etc/os-release`): GKE --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/samples/kafka/image_transformer/image_transformer.py` Content: ``` 1 # 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 
13 14 import kserve 15 from typing import Dict, Union 16 import logging 17 import boto3 18 import cv2 19 from cloudevents.http import CloudEvent 20 21 from kserve import InferRequest, InferResponse 22 from kserve.protocol.grpc.grpc_predict_v2_pb2 import ModelInferResponse 23 24 logging.basicConfig(level=kserve.constants.KSERVE_LOGLEVEL) 25 26 session = boto3.Session() 27 client = session.client('s3', endpoint_url='http://minio-service:9000', aws_access_key_id='minio', 28 aws_secret_access_key='minio123') 29 30 31 def image_transform(image): 32 img = cv2.imread(image, cv2.IMREAD_GRAYSCALE) 33 g = cv2.resize(255 - img, (28, 28)) 34 g = g.flatten() / 255.0 35 return g.tolist() 36 37 38 class ImageTransformer(kserve.Model): 39 def __init__(self, name: str, predictor_host: str): 40 super().__init__(name) 41 self.predictor_host = predictor_host 42 self._key = None 43 44 def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest], 45 headers: Dict[str, str] = None) -> Union[Dict, InferRequest]: 46 if inputs['EventName'] == 's3:ObjectCreated:Put': 47 bucket = inputs['Records'][0]['s3']['bucket']['name'] 48 key = inputs['Records'][0]['s3']['object']['key'] 49 self._key = key 50 client.download_file(bucket, key, '/tmp/' + key) 51 request = image_transform('/tmp/' + key) 52 return {"instances": [request]} 53 raise Exception("unknown event") 54 55 def postprocess(self, response: Union[Dict, InferResponse, ModelInferResponse], headers: Dict[str, str] = None) \ 56 -> Union[Dict, ModelInferResponse]: 57 logging.info(response) 58 index = response["predictions"][0]["classes"] 59 logging.info("digit:" + str(index)) 60 client.upload_file('/tmp/' + self._key, 'digit-' + str(index), self._key) 61 return response 62 ``` Path: `docs/samples/kafka/setup.py` Content: ``` 1 # 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 import os 14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-tornasync', 20 'mypy' 21 ] 22 23 with open(os.path.join(os.getcwd(), '../../../python/VERSION')) as version_file: 24 version = version_file.read().strip() 25 26 setup( 27 name='transformer', 28 version='0.1.0', 29 author_email='[email protected]', 30 license='../../LICENSE.txt', 31 url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka', 32 description='Transformer', 33 long_description=open('README.md').read(), 34 python_requires='>=3.7', 35 packages=find_packages("transformer"), 36 install_requires=[ 37 f"kserve>={version}", 38 "pandas>=0.24.2", 39 "opencv-python-headless==4.2.0.32", 40 ], 41 tests_require=tests_require, 42 extras_require={'test': tests_require} 43 ) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/samples/kafka/image_transformer/image_transformer.py b/docs/samples/kafka/image_transformer/image_transformer.py --- a/docs/samples/kafka/image_transformer/image_transformer.py +++ b/docs/samples/kafka/image_transformer/image_transformer.py @@ -11,13 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import kserve -from typing import Dict, Union import logging +from typing import Dict, Union + import boto3 import cv2 from cloudevents.http import CloudEvent +import kserve from kserve import InferRequest, InferResponse from kserve.protocol.grpc.grpc_predict_v2_pb2 import ModelInferResponse @@ -26,6 +27,7 @@ session = boto3.Session() client = session.client('s3', endpoint_url='http://minio-service:9000', aws_access_key_id='minio', aws_secret_access_key='minio123') +digits_bucket = 'digits' def image_transform(image): @@ -41,8 +43,9 @@ self.predictor_host = predictor_host self._key = None - def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest], - headers: Dict[str, str] = None) -> Union[Dict, InferRequest]: + async def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest], + headers: Dict[str, str] = None) -> Union[Dict, InferRequest]: + logging.info("Received inputs %s", inputs) if inputs['EventName'] == 's3:ObjectCreated:Put': bucket = inputs['Records'][0]['s3']['bucket']['name'] key = inputs['Records'][0]['s3']['object']['key'] @@ -54,8 +57,10 @@ def postprocess(self, response: Union[Dict, InferResponse, ModelInferResponse], headers: Dict[str, str] = None) \ -> Union[Dict, ModelInferResponse]: - logging.info(response) + logging.info("response: %s", response) index = response["predictions"][0]["classes"] logging.info("digit:" + str(index)) - client.upload_file('/tmp/' + self._key, 'digit-' + str(index), self._key) + upload_path = f'digit-{index}/{self._key}' + client.upload_file('/tmp/' + self._key, digits_bucket, upload_path) + logging.info(f"Image {self._key} successfully uploaded to {upload_path}") return response diff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py --- a/docs/samples/kafka/setup.py +++ b/docs/samples/kafka/setup.py @@ -10,22 +10,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import os from setuptools import setup, find_packages tests_require = [ 'pytest', - 'pytest-tornasync', 'mypy' ] -with open(os.path.join(os.getcwd(), '../../../python/VERSION')) as version_file: - version = version_file.read().strip() - setup( name='transformer', - version='0.1.0', + version='0.2.0', author_email='[email protected]', license='../../LICENSE.txt', url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka', @@ -34,9 +29,9 @@ python_requires='>=3.7', packages=find_packages("transformer"), install_requires=[ - f"kserve>={version}", + "kserve>0.10.0", "pandas>=0.24.2", - "opencv-python-headless==4.2.0.32", + "opencv-python-headless==4.7.0.72", ], tests_require=tests_require, extras_require={'test': tests_require}
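As an illustration, a small sketch of the event shape the sample transformer's `preprocess()` reads and the upload path the patched `postprocess()` builds; the bucket, key, and predicted class below are made-up values.

```python
# Sketch: field access on a minio "ObjectCreated:Put" notification, matching
# what the sample transformer reads, plus the patched upload path format.
event = {
    "EventName": "s3:ObjectCreated:Put",
    "Records": [{"s3": {"bucket": {"name": "mnist"}, "object": {"key": "0.png"}}}],
}

bucket = event["Records"][0]["s3"]["bucket"]["name"]   # "mnist"
key = event["Records"][0]["s3"]["object"]["key"]       # "0.png"

index = 7  # hypothetical predicted digit
upload_path = f"digit-{index}/{key}"                   # "digit-7/0.png"
print(bucket, key, upload_path)
```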
{"golden_diff": "diff --git a/docs/samples/kafka/image_transformer/image_transformer.py b/docs/samples/kafka/image_transformer/image_transformer.py\n--- a/docs/samples/kafka/image_transformer/image_transformer.py\n+++ b/docs/samples/kafka/image_transformer/image_transformer.py\n@@ -11,13 +11,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import kserve\n-from typing import Dict, Union\n import logging\n+from typing import Dict, Union\n+\n import boto3\n import cv2\n from cloudevents.http import CloudEvent\n \n+import kserve\n from kserve import InferRequest, InferResponse\n from kserve.protocol.grpc.grpc_predict_v2_pb2 import ModelInferResponse\n \n@@ -26,6 +27,7 @@\n session = boto3.Session()\n client = session.client('s3', endpoint_url='http://minio-service:9000', aws_access_key_id='minio',\n aws_secret_access_key='minio123')\n+digits_bucket = 'digits'\n \n \n def image_transform(image):\n@@ -41,8 +43,9 @@\n self.predictor_host = predictor_host\n self._key = None\n \n- def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest],\n- headers: Dict[str, str] = None) -> Union[Dict, InferRequest]:\n+ async def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest],\n+ headers: Dict[str, str] = None) -> Union[Dict, InferRequest]:\n+ logging.info(\"Received inputs %s\", inputs)\n if inputs['EventName'] == 's3:ObjectCreated:Put':\n bucket = inputs['Records'][0]['s3']['bucket']['name']\n key = inputs['Records'][0]['s3']['object']['key']\n@@ -54,8 +57,10 @@\n \n def postprocess(self, response: Union[Dict, InferResponse, ModelInferResponse], headers: Dict[str, str] = None) \\\n -> Union[Dict, ModelInferResponse]:\n- logging.info(response)\n+ logging.info(\"response: %s\", response)\n index = response[\"predictions\"][0][\"classes\"]\n logging.info(\"digit:\" + str(index))\n- client.upload_file('/tmp/' + self._key, 'digit-' + str(index), self._key)\n+ upload_path = f'digit-{index}/{self._key}'\n+ client.upload_file('/tmp/' + self._key, digits_bucket, upload_path)\n+ logging.info(f\"Image {self._key} successfully uploaded to {upload_path}\")\n return response\ndiff --git a/docs/samples/kafka/setup.py b/docs/samples/kafka/setup.py\n--- a/docs/samples/kafka/setup.py\n+++ b/docs/samples/kafka/setup.py\n@@ -10,22 +10,17 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-import os\n \n from setuptools import setup, find_packages\n \n tests_require = [\n 'pytest',\n- 'pytest-tornasync',\n 'mypy'\n ]\n \n-with open(os.path.join(os.getcwd(), '../../../python/VERSION')) as version_file:\n- version = version_file.read().strip()\n-\n setup(\n name='transformer',\n- version='0.1.0',\n+ version='0.2.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',\n@@ -34,9 +29,9 @@\n python_requires='>=3.7',\n packages=find_packages(\"transformer\"),\n install_requires=[\n- f\"kserve>={version}\",\n+ \"kserve>0.10.0\",\n \"pandas>=0.24.2\",\n- \"opencv-python-headless==4.2.0.32\",\n+ \"opencv-python-headless==4.7.0.72\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n", "issue": "Knative KafaSource detects wrong URL to serve events\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\n- Deployed knative-eventing and 
kafkasource.\r\n- Added https://github.com/kserve/kserve/blob/master/docs/samples/kafka/addressable-resolver.yaml\r\n- knative eventing is able to read the kafka source.\r\n- The service name created by inferenceservice is \\<isvc-name>-predictor-default. However the kafkasource sends the events to http://\\<isvc-name>.\\<namespace>.svc.cluster.local\r\n\r\n**What did you expect to happen:**\r\n- I expected the requests to be sent to http://\\<isvc-name>-predictor-default.\\<namespace>.svc.cluster.local\r\n\r\n**What's the InferenceService yaml:**\r\n```\r\napiVersion: \"serving.kserve.io/v1beta1\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: \"<inference-name>\"\r\n namespace: \"\\<namespace>\"\r\n annotations:\r\n prometheus.io/scrape: 'true'\r\n prometheus.io/port: '8082'\r\n labels:\r\n name: \"\\<label>\"\r\nspec:\r\n predictor:\r\n minReplicas: 3\r\n maxReplicas: 100\r\n pytorch:\r\n name: \\<name>\r\n storageUri: gs://<storage>\r\n resources:\r\n limits:\r\n cpu: 3000m\r\n memory: 3Gi\r\n requests:\r\n cpu: 2000m\r\n memory: 3Gi\r\n```\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n```\r\ningress: |-\r\n {\r\n \"ingressGateway\" : \"knative-serving/knative-ingress-gateway\",\r\n \"ingressService\" : \"istio-ingressgateway.istio-system.svc.cluster.local\",\r\n \"localGateway\" : \"knative-serving/knative-local-gateway\",\r\n \"localGatewayService\" : \"knative-local-gateway.istio-system.svc.cluster.local\",\r\n \"ingressDomain\" : \"example.com\",\r\n \"ingressClassName\" : \"kong\",\r\n \"domainTemplate\": \"{{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }}\",\r\n \"urlScheme\": \"http\"\r\n }\r\n```\r\n\r\n**Environment:**\r\n Using Kong\r\n RawDeployment\r\nTorchserve version : 0.6.1\r\n- Istio Version: Istio not installed\r\n- Knative Version: knative serving not installled. knative eventing v1.8.2\r\n- KServe Version: 0.9.0\r\n- Kubeflow version: N/A\r\n- Cloud Environment: GKE\r\n- Minikube/Kind version:\r\n- Kubernetes version: (use `kubectl version`): 1.23\r\n- OS (e.g. 
from `/etc/os-release`): GKE\r\n\n", "before_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kserve\nfrom typing import Dict, Union\nimport logging\nimport boto3\nimport cv2\nfrom cloudevents.http import CloudEvent\n\nfrom kserve import InferRequest, InferResponse\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import ModelInferResponse\n\nlogging.basicConfig(level=kserve.constants.KSERVE_LOGLEVEL)\n\nsession = boto3.Session()\nclient = session.client('s3', endpoint_url='http://minio-service:9000', aws_access_key_id='minio',\n aws_secret_access_key='minio123')\n\n\ndef image_transform(image):\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n g = cv2.resize(255 - img, (28, 28))\n g = g.flatten() / 255.0\n return g.tolist()\n\n\nclass ImageTransformer(kserve.Model):\n def __init__(self, name: str, predictor_host: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n self._key = None\n\n def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest],\n headers: Dict[str, str] = None) -> Union[Dict, InferRequest]:\n if inputs['EventName'] == 's3:ObjectCreated:Put':\n bucket = inputs['Records'][0]['s3']['bucket']['name']\n key = inputs['Records'][0]['s3']['object']['key']\n self._key = key\n client.download_file(bucket, key, '/tmp/' + key)\n request = image_transform('/tmp/' + key)\n return {\"instances\": [request]}\n raise Exception(\"unknown event\")\n\n def postprocess(self, response: Union[Dict, InferResponse, ModelInferResponse], headers: Dict[str, str] = None) \\\n -> Union[Dict, ModelInferResponse]:\n logging.info(response)\n index = response[\"predictions\"][0][\"classes\"]\n logging.info(\"digit:\" + str(index))\n client.upload_file('/tmp/' + self._key, 'digit-' + str(index), self._key)\n return response\n", "path": "docs/samples/kafka/image_transformer/image_transformer.py"}, {"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open(os.path.join(os.getcwd(), '../../../python/VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='transformer',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.7',\n packages=find_packages(\"transformer\"),\n install_requires=[\n 
f\"kserve>={version}\",\n \"pandas>=0.24.2\",\n \"opencv-python-headless==4.2.0.32\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "docs/samples/kafka/setup.py"}], "after_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import Dict, Union\n\nimport boto3\nimport cv2\nfrom cloudevents.http import CloudEvent\n\nimport kserve\nfrom kserve import InferRequest, InferResponse\nfrom kserve.protocol.grpc.grpc_predict_v2_pb2 import ModelInferResponse\n\nlogging.basicConfig(level=kserve.constants.KSERVE_LOGLEVEL)\n\nsession = boto3.Session()\nclient = session.client('s3', endpoint_url='http://minio-service:9000', aws_access_key_id='minio',\n aws_secret_access_key='minio123')\ndigits_bucket = 'digits'\n\n\ndef image_transform(image):\n img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)\n g = cv2.resize(255 - img, (28, 28))\n g = g.flatten() / 255.0\n return g.tolist()\n\n\nclass ImageTransformer(kserve.Model):\n def __init__(self, name: str, predictor_host: str):\n super().__init__(name)\n self.predictor_host = predictor_host\n self._key = None\n\n async def preprocess(self, inputs: Union[Dict, CloudEvent, InferRequest],\n headers: Dict[str, str] = None) -> Union[Dict, InferRequest]:\n logging.info(\"Received inputs %s\", inputs)\n if inputs['EventName'] == 's3:ObjectCreated:Put':\n bucket = inputs['Records'][0]['s3']['bucket']['name']\n key = inputs['Records'][0]['s3']['object']['key']\n self._key = key\n client.download_file(bucket, key, '/tmp/' + key)\n request = image_transform('/tmp/' + key)\n return {\"instances\": [request]}\n raise Exception(\"unknown event\")\n\n def postprocess(self, response: Union[Dict, InferResponse, ModelInferResponse], headers: Dict[str, str] = None) \\\n -> Union[Dict, ModelInferResponse]:\n logging.info(\"response: %s\", response)\n index = response[\"predictions\"][0][\"classes\"]\n logging.info(\"digit:\" + str(index))\n upload_path = f'digit-{index}/{self._key}'\n client.upload_file('/tmp/' + self._key, digits_bucket, upload_path)\n logging.info(f\"Image {self._key} successfully uploaded to {upload_path}\")\n return response\n", "path": "docs/samples/kafka/image_transformer/image_transformer.py"}, {"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'mypy'\n]\n\nsetup(\n name='transformer',\n version='0.2.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n 
url='https://github.com/kserve/kserve/tree/master/docs/samples/kafka',\n description='Transformer',\n long_description=open('README.md').read(),\n python_requires='>=3.7',\n packages=find_packages(\"transformer\"),\n install_requires=[\n \"kserve>0.10.0\",\n \"pandas>=0.24.2\",\n \"opencv-python-headless==4.7.0.72\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "docs/samples/kafka/setup.py"}]}
num_tokens: 2,017
num_tokens_diff: 914
problem_id: gh_patches_debug_9600
source: rasdani/github-patches
task_type: git_diff
in_source_id: ansible__ansible-17457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ansible fails to create retry files with [Errno 2] No such file or directory: '' ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME retry files ##### ANSIBLE VERSION ``` ansible 2.1.1.0 config file = configured module search path = Default w/o overrides ``` ##### CONFIGURATION none ##### OS / ENVIRONMENT Ubuntu 16.04 ##### SUMMARY When a playbook fails, Ansible tries to create a retry file and then fails. ##### STEPS TO REPRODUCE ``` # test.yml --- - hosts: localhost gather_facts: no tasks: - command: "false" ``` Run `ansible-playbook -i localhost, -c local test.yml` ##### EXPECTED RESULTS Playbook fails, ansible doesn't complain about failing to create `test.retry`. ##### ACTUAL RESULTS ``` PLAY [localhost] *************************************************************** TASK [command] ***************************************************************** fatal: [localhost]: FAILED! => {"changed": true, "cmd": ["false"], "delta": "0:00:00.001666", "end": "2016-09-08 11:42:55.135782", "failed": true, "rc": 1, "start": "2016-09-08 11:42:55.134116", "stderr": "", "stdout": "", "stdout_lines": [], "warnings": []} NO MORE HOSTS LEFT ************************************************************* [WARNING]: Could not create retry file 'test.retry'. [Errno 2] No such file or directory: '' PLAY RECAP ********************************************************************* localhost : ok=0 changed=0 unreachable=0 failed=1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/executor/playbook_executor.py` Content: ``` 1 # (c) 2012-2014, Michael DeHaan <[email protected]> 2 # 3 # This file is part of Ansible 4 # 5 # Ansible is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License as published by 7 # the Free Software Foundation, either version 3 of the License, or 8 # (at your option) any later version. 9 # 10 # Ansible is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 17 18 # Make coding more python3-ish 19 from __future__ import (absolute_import, division, print_function) 20 __metaclass__ = type 21 22 import os 23 24 from ansible import constants as C 25 from ansible.executor.task_queue_manager import TaskQueueManager 26 from ansible.module_utils._text import to_native, to_text 27 from ansible.playbook import Playbook 28 from ansible.template import Templar 29 from ansible.utils.helpers import pct_to_int 30 from ansible.utils.path import makedirs_safe 31 32 try: 33 from __main__ import display 34 except ImportError: 35 from ansible.utils.display import Display 36 display = Display() 37 38 39 class PlaybookExecutor: 40 41 ''' 42 This is the primary class for executing playbooks, and thus the 43 basis for bin/ansible-playbook operation. 
44 ''' 45 46 def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords): 47 self._playbooks = playbooks 48 self._inventory = inventory 49 self._variable_manager = variable_manager 50 self._loader = loader 51 self._options = options 52 self.passwords = passwords 53 self._unreachable_hosts = dict() 54 55 if options.listhosts or options.listtasks or options.listtags or options.syntax: 56 self._tqm = None 57 else: 58 self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords) 59 60 def run(self): 61 62 ''' 63 Run the given playbook, based on the settings in the play which 64 may limit the runs to serialized groups, etc. 65 ''' 66 67 result = 0 68 entrylist = [] 69 entry = {} 70 try: 71 for playbook_path in self._playbooks: 72 pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader) 73 self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path))) 74 75 if self._tqm is None: # we are doing a listing 76 entry = {'playbook': playbook_path} 77 entry['plays'] = [] 78 else: 79 # make sure the tqm has callbacks loaded 80 self._tqm.load_callbacks() 81 self._tqm.send_callback('v2_playbook_on_start', pb) 82 83 i = 1 84 plays = pb.get_plays() 85 display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path))) 86 87 for play in plays: 88 if play._included_path is not None: 89 self._loader.set_basedir(play._included_path) 90 else: 91 self._loader.set_basedir(pb._basedir) 92 93 # clear any filters which may have been applied to the inventory 94 self._inventory.remove_restriction() 95 96 if play.vars_prompt: 97 for var in play.vars_prompt: 98 vname = var['name'] 99 prompt = var.get("prompt", vname) 100 default = var.get("default", None) 101 private = var.get("private", True) 102 confirm = var.get("confirm", False) 103 encrypt = var.get("encrypt", None) 104 salt_size = var.get("salt_size", None) 105 salt = var.get("salt", None) 106 107 if vname not in self._variable_manager.extra_vars: 108 if self._tqm: 109 self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default) 110 play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default) 111 else: # we are either in --list-<option> or syntax check 112 play.vars[vname] = default 113 114 # Create a temporary copy of the play here, so we can run post_validate 115 # on it without the templating changes affecting the original object. 116 all_vars = self._variable_manager.get_vars(loader=self._loader, play=play) 117 templar = Templar(loader=self._loader, variables=all_vars) 118 new_play = play.copy() 119 new_play.post_validate(templar) 120 121 if self._options.syntax: 122 continue 123 124 if self._tqm is None: 125 # we are just doing a listing 126 entry['plays'].append(new_play) 127 128 else: 129 self._tqm._unreachable_hosts.update(self._unreachable_hosts) 130 131 previously_failed = len(self._tqm._failed_hosts) 132 previously_unreachable = len(self._tqm._unreachable_hosts) 133 134 break_play = False 135 # we are actually running plays 136 for batch in self._get_serialized_batches(new_play): 137 if len(batch) == 0: 138 self._tqm.send_callback('v2_playbook_on_play_start', new_play) 139 self._tqm.send_callback('v2_playbook_on_no_hosts_matched') 140 break 141 142 # restrict the inventory to the hosts in the serialized batch 143 self._inventory.restrict_to_hosts(batch) 144 # and run it... 
145 result = self._tqm.run(play=play) 146 147 # break the play if the result equals the special return code 148 if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0: 149 result = self._tqm.RUN_FAILED_HOSTS 150 break_play = True 151 152 # check the number of failures here, to see if they're above the maximum 153 # failure percentage allowed, or if any errors are fatal. If either of those 154 # conditions are met, we break out, otherwise we only break out if the entire 155 # batch failed 156 failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \ 157 (previously_failed + previously_unreachable) 158 159 if len(batch) == failed_hosts_count: 160 break_play = True 161 break 162 163 # update the previous counts so they don't accumulate incorrectly 164 # over multiple serial batches 165 previously_failed += len(self._tqm._failed_hosts) - previously_failed 166 previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable 167 168 # save the unreachable hosts from this batch 169 self._unreachable_hosts.update(self._tqm._unreachable_hosts) 170 171 if break_play: 172 break 173 174 i = i + 1 # per play 175 176 if entry: 177 entrylist.append(entry) # per playbook 178 179 # send the stats callback for this playbook 180 if self._tqm is not None: 181 if C.RETRY_FILES_ENABLED: 182 retries = set(self._tqm._failed_hosts.keys()) 183 retries.update(self._tqm._unreachable_hosts.keys()) 184 retries = sorted(retries) 185 if len(retries) > 0: 186 if C.RETRY_FILES_SAVE_PATH: 187 basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) 188 elif playbook_path: 189 basedir = os.path.dirname(playbook_path) 190 else: 191 basedir = '~/' 192 193 (retry_name, _) = os.path.splitext(os.path.basename(playbook_path)) 194 filename = os.path.join(basedir, "%s.retry" % retry_name) 195 if self._generate_retry_inventory(filename, retries): 196 display.display("\tto retry, use: --limit @%s\n" % filename) 197 198 self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats) 199 200 # if the last result wasn't zero, break out of the playbook file name loop 201 if result != 0: 202 break 203 204 if entrylist: 205 return entrylist 206 207 finally: 208 if self._tqm is not None: 209 self._tqm.cleanup() 210 if self._loader: 211 self._loader.cleanup_all_tmp_files() 212 213 if self._options.syntax: 214 display.display("No issues encountered") 215 return result 216 217 return result 218 219 def _get_serialized_batches(self, play): 220 ''' 221 Returns a list of hosts, subdivided into batches based on 222 the serial size specified in the play. 
223 ''' 224 225 # make sure we have a unique list of hosts 226 all_hosts = self._inventory.get_hosts(play.hosts) 227 all_hosts_len = len(all_hosts) 228 229 # the serial value can be listed as a scalar or a list of 230 # scalars, so we make sure it's a list here 231 serial_batch_list = play.serial 232 if len(serial_batch_list) == 0: 233 serial_batch_list = [-1] 234 235 cur_item = 0 236 serialized_batches = [] 237 238 while len(all_hosts) > 0: 239 # get the serial value from current item in the list 240 serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len) 241 242 # if the serial count was not specified or is invalid, default to 243 # a list of all hosts, otherwise grab a chunk of the hosts equal 244 # to the current serial item size 245 if serial <= 0: 246 serialized_batches.append(all_hosts) 247 break 248 else: 249 play_hosts = [] 250 for x in range(serial): 251 if len(all_hosts) > 0: 252 play_hosts.append(all_hosts.pop(0)) 253 254 serialized_batches.append(play_hosts) 255 256 # increment the current batch list item number, and if we've hit 257 # the end keep using the last element until we've consumed all of 258 # the hosts in the inventory 259 cur_item += 1 260 if cur_item > len(serial_batch_list) - 1: 261 cur_item = len(serial_batch_list) - 1 262 263 return serialized_batches 264 265 def _generate_retry_inventory(self, retry_path, replay_hosts): 266 ''' 267 Called when a playbook run fails. It generates an inventory which allows 268 re-running on ONLY the failed hosts. This may duplicate some variable 269 information in group_vars/host_vars but that is ok, and expected. 270 ''' 271 try: 272 makedirs_safe(os.path.dirname(retry_path)) 273 with open(retry_path, 'w') as fd: 274 for x in replay_hosts: 275 fd.write("%s\n" % x) 276 except Exception as e: 277 display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e))) 278 return False 279 280 return True 281 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py --- a/lib/ansible/executor/playbook_executor.py +++ b/lib/ansible/executor/playbook_executor.py @@ -186,7 +186,7 @@ if C.RETRY_FILES_SAVE_PATH: basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH) elif playbook_path: - basedir = os.path.dirname(playbook_path) + basedir = os.path.dirname(os.path.abspath(playbook_path)) else: basedir = '~/'
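A short sketch of why the one-line change matters: for a playbook passed as a bare relative filename, `os.path.dirname()` returns an empty string, which is what makes the retry-file creation fail with "[Errno 2] No such file or directory: ''". The example path is made up.

```python
# Sketch: dirname of a bare relative path is '', so creating that directory
# fails; taking abspath first yields the current working directory instead.
import os

playbook_path = "test.yml"  # e.g. ansible-playbook -i localhost, -c local test.yml

print(repr(os.path.dirname(playbook_path)))                   # ''
print(repr(os.path.dirname(os.path.abspath(playbook_path))))  # e.g. '/home/user'

# os.makedirs('') raises FileNotFoundError: [Errno 2] No such file or directory: ''
```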
{"golden_diff": "diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py\n--- a/lib/ansible/executor/playbook_executor.py\n+++ b/lib/ansible/executor/playbook_executor.py\n@@ -186,7 +186,7 @@\n if C.RETRY_FILES_SAVE_PATH:\n basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)\n elif playbook_path:\n- basedir = os.path.dirname(playbook_path)\n+ basedir = os.path.dirname(os.path.abspath(playbook_path))\n else:\n basedir = '~/'\n", "issue": "Ansible fails to create retry files with [Errno 2] No such file or directory: ''\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\nretry files\n##### ANSIBLE VERSION\n\n```\nansible 2.1.1.0\n config file = \n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nnone\n##### OS / ENVIRONMENT\n\nUbuntu 16.04\n##### SUMMARY\n\nWhen a playbook fails, Ansible tries to create a retry file and then fails.\n##### STEPS TO REPRODUCE\n\n```\n# test.yml\n\n---\n- hosts: localhost\n gather_facts: no\n tasks:\n - command: \"false\"\n```\n\nRun `ansible-playbook -i localhost, -c local test.yml`\n##### EXPECTED RESULTS\n\nPlaybook fails, ansible doesn't complain about failing to create `test.retry`.\n##### ACTUAL RESULTS\n\n```\nPLAY [localhost] ***************************************************************\n\nTASK [command] *****************************************************************\nfatal: [localhost]: FAILED! => {\"changed\": true, \"cmd\": [\"false\"], \"delta\": \"0:00:00.001666\", \"end\": \"2016-09-08 11:42:55.135782\", \"failed\": true, \"rc\": 1, \"start\": \"2016-09-08 11:42:55.134116\", \"stderr\": \"\", \"stdout\": \"\", \"stdout_lines\": [], \"warnings\": []}\n\nNO MORE HOSTS LEFT *************************************************************\n [WARNING]: Could not create retry file 'test.retry'. [Errno 2] No such file or directory: ''\n\n\nPLAY RECAP *********************************************************************\nlocalhost : ok=0 changed=0 unreachable=0 failed=1 \n```\n\n", "before_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible import constants as C\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.playbook import Playbook\nfrom ansible.template import Templar\nfrom ansible.utils.helpers import pct_to_int\nfrom ansible.utils.path import makedirs_safe\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass PlaybookExecutor:\n\n '''\n This is the primary class for executing playbooks, and thus the\n basis for bin/ansible-playbook operation.\n '''\n\n def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):\n self._playbooks = playbooks\n self._inventory = inventory\n self._variable_manager = variable_manager\n self._loader = loader\n self._options = options\n self.passwords = passwords\n self._unreachable_hosts = dict()\n\n if options.listhosts or options.listtasks or options.listtags or options.syntax:\n self._tqm = None\n else:\n self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)\n\n def run(self):\n\n '''\n Run the given playbook, based on the settings in the play which\n may limit the runs to serialized groups, etc.\n '''\n\n result = 0\n entrylist = []\n entry = {}\n try:\n for playbook_path in self._playbooks:\n pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)\n self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))\n\n if self._tqm is None: # we are doing a listing\n entry = {'playbook': playbook_path}\n entry['plays'] = []\n else:\n # make sure the tqm has callbacks loaded\n self._tqm.load_callbacks()\n self._tqm.send_callback('v2_playbook_on_start', pb)\n\n i = 1\n plays = pb.get_plays()\n display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))\n\n for play in plays:\n if play._included_path is not None:\n self._loader.set_basedir(play._included_path)\n else:\n self._loader.set_basedir(pb._basedir)\n\n # clear any filters which may have been applied to the inventory\n self._inventory.remove_restriction()\n\n if play.vars_prompt:\n for var in play.vars_prompt:\n vname = var['name']\n prompt = var.get(\"prompt\", vname)\n default = var.get(\"default\", None)\n private = var.get(\"private\", True)\n confirm = var.get(\"confirm\", False)\n encrypt = var.get(\"encrypt\", None)\n salt_size = var.get(\"salt_size\", None)\n salt = var.get(\"salt\", None)\n\n if vname not in self._variable_manager.extra_vars:\n if self._tqm:\n self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)\n play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)\n else: # we are either in --list-<option> or syntax check\n play.vars[vname] = default\n\n # Create a temporary copy of the play here, so we can run post_validate\n # on it without the templating changes affecting the original object.\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)\n templar = Templar(loader=self._loader, variables=all_vars)\n new_play = play.copy()\n new_play.post_validate(templar)\n\n if self._options.syntax:\n continue\n\n if self._tqm is 
None:\n # we are just doing a listing\n entry['plays'].append(new_play)\n\n else:\n self._tqm._unreachable_hosts.update(self._unreachable_hosts)\n\n previously_failed = len(self._tqm._failed_hosts)\n previously_unreachable = len(self._tqm._unreachable_hosts)\n\n break_play = False\n # we are actually running plays\n for batch in self._get_serialized_batches(new_play):\n if len(batch) == 0:\n self._tqm.send_callback('v2_playbook_on_play_start', new_play)\n self._tqm.send_callback('v2_playbook_on_no_hosts_matched')\n break\n\n # restrict the inventory to the hosts in the serialized batch\n self._inventory.restrict_to_hosts(batch)\n # and run it...\n result = self._tqm.run(play=play)\n\n # break the play if the result equals the special return code\n if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:\n result = self._tqm.RUN_FAILED_HOSTS\n break_play = True\n\n # check the number of failures here, to see if they're above the maximum\n # failure percentage allowed, or if any errors are fatal. If either of those\n # conditions are met, we break out, otherwise we only break out if the entire\n # batch failed\n failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \\\n (previously_failed + previously_unreachable)\n\n if len(batch) == failed_hosts_count:\n break_play = True\n break\n\n # update the previous counts so they don't accumulate incorrectly\n # over multiple serial batches\n previously_failed += len(self._tqm._failed_hosts) - previously_failed\n previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable\n\n # save the unreachable hosts from this batch\n self._unreachable_hosts.update(self._tqm._unreachable_hosts)\n\n if break_play:\n break\n\n i = i + 1 # per play\n\n if entry:\n entrylist.append(entry) # per playbook\n\n # send the stats callback for this playbook\n if self._tqm is not None:\n if C.RETRY_FILES_ENABLED:\n retries = set(self._tqm._failed_hosts.keys())\n retries.update(self._tqm._unreachable_hosts.keys())\n retries = sorted(retries)\n if len(retries) > 0:\n if C.RETRY_FILES_SAVE_PATH:\n basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)\n elif playbook_path:\n basedir = os.path.dirname(playbook_path)\n else:\n basedir = '~/'\n\n (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))\n filename = os.path.join(basedir, \"%s.retry\" % retry_name)\n if self._generate_retry_inventory(filename, retries):\n display.display(\"\\tto retry, use: --limit @%s\\n\" % filename)\n\n self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)\n\n # if the last result wasn't zero, break out of the playbook file name loop\n if result != 0:\n break\n\n if entrylist:\n return entrylist\n\n finally:\n if self._tqm is not None:\n self._tqm.cleanup()\n if self._loader:\n self._loader.cleanup_all_tmp_files()\n\n if self._options.syntax:\n display.display(\"No issues encountered\")\n return result\n\n return result\n\n def _get_serialized_batches(self, play):\n '''\n Returns a list of hosts, subdivided into batches based on\n the serial size specified in the play.\n '''\n\n # make sure we have a unique list of hosts\n all_hosts = self._inventory.get_hosts(play.hosts)\n all_hosts_len = len(all_hosts)\n\n # the serial value can be listed as a scalar or a list of\n # scalars, so we make sure it's a list here\n serial_batch_list = play.serial\n if len(serial_batch_list) == 0:\n serial_batch_list = [-1]\n\n cur_item = 0\n serialized_batches = []\n\n while len(all_hosts) > 0:\n # get the serial value from current item in the 
list\n serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)\n\n # if the serial count was not specified or is invalid, default to\n # a list of all hosts, otherwise grab a chunk of the hosts equal\n # to the current serial item size\n if serial <= 0:\n serialized_batches.append(all_hosts)\n break\n else:\n play_hosts = []\n for x in range(serial):\n if len(all_hosts) > 0:\n play_hosts.append(all_hosts.pop(0))\n\n serialized_batches.append(play_hosts)\n\n # increment the current batch list item number, and if we've hit\n # the end keep using the last element until we've consumed all of\n # the hosts in the inventory\n cur_item += 1\n if cur_item > len(serial_batch_list) - 1:\n cur_item = len(serial_batch_list) - 1\n\n return serialized_batches\n\n def _generate_retry_inventory(self, retry_path, replay_hosts):\n '''\n Called when a playbook run fails. It generates an inventory which allows\n re-running on ONLY the failed hosts. This may duplicate some variable\n information in group_vars/host_vars but that is ok, and expected.\n '''\n try:\n makedirs_safe(os.path.dirname(retry_path))\n with open(retry_path, 'w') as fd:\n for x in replay_hosts:\n fd.write(\"%s\\n\" % x)\n except Exception as e:\n display.warning(\"Could not create retry file '%s'.\\n\\t%s\" % (retry_path, to_native(e)))\n return False\n\n return True\n", "path": "lib/ansible/executor/playbook_executor.py"}], "after_files": [{"content": "# (c) 2012-2014, Michael DeHaan <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible import constants as C\nfrom ansible.executor.task_queue_manager import TaskQueueManager\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.playbook import Playbook\nfrom ansible.template import Templar\nfrom ansible.utils.helpers import pct_to_int\nfrom ansible.utils.path import makedirs_safe\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n\nclass PlaybookExecutor:\n\n '''\n This is the primary class for executing playbooks, and thus the\n basis for bin/ansible-playbook operation.\n '''\n\n def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):\n self._playbooks = playbooks\n self._inventory = inventory\n self._variable_manager = variable_manager\n self._loader = loader\n self._options = options\n self.passwords = passwords\n self._unreachable_hosts = dict()\n\n if options.listhosts or options.listtasks or options.listtags or options.syntax:\n self._tqm = None\n else:\n self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)\n\n def run(self):\n\n '''\n Run the given playbook, based on the settings in the play which\n may limit the runs to serialized groups, etc.\n '''\n\n result = 0\n entrylist = []\n entry = {}\n try:\n for playbook_path in self._playbooks:\n pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)\n self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))\n\n if self._tqm is None: # we are doing a listing\n entry = {'playbook': playbook_path}\n entry['plays'] = []\n else:\n # make sure the tqm has callbacks loaded\n self._tqm.load_callbacks()\n self._tqm.send_callback('v2_playbook_on_start', pb)\n\n i = 1\n plays = pb.get_plays()\n display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))\n\n for play in plays:\n if play._included_path is not None:\n self._loader.set_basedir(play._included_path)\n else:\n self._loader.set_basedir(pb._basedir)\n\n # clear any filters which may have been applied to the inventory\n self._inventory.remove_restriction()\n\n if play.vars_prompt:\n for var in play.vars_prompt:\n vname = var['name']\n prompt = var.get(\"prompt\", vname)\n default = var.get(\"default\", None)\n private = var.get(\"private\", True)\n confirm = var.get(\"confirm\", False)\n encrypt = var.get(\"encrypt\", None)\n salt_size = var.get(\"salt_size\", None)\n salt = var.get(\"salt\", None)\n\n if vname not in self._variable_manager.extra_vars:\n if self._tqm:\n self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)\n play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)\n else: # we are either in --list-<option> or syntax check\n play.vars[vname] = default\n\n # Create a temporary copy of the play here, so we can run post_validate\n # on it without the templating changes affecting the original object.\n all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)\n templar = Templar(loader=self._loader, variables=all_vars)\n new_play = play.copy()\n new_play.post_validate(templar)\n\n if self._options.syntax:\n continue\n\n if self._tqm is 
None:\n # we are just doing a listing\n entry['plays'].append(new_play)\n\n else:\n self._tqm._unreachable_hosts.update(self._unreachable_hosts)\n\n previously_failed = len(self._tqm._failed_hosts)\n previously_unreachable = len(self._tqm._unreachable_hosts)\n\n break_play = False\n # we are actually running plays\n for batch in self._get_serialized_batches(new_play):\n if len(batch) == 0:\n self._tqm.send_callback('v2_playbook_on_play_start', new_play)\n self._tqm.send_callback('v2_playbook_on_no_hosts_matched')\n break\n\n # restrict the inventory to the hosts in the serialized batch\n self._inventory.restrict_to_hosts(batch)\n # and run it...\n result = self._tqm.run(play=play)\n\n # break the play if the result equals the special return code\n if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:\n result = self._tqm.RUN_FAILED_HOSTS\n break_play = True\n\n # check the number of failures here, to see if they're above the maximum\n # failure percentage allowed, or if any errors are fatal. If either of those\n # conditions are met, we break out, otherwise we only break out if the entire\n # batch failed\n failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \\\n (previously_failed + previously_unreachable)\n\n if len(batch) == failed_hosts_count:\n break_play = True\n break\n\n # update the previous counts so they don't accumulate incorrectly\n # over multiple serial batches\n previously_failed += len(self._tqm._failed_hosts) - previously_failed\n previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable\n\n # save the unreachable hosts from this batch\n self._unreachable_hosts.update(self._tqm._unreachable_hosts)\n\n if break_play:\n break\n\n i = i + 1 # per play\n\n if entry:\n entrylist.append(entry) # per playbook\n\n # send the stats callback for this playbook\n if self._tqm is not None:\n if C.RETRY_FILES_ENABLED:\n retries = set(self._tqm._failed_hosts.keys())\n retries.update(self._tqm._unreachable_hosts.keys())\n retries = sorted(retries)\n if len(retries) > 0:\n if C.RETRY_FILES_SAVE_PATH:\n basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)\n elif playbook_path:\n basedir = os.path.dirname(os.path.abspath(playbook_path))\n else:\n basedir = '~/'\n\n (retry_name, _) = os.path.splitext(os.path.basename(playbook_path))\n filename = os.path.join(basedir, \"%s.retry\" % retry_name)\n if self._generate_retry_inventory(filename, retries):\n display.display(\"\\tto retry, use: --limit @%s\\n\" % filename)\n\n self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)\n\n # if the last result wasn't zero, break out of the playbook file name loop\n if result != 0:\n break\n\n if entrylist:\n return entrylist\n\n finally:\n if self._tqm is not None:\n self._tqm.cleanup()\n if self._loader:\n self._loader.cleanup_all_tmp_files()\n\n if self._options.syntax:\n display.display(\"No issues encountered\")\n return result\n\n return result\n\n def _get_serialized_batches(self, play):\n '''\n Returns a list of hosts, subdivided into batches based on\n the serial size specified in the play.\n '''\n\n # make sure we have a unique list of hosts\n all_hosts = self._inventory.get_hosts(play.hosts)\n all_hosts_len = len(all_hosts)\n\n # the serial value can be listed as a scalar or a list of\n # scalars, so we make sure it's a list here\n serial_batch_list = play.serial\n if len(serial_batch_list) == 0:\n serial_batch_list = [-1]\n\n cur_item = 0\n serialized_batches = []\n\n while len(all_hosts) > 0:\n # get the serial value from 
current item in the list\n serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)\n\n # if the serial count was not specified or is invalid, default to\n # a list of all hosts, otherwise grab a chunk of the hosts equal\n # to the current serial item size\n if serial <= 0:\n serialized_batches.append(all_hosts)\n break\n else:\n play_hosts = []\n for x in range(serial):\n if len(all_hosts) > 0:\n play_hosts.append(all_hosts.pop(0))\n\n serialized_batches.append(play_hosts)\n\n # increment the current batch list item number, and if we've hit\n # the end keep using the last element until we've consumed all of\n # the hosts in the inventory\n cur_item += 1\n if cur_item > len(serial_batch_list) - 1:\n cur_item = len(serial_batch_list) - 1\n\n return serialized_batches\n\n def _generate_retry_inventory(self, retry_path, replay_hosts):\n '''\n Called when a playbook run fails. It generates an inventory which allows\n re-running on ONLY the failed hosts. This may duplicate some variable\n information in group_vars/host_vars but that is ok, and expected.\n '''\n try:\n makedirs_safe(os.path.dirname(retry_path))\n with open(retry_path, 'w') as fd:\n for x in replay_hosts:\n fd.write(\"%s\\n\" % x)\n except Exception as e:\n display.warning(\"Could not create retry file '%s'.\\n\\t%s\" % (retry_path, to_native(e)))\n return False\n\n return True\n", "path": "lib/ansible/executor/playbook_executor.py"}]}
3,794
126
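The ansible patch above hinges on a stdlib detail: `os.path.dirname()` applied to a bare relative filename such as `test.yml` returns the empty string, and feeding that empty string to the later directory-creation call is what raises `[Errno 2] No such file or directory: ''`. The snippet below is a minimal standalone sketch of the patched expression (the `retry_basedir` helper name is not from the ansible source); it uses only `os.path` and runs as-is.

```python
import os

def retry_basedir(playbook_path):
    # Patched logic: resolve to an absolute path first, so a bare filename
    # like "test.yml" yields its real directory instead of ''.
    return os.path.dirname(os.path.abspath(playbook_path))

print(repr(os.path.dirname("test.yml")))  # '' -> os.makedirs('') fails with Errno 2
print(retry_basedir("test.yml"))          # the current working directory
print(retry_basedir("/plays/site.yml"))   # '/plays'
```

Only invocations that pass a bare filename hit the empty-string case; playbook paths that already contain a directory component were unaffected, which is why the warning is easy to miss.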
gh_patches_debug_4660
rasdani/github-patches
git_diff
bridgecrewio__checkov-2935
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- False positive for CKV_AZURE_43 when using the "random" provider resources **Describe the issue** Check ID: CKV_AZURE_43 When using any of the random_* resources from the [random provider](https://registry.terraform.io/providers/hashicorp/random/latest/docs) check CKV_AZURE_43 fails. StorageAccountName.py probably needs the VARIABLE_REFS list expanded to include the random_* resources. **Examples** ``` resource "random_string" "random" { length = 4 number = true lower = false special = false upper = false } resource "azurerm_storage_account" "vmstorageaccount" { name = "storage${random_string.random}" .... } ``` **Version:** - Checkov Version 2.0.113 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/azure/StorageAccountName.py` Content: ``` 1 import re 2 from typing import List, Dict, Any 3 4 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 5 from checkov.common.models.enums import CheckResult, CheckCategories 6 7 STO_NAME_REGEX = re.compile(r"^[a-z0-9]{3,24}$") 8 VARIABLE_REFS = ("local.", "module.", "var.") 9 10 11 class StorageAccountName(BaseResourceCheck): 12 def __init__(self) -> None: 13 name = "Ensure Storage Accounts adhere to the naming rules" 14 id = "CKV_AZURE_43" 15 supported_resources = ["azurerm_storage_account"] 16 categories = [CheckCategories.CONVENTION] 17 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 18 19 def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult: 20 """ 21 The Storage Account naming reference: 22 https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts 23 :param conf: azurerm_storage_account configuration 24 :return: <CheckResult> 25 """ 26 name = conf.get("name") 27 if name: 28 name = str(name[0]) 29 if any(x in name for x in VARIABLE_REFS): 30 # in the case we couldn't evaluate the name, just ignore 31 return CheckResult.UNKNOWN 32 if re.findall(STO_NAME_REGEX, str(conf["name"][0])): 33 return CheckResult.PASSED 34 35 return CheckResult.FAILED 36 37 def get_evaluated_keys(self) -> List[str]: 38 return ["name"] 39 40 41 check = StorageAccountName() 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py --- a/checkov/terraform/checks/resource/azure/StorageAccountName.py +++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py @@ -5,7 +5,7 @@ from checkov.common.models.enums import CheckResult, CheckCategories STO_NAME_REGEX = re.compile(r"^[a-z0-9]{3,24}$") -VARIABLE_REFS = ("local.", "module.", "var.") +VARIABLE_REFS = ("local.", "module.", "var.", "random_string.", "random_id.", "random_integer.", "random_pet.") class StorageAccountName(BaseResourceCheck):
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/StorageAccountName.py b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n--- a/checkov/terraform/checks/resource/azure/StorageAccountName.py\n+++ b/checkov/terraform/checks/resource/azure/StorageAccountName.py\n@@ -5,7 +5,7 @@\n from checkov.common.models.enums import CheckResult, CheckCategories\n \n STO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\n-VARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n+VARIABLE_REFS = (\"local.\", \"module.\", \"var.\", \"random_string.\", \"random_id.\", \"random_integer.\", \"random_pet.\")\n \n \n class StorageAccountName(BaseResourceCheck):\n", "issue": "False positive for CKV_AZURE_43 when using the \"random\" provider resources\n**Describe the issue**\r\nCheck ID: CKV_AZURE_43\r\nWhen using any of the random_* resources from the [random provider](https://registry.terraform.io/providers/hashicorp/random/latest/docs) check CKV_AZURE_43 fails.\r\n\r\nStorageAccountName.py probably needs the VARIABLE_REFS list expanded to include the random_* resources.\r\n\r\n**Examples**\r\n```\r\nresource \"random_string\" \"random\" {\r\n length = 4\r\n number = true\r\n lower = false\r\n special = false\r\n upper = false\r\n}\r\n\r\nresource \"azurerm_storage_account\" \"vmstorageaccount\" {\r\n name = \"storage${random_string.random}\"\r\n ....\r\n}\r\n```\r\n\r\n**Version:**\r\n - Checkov Version 2.0.113\n", "before_files": [{"content": "import re\nfrom typing import List, Dict, Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nSTO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\nVARIABLE_REFS = (\"local.\", \"module.\", \"var.\")\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources = [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n name = conf.get(\"name\")\n if name:\n name = str(name[0])\n if any(x in name for x in VARIABLE_REFS):\n # in the case we couldn't evaluate the name, just ignore\n return CheckResult.UNKNOWN\n if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"name\"]\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}], "after_files": [{"content": "import re\nfrom typing import List, Dict, Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nSTO_NAME_REGEX = re.compile(r\"^[a-z0-9]{3,24}$\")\nVARIABLE_REFS = (\"local.\", \"module.\", \"var.\", \"random_string.\", \"random_id.\", \"random_integer.\", \"random_pet.\")\n\n\nclass StorageAccountName(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Storage Accounts adhere to the naming rules\"\n id = \"CKV_AZURE_43\"\n supported_resources 
= [\"azurerm_storage_account\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n \"\"\"\n The Storage Account naming reference:\n https://docs.microsoft.com/en-us/azure/storage/common/storage-account-overview#naming-storage-accounts\n :param conf: azurerm_storage_account configuration\n :return: <CheckResult>\n \"\"\"\n name = conf.get(\"name\")\n if name:\n name = str(name[0])\n if any(x in name for x in VARIABLE_REFS):\n # in the case we couldn't evaluate the name, just ignore\n return CheckResult.UNKNOWN\n if re.findall(STO_NAME_REGEX, str(conf[\"name\"][0])):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return [\"name\"]\n\n\ncheck = StorageAccountName()\n", "path": "checkov/terraform/checks/resource/azure/StorageAccountName.py"}]}
884
167
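For the checkov change above, the behavioural difference sits entirely in the `VARIABLE_REFS` tuple: a storage-account `name` that still contains an unresolved Terraform reference can never match `^[a-z0-9]{3,24}$`, so the check has to bail out with UNKNOWN instead of reporting FAILED. The following is a simplified, standalone restatement of the check's core logic (not the checkov class itself) using the patched tuple:

```python
import re

STO_NAME_REGEX = re.compile(r"^[a-z0-9]{3,24}$")
# Patched tuple: unresolved references, including the random_* resources,
# make the check return UNKNOWN rather than FAILED.
VARIABLE_REFS = ("local.", "module.", "var.",
                 "random_string.", "random_id.", "random_integer.", "random_pet.")

def check_storage_account_name(name):
    """Simplified stand-in for the CKV_AZURE_43 name check."""
    if any(ref in name for ref in VARIABLE_REFS):
        return "UNKNOWN"  # cannot be evaluated statically
    return "PASSED" if STO_NAME_REGEX.match(name) else "FAILED"

print(check_storage_account_name("storage${random_string.random}"))  # UNKNOWN
print(check_storage_account_name("storageacct01"))                   # PASSED
print(check_storage_account_name("Storage-Account"))                 # FAILED
```

Listing the reference prefixes explicitly keeps the check conservative: anything it cannot resolve statically is skipped rather than flagged as a violation.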
gh_patches_debug_14368
rasdani/github-patches
git_diff
scrapy__scrapy-1131
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unhandled error in Deferred (RobotsTxtMiddleware) **Dev story**: Let's say spider downloads all .zip files from http://habrahabr.ru/post/212029/ page Url with .zip files looks like this: http://layer6.jenkins.tox.im/job/qt_gui_win32/lastSuccessfulBuild/artifact/qt/build/release/TOX-Qt-GUI.zip It's a polite spider, so settings file contains: `ROBOTSTXT_OBEY = True` Middleware parses habrahabr.ru robots.txt file as well as 'external' robots.txt file from layer6.jenkins.tox.im. It's expected behaviour. But if request will be returned with error then the output would be: ``` 2015-04-02 17:06:16+0300 [habrahabr] DEBUG: Gave up retrying <GET http://layer6.jenkins.tox.im/robots.txt> (failed 1 times): DNS lookup failed: address 'layer6.jenkins.tox.im' not found: [Errno 8] nodename nor servname provided, or not known. 2015-04-02 17:06:16+0300 [-] ERROR: Unhandled error in Deferred: 2015-04-02 17:06:16+0300 [-] Unhandled Error Traceback (most recent call last): Failure: twisted.internet.error.DNSLookupError: DNS lookup failed: address 'layer6.jenkins.tox.im' not found: [Errno 8] nodename nor servname provided, or not known. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/contrib/downloadermiddleware/robotstxt.py` Content: ``` 1 """ 2 This is a middleware to respect robots.txt policies. To activate it you must 3 enable this middleware and enable the ROBOTSTXT_OBEY setting. 4 5 """ 6 7 from six.moves.urllib import robotparser 8 9 from scrapy import signals, log 10 from scrapy.exceptions import NotConfigured, IgnoreRequest 11 from scrapy.http import Request 12 from scrapy.utils.httpobj import urlparse_cached 13 14 15 class RobotsTxtMiddleware(object): 16 DOWNLOAD_PRIORITY = 1000 17 18 def __init__(self, crawler): 19 if not crawler.settings.getbool('ROBOTSTXT_OBEY'): 20 raise NotConfigured 21 22 self.crawler = crawler 23 self._useragent = crawler.settings.get('USER_AGENT') 24 self._parsers = {} 25 26 @classmethod 27 def from_crawler(cls, crawler): 28 return cls(crawler) 29 30 def process_request(self, request, spider): 31 if request.meta.get('dont_obey_robotstxt'): 32 return 33 rp = self.robot_parser(request, spider) 34 if rp and not rp.can_fetch(self._useragent, request.url): 35 log.msg(format="Forbidden by robots.txt: %(request)s", 36 level=log.DEBUG, request=request) 37 raise IgnoreRequest 38 39 def robot_parser(self, request, spider): 40 url = urlparse_cached(request) 41 netloc = url.netloc 42 if netloc not in self._parsers: 43 self._parsers[netloc] = None 44 robotsurl = "%s://%s/robots.txt" % (url.scheme, url.netloc) 45 robotsreq = Request( 46 robotsurl, 47 priority=self.DOWNLOAD_PRIORITY, 48 meta={'dont_obey_robotstxt': True} 49 ) 50 dfd = self.crawler.engine.download(robotsreq, spider) 51 dfd.addCallback(self._parse_robots) 52 return self._parsers[netloc] 53 54 def _parse_robots(self, response): 55 rp = robotparser.RobotFileParser(response.url) 56 rp.parse(response.body.splitlines()) 57 self._parsers[urlparse_cached(response).netloc] = rp 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/contrib/downloadermiddleware/robotstxt.py b/scrapy/contrib/downloadermiddleware/robotstxt.py --- a/scrapy/contrib/downloadermiddleware/robotstxt.py +++ b/scrapy/contrib/downloadermiddleware/robotstxt.py @@ -49,8 +49,14 @@ ) dfd = self.crawler.engine.download(robotsreq, spider) dfd.addCallback(self._parse_robots) + dfd.addErrback(self._logerror, robotsreq, spider) return self._parsers[netloc] + def _logerror(self, failure, request, spider): + if failure.type is not IgnoreRequest: + log.msg(format="Error downloading %%(request)s: %s" % failure.value, + level=log.ERROR, request=request, spider=spider) + def _parse_robots(self, response): rp = robotparser.RobotFileParser(response.url) rp.parse(response.body.splitlines())
{"golden_diff": "diff --git a/scrapy/contrib/downloadermiddleware/robotstxt.py b/scrapy/contrib/downloadermiddleware/robotstxt.py\n--- a/scrapy/contrib/downloadermiddleware/robotstxt.py\n+++ b/scrapy/contrib/downloadermiddleware/robotstxt.py\n@@ -49,8 +49,14 @@\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots)\n+ dfd.addErrback(self._logerror, robotsreq, spider)\n return self._parsers[netloc]\n \n+ def _logerror(self, failure, request, spider):\n+ if failure.type is not IgnoreRequest:\n+ log.msg(format=\"Error downloading %%(request)s: %s\" % failure.value,\n+ level=log.ERROR, request=request, spider=spider)\n+\n def _parse_robots(self, response):\n rp = robotparser.RobotFileParser(response.url)\n rp.parse(response.body.splitlines())\n", "issue": "Unhandled error in Deferred (RobotsTxtMiddleware)\n**Dev story**:\nLet's say spider downloads all .zip files from http://habrahabr.ru/post/212029/ page\nUrl with .zip files looks like this: http://layer6.jenkins.tox.im/job/qt_gui_win32/lastSuccessfulBuild/artifact/qt/build/release/TOX-Qt-GUI.zip\n\nIt's a polite spider, so settings file contains:\n`ROBOTSTXT_OBEY = True`\n\nMiddleware parses habrahabr.ru robots.txt file as well as 'external' robots.txt file from layer6.jenkins.tox.im. It's expected behaviour. \nBut if request will be returned with error then the output would be:\n\n```\n2015-04-02 17:06:16+0300 [habrahabr] DEBUG: Gave up retrying <GET http://layer6.jenkins.tox.im/robots.txt> (failed 1 times): DNS lookup failed: address 'layer6.jenkins.tox.im' not found: [Errno 8] nodename nor servname provided, or not known.\n\n2015-04-02 17:06:16+0300 [-] ERROR: Unhandled error in Deferred:\n2015-04-02 17:06:16+0300 [-] Unhandled Error\n Traceback (most recent call last):\n Failure: twisted.internet.error.DNSLookupError: DNS lookup failed: address 'layer6.jenkins.tox.im' not found: [Errno 8] nodename nor servname provided, or not known.\n```\n\n", "before_files": [{"content": "\"\"\"\nThis is a middleware to respect robots.txt policies. 
To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nfrom six.moves.urllib import robotparser\n\nfrom scrapy import signals, log\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n rp = self.robot_parser(request, spider)\n if rp and not rp.can_fetch(self._useragent, request.url):\n log.msg(format=\"Forbidden by robots.txt: %(request)s\",\n level=log.DEBUG, request=request)\n raise IgnoreRequest\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n if netloc not in self._parsers:\n self._parsers[netloc] = None\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots)\n return self._parsers[netloc]\n\n def _parse_robots(self, response):\n rp = robotparser.RobotFileParser(response.url)\n rp.parse(response.body.splitlines())\n self._parsers[urlparse_cached(response).netloc] = rp\n", "path": "scrapy/contrib/downloadermiddleware/robotstxt.py"}], "after_files": [{"content": "\"\"\"\nThis is a middleware to respect robots.txt policies. 
To activate it you must\nenable this middleware and enable the ROBOTSTXT_OBEY setting.\n\n\"\"\"\n\nfrom six.moves.urllib import robotparser\n\nfrom scrapy import signals, log\nfrom scrapy.exceptions import NotConfigured, IgnoreRequest\nfrom scrapy.http import Request\nfrom scrapy.utils.httpobj import urlparse_cached\n\n\nclass RobotsTxtMiddleware(object):\n DOWNLOAD_PRIORITY = 1000\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('ROBOTSTXT_OBEY'):\n raise NotConfigured\n\n self.crawler = crawler\n self._useragent = crawler.settings.get('USER_AGENT')\n self._parsers = {}\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_obey_robotstxt'):\n return\n rp = self.robot_parser(request, spider)\n if rp and not rp.can_fetch(self._useragent, request.url):\n log.msg(format=\"Forbidden by robots.txt: %(request)s\",\n level=log.DEBUG, request=request)\n raise IgnoreRequest\n\n def robot_parser(self, request, spider):\n url = urlparse_cached(request)\n netloc = url.netloc\n if netloc not in self._parsers:\n self._parsers[netloc] = None\n robotsurl = \"%s://%s/robots.txt\" % (url.scheme, url.netloc)\n robotsreq = Request(\n robotsurl,\n priority=self.DOWNLOAD_PRIORITY,\n meta={'dont_obey_robotstxt': True}\n )\n dfd = self.crawler.engine.download(robotsreq, spider)\n dfd.addCallback(self._parse_robots)\n dfd.addErrback(self._logerror, robotsreq, spider)\n return self._parsers[netloc]\n\n def _logerror(self, failure, request, spider):\n if failure.type is not IgnoreRequest:\n log.msg(format=\"Error downloading %%(request)s: %s\" % failure.value,\n level=log.ERROR, request=request, spider=spider)\n\n def _parse_robots(self, response):\n rp = robotparser.RobotFileParser(response.url)\n rp.parse(response.body.splitlines())\n self._parsers[urlparse_cached(response).netloc] = rp\n", "path": "scrapy/contrib/downloadermiddleware/robotstxt.py"}]}
1,177
217
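The scrapy patch above is the standard Twisted remedy for "Unhandled error in Deferred": a Deferred whose failure is never consumed by an errback reports that traceback when it is garbage-collected, so attaching `_logerror` as an errback both logs the download error and swallows the failure. A stripped-down illustration of the mechanism, independent of the middleware (the `log_error` helper and the URL are placeholders):

```python
from twisted.internet import defer
from twisted.internet.error import DNSLookupError

def parse_robots(response):
    ...  # success path only

def log_error(failure, url):
    # Consuming the Failure here is what stops Twisted from printing
    # "Unhandled error in Deferred" later.
    print("Error downloading %s: %s" % (url, failure.value))

d = defer.Deferred()
d.addCallback(parse_robots)
d.addErrback(log_error, "http://layer6.jenkins.tox.im/robots.txt")
d.errback(DNSLookupError("address 'layer6.jenkins.tox.im' not found"))
```

Dropping the `addErrback` line from this sketch reproduces the reported symptom: the same traceback surfaces as an unhandled error once the Deferred is garbage-collected.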
gh_patches_debug_3525
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1747
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- port: turn memory scope includesnapshot to false (#5441) The changes in [turn memory scope includesnapshot to false (#5441)](https://github.com/microsoft/botbuilder-dotnet/pull/5441) may need to be ported to maintain parity with `microsoft/botbuilder-dotnet`. <blockquote> Fixes #5432 </blockquote> Please review and, if necessary, port the changes. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from botbuilder.dialogs.memory import scope_path 5 6 from .memory_scope import MemoryScope 7 8 9 class CaseInsensitiveDict(dict): 10 # pylint: disable=protected-access 11 12 @classmethod 13 def _k(cls, key): 14 return key.lower() if isinstance(key, str) else key 15 16 def __init__(self, *args, **kwargs): 17 super(CaseInsensitiveDict, self).__init__(*args, **kwargs) 18 self._convert_keys() 19 20 def __getitem__(self, key): 21 return super(CaseInsensitiveDict, self).__getitem__(self.__class__._k(key)) 22 23 def __setitem__(self, key, value): 24 super(CaseInsensitiveDict, self).__setitem__(self.__class__._k(key), value) 25 26 def __delitem__(self, key): 27 return super(CaseInsensitiveDict, self).__delitem__(self.__class__._k(key)) 28 29 def __contains__(self, key): 30 return super(CaseInsensitiveDict, self).__contains__(self.__class__._k(key)) 31 32 def pop(self, key, *args, **kwargs): 33 return super(CaseInsensitiveDict, self).pop( 34 self.__class__._k(key), *args, **kwargs 35 ) 36 37 def get(self, key, *args, **kwargs): 38 return super(CaseInsensitiveDict, self).get( 39 self.__class__._k(key), *args, **kwargs 40 ) 41 42 def setdefault(self, key, *args, **kwargs): 43 return super(CaseInsensitiveDict, self).setdefault( 44 self.__class__._k(key), *args, **kwargs 45 ) 46 47 def update(self, e=None, **f): 48 if e is None: 49 e = {} 50 super(CaseInsensitiveDict, self).update(self.__class__(e)) 51 super(CaseInsensitiveDict, self).update(self.__class__(**f)) 52 53 def _convert_keys(self): 54 for k in list(self.keys()): 55 val = super(CaseInsensitiveDict, self).pop(k) 56 self.__setitem__(k, val) 57 58 59 class TurnMemoryScope(MemoryScope): 60 def __init__(self): 61 super().__init__(scope_path.TURN) 62 63 def get_memory(self, dialog_context: "DialogContext") -> object: 64 if not dialog_context: 65 raise TypeError(f"Expecting: DialogContext, but received None") 66 67 turn_value = dialog_context.context.turn_state.get(scope_path.TURN, None) 68 69 if not turn_value: 70 turn_value = CaseInsensitiveDict() 71 dialog_context.context.turn_state[scope_path.TURN] = turn_value 72 73 return turn_value 74 75 def set_memory(self, dialog_context: "DialogContext", memory: object): 76 if not dialog_context: 77 raise TypeError(f"Expecting: DialogContext, but received None") 78 79 dialog_context.context.turn_state[scope_path.TURN] = memory 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py @@ -58,7 +58,7 @@ class TurnMemoryScope(MemoryScope): def __init__(self): - super().__init__(scope_path.TURN) + super().__init__(scope_path.TURN, False) def get_memory(self, dialog_context: "DialogContext") -> object: if not dialog_context:
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py\n@@ -58,7 +58,7 @@\n \n class TurnMemoryScope(MemoryScope):\n def __init__(self):\n- super().__init__(scope_path.TURN)\n+ super().__init__(scope_path.TURN, False)\n \n def get_memory(self, dialog_context: \"DialogContext\") -> object:\n if not dialog_context:\n", "issue": "port: turn memory scope includesnapshot to false (#5441)\nThe changes in [turn memory scope includesnapshot to false (#5441)](https://github.com/microsoft/botbuilder-dotnet/pull/5441) may need to be ported to maintain parity with `microsoft/botbuilder-dotnet`.\n\n<blockquote>\nFixes #5432\n</blockquote>\n\nPlease review and, if necessary, port the changes.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.dialogs.memory import scope_path\n\nfrom .memory_scope import MemoryScope\n\n\nclass CaseInsensitiveDict(dict):\n # pylint: disable=protected-access\n\n @classmethod\n def _k(cls, key):\n return key.lower() if isinstance(key, str) else key\n\n def __init__(self, *args, **kwargs):\n super(CaseInsensitiveDict, self).__init__(*args, **kwargs)\n self._convert_keys()\n\n def __getitem__(self, key):\n return super(CaseInsensitiveDict, self).__getitem__(self.__class__._k(key))\n\n def __setitem__(self, key, value):\n super(CaseInsensitiveDict, self).__setitem__(self.__class__._k(key), value)\n\n def __delitem__(self, key):\n return super(CaseInsensitiveDict, self).__delitem__(self.__class__._k(key))\n\n def __contains__(self, key):\n return super(CaseInsensitiveDict, self).__contains__(self.__class__._k(key))\n\n def pop(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).pop(\n self.__class__._k(key), *args, **kwargs\n )\n\n def get(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).get(\n self.__class__._k(key), *args, **kwargs\n )\n\n def setdefault(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).setdefault(\n self.__class__._k(key), *args, **kwargs\n )\n\n def update(self, e=None, **f):\n if e is None:\n e = {}\n super(CaseInsensitiveDict, self).update(self.__class__(e))\n super(CaseInsensitiveDict, self).update(self.__class__(**f))\n\n def _convert_keys(self):\n for k in list(self.keys()):\n val = super(CaseInsensitiveDict, self).pop(k)\n self.__setitem__(k, val)\n\n\nclass TurnMemoryScope(MemoryScope):\n def __init__(self):\n super().__init__(scope_path.TURN)\n\n def get_memory(self, dialog_context: \"DialogContext\") -> object:\n if not dialog_context:\n raise TypeError(f\"Expecting: DialogContext, but received None\")\n\n turn_value = dialog_context.context.turn_state.get(scope_path.TURN, None)\n\n if not turn_value:\n turn_value = CaseInsensitiveDict()\n dialog_context.context.turn_state[scope_path.TURN] = turn_value\n\n return turn_value\n\n def set_memory(self, dialog_context: \"DialogContext\", memory: object):\n if not dialog_context:\n raise TypeError(f\"Expecting: DialogContext, but received None\")\n\n dialog_context.context.turn_state[scope_path.TURN] = memory\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py"}], "after_files": 
[{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.dialogs.memory import scope_path\n\nfrom .memory_scope import MemoryScope\n\n\nclass CaseInsensitiveDict(dict):\n # pylint: disable=protected-access\n\n @classmethod\n def _k(cls, key):\n return key.lower() if isinstance(key, str) else key\n\n def __init__(self, *args, **kwargs):\n super(CaseInsensitiveDict, self).__init__(*args, **kwargs)\n self._convert_keys()\n\n def __getitem__(self, key):\n return super(CaseInsensitiveDict, self).__getitem__(self.__class__._k(key))\n\n def __setitem__(self, key, value):\n super(CaseInsensitiveDict, self).__setitem__(self.__class__._k(key), value)\n\n def __delitem__(self, key):\n return super(CaseInsensitiveDict, self).__delitem__(self.__class__._k(key))\n\n def __contains__(self, key):\n return super(CaseInsensitiveDict, self).__contains__(self.__class__._k(key))\n\n def pop(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).pop(\n self.__class__._k(key), *args, **kwargs\n )\n\n def get(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).get(\n self.__class__._k(key), *args, **kwargs\n )\n\n def setdefault(self, key, *args, **kwargs):\n return super(CaseInsensitiveDict, self).setdefault(\n self.__class__._k(key), *args, **kwargs\n )\n\n def update(self, e=None, **f):\n if e is None:\n e = {}\n super(CaseInsensitiveDict, self).update(self.__class__(e))\n super(CaseInsensitiveDict, self).update(self.__class__(**f))\n\n def _convert_keys(self):\n for k in list(self.keys()):\n val = super(CaseInsensitiveDict, self).pop(k)\n self.__setitem__(k, val)\n\n\nclass TurnMemoryScope(MemoryScope):\n def __init__(self):\n super().__init__(scope_path.TURN, False)\n\n def get_memory(self, dialog_context: \"DialogContext\") -> object:\n if not dialog_context:\n raise TypeError(f\"Expecting: DialogContext, but received None\")\n\n turn_value = dialog_context.context.turn_state.get(scope_path.TURN, None)\n\n if not turn_value:\n turn_value = CaseInsensitiveDict()\n dialog_context.context.turn_state[scope_path.TURN] = turn_value\n\n return turn_value\n\n def set_memory(self, dialog_context: \"DialogContext\", memory: object):\n if not dialog_context:\n raise TypeError(f\"Expecting: DialogContext, but received None\")\n\n dialog_context.context.turn_state[scope_path.TURN] = memory\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py"}]}
1,167
167
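The botbuilder one-liner above passes `False` as the second positional argument of the `MemoryScope` constructor; assuming that parameter is the include-in-snapshot flag mirrored from the .NET `includeInSnapshot` option, the effect is that volatile per-turn state stops being written into dialog-state snapshots. The toy model below only illustrates that intent and does not use the SDK's actual classes:

```python
class Scope:
    def __init__(self, name, include_in_snapshot=True):
        self.name = name
        self.include_in_snapshot = include_in_snapshot

def snapshot(scopes, memory):
    # Only scopes flagged for snapshotting are persisted; values that live
    # for a single activity (the "turn" scope) are dropped.
    return {s.name: memory[s.name] for s in scopes if s.include_in_snapshot}

scopes = [Scope("conversation"), Scope("turn", include_in_snapshot=False)]
memory = {"conversation": {"topic": "billing"}, "turn": {"activity_processed": True}}
print(snapshot(scopes, memory))  # {'conversation': {'topic': 'billing'}}
```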
gh_patches_debug_440
rasdani/github-patches
git_diff
pyca__cryptography-3035
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 1.4 Test failure TestRSACertificate.test_cert_serial_warning While testing an upgrade of the GNU Guix python-cryptography package from 1.3.2 to 1.4, I encountered the following failure of `TestRSACertificate.test_cert_serial_warning[backend0]`. ``` =================================== FAILURES =================================== ____________ TestRSACertificate.test_cert_serial_warning[backend0] _____________ self = <tests.test_x509.TestRSACertificate object at 0x7fffe0fa7978> backend = <cryptography.hazmat.backends.openssl.backend.Backend object at 0x7ffff3020278> def test_cert_serial_warning(self, backend): cert = _load_cert( os.path.join("x509", "PKITS_data", "certs", "GoodCACert.crt"), x509.load_der_x509_certificate, backend ) with warnings.catch_warnings(): warnings.simplefilter("always", utils.DeprecatedIn10) > with pytest.deprecated_call(): E TypeError: deprecated_call() missing 1 required positional argument: 'func' tests/test_x509.py:516: TypeError ============ 1 failed, 83123 passed, 3222 skipped in 164.91 seconds ============ ``` The dependency graph is using Python 3.4.3 and Hypothesis 3.1.0. Please let me know what other information I can provide to help debug this failure. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # This file is dual licensed under the terms of the Apache License, Version 4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 5 # for complete details. 6 7 from __future__ import absolute_import, division, print_function 8 9 import os 10 import platform 11 import subprocess 12 import sys 13 from distutils.command.build import build 14 15 import pkg_resources 16 17 from setuptools import find_packages, setup 18 from setuptools.command.install import install 19 from setuptools.command.test import test 20 21 22 base_dir = os.path.dirname(__file__) 23 src_dir = os.path.join(base_dir, "src") 24 25 # When executing the setup.py, we need to be able to import ourselves, this 26 # means that we need to add the src/ directory to the sys.path. 27 sys.path.insert(0, src_dir) 28 29 about = {} 30 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f: 31 exec(f.read(), about) 32 33 34 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__']) 35 36 requirements = [ 37 "idna>=2.0", 38 "pyasn1>=0.1.8", 39 "six>=1.4.1", 40 "setuptools>=11.3", 41 ] 42 setup_requirements = [] 43 44 if sys.version_info < (3, 4): 45 requirements.append("enum34") 46 47 if sys.version_info < (3, 3): 48 requirements.append("ipaddress") 49 50 if platform.python_implementation() == "PyPy": 51 if sys.pypy_version_info < (2, 6): 52 raise RuntimeError( 53 "cryptography 1.0 is not compatible with PyPy < 2.6. Please " 54 "upgrade PyPy to use this library." 
55 ) 56 else: 57 requirements.append("cffi>=1.4.1") 58 setup_requirements.append("cffi>=1.4.1") 59 60 test_requirements = [ 61 "pytest", 62 "pretend", 63 "iso8601", 64 "pyasn1_modules", 65 ] 66 if sys.version_info[:2] > (2, 6): 67 test_requirements.append("hypothesis>=1.11.4") 68 69 70 # If there's no vectors locally that probably means we are in a tarball and 71 # need to go and get the matching vectors package from PyPi 72 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")): 73 test_requirements.append(VECTORS_DEPENDENCY) 74 75 76 def cc_is_available(): 77 return sys.platform == "darwin" and list(map( 78 int, platform.mac_ver()[0].split("."))) >= [10, 8, 0] 79 80 81 backends = [ 82 "openssl = cryptography.hazmat.backends.openssl:backend" 83 ] 84 85 if cc_is_available(): 86 backends.append( 87 "commoncrypto = cryptography.hazmat.backends.commoncrypto:backend", 88 ) 89 90 91 class PyTest(test): 92 def finalize_options(self): 93 test.finalize_options(self) 94 self.test_args = [] 95 self.test_suite = True 96 97 # This means there's a vectors/ folder with the package in here. 98 # cd into it, install the vectors package and then refresh sys.path 99 if VECTORS_DEPENDENCY not in test_requirements: 100 subprocess.check_call( 101 [sys.executable, "setup.py", "install"], cwd="vectors" 102 ) 103 pkg_resources.get_distribution("cryptography_vectors").activate() 104 105 def run_tests(self): 106 # Import here because in module scope the eggs are not loaded. 107 import pytest 108 test_args = [os.path.join(base_dir, "tests")] 109 errno = pytest.main(test_args) 110 sys.exit(errno) 111 112 113 def keywords_with_side_effects(argv): 114 """ 115 Get a dictionary with setup keywords that (can) have side effects. 116 117 :param argv: A list of strings with command line arguments. 118 :returns: A dictionary with keyword arguments for the ``setup()`` function. 119 120 This setup.py script uses the setuptools 'setup_requires' feature because 121 this is required by the cffi package to compile extension modules. The 122 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi 123 build process as a result of setup.py invocations that don't need the cffi 124 module to be built (setup.py serves the dual purpose of exposing package 125 metadata). 126 127 All of the options listed by ``python setup.py --help`` that print 128 information should be recognized here. The commands ``clean``, 129 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized. 130 Any combination of these options and commands is also supported. 131 132 This function was originally based on the `setup.py script`_ of SciPy (see 133 also the discussion in `pip issue #25`_). 134 135 .. _pip issue #25: https://github.com/pypa/pip/issues/25 136 .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py 137 """ 138 no_setup_requires_arguments = ( 139 '-h', '--help', 140 '-n', '--dry-run', 141 '-q', '--quiet', 142 '-v', '--verbose', 143 '-V', '--version', 144 '--author', 145 '--author-email', 146 '--classifiers', 147 '--contact', 148 '--contact-email', 149 '--description', 150 '--egg-base', 151 '--fullname', 152 '--help-commands', 153 '--keywords', 154 '--licence', 155 '--license', 156 '--long-description', 157 '--maintainer', 158 '--maintainer-email', 159 '--name', 160 '--no-user-cfg', 161 '--obsoletes', 162 '--platforms', 163 '--provides', 164 '--requires', 165 '--url', 166 'clean', 167 'egg_info', 168 'register', 169 'sdist', 170 'upload', 171 ) 172 173 def is_short_option(argument): 174 """Check whether a command line argument is a short option.""" 175 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-' 176 177 def expand_short_options(argument): 178 """Expand combined short options into canonical short options.""" 179 return ('-' + char for char in argument[1:]) 180 181 def argument_without_setup_requirements(argv, i): 182 """Check whether a command line argument needs setup requirements.""" 183 if argv[i] in no_setup_requires_arguments: 184 # Simple case: An argument which is either an option or a command 185 # which doesn't need setup requirements. 186 return True 187 elif (is_short_option(argv[i]) and 188 all(option in no_setup_requires_arguments 189 for option in expand_short_options(argv[i]))): 190 # Not so simple case: Combined short options none of which need 191 # setup requirements. 192 return True 193 elif argv[i - 1:i] == ['--egg-base']: 194 # Tricky case: --egg-info takes an argument which should not make 195 # us use setup_requires (defeating the purpose of this code). 196 return True 197 else: 198 return False 199 200 if all(argument_without_setup_requirements(argv, i) 201 for i in range(1, len(argv))): 202 return { 203 "cmdclass": { 204 "build": DummyBuild, 205 "install": DummyInstall, 206 "test": DummyPyTest, 207 } 208 } 209 else: 210 cffi_modules = [ 211 "src/_cffi_src/build_openssl.py:ffi", 212 "src/_cffi_src/build_constant_time.py:ffi", 213 "src/_cffi_src/build_padding.py:ffi", 214 ] 215 if cc_is_available(): 216 cffi_modules.append("src/_cffi_src/build_commoncrypto.py:ffi") 217 218 return { 219 "setup_requires": setup_requirements, 220 "cmdclass": { 221 "test": PyTest, 222 }, 223 "cffi_modules": cffi_modules 224 } 225 226 227 setup_requires_error = ("Requested setup command that needs 'setup_requires' " 228 "while command line arguments implied a side effect " 229 "free command or option.") 230 231 232 class DummyBuild(build): 233 """ 234 This class makes it very obvious when ``keywords_with_side_effects()`` has 235 incorrectly interpreted the command line arguments to ``setup.py build`` as 236 one of the 'side effect free' commands or options. 237 """ 238 239 def run(self): 240 raise RuntimeError(setup_requires_error) 241 242 243 class DummyInstall(install): 244 """ 245 This class makes it very obvious when ``keywords_with_side_effects()`` has 246 incorrectly interpreted the command line arguments to ``setup.py install`` 247 as one of the 'side effect free' commands or options. 
248 """ 249 250 def run(self): 251 raise RuntimeError(setup_requires_error) 252 253 254 class DummyPyTest(test): 255 """ 256 This class makes it very obvious when ``keywords_with_side_effects()`` has 257 incorrectly interpreted the command line arguments to ``setup.py test`` as 258 one of the 'side effect free' commands or options. 259 """ 260 261 def run_tests(self): 262 raise RuntimeError(setup_requires_error) 263 264 265 with open(os.path.join(base_dir, "README.rst")) as f: 266 long_description = f.read() 267 268 269 setup( 270 name=about["__title__"], 271 version=about["__version__"], 272 273 description=about["__summary__"], 274 long_description=long_description, 275 license=about["__license__"], 276 url=about["__uri__"], 277 278 author=about["__author__"], 279 author_email=about["__email__"], 280 281 classifiers=[ 282 "Intended Audience :: Developers", 283 "License :: OSI Approved :: Apache Software License", 284 "License :: OSI Approved :: BSD License", 285 "Natural Language :: English", 286 "Operating System :: MacOS :: MacOS X", 287 "Operating System :: POSIX", 288 "Operating System :: POSIX :: BSD", 289 "Operating System :: POSIX :: Linux", 290 "Operating System :: Microsoft :: Windows", 291 "Programming Language :: Python", 292 "Programming Language :: Python :: 2", 293 "Programming Language :: Python :: 2.6", 294 "Programming Language :: Python :: 2.7", 295 "Programming Language :: Python :: 3", 296 "Programming Language :: Python :: 3.3", 297 "Programming Language :: Python :: 3.4", 298 "Programming Language :: Python :: 3.5", 299 "Programming Language :: Python :: Implementation :: CPython", 300 "Programming Language :: Python :: Implementation :: PyPy", 301 "Topic :: Security :: Cryptography", 302 ], 303 304 package_dir={"": "src"}, 305 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]), 306 include_package_data=True, 307 308 install_requires=requirements, 309 tests_require=test_requirements, 310 extras_require={ 311 "test": test_requirements, 312 "docstest": [ 313 "doc8", 314 "pyenchant", 315 "readme_renderer", 316 "sphinx", 317 "sphinx_rtd_theme", 318 "sphinxcontrib-spelling", 319 ], 320 "pep8test": [ 321 "flake8", 322 "flake8-import-order", 323 "pep8-naming", 324 ], 325 }, 326 327 # for cffi 328 zip_safe=False, 329 ext_package="cryptography.hazmat.bindings", 330 entry_points={ 331 "cryptography.backends": backends, 332 }, 333 **keywords_with_side_effects(sys.argv) 334 ) 335 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ setup_requirements.append("cffi>=1.4.1") test_requirements = [ - "pytest", + "pytest>=2.9.0", "pretend", "iso8601", "pyasn1_modules",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n setup_requirements.append(\"cffi>=1.4.1\")\n \n test_requirements = [\n- \"pytest\",\n+ \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n", "issue": "1.4 Test failure TestRSACertificate.test_cert_serial_warning\nWhile testing an upgrade of the GNU Guix python-cryptography package from 1.3.2 to 1.4, I encountered the following failure of `TestRSACertificate.test_cert_serial_warning[backend0]`.\n\n```\n=================================== FAILURES ===================================\n____________ TestRSACertificate.test_cert_serial_warning[backend0] _____________\n\nself = <tests.test_x509.TestRSACertificate object at 0x7fffe0fa7978>\nbackend = <cryptography.hazmat.backends.openssl.backend.Backend object at 0x7ffff3020278>\n\n def test_cert_serial_warning(self, backend):\n cert = _load_cert(\n os.path.join(\"x509\", \"PKITS_data\", \"certs\", \"GoodCACert.crt\"),\n x509.load_der_x509_certificate,\n backend\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", utils.DeprecatedIn10)\n> with pytest.deprecated_call():\nE TypeError: deprecated_call() missing 1 required positional argument: 'func'\n\ntests/test_x509.py:516: TypeError\n============ 1 failed, 83123 passed, 3222 skipped in 164.91 seconds ============\n```\n\nThe dependency graph is using Python 3.4.3 and Hypothesis 3.1.0. Please let me know what other information I can provide to help debug this failure.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]}
3913
87
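The cryptography record above pins `pytest>=2.9.0` because the test suite uses `pytest.deprecated_call()` as a context manager, a form that older pytest releases reject with the `TypeError: deprecated_call() missing 1 required positional argument: 'func'` shown in the issue traceback. A minimal sketch of the pattern the pin enables is below; `emit_old_api` is a placeholder for any call that raises a deprecation warning and is not part of the cryptography code base.

```python
import warnings

import pytest


def emit_old_api():
    # Placeholder for a deprecated call (e.g. an attribute access that the
    # library has marked as deprecated); it just emits the warning itself.
    warnings.warn("serial is deprecated", DeprecationWarning)
    return 42


def test_old_api_warns():
    # Context-manager form: supported from pytest 2.9.0 onwards, which is why
    # the golden diff raises the requirement from "pytest" to "pytest>=2.9.0".
    with pytest.deprecated_call():
        emit_old_api()

    # Older releases only accepted the callable form shown here.
    pytest.deprecated_call(emit_old_api)
```

Pinning the test dependency is the smaller change; the alternative would have been rewriting the affected tests back to the older callable form.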
gh_patches_debug_14752
rasdani/github-patches
git_diff
pymodbus-dev__pymodbus-2186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ssl.SSLWantReadError: The operation did not complete (read) when using ModbusTlsClient <!-- Before opening a new issue, make sure you do the following: - Check that your issue isn't already filed: https://github.com/pymodbus-dev/pymodbus/issues - Check the discussions forum https://github.com/pymodbus-dev/pymodbus/discussions - Prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus --> ### Versions - Python: 3.9.9 - OS: MacOS (14.4.1) - Pymodbus: 3.6.8 - Modbus Hardware (if used): ### Pymodbus Specific - Server: tls - async - Client: tls - sync ### Description I'm starting the async tls modbus server using certificates from the example folder and function `StartAsyncTlsServer` with defining a few slaves. After that, I tried to read some slaves using the client (ModbusTlsClient) and received the exception `Modbus Error: The operation did not complete (read) (_ssl.c:2633)`. I read slave 1 `python client.py --slave_id 1 --address 0 --count 1 ` ### Code and Logs Client code is following: ```python import argparse import pymodbus from pymodbus.client import ModbusTcpClient, ModbusTlsClient from pymodbus.exceptions import ConnectionException, ModbusIOException pymodbus.pymodbus_apply_logging_config("DEBUG") def main(): parser = argparse.ArgumentParser(description="Modbus client") parser.add_argument("--write", action="store_true", help="Write mode") parser.add_argument("--host", type=str, help="Host", default="localhost") parser.add_argument("--port", type=int, help="Port", default=502) parser.add_argument("--slave_id", type=int, help="Slave ID") parser.add_argument("--address", type=int, help="Address") parser.add_argument("--count", type=int, help="Count of registers to read", default=1) parser.add_argument("--value", type=int, help="Value to write", default=0) args = parser.parse_args() try: client = ModbusTlsClient( args.host, args.port, certfile="certificates/pymodbus.crt", keyfile="certificates/pymodbus.key", server_hostname="localhost", ) client.connect() if args.write: result = client.write_register(args.address, args.value, slave=args.slave_id) else: result = client.read_holding_registers(args.address, args.count, slave=args.slave_id) if result.isError(): print(f"ModbusClient Error, the id {args.slave_id} or address {args.address} is invalid") print(result) return except (ConnectionException, ModbusIOException) as e: print(f"Error connecting to {args.host}:{args.port} ({str(e)})") return print("Results:") print(result.registers) if __name__ == "__main__": main() ``` ### Server logs ``` 2024-05-02 12:13:16,849 DEBUG logging:103 Awaiting connections server_listener 2024-05-02 12:13:16,850 INFO logging:97 Server listening. 
2024-05-02 12:13:29,453 DEBUG logging:103 Connected to server 2024-05-02 12:13:29,453 DEBUG logging:103 recv: 0x3 0x0 0x0 0x0 0x1 old_data: addr=None 2024-05-02 12:13:29,453 DEBUG logging:103 Handling data: 0x3 0x0 0x0 0x0 0x1 2024-05-02 12:13:29,453 DEBUG logging:103 Processing: 0x3 0x0 0x0 0x0 0x1 2024-05-02 12:13:29,453 DEBUG logging:103 Factory Request[ReadHoldingRegistersRequest': 3] 2024-05-02 12:13:29,454 ERROR logging:115 requested slave does not exist: 0 2024-05-02 12:13:29,454 ERROR logging:115 Exception response Exception Response(131, 3, GatewayNoResponse) 2024-05-02 12:13:29,454 DEBUG logging:103 send: 0x83 0xb 2024-05-02 12:13:29,454 DEBUG logging:103 -> transport: received eof 2024-05-02 12:13:29,454 DEBUG logging:103 Connection lost server due to None 2024-05-02 12:13:29,454 DEBUG logging:103 Handler for stream [server] has been canceled ``` ### Client logs ``` 2024-05-02 12:13:29,453 DEBUG logging:103 Current transaction state - IDLE 2024-05-02 12:13:29,453 DEBUG logging:103 Running transaction 1 2024-05-02 12:13:29,453 DEBUG logging:103 SEND: 0x3 0x0 0x0 0x0 0x1 2024-05-02 12:13:29,453 DEBUG logging:103 New Transaction state "SENDING" 2024-05-02 12:13:29,453 DEBUG logging:103 Changing transaction state from "SENDING" to "WAITING FOR REPLY" 2024-05-02 12:13:29,453 DEBUG logging:103 Transaction failed. (The operation did not complete (read) (_ssl.c:2633)) Traceback (most recent call last): File "client.py", line 43, in <module> main() File "client.py", line 30, in main result = client.read_holding_registers(args.address, args.count, slave=args.slave_id) File ".venv/lib/python3.9/site-packages/pymodbus/client/mixin.py", line 107, in read_holding_registers return self.execute( File ".venv/lib/python3.9/site-packages/pymodbus/client/base.py", line 396, in execute return self.transaction.execute(request) File ".venv/lib/python3.9/site-packages/pymodbus/transaction.py", line 180, in execute response, last_exception = self._transact( File ".venv/lib/python3.9/site-packages/pymodbus/transaction.py", line 326, in _transact result = self._recv(response_length, full) File ".venv/lib/python3.9/site-packages/pymodbus/transaction.py", line 357, in _recv read_min = self.client.framer.recvPacket(min_size) File ".venv/lib/python3.9/site-packages/pymodbus/framer/base.py", line 79, in recvPacket return self.client.recv(size) File ".venv/lib/python3.9/site-packages/pymodbus/client/tcp.py", line 236, in recv if (recv_data := self.socket.recv(recv_size)) == b"": File "/Users/vmartyniak/.pyenv/versions/3.9.9/lib/python3.9/ssl.py", line 1227, in recv return self.read(buflen) File "/Users/vmartyniak/.pyenv/versions/3.9.9/lib/python3.9/ssl.py", line 1101, in read return self._sslobj.read(len) ssl.SSLWantReadError: The operation did not complete (read) (_ssl.c:2633) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pymodbus/framer/old_framer_tls.py` Content: ``` 1 """TLS framer.""" 2 import struct 3 4 from pymodbus.exceptions import ( 5 ModbusIOException, 6 ) 7 from pymodbus.framer.old_framer_base import TLS_FRAME_HEADER, ModbusFramer 8 from pymodbus.framer.tls import FramerTLS 9 10 11 # --------------------------------------------------------------------------- # 12 # Modbus TLS old framer 13 # --------------------------------------------------------------------------- # 14 15 16 class ModbusTlsFramer(ModbusFramer): 17 """Modbus TLS Frame controller. 
18 19 No prefix MBAP header before decrypted PDU is used as a message frame for 20 Modbus Security Application Protocol. It allows us to easily separate 21 decrypted messages which is PDU as follows: 22 23 [ Function Code] [ Data ] 24 1b Nb 25 """ 26 27 method = "tls" 28 29 def __init__(self, decoder, client=None): 30 """Initialize a new instance of the framer. 31 32 :param decoder: The decoder factory implementation to use 33 """ 34 super().__init__(decoder, client) 35 self._hsize = 0x0 36 self.message_handler = FramerTLS() 37 38 def decode_data(self, data): 39 """Decode data.""" 40 if len(data) > self._hsize: 41 (fcode,) = struct.unpack(TLS_FRAME_HEADER, data[0 : self._hsize + 1]) 42 return {"fcode": fcode} 43 return {} 44 45 def frameProcessIncomingPacket(self, _single, callback, _slave, _tid=None, **kwargs): 46 """Process new packet pattern.""" 47 # no slave id for Modbus Security Application Protocol 48 49 while True: 50 used_len, use_tid, dev_id, data = self.message_handler.decode(self._buffer) 51 if not data: 52 return 53 self._header["uid"] = dev_id 54 self._header["tid"] = use_tid 55 self._header["pid"] = 0 56 57 if (result := self.decoder.decode(data)) is None: 58 self.resetFrame() 59 raise ModbusIOException("Unable to decode request") 60 self.populateResult(result) 61 self._buffer = self._buffer[used_len:] 62 self._header = {"tid": 0, "pid": 0, "len": 0, "uid": 0} 63 callback(result) # defer or push to a thread? 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pymodbus/framer/old_framer_tls.py b/pymodbus/framer/old_framer_tls.py --- a/pymodbus/framer/old_framer_tls.py +++ b/pymodbus/framer/old_framer_tls.py @@ -1,5 +1,6 @@ """TLS framer.""" import struct +from time import sleep from pymodbus.exceptions import ( ModbusIOException, @@ -42,6 +43,11 @@ return {"fcode": fcode} return {} + def recvPacket(self, size): + """Receive packet from the bus.""" + sleep(0.5) + return super().recvPacket(size) + def frameProcessIncomingPacket(self, _single, callback, _slave, _tid=None, **kwargs): """Process new packet pattern.""" # no slave id for Modbus Security Application Protocol
{"golden_diff": "diff --git a/pymodbus/framer/old_framer_tls.py b/pymodbus/framer/old_framer_tls.py\n--- a/pymodbus/framer/old_framer_tls.py\n+++ b/pymodbus/framer/old_framer_tls.py\n@@ -1,5 +1,6 @@\n \"\"\"TLS framer.\"\"\"\n import struct\n+from time import sleep\n \n from pymodbus.exceptions import (\n ModbusIOException,\n@@ -42,6 +43,11 @@\n return {\"fcode\": fcode}\n return {}\n \n+ def recvPacket(self, size):\n+ \"\"\"Receive packet from the bus.\"\"\"\n+ sleep(0.5)\n+ return super().recvPacket(size)\n+\n def frameProcessIncomingPacket(self, _single, callback, _slave, _tid=None, **kwargs):\n \"\"\"Process new packet pattern.\"\"\"\n # no slave id for Modbus Security Application Protocol\n", "issue": "ssl.SSLWantReadError: The operation did not complete (read) when using ModbusTlsClient\n<!--\r\nBefore opening a new issue, make sure you do the following:\r\n\r\n- Check that your issue isn't already filed: https://github.com/pymodbus-dev/pymodbus/issues\r\n- Check the discussions forum https://github.com/pymodbus-dev/pymodbus/discussions\r\n- Prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n- Python: 3.9.9\r\n- OS: MacOS (14.4.1)\r\n- Pymodbus: 3.6.8\r\n- Modbus Hardware (if used):\r\n\r\n### Pymodbus Specific\r\n\r\n- Server: tls - async\r\n- Client: tls - sync\r\n\r\n### Description\r\n\r\nI'm starting the async tls modbus server using certificates from the example folder and function `StartAsyncTlsServer` with defining a few slaves. After that, I tried to read some slaves using the client (ModbusTlsClient) and received the exception `Modbus Error: The operation did not complete (read) (_ssl.c:2633)`.\r\nI read slave 1 `python client.py --slave_id 1 --address 0 --count 1 `\r\n\r\n### Code and Logs\r\n\r\nClient code is following:\r\n```python\r\nimport argparse\r\nimport pymodbus\r\nfrom pymodbus.client import ModbusTcpClient, ModbusTlsClient\r\nfrom pymodbus.exceptions import ConnectionException, ModbusIOException\r\n\r\npymodbus.pymodbus_apply_logging_config(\"DEBUG\")\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description=\"Modbus client\")\r\n parser.add_argument(\"--write\", action=\"store_true\", help=\"Write mode\")\r\n parser.add_argument(\"--host\", type=str, help=\"Host\", default=\"localhost\")\r\n parser.add_argument(\"--port\", type=int, help=\"Port\", default=502)\r\n parser.add_argument(\"--slave_id\", type=int, help=\"Slave ID\")\r\n parser.add_argument(\"--address\", type=int, help=\"Address\")\r\n parser.add_argument(\"--count\", type=int, help=\"Count of registers to read\", default=1)\r\n parser.add_argument(\"--value\", type=int, help=\"Value to write\", default=0)\r\n\r\n args = parser.parse_args()\r\n try:\r\n client = ModbusTlsClient(\r\n args.host,\r\n args.port,\r\n certfile=\"certificates/pymodbus.crt\",\r\n keyfile=\"certificates/pymodbus.key\",\r\n server_hostname=\"localhost\",\r\n )\r\n client.connect()\r\n if args.write:\r\n result = client.write_register(args.address, args.value, slave=args.slave_id)\r\n else:\r\n result = client.read_holding_registers(args.address, args.count, slave=args.slave_id)\r\n if result.isError():\r\n print(f\"ModbusClient Error, the id {args.slave_id} or address {args.address} is invalid\")\r\n print(result)\r\n return\r\n except (ConnectionException, ModbusIOException) as e:\r\n print(f\"Error connecting to {args.host}:{args.port} ({str(e)})\")\r\n return\r\n print(\"Results:\")\r\n 
print(result.registers)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n```\r\n\r\n### Server logs\r\n```\r\n2024-05-02 12:13:16,849 DEBUG logging:103 Awaiting connections server_listener\r\n2024-05-02 12:13:16,850 INFO logging:97 Server listening.\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Connected to server\r\n2024-05-02 12:13:29,453 DEBUG logging:103 recv: 0x3 0x0 0x0 0x0 0x1 old_data: addr=None\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Handling data: 0x3 0x0 0x0 0x0 0x1\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Processing: 0x3 0x0 0x0 0x0 0x1\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Factory Request[ReadHoldingRegistersRequest': 3]\r\n2024-05-02 12:13:29,454 ERROR logging:115 requested slave does not exist: 0\r\n2024-05-02 12:13:29,454 ERROR logging:115 Exception response Exception Response(131, 3, GatewayNoResponse)\r\n2024-05-02 12:13:29,454 DEBUG logging:103 send: 0x83 0xb\r\n2024-05-02 12:13:29,454 DEBUG logging:103 -> transport: received eof\r\n2024-05-02 12:13:29,454 DEBUG logging:103 Connection lost server due to None\r\n2024-05-02 12:13:29,454 DEBUG logging:103 Handler for stream [server] has been canceled\r\n```\r\n\r\n### Client logs\r\n```\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Current transaction state - IDLE\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Running transaction 1\r\n2024-05-02 12:13:29,453 DEBUG logging:103 SEND: 0x3 0x0 0x0 0x0 0x1\r\n2024-05-02 12:13:29,453 DEBUG logging:103 New Transaction state \"SENDING\"\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Changing transaction state from \"SENDING\" to \"WAITING FOR REPLY\"\r\n2024-05-02 12:13:29,453 DEBUG logging:103 Transaction failed. (The operation did not complete (read) (_ssl.c:2633))\r\nTraceback (most recent call last):\r\n File \"client.py\", line 43, in <module>\r\n main()\r\n File \"client.py\", line 30, in main\r\n result = client.read_holding_registers(args.address, args.count, slave=args.slave_id)\r\n File \".venv/lib/python3.9/site-packages/pymodbus/client/mixin.py\", line 107, in read_holding_registers\r\n return self.execute(\r\n File \".venv/lib/python3.9/site-packages/pymodbus/client/base.py\", line 396, in execute\r\n return self.transaction.execute(request)\r\n File \".venv/lib/python3.9/site-packages/pymodbus/transaction.py\", line 180, in execute\r\n response, last_exception = self._transact(\r\n File \".venv/lib/python3.9/site-packages/pymodbus/transaction.py\", line 326, in _transact\r\n result = self._recv(response_length, full)\r\n File \".venv/lib/python3.9/site-packages/pymodbus/transaction.py\", line 357, in _recv\r\n read_min = self.client.framer.recvPacket(min_size)\r\n File \".venv/lib/python3.9/site-packages/pymodbus/framer/base.py\", line 79, in recvPacket\r\n return self.client.recv(size)\r\n File \".venv/lib/python3.9/site-packages/pymodbus/client/tcp.py\", line 236, in recv\r\n if (recv_data := self.socket.recv(recv_size)) == b\"\":\r\n File \"/Users/vmartyniak/.pyenv/versions/3.9.9/lib/python3.9/ssl.py\", line 1227, in recv\r\n return self.read(buflen)\r\n File \"/Users/vmartyniak/.pyenv/versions/3.9.9/lib/python3.9/ssl.py\", line 1101, in read\r\n return self._sslobj.read(len)\r\nssl.SSLWantReadError: The operation did not complete (read) (_ssl.c:2633)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"TLS framer.\"\"\"\nimport struct\n\nfrom pymodbus.exceptions import (\n ModbusIOException,\n)\nfrom pymodbus.framer.old_framer_base import TLS_FRAME_HEADER, ModbusFramer\nfrom pymodbus.framer.tls import FramerTLS\n\n\n# 
--------------------------------------------------------------------------- #\n# Modbus TLS old framer\n# --------------------------------------------------------------------------- #\n\n\nclass ModbusTlsFramer(ModbusFramer):\n \"\"\"Modbus TLS Frame controller.\n\n No prefix MBAP header before decrypted PDU is used as a message frame for\n Modbus Security Application Protocol. It allows us to easily separate\n decrypted messages which is PDU as follows:\n\n [ Function Code] [ Data ]\n 1b Nb\n \"\"\"\n\n method = \"tls\"\n\n def __init__(self, decoder, client=None):\n \"\"\"Initialize a new instance of the framer.\n\n :param decoder: The decoder factory implementation to use\n \"\"\"\n super().__init__(decoder, client)\n self._hsize = 0x0\n self.message_handler = FramerTLS()\n\n def decode_data(self, data):\n \"\"\"Decode data.\"\"\"\n if len(data) > self._hsize:\n (fcode,) = struct.unpack(TLS_FRAME_HEADER, data[0 : self._hsize + 1])\n return {\"fcode\": fcode}\n return {}\n\n def frameProcessIncomingPacket(self, _single, callback, _slave, _tid=None, **kwargs):\n \"\"\"Process new packet pattern.\"\"\"\n # no slave id for Modbus Security Application Protocol\n\n while True:\n used_len, use_tid, dev_id, data = self.message_handler.decode(self._buffer)\n if not data:\n return\n self._header[\"uid\"] = dev_id\n self._header[\"tid\"] = use_tid\n self._header[\"pid\"] = 0\n\n if (result := self.decoder.decode(data)) is None:\n self.resetFrame()\n raise ModbusIOException(\"Unable to decode request\")\n self.populateResult(result)\n self._buffer = self._buffer[used_len:]\n self._header = {\"tid\": 0, \"pid\": 0, \"len\": 0, \"uid\": 0}\n callback(result) # defer or push to a thread?\n", "path": "pymodbus/framer/old_framer_tls.py"}], "after_files": [{"content": "\"\"\"TLS framer.\"\"\"\nimport struct\nfrom time import sleep\n\nfrom pymodbus.exceptions import (\n ModbusIOException,\n)\nfrom pymodbus.framer.old_framer_base import TLS_FRAME_HEADER, ModbusFramer\nfrom pymodbus.framer.tls import FramerTLS\n\n\n# --------------------------------------------------------------------------- #\n# Modbus TLS old framer\n# --------------------------------------------------------------------------- #\n\n\nclass ModbusTlsFramer(ModbusFramer):\n \"\"\"Modbus TLS Frame controller.\n\n No prefix MBAP header before decrypted PDU is used as a message frame for\n Modbus Security Application Protocol. 
It allows us to easily separate\n decrypted messages which is PDU as follows:\n\n [ Function Code] [ Data ]\n 1b Nb\n \"\"\"\n\n method = \"tls\"\n\n def __init__(self, decoder, client=None):\n \"\"\"Initialize a new instance of the framer.\n\n :param decoder: The decoder factory implementation to use\n \"\"\"\n super().__init__(decoder, client)\n self._hsize = 0x0\n self.message_handler = FramerTLS()\n\n def decode_data(self, data):\n \"\"\"Decode data.\"\"\"\n if len(data) > self._hsize:\n (fcode,) = struct.unpack(TLS_FRAME_HEADER, data[0 : self._hsize + 1])\n return {\"fcode\": fcode}\n return {}\n\n def recvPacket(self, size):\n \"\"\"Receive packet from the bus.\"\"\"\n sleep(0.5)\n return super().recvPacket(size)\n\n def frameProcessIncomingPacket(self, _single, callback, _slave, _tid=None, **kwargs):\n \"\"\"Process new packet pattern.\"\"\"\n # no slave id for Modbus Security Application Protocol\n\n while True:\n used_len, use_tid, dev_id, data = self.message_handler.decode(self._buffer)\n if not data:\n return\n self._header[\"uid\"] = dev_id\n self._header[\"tid\"] = use_tid\n self._header[\"pid\"] = 0\n\n if (result := self.decoder.decode(data)) is None:\n self.resetFrame()\n raise ModbusIOException(\"Unable to decode request\")\n self.populateResult(result)\n self._buffer = self._buffer[used_len:]\n self._header = {\"tid\": 0, \"pid\": 0, \"len\": 0, \"uid\": 0}\n callback(result) # defer or push to a thread?\n", "path": "pymodbus/framer/old_framer_tls.py"}]}
2840
200
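The pymodbus record above resolves the reported `ssl.SSLWantReadError` by sleeping half a second before each TLS read (see its golden diff). The exception itself has a general meaning: the TLS layer has no decrypted application data available yet, and the read should be retried once the underlying socket is readable. The sketch below is a generic retry helper written as an assumption about how such a read could be handled; it is not pymodbus code, `recv_tls` is an invented name, and the 5-second timeout is arbitrary.

```python
import select
import ssl


def recv_tls(sock: ssl.SSLSocket, size: int, timeout: float = 5.0) -> bytes:
    """Read up to `size` bytes from a TLS socket, retrying on SSLWantReadError."""
    while True:
        try:
            return sock.recv(size)
        except ssl.SSLWantReadError:
            # The TLS layer needs more ciphertext before it can return
            # plaintext: wait for the socket to become readable, then retry.
            readable, _, _ = select.select([sock], [], [], timeout)
            if not readable:
                raise TimeoutError("no TLS data received before timeout")
```

A fixed sleep, as in the merged patch, avoids touching the socket handling at all, while a readiness-based retry like this keeps latency low when the reply arrives quickly.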
gh_patches_debug_8913
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- I re-read the specification. I think the requirement is to json encode the non-string attribute values not just the sequence type. For example `_check_value(True)` returns `'True'` which should actually be `'true'`. I re-read the specification. I think the requirement is to json encode the non-string attribute values not just the sequence type. For example `_check_value(True)` returns `'True'` which should actually be `'true'`. _Originally posted by @srikanthccv in https://github.com/open-telemetry/opentelemetry-python/pull/2642#discussion_r859218726_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This library allows export of metrics data to `Prometheus <https://prometheus.io/>`_. 17 18 Usage 19 ----- 20 21 The **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_ 22 metrics to `Prometheus`_. 23 24 25 .. _Prometheus: https://prometheus.io/ 26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ 27 28 .. code:: python 29 30 from prometheus_client import start_http_server 31 32 from opentelemetry._metrics import get_meter_provider, set_meter_provider 33 from opentelemetry.exporter.prometheus import PrometheusMetricReader 34 from opentelemetry.sdk._metrics import MeterProvider 35 36 # Start Prometheus client 37 start_http_server(port=8000, addr="localhost") 38 39 # Exporter to export metrics to Prometheus 40 prefix = "MyAppPrefix" 41 reader = PrometheusMetricReader(prefix) 42 43 # Meter is responsible for creating and recording metrics 44 set_meter_provider(MeterProvider(metric_readers=[reader])) 45 meter = get_meter_provider().get_meter("myapp", "0.1.2") 46 47 counter = meter.create_counter( 48 "requests", 49 "requests", 50 "number of requests", 51 ) 52 53 # Labels are used to identify key-values that are associated with a specific 54 # metric that you want to record. 
These are useful for pre-aggregation and can 55 # be used to store custom dimensions pertaining to a metric 56 labels = {"environment": "staging"} 57 58 counter.add(25, labels) 59 input("Press any key to exit...") 60 61 API 62 --- 63 """ 64 65 from collections import deque 66 from itertools import chain 67 from json import dumps 68 from logging import getLogger 69 from re import IGNORECASE, UNICODE, compile 70 from typing import Iterable, Optional, Sequence, Tuple, Union 71 72 from prometheus_client import core 73 74 from opentelemetry.sdk._metrics.export import MetricReader 75 from opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum 76 77 _logger = getLogger(__name__) 78 79 80 def _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]: 81 buckets = [] 82 total_count = 0 83 for upper_bound, count in zip( 84 chain(metric.point.explicit_bounds, ["+Inf"]), 85 metric.point.bucket_counts, 86 ): 87 total_count += count 88 buckets.append((f"{upper_bound}", total_count)) 89 90 return buckets 91 92 93 class PrometheusMetricReader(MetricReader): 94 """Prometheus metric exporter for OpenTelemetry. 95 96 Args: 97 prefix: single-word application prefix relevant to the domain 98 the metric belongs to. 99 """ 100 101 def __init__(self, prefix: str = "") -> None: 102 super().__init__() 103 self._collector = _CustomCollector(prefix) 104 core.REGISTRY.register(self._collector) 105 self._collector._callback = self.collect 106 107 def _receive_metrics(self, metrics: Iterable[Metric]) -> None: 108 if metrics is None: 109 return 110 self._collector.add_metrics_data(metrics) 111 112 def shutdown(self) -> bool: 113 core.REGISTRY.unregister(self._collector) 114 return True 115 116 117 class _CustomCollector: 118 """_CustomCollector represents the Prometheus Collector object 119 120 See more: 121 https://github.com/prometheus/client_python#custom-collectors 122 """ 123 124 def __init__(self, prefix: str = ""): 125 self._prefix = prefix 126 self._callback = None 127 self._metrics_to_export = deque() 128 self._non_letters_digits_underscore_re = compile( 129 r"[^\w]", UNICODE | IGNORECASE 130 ) 131 132 def add_metrics_data(self, export_records: Sequence[Metric]) -> None: 133 """Add metrics to Prometheus data""" 134 self._metrics_to_export.append(export_records) 135 136 def collect(self) -> None: 137 """Collect fetches the metrics from OpenTelemetry 138 and delivers them as Prometheus Metrics. 139 Collect is invoked every time a ``prometheus.Gatherer`` is run 140 for example when the HTTP endpoint is invoked by Prometheus. 
141 """ 142 if self._callback is not None: 143 self._callback() 144 145 while self._metrics_to_export: 146 for export_record in self._metrics_to_export.popleft(): 147 prometheus_metric = self._translate_to_prometheus( 148 export_record 149 ) 150 if prometheus_metric is not None: 151 yield prometheus_metric 152 153 def _translate_to_prometheus( 154 self, metric: Metric 155 ) -> Optional[core.Metric]: 156 prometheus_metric = None 157 label_values = [] 158 label_keys = [] 159 for key, value in metric.attributes.items(): 160 label_keys.append(self._sanitize(key)) 161 label_values.append(self._check_value(value)) 162 163 metric_name = "" 164 if self._prefix != "": 165 metric_name = self._prefix + "_" 166 metric_name += self._sanitize(metric.name) 167 168 description = metric.description or "" 169 if isinstance(metric.point, Sum): 170 prometheus_metric = core.CounterMetricFamily( 171 name=metric_name, 172 documentation=description, 173 labels=label_keys, 174 unit=metric.unit, 175 ) 176 prometheus_metric.add_metric( 177 labels=label_values, value=metric.point.value 178 ) 179 elif isinstance(metric.point, Gauge): 180 prometheus_metric = core.GaugeMetricFamily( 181 name=metric_name, 182 documentation=description, 183 labels=label_keys, 184 unit=metric.unit, 185 ) 186 prometheus_metric.add_metric( 187 labels=label_values, value=metric.point.value 188 ) 189 elif isinstance(metric.point, Histogram): 190 value = metric.point.sum 191 prometheus_metric = core.HistogramMetricFamily( 192 name=metric_name, 193 documentation=description, 194 labels=label_keys, 195 unit=metric.unit, 196 ) 197 buckets = _convert_buckets(metric) 198 prometheus_metric.add_metric( 199 labels=label_values, buckets=buckets, sum_value=value 200 ) 201 else: 202 _logger.warning("Unsupported metric type. %s", type(metric.point)) 203 return prometheus_metric 204 205 def _sanitize(self, key: str) -> str: 206 """sanitize the given metric name or label according to Prometheus rule. 207 Replace all characters other than [A-Za-z0-9_] with '_'. 208 """ 209 return self._non_letters_digits_underscore_re.sub("_", key) 210 211 # pylint: disable=no-self-use 212 def _check_value(self, value: Union[int, float, str, Sequence]) -> str: 213 """Check the label value and return is appropriate representation""" 214 if not isinstance(value, str) and isinstance(value, Sequence): 215 return dumps(value, default=str) 216 return str(value) 217 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -211,6 +211,6 @@ # pylint: disable=no-self-use def _check_value(self, value: Union[int, float, str, Sequence]) -> str: """Check the label value and return is appropriate representation""" - if not isinstance(value, str) and isinstance(value, Sequence): + if not isinstance(value, str): return dumps(value, default=str) return str(value)
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n--- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n@@ -211,6 +211,6 @@\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n- if not isinstance(value, str) and isinstance(value, Sequence):\n+ if not isinstance(value, str):\n return dumps(value, default=str)\n return str(value)\n", "issue": "I re-read the specification. I think the requirement is to json encode the non-string attribute values not just the sequence type. For example `_check_value(True)` returns `'True'` which should actually be `'true'`.\nI re-read the specification. I think the requirement is to json encode the non-string attribute values not just the sequence type. For example `_check_value(True)` returns `'True'` which should actually be `'true'`.\r\n\r\n_Originally posted by @srikanthccv in https://github.com/open-telemetry/opentelemetry-python/pull/2642#discussion_r859218726_\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry._metrics import get_meter_provider, set_meter_provider\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.sdk._metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. 
These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nfrom collections import deque\nfrom itertools import chain\nfrom json import dumps\nfrom logging import getLogger\nfrom re import IGNORECASE, UNICODE, compile\nfrom typing import Iterable, Optional, Sequence, Tuple, Union\n\nfrom prometheus_client import core\n\nfrom opentelemetry.sdk._metrics.export import MetricReader\nfrom opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum\n\n_logger = getLogger(__name__)\n\n\ndef _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(metric.point.explicit_bounds, [\"+Inf\"]),\n metric.point.bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\n\n Args:\n prefix: single-word application prefix relevant to the domain\n the metric belongs to.\n \"\"\"\n\n def __init__(self, prefix: str = \"\") -> None:\n super().__init__()\n self._collector = _CustomCollector(prefix)\n core.REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(self, metrics: Iterable[Metric]) -> None:\n if metrics is None:\n return\n self._collector.add_metrics_data(metrics)\n\n def shutdown(self) -> bool:\n core.REGISTRY.unregister(self._collector)\n return True\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self, prefix: str = \"\"):\n self._prefix = prefix\n self._callback = None\n self._metrics_to_export = deque()\n self._non_letters_digits_underscore_re = compile(\n r\"[^\\w]\", UNICODE | IGNORECASE\n )\n\n def add_metrics_data(self, export_records: Sequence[Metric]) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_to_export.append(export_records)\n\n def collect(self) -> None:\n \"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n while self._metrics_to_export:\n for export_record in self._metrics_to_export.popleft():\n prometheus_metric = self._translate_to_prometheus(\n export_record\n )\n if prometheus_metric is not None:\n yield prometheus_metric\n\n def _translate_to_prometheus(\n self, metric: Metric\n ) -> Optional[core.Metric]:\n prometheus_metric = None\n label_values = []\n label_keys = []\n for key, value in metric.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(self._check_value(value))\n\n metric_name = \"\"\n if self._prefix != \"\":\n metric_name = self._prefix + \"_\"\n metric_name += self._sanitize(metric.name)\n\n description = metric.description or \"\"\n if isinstance(metric.point, Sum):\n prometheus_metric = core.CounterMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Gauge):\n prometheus_metric = 
core.GaugeMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Histogram):\n value = metric.point.sum\n prometheus_metric = core.HistogramMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n buckets = _convert_buckets(metric)\n prometheus_metric.add_metric(\n labels=label_values, buckets=buckets, sum_value=value\n )\n else:\n _logger.warning(\"Unsupported metric type. %s\", type(metric.point))\n return prometheus_metric\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n if not isinstance(value, str) and isinstance(value, Sequence):\n return dumps(value, default=str)\n return str(value)\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry._metrics import get_meter_provider, set_meter_provider\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.sdk._metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. 
These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nfrom collections import deque\nfrom itertools import chain\nfrom json import dumps\nfrom logging import getLogger\nfrom re import IGNORECASE, UNICODE, compile\nfrom typing import Iterable, Optional, Sequence, Tuple, Union\n\nfrom prometheus_client import core\n\nfrom opentelemetry.sdk._metrics.export import MetricReader\nfrom opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum\n\n_logger = getLogger(__name__)\n\n\ndef _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(metric.point.explicit_bounds, [\"+Inf\"]),\n metric.point.bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\n\n Args:\n prefix: single-word application prefix relevant to the domain\n the metric belongs to.\n \"\"\"\n\n def __init__(self, prefix: str = \"\") -> None:\n super().__init__()\n self._collector = _CustomCollector(prefix)\n core.REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(self, metrics: Iterable[Metric]) -> None:\n if metrics is None:\n return\n self._collector.add_metrics_data(metrics)\n\n def shutdown(self) -> bool:\n core.REGISTRY.unregister(self._collector)\n return True\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self, prefix: str = \"\"):\n self._prefix = prefix\n self._callback = None\n self._metrics_to_export = deque()\n self._non_letters_digits_underscore_re = compile(\n r\"[^\\w]\", UNICODE | IGNORECASE\n )\n\n def add_metrics_data(self, export_records: Sequence[Metric]) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_to_export.append(export_records)\n\n def collect(self) -> None:\n \"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n while self._metrics_to_export:\n for export_record in self._metrics_to_export.popleft():\n prometheus_metric = self._translate_to_prometheus(\n export_record\n )\n if prometheus_metric is not None:\n yield prometheus_metric\n\n def _translate_to_prometheus(\n self, metric: Metric\n ) -> Optional[core.Metric]:\n prometheus_metric = None\n label_values = []\n label_keys = []\n for key, value in metric.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(self._check_value(value))\n\n metric_name = \"\"\n if self._prefix != \"\":\n metric_name = self._prefix + \"_\"\n metric_name += self._sanitize(metric.name)\n\n description = metric.description or \"\"\n if isinstance(metric.point, Sum):\n prometheus_metric = core.CounterMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Gauge):\n prometheus_metric = 
core.GaugeMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Histogram):\n value = metric.point.sum\n prometheus_metric = core.HistogramMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n buckets = _convert_buckets(metric)\n prometheus_metric.add_metric(\n labels=label_values, buckets=buckets, sum_value=value\n )\n else:\n _logger.warning(\"Unsupported metric type. %s\", type(metric.point))\n return prometheus_metric\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n if not isinstance(value, str):\n return dumps(value, default=str)\n return str(value)\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}]}
num_tokens: 2,491
num_tokens_diff: 189
problem_id: gh_patches_debug_30826
source: rasdani/github-patches
task_type: git_diff
in_source_id: freedomofpress__securedrop-4133
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [xenial] Verify Trusty backup -> Xenial recovery story We should ensure that a SecureDrop backup completed on 14.04 can successfully be recovered on 16.04. Whether we ask admins to perform this step manually, or whether we automate it, it may be a required part of the Xenial migration and will certainly be highly recommended. If clean upgrades to Xenial are not yet implemented one should complete this ticket by following these steps instead: 1. Create a backup on 14.04 server 2. Create fresh install on 16.04 and then attempt to run the restore Part of #3204, may result in follow-up issues. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/roles/restore/files/restore.py` Content: ``` 1 #!/usr/bin/python2.7 2 """ 3 This script and backup archive should be copied to the App server and run by 4 the Ansible playbook. When run (as root), it restores the contents of the 0.3 5 backup file to the machine it's run on. 6 7 python restore.py sd-backup-TIMESTAMP.tar.gz 8 """ 9 10 import os 11 import subprocess 12 import sys 13 import tarfile 14 15 16 def verify_args(): 17 usage = """ 18 Usage: restore.py <backup file> 19 20 <backup file> Path to a SecureDrop 0.3 backup created by backup.py" 21 """ 22 if len(sys.argv) != 2: 23 print(usage) 24 sys.exit(1) 25 26 if not os.path.exists(sys.argv[1]): 27 print("<backup file> '{}' not found".format(sys.argv[1])) 28 sys.exit(1) 29 30 if os.geteuid() != 0: 31 print("This program must be run as root!") 32 sys.exit(1) 33 34 35 def main(): 36 verify_args() 37 38 with tarfile.open(sys.argv[1], 'r:*') as backup: 39 # This assumes that both the old installation (source of the backup) 40 # and the new installation (destination of the restore) used the 41 # default paths for various locations. 42 backup.extractall(path='/') 43 44 # Reload Tor and the web server so they pick up the new configuration 45 # If the process exits with a non-zero return code, raises an exception. 46 subprocess.check_call(['service', 'apache2', 'restart']) 47 subprocess.check_call(['service', 'tor', 'reload']) 48 # Apply database migrations (if backed-up version < version to restore) 49 subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code']) 50 51 52 if __name__ == "__main__": 53 main() 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py --- a/install_files/ansible-base/roles/restore/files/restore.py +++ b/install_files/ansible-base/roles/restore/files/restore.py @@ -8,6 +8,7 @@ """ import os +import shutil import subprocess import sys import tarfile @@ -35,18 +36,29 @@ def main(): verify_args() + # Remove the /var/lib/tor/services directories to purge values that may have been + # generated by running the ansible playbooks + for d in ['journalist', 'source']: + full_path = os.path.join('/var/lib/tor/services', d) + if os.path.exists(full_path): + shutil.rmtree(full_path) + with tarfile.open(sys.argv[1], 'r:*') as backup: # This assumes that both the old installation (source of the backup) # and the new installation (destination of the restore) used the # default paths for various locations. backup.extractall(path='/') + # Apply database migrations (if backed-up version < version to restore) + subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code']) + + # Update the configs + subprocess.check_call(['dpkg-reconfigure', 'securedrop-config']) + # Reload Tor and the web server so they pick up the new configuration # If the process exits with a non-zero return code, raises an exception. subprocess.check_call(['service', 'apache2', 'restart']) subprocess.check_call(['service', 'tor', 'reload']) - # Apply database migrations (if backed-up version < version to restore) - subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code']) if __name__ == "__main__":
{"golden_diff": "diff --git a/install_files/ansible-base/roles/restore/files/restore.py b/install_files/ansible-base/roles/restore/files/restore.py\n--- a/install_files/ansible-base/roles/restore/files/restore.py\n+++ b/install_files/ansible-base/roles/restore/files/restore.py\n@@ -8,6 +8,7 @@\n \"\"\"\n \n import os\n+import shutil\n import subprocess\n import sys\n import tarfile\n@@ -35,18 +36,29 @@\n def main():\n verify_args()\n \n+ # Remove the /var/lib/tor/services directories to purge values that may have been\n+ # generated by running the ansible playbooks\n+ for d in ['journalist', 'source']:\n+ full_path = os.path.join('/var/lib/tor/services', d)\n+ if os.path.exists(full_path):\n+ shutil.rmtree(full_path)\n+\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n \n+ # Apply database migrations (if backed-up version < version to restore)\n+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n+\n+ # Update the configs\n+ subprocess.check_call(['dpkg-reconfigure', 'securedrop-config'])\n+\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n- # Apply database migrations (if backed-up version < version to restore)\n- subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n \n \n if __name__ == \"__main__\":\n", "issue": "[xenial] Verify Trusty backup -> Xenial recovery story\nWe should ensure that a SecureDrop backup completed on 14.04 can successfully be recovered on 16.04. Whether we ask admins to perform this step manually, or whether we automate it, it may be a required part of the Xenial migration and will certainly be highly recommended.\r\n\r\nIf clean upgrades to Xenial are not yet implemented one should complete this ticket by following these steps instead:\r\n\r\n1. Create a backup on 14.04 server\r\n2. Create fresh install on 16.04 and then attempt to run the restore\r\n\r\nPart of #3204, may result in follow-up issues.\n", "before_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. 
When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n # Apply database migrations (if backed-up version < version to restore)\n subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}], "after_files": [{"content": "#!/usr/bin/python2.7\n\"\"\"\nThis script and backup archive should be copied to the App server and run by\nthe Ansible playbook. When run (as root), it restores the contents of the 0.3\nbackup file to the machine it's run on.\n\npython restore.py sd-backup-TIMESTAMP.tar.gz\n\"\"\"\n\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tarfile\n\n\ndef verify_args():\n usage = \"\"\"\nUsage: restore.py <backup file>\n\n <backup file> Path to a SecureDrop 0.3 backup created by backup.py\"\n \"\"\"\n if len(sys.argv) != 2:\n print(usage)\n sys.exit(1)\n\n if not os.path.exists(sys.argv[1]):\n print(\"<backup file> '{}' not found\".format(sys.argv[1]))\n sys.exit(1)\n\n if os.geteuid() != 0:\n print(\"This program must be run as root!\")\n sys.exit(1)\n\n\ndef main():\n verify_args()\n\n # Remove the /var/lib/tor/services directories to purge values that may have been\n # generated by running the ansible playbooks\n for d in ['journalist', 'source']:\n full_path = os.path.join('/var/lib/tor/services', d)\n if os.path.exists(full_path):\n shutil.rmtree(full_path)\n\n with tarfile.open(sys.argv[1], 'r:*') as backup:\n # This assumes that both the old installation (source of the backup)\n # and the new installation (destination of the restore) used the\n # default paths for various locations.\n backup.extractall(path='/')\n\n # Apply database migrations (if backed-up version < version to restore)\n subprocess.check_call(['dpkg-reconfigure', 'securedrop-app-code'])\n\n # Update the configs\n subprocess.check_call(['dpkg-reconfigure', 'securedrop-config'])\n\n # Reload Tor and the web server so they pick up the new configuration\n # If the process exits with a non-zero return code, raises an exception.\n subprocess.check_call(['service', 'apache2', 'restart'])\n subprocess.check_call(['service', 'tor', 'reload'])\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install_files/ansible-base/roles/restore/files/restore.py"}]}
901
415
gh_patches_debug_30080
rasdani/github-patches
git_diff
zulip__zulip-21977
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update "notifications language" setting to use the "Default language" picker modal The settings UI for picking the "notifications language" (previously "Default language for new users"; see #20866) should use the much nicer language picker component that we have for an individual user's language setting (i.e. this, rather than the simple dropdown). ![image](https://user-images.githubusercontent.com/2746074/165846747-f381f28d-081d-4eb1-978c-6dc967156acc.png) I haven't looked at how complex this is, but it seems clearly better to reuse that component. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `version.py` Content: ``` 1 import os 2 3 ZULIP_VERSION = "6.0-dev+git" 4 5 # Add information on number of commits and commit hash to version, if available 6 zulip_git_version_file = os.path.join( 7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version" 8 ) 9 lines = [ZULIP_VERSION, ""] 10 if os.path.exists(zulip_git_version_file): 11 with open(zulip_git_version_file) as f: 12 lines = f.readlines() + ["", ""] 13 ZULIP_VERSION = lines.pop(0).strip() 14 ZULIP_MERGE_BASE = lines.pop(0).strip() 15 16 LATEST_MAJOR_VERSION = "5.0" 17 LATEST_RELEASE_VERSION = "5.2" 18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2022/03/29/zulip-5-0-released/" 19 20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be 21 # prevented from connecting to the Zulip server. Versions above 22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have 23 # a banner at the top of the page asking the user to upgrade. 24 DESKTOP_MINIMUM_VERSION = "5.2.0" 25 DESKTOP_WARNING_VERSION = "5.4.3" 26 27 # Bump the API_FEATURE_LEVEL whenever an API change is made 28 # that clients might want to condition on. If we forget at 29 # the time we make the change, then bump it later as soon 30 # as we notice; clients using API_FEATURE_LEVEL will just not 31 # use the new feature/API until the bump. 32 # 33 # Changes should be accompanied by documentation explaining what the 34 # new level means in templates/zerver/api/changelog.md, as well as 35 # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`. 36 API_FEATURE_LEVEL = 132 37 38 # Bump the minor PROVISION_VERSION to indicate that folks should provision 39 # only when going from an old version of the code to a newer version. Bump 40 # the major version to indicate that folks should provision in both 41 # directions. 42 43 # Typically, 44 # * adding a dependency only requires a minor version bump; 45 # * removing a dependency requires a major version bump; 46 # * upgrading a dependency requires a major version bump, unless the 47 # upgraded dependency is backwards compatible with all of our 48 # historical commits sharing the same major version, in which case a 49 # minor version bump suffices. 
50 51 PROVISION_VERSION = "190.0" 52 ``` Path: `zerver/lib/home.py` Content: ``` 1 import calendar 2 import time 3 from dataclasses import dataclass 4 from typing import Any, Dict, List, Optional, Tuple 5 6 from django.conf import settings 7 from django.http import HttpRequest 8 from django.utils import translation 9 from two_factor.utils import default_device 10 11 from zerver.context_processors import get_apps_page_url 12 from zerver.lib.events import do_events_register 13 from zerver.lib.i18n import ( 14 get_and_set_request_language, 15 get_language_list, 16 get_language_translation_data, 17 ) 18 from zerver.lib.realm_description import get_realm_rendered_description 19 from zerver.lib.request import RequestNotes 20 from zerver.models import Message, Realm, Stream, UserProfile 21 from zerver.views.message_flags import get_latest_update_message_flag_activity 22 23 24 @dataclass 25 class BillingInfo: 26 show_billing: bool 27 show_plans: bool 28 29 30 @dataclass 31 class UserPermissionInfo: 32 color_scheme: int 33 is_guest: bool 34 is_realm_admin: bool 35 is_realm_owner: bool 36 show_webathena: bool 37 38 39 def get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]: 40 if user_profile is None: 41 return time.time() 42 43 user_activity = get_latest_update_message_flag_activity(user_profile) 44 if user_activity is None: 45 return None 46 47 return calendar.timegm(user_activity.last_visit.utctimetuple()) 48 49 50 def get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]: 51 bot_types: List[Dict[str, object]] = [] 52 if user_profile is None: 53 return bot_types 54 55 for type_id, name in UserProfile.BOT_TYPES.items(): 56 bot_types.append( 57 dict( 58 type_id=type_id, 59 name=name, 60 allowed=type_id in user_profile.allowed_bot_types, 61 ) 62 ) 63 return bot_types 64 65 66 def promote_sponsoring_zulip_in_realm(realm: Realm) -> bool: 67 if not settings.PROMOTE_SPONSORING_ZULIP: 68 return False 69 70 # If PROMOTE_SPONSORING_ZULIP is enabled, advertise sponsoring 71 # Zulip in the gear menu of non-paying organizations. 
72 return realm.plan_type in [Realm.PLAN_TYPE_STANDARD_FREE, Realm.PLAN_TYPE_SELF_HOSTED] 73 74 75 def get_billing_info(user_profile: Optional[UserProfile]) -> BillingInfo: 76 show_billing = False 77 show_plans = False 78 if settings.CORPORATE_ENABLED and user_profile is not None: 79 if user_profile.has_billing_access: 80 from corporate.models import CustomerPlan, get_customer_by_realm 81 82 customer = get_customer_by_realm(user_profile.realm) 83 if customer is not None: 84 if customer.sponsorship_pending: 85 show_billing = True 86 elif CustomerPlan.objects.filter(customer=customer).exists(): 87 show_billing = True 88 89 if not user_profile.is_guest and user_profile.realm.plan_type == Realm.PLAN_TYPE_LIMITED: 90 show_plans = True 91 92 return BillingInfo( 93 show_billing=show_billing, 94 show_plans=show_plans, 95 ) 96 97 98 def get_user_permission_info(user_profile: Optional[UserProfile]) -> UserPermissionInfo: 99 if user_profile is not None: 100 return UserPermissionInfo( 101 color_scheme=user_profile.color_scheme, 102 is_guest=user_profile.is_guest, 103 is_realm_owner=user_profile.is_realm_owner, 104 is_realm_admin=user_profile.is_realm_admin, 105 show_webathena=user_profile.realm.webathena_enabled, 106 ) 107 else: 108 return UserPermissionInfo( 109 color_scheme=UserProfile.COLOR_SCHEME_AUTOMATIC, 110 is_guest=False, 111 is_realm_admin=False, 112 is_realm_owner=False, 113 show_webathena=False, 114 ) 115 116 117 def build_page_params_for_home_page_load( 118 request: HttpRequest, 119 user_profile: Optional[UserProfile], 120 realm: Realm, 121 insecure_desktop_app: bool, 122 narrow: List[List[str]], 123 narrow_stream: Optional[Stream], 124 narrow_topic: Optional[str], 125 first_in_realm: bool, 126 prompt_for_invites: bool, 127 needs_tutorial: bool, 128 ) -> Tuple[int, Dict[str, Any]]: 129 """ 130 This function computes page_params for when we load the home page. 131 132 The page_params data structure gets sent to the client. 133 """ 134 client_capabilities = { 135 "notification_settings_null": True, 136 "bulk_message_deletion": True, 137 "user_avatar_url_field_optional": True, 138 "stream_typing_notifications": False, # Set this to True when frontend support is implemented. 139 "user_settings_object": True, 140 } 141 142 if user_profile is not None: 143 client = RequestNotes.get_notes(request).client 144 assert client is not None 145 register_ret = do_events_register( 146 user_profile, 147 realm, 148 client, 149 apply_markdown=True, 150 client_gravatar=True, 151 slim_presence=True, 152 client_capabilities=client_capabilities, 153 narrow=narrow, 154 include_streams=False, 155 ) 156 default_language = register_ret["user_settings"]["default_language"] 157 else: 158 # The spectator client will be fetching the /register response 159 # for spectators via the API. But we still need to set the 160 # values not presence in that object. 161 register_ret = { 162 "queue_id": None, 163 } 164 default_language = realm.default_language 165 166 furthest_read_time = get_furthest_read_time(user_profile) 167 168 request_language = get_and_set_request_language( 169 request, 170 default_language, 171 translation.get_language_from_path(request.path_info), 172 ) 173 174 two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None 175 billing_info = get_billing_info(user_profile) 176 user_permission_info = get_user_permission_info(user_profile) 177 178 # Pass parameters to the client-side JavaScript code. 179 # These end up in a JavaScript Object named 'page_params'. 
180 page_params = dict( 181 ## Server settings. 182 test_suite=settings.TEST_SUITE, 183 insecure_desktop_app=insecure_desktop_app, 184 login_page=settings.HOME_NOT_LOGGED_IN, 185 warn_no_email=settings.WARN_NO_EMAIL, 186 search_pills_enabled=settings.SEARCH_PILLS_ENABLED, 187 # Only show marketing email settings if on Zulip Cloud 188 corporate_enabled=settings.CORPORATE_ENABLED, 189 ## Misc. extra data. 190 language_list=get_language_list(), 191 needs_tutorial=needs_tutorial, 192 first_in_realm=first_in_realm, 193 prompt_for_invites=prompt_for_invites, 194 furthest_read_time=furthest_read_time, 195 bot_types=get_bot_types(user_profile), 196 two_fa_enabled=two_fa_enabled, 197 apps_page_url=get_apps_page_url(), 198 show_billing=billing_info.show_billing, 199 promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm), 200 show_plans=billing_info.show_plans, 201 show_webathena=user_permission_info.show_webathena, 202 # Adding two_fa_enabled as condition saves us 3 queries when 203 # 2FA is not enabled. 204 two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)), 205 is_spectator=user_profile is None, 206 # There is no event queue for spectators since 207 # events support for spectators is not implemented yet. 208 no_event_queue=user_profile is None, 209 ) 210 211 for field_name in register_ret.keys(): 212 page_params[field_name] = register_ret[field_name] 213 214 if narrow_stream is not None: 215 # In narrow_stream context, initial pointer is just latest message 216 recipient = narrow_stream.recipient 217 try: 218 max_message_id = ( 219 Message.objects.filter(recipient=recipient).order_by("id").reverse()[0].id 220 ) 221 except IndexError: 222 max_message_id = -1 223 page_params["narrow_stream"] = narrow_stream.name 224 if narrow_topic is not None: 225 page_params["narrow_topic"] = narrow_topic 226 page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow] 227 page_params["max_message_id"] = max_message_id 228 assert isinstance(page_params["user_settings"], dict) 229 page_params["user_settings"]["enable_desktop_notifications"] = False 230 231 page_params["translation_data"] = get_language_translation_data(request_language) 232 233 if user_profile is None: 234 # Get rendered version of realm description which is displayed in right 235 # sidebar for spectator. 236 page_params["realm_rendered_description"] = get_realm_rendered_description(realm) 237 238 return register_ret["queue_id"], page_params 239 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -48,4 +48,4 @@ # historical commits sharing the same major version, in which case a # minor version bump suffices. -PROVISION_VERSION = "190.0" +PROVISION_VERSION = "190.1" diff --git a/zerver/lib/home.py b/zerver/lib/home.py --- a/zerver/lib/home.py +++ b/zerver/lib/home.py @@ -163,14 +163,16 @@ } default_language = realm.default_language - furthest_read_time = get_furthest_read_time(user_profile) - - request_language = get_and_set_request_language( - request, - default_language, - translation.get_language_from_path(request.path_info), - ) + if user_profile is None: + request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language) + else: + request_language = get_and_set_request_language( + request, + default_language, + translation.get_language_from_path(request.path_info), + ) + furthest_read_time = get_furthest_read_time(user_profile) two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None billing_info = get_billing_info(user_profile) user_permission_info = get_user_permission_info(user_profile) @@ -234,5 +236,6 @@ # Get rendered version of realm description which is displayed in right # sidebar for spectator. page_params["realm_rendered_description"] = get_realm_rendered_description(realm) + page_params["language_cookie_name"] = settings.LANGUAGE_COOKIE_NAME return register_ret["queue_id"], page_params
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -48,4 +48,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = \"190.0\"\n+PROVISION_VERSION = \"190.1\"\ndiff --git a/zerver/lib/home.py b/zerver/lib/home.py\n--- a/zerver/lib/home.py\n+++ b/zerver/lib/home.py\n@@ -163,14 +163,16 @@\n }\n default_language = realm.default_language\n \n- furthest_read_time = get_furthest_read_time(user_profile)\n-\n- request_language = get_and_set_request_language(\n- request,\n- default_language,\n- translation.get_language_from_path(request.path_info),\n- )\n+ if user_profile is None:\n+ request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language)\n+ else:\n+ request_language = get_and_set_request_language(\n+ request,\n+ default_language,\n+ translation.get_language_from_path(request.path_info),\n+ )\n \n+ furthest_read_time = get_furthest_read_time(user_profile)\n two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None\n billing_info = get_billing_info(user_profile)\n user_permission_info = get_user_permission_info(user_profile)\n@@ -234,5 +236,6 @@\n # Get rendered version of realm description which is displayed in right\n # sidebar for spectator.\n page_params[\"realm_rendered_description\"] = get_realm_rendered_description(realm)\n+ page_params[\"language_cookie_name\"] = settings.LANGUAGE_COOKIE_NAME\n \n return register_ret[\"queue_id\"], page_params\n", "issue": "Update \"notifications language\" setting to use the \"Default language\" picker modal\nThe settings UI for picking the \"notifications language\" (previously \"Default language for new users\"; see #20866) should use the much nicer language picker component that we have for an individual user's language setting (i.e. this, rather than the simple dropdown).\r\n\r\n![image](https://user-images.githubusercontent.com/2746074/165846747-f381f28d-081d-4eb1-978c-6dc967156acc.png)\r\n\r\nI haven't looked at how complex this is, but it seems clearly better to reuse that component.\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"6.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"5.0\"\nLATEST_RELEASE_VERSION = \"5.2\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2022/03/29/zulip-5-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. 
If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 132\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"190.0\"\n", "path": "version.py"}, {"content": "import calendar\nimport time\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom django.conf import settings\nfrom django.http import HttpRequest\nfrom django.utils import translation\nfrom two_factor.utils import default_device\n\nfrom zerver.context_processors import get_apps_page_url\nfrom zerver.lib.events import do_events_register\nfrom zerver.lib.i18n import (\n get_and_set_request_language,\n get_language_list,\n get_language_translation_data,\n)\nfrom zerver.lib.realm_description import get_realm_rendered_description\nfrom zerver.lib.request import RequestNotes\nfrom zerver.models import Message, Realm, Stream, UserProfile\nfrom zerver.views.message_flags import get_latest_update_message_flag_activity\n\n\n@dataclass\nclass BillingInfo:\n show_billing: bool\n show_plans: bool\n\n\n@dataclass\nclass UserPermissionInfo:\n color_scheme: int\n is_guest: bool\n is_realm_admin: bool\n is_realm_owner: bool\n show_webathena: bool\n\n\ndef get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]:\n if user_profile is None:\n return time.time()\n\n user_activity = get_latest_update_message_flag_activity(user_profile)\n if user_activity is None:\n return None\n\n return calendar.timegm(user_activity.last_visit.utctimetuple())\n\n\ndef get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]:\n bot_types: List[Dict[str, object]] = []\n if user_profile is None:\n return bot_types\n\n for type_id, name in UserProfile.BOT_TYPES.items():\n bot_types.append(\n dict(\n type_id=type_id,\n name=name,\n allowed=type_id in user_profile.allowed_bot_types,\n )\n )\n return bot_types\n\n\ndef promote_sponsoring_zulip_in_realm(realm: Realm) -> bool:\n if not settings.PROMOTE_SPONSORING_ZULIP:\n return False\n\n # If PROMOTE_SPONSORING_ZULIP is enabled, advertise sponsoring\n # Zulip in the gear menu of non-paying organizations.\n return realm.plan_type in [Realm.PLAN_TYPE_STANDARD_FREE, Realm.PLAN_TYPE_SELF_HOSTED]\n\n\ndef get_billing_info(user_profile: Optional[UserProfile]) -> BillingInfo:\n show_billing = False\n show_plans = False\n if settings.CORPORATE_ENABLED and user_profile is not None:\n if user_profile.has_billing_access:\n from corporate.models import CustomerPlan, get_customer_by_realm\n\n customer = get_customer_by_realm(user_profile.realm)\n if customer is not None:\n if customer.sponsorship_pending:\n show_billing = True\n 
elif CustomerPlan.objects.filter(customer=customer).exists():\n show_billing = True\n\n if not user_profile.is_guest and user_profile.realm.plan_type == Realm.PLAN_TYPE_LIMITED:\n show_plans = True\n\n return BillingInfo(\n show_billing=show_billing,\n show_plans=show_plans,\n )\n\n\ndef get_user_permission_info(user_profile: Optional[UserProfile]) -> UserPermissionInfo:\n if user_profile is not None:\n return UserPermissionInfo(\n color_scheme=user_profile.color_scheme,\n is_guest=user_profile.is_guest,\n is_realm_owner=user_profile.is_realm_owner,\n is_realm_admin=user_profile.is_realm_admin,\n show_webathena=user_profile.realm.webathena_enabled,\n )\n else:\n return UserPermissionInfo(\n color_scheme=UserProfile.COLOR_SCHEME_AUTOMATIC,\n is_guest=False,\n is_realm_admin=False,\n is_realm_owner=False,\n show_webathena=False,\n )\n\n\ndef build_page_params_for_home_page_load(\n request: HttpRequest,\n user_profile: Optional[UserProfile],\n realm: Realm,\n insecure_desktop_app: bool,\n narrow: List[List[str]],\n narrow_stream: Optional[Stream],\n narrow_topic: Optional[str],\n first_in_realm: bool,\n prompt_for_invites: bool,\n needs_tutorial: bool,\n) -> Tuple[int, Dict[str, Any]]:\n \"\"\"\n This function computes page_params for when we load the home page.\n\n The page_params data structure gets sent to the client.\n \"\"\"\n client_capabilities = {\n \"notification_settings_null\": True,\n \"bulk_message_deletion\": True,\n \"user_avatar_url_field_optional\": True,\n \"stream_typing_notifications\": False, # Set this to True when frontend support is implemented.\n \"user_settings_object\": True,\n }\n\n if user_profile is not None:\n client = RequestNotes.get_notes(request).client\n assert client is not None\n register_ret = do_events_register(\n user_profile,\n realm,\n client,\n apply_markdown=True,\n client_gravatar=True,\n slim_presence=True,\n client_capabilities=client_capabilities,\n narrow=narrow,\n include_streams=False,\n )\n default_language = register_ret[\"user_settings\"][\"default_language\"]\n else:\n # The spectator client will be fetching the /register response\n # for spectators via the API. But we still need to set the\n # values not presence in that object.\n register_ret = {\n \"queue_id\": None,\n }\n default_language = realm.default_language\n\n furthest_read_time = get_furthest_read_time(user_profile)\n\n request_language = get_and_set_request_language(\n request,\n default_language,\n translation.get_language_from_path(request.path_info),\n )\n\n two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None\n billing_info = get_billing_info(user_profile)\n user_permission_info = get_user_permission_info(user_profile)\n\n # Pass parameters to the client-side JavaScript code.\n # These end up in a JavaScript Object named 'page_params'.\n page_params = dict(\n ## Server settings.\n test_suite=settings.TEST_SUITE,\n insecure_desktop_app=insecure_desktop_app,\n login_page=settings.HOME_NOT_LOGGED_IN,\n warn_no_email=settings.WARN_NO_EMAIL,\n search_pills_enabled=settings.SEARCH_PILLS_ENABLED,\n # Only show marketing email settings if on Zulip Cloud\n corporate_enabled=settings.CORPORATE_ENABLED,\n ## Misc. 
extra data.\n language_list=get_language_list(),\n needs_tutorial=needs_tutorial,\n first_in_realm=first_in_realm,\n prompt_for_invites=prompt_for_invites,\n furthest_read_time=furthest_read_time,\n bot_types=get_bot_types(user_profile),\n two_fa_enabled=two_fa_enabled,\n apps_page_url=get_apps_page_url(),\n show_billing=billing_info.show_billing,\n promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm),\n show_plans=billing_info.show_plans,\n show_webathena=user_permission_info.show_webathena,\n # Adding two_fa_enabled as condition saves us 3 queries when\n # 2FA is not enabled.\n two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)),\n is_spectator=user_profile is None,\n # There is no event queue for spectators since\n # events support for spectators is not implemented yet.\n no_event_queue=user_profile is None,\n )\n\n for field_name in register_ret.keys():\n page_params[field_name] = register_ret[field_name]\n\n if narrow_stream is not None:\n # In narrow_stream context, initial pointer is just latest message\n recipient = narrow_stream.recipient\n try:\n max_message_id = (\n Message.objects.filter(recipient=recipient).order_by(\"id\").reverse()[0].id\n )\n except IndexError:\n max_message_id = -1\n page_params[\"narrow_stream\"] = narrow_stream.name\n if narrow_topic is not None:\n page_params[\"narrow_topic\"] = narrow_topic\n page_params[\"narrow\"] = [dict(operator=term[0], operand=term[1]) for term in narrow]\n page_params[\"max_message_id\"] = max_message_id\n assert isinstance(page_params[\"user_settings\"], dict)\n page_params[\"user_settings\"][\"enable_desktop_notifications\"] = False\n\n page_params[\"translation_data\"] = get_language_translation_data(request_language)\n\n if user_profile is None:\n # Get rendered version of realm description which is displayed in right\n # sidebar for spectator.\n page_params[\"realm_rendered_description\"] = get_realm_rendered_description(realm)\n\n return register_ret[\"queue_id\"], page_params\n", "path": "zerver/lib/home.py"}], "after_files": [{"content": "import os\n\nZULIP_VERSION = \"6.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"5.0\"\nLATEST_RELEASE_VERSION = \"5.2\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2022/03/29/zulip-5-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. 
If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 132\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"190.1\"\n", "path": "version.py"}, {"content": "import calendar\nimport time\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom django.conf import settings\nfrom django.http import HttpRequest\nfrom django.utils import translation\nfrom two_factor.utils import default_device\n\nfrom zerver.context_processors import get_apps_page_url\nfrom zerver.lib.events import do_events_register\nfrom zerver.lib.i18n import (\n get_and_set_request_language,\n get_language_list,\n get_language_translation_data,\n)\nfrom zerver.lib.realm_description import get_realm_rendered_description\nfrom zerver.lib.request import RequestNotes\nfrom zerver.models import Message, Realm, Stream, UserProfile\nfrom zerver.views.message_flags import get_latest_update_message_flag_activity\n\n\n@dataclass\nclass BillingInfo:\n show_billing: bool\n show_plans: bool\n\n\n@dataclass\nclass UserPermissionInfo:\n color_scheme: int\n is_guest: bool\n is_realm_admin: bool\n is_realm_owner: bool\n show_webathena: bool\n\n\ndef get_furthest_read_time(user_profile: Optional[UserProfile]) -> Optional[float]:\n if user_profile is None:\n return time.time()\n\n user_activity = get_latest_update_message_flag_activity(user_profile)\n if user_activity is None:\n return None\n\n return calendar.timegm(user_activity.last_visit.utctimetuple())\n\n\ndef get_bot_types(user_profile: Optional[UserProfile]) -> List[Dict[str, object]]:\n bot_types: List[Dict[str, object]] = []\n if user_profile is None:\n return bot_types\n\n for type_id, name in UserProfile.BOT_TYPES.items():\n bot_types.append(\n dict(\n type_id=type_id,\n name=name,\n allowed=type_id in user_profile.allowed_bot_types,\n )\n )\n return bot_types\n\n\ndef promote_sponsoring_zulip_in_realm(realm: Realm) -> bool:\n if not settings.PROMOTE_SPONSORING_ZULIP:\n return False\n\n # If PROMOTE_SPONSORING_ZULIP is enabled, advertise sponsoring\n # Zulip in the gear menu of non-paying organizations.\n return realm.plan_type in [Realm.PLAN_TYPE_STANDARD_FREE, Realm.PLAN_TYPE_SELF_HOSTED]\n\n\ndef get_billing_info(user_profile: Optional[UserProfile]) -> BillingInfo:\n show_billing = False\n show_plans = False\n if settings.CORPORATE_ENABLED and user_profile is not None:\n if user_profile.has_billing_access:\n from corporate.models import CustomerPlan, get_customer_by_realm\n\n customer = get_customer_by_realm(user_profile.realm)\n if customer is not None:\n if customer.sponsorship_pending:\n show_billing = True\n 
elif CustomerPlan.objects.filter(customer=customer).exists():\n show_billing = True\n\n if not user_profile.is_guest and user_profile.realm.plan_type == Realm.PLAN_TYPE_LIMITED:\n show_plans = True\n\n return BillingInfo(\n show_billing=show_billing,\n show_plans=show_plans,\n )\n\n\ndef get_user_permission_info(user_profile: Optional[UserProfile]) -> UserPermissionInfo:\n if user_profile is not None:\n return UserPermissionInfo(\n color_scheme=user_profile.color_scheme,\n is_guest=user_profile.is_guest,\n is_realm_owner=user_profile.is_realm_owner,\n is_realm_admin=user_profile.is_realm_admin,\n show_webathena=user_profile.realm.webathena_enabled,\n )\n else:\n return UserPermissionInfo(\n color_scheme=UserProfile.COLOR_SCHEME_AUTOMATIC,\n is_guest=False,\n is_realm_admin=False,\n is_realm_owner=False,\n show_webathena=False,\n )\n\n\ndef build_page_params_for_home_page_load(\n request: HttpRequest,\n user_profile: Optional[UserProfile],\n realm: Realm,\n insecure_desktop_app: bool,\n narrow: List[List[str]],\n narrow_stream: Optional[Stream],\n narrow_topic: Optional[str],\n first_in_realm: bool,\n prompt_for_invites: bool,\n needs_tutorial: bool,\n) -> Tuple[int, Dict[str, Any]]:\n \"\"\"\n This function computes page_params for when we load the home page.\n\n The page_params data structure gets sent to the client.\n \"\"\"\n client_capabilities = {\n \"notification_settings_null\": True,\n \"bulk_message_deletion\": True,\n \"user_avatar_url_field_optional\": True,\n \"stream_typing_notifications\": False, # Set this to True when frontend support is implemented.\n \"user_settings_object\": True,\n }\n\n if user_profile is not None:\n client = RequestNotes.get_notes(request).client\n assert client is not None\n register_ret = do_events_register(\n user_profile,\n realm,\n client,\n apply_markdown=True,\n client_gravatar=True,\n slim_presence=True,\n client_capabilities=client_capabilities,\n narrow=narrow,\n include_streams=False,\n )\n default_language = register_ret[\"user_settings\"][\"default_language\"]\n else:\n # The spectator client will be fetching the /register response\n # for spectators via the API. But we still need to set the\n # values not presence in that object.\n register_ret = {\n \"queue_id\": None,\n }\n default_language = realm.default_language\n\n if user_profile is None:\n request_language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME, default_language)\n else:\n request_language = get_and_set_request_language(\n request,\n default_language,\n translation.get_language_from_path(request.path_info),\n )\n\n furthest_read_time = get_furthest_read_time(user_profile)\n two_fa_enabled = settings.TWO_FACTOR_AUTHENTICATION_ENABLED and user_profile is not None\n billing_info = get_billing_info(user_profile)\n user_permission_info = get_user_permission_info(user_profile)\n\n # Pass parameters to the client-side JavaScript code.\n # These end up in a JavaScript Object named 'page_params'.\n page_params = dict(\n ## Server settings.\n test_suite=settings.TEST_SUITE,\n insecure_desktop_app=insecure_desktop_app,\n login_page=settings.HOME_NOT_LOGGED_IN,\n warn_no_email=settings.WARN_NO_EMAIL,\n search_pills_enabled=settings.SEARCH_PILLS_ENABLED,\n # Only show marketing email settings if on Zulip Cloud\n corporate_enabled=settings.CORPORATE_ENABLED,\n ## Misc. 
extra data.\n language_list=get_language_list(),\n needs_tutorial=needs_tutorial,\n first_in_realm=first_in_realm,\n prompt_for_invites=prompt_for_invites,\n furthest_read_time=furthest_read_time,\n bot_types=get_bot_types(user_profile),\n two_fa_enabled=two_fa_enabled,\n apps_page_url=get_apps_page_url(),\n show_billing=billing_info.show_billing,\n promote_sponsoring_zulip=promote_sponsoring_zulip_in_realm(realm),\n show_plans=billing_info.show_plans,\n show_webathena=user_permission_info.show_webathena,\n # Adding two_fa_enabled as condition saves us 3 queries when\n # 2FA is not enabled.\n two_fa_enabled_user=two_fa_enabled and bool(default_device(user_profile)),\n is_spectator=user_profile is None,\n # There is no event queue for spectators since\n # events support for spectators is not implemented yet.\n no_event_queue=user_profile is None,\n )\n\n for field_name in register_ret.keys():\n page_params[field_name] = register_ret[field_name]\n\n if narrow_stream is not None:\n # In narrow_stream context, initial pointer is just latest message\n recipient = narrow_stream.recipient\n try:\n max_message_id = (\n Message.objects.filter(recipient=recipient).order_by(\"id\").reverse()[0].id\n )\n except IndexError:\n max_message_id = -1\n page_params[\"narrow_stream\"] = narrow_stream.name\n if narrow_topic is not None:\n page_params[\"narrow_topic\"] = narrow_topic\n page_params[\"narrow\"] = [dict(operator=term[0], operand=term[1]) for term in narrow]\n page_params[\"max_message_id\"] = max_message_id\n assert isinstance(page_params[\"user_settings\"], dict)\n page_params[\"user_settings\"][\"enable_desktop_notifications\"] = False\n\n page_params[\"translation_data\"] = get_language_translation_data(request_language)\n\n if user_profile is None:\n # Get rendered version of realm description which is displayed in right\n # sidebar for spectator.\n page_params[\"realm_rendered_description\"] = get_realm_rendered_description(realm)\n page_params[\"language_cookie_name\"] = settings.LANGUAGE_COOKIE_NAME\n\n return register_ret[\"queue_id\"], page_params\n", "path": "zerver/lib/home.py"}]}
num_tokens: 3,514
num_tokens_diff: 392
problem_id: gh_patches_debug_34300
source: rasdani/github-patches
task_type: git_diff
in_source_id: jupyterhub__jupyterhub-142
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Admin UI suggestions As I've been using the admin UI a bit for my class, I just had a few things that I thought would be nice to have/change in it: - move the "add user" button to the top -- it is annoying with lots of users to have to scroll all the way down to find it - add some simple statistics at the top of the page: number of running servers, number of currently active users, etc. - it would be awesome to be able to sort users by the different columns -- admin, alphabetically, by last seen - currently, if you shut down a user's server, it causes the page to refresh which often jumps you up to the top (or just somewhere else). It would be nice if the update could be done in the background without actually reloading the page. Obviously, none of these are urgent, but I think they would make the admin experience a little easier. I can open separate issues for them if so desired. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jupyterhub/handlers/pages.py` Content: ``` 1 """Basic html-rendering handlers.""" 2 3 # Copyright (c) Jupyter Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 from tornado import web 7 8 from .. import orm 9 from ..utils import admin_only, url_path_join 10 from .base import BaseHandler 11 12 13 class RootHandler(BaseHandler): 14 """Render the Hub root page. 15 16 Currently redirects to home if logged in, 17 shows big fat login button otherwise. 18 """ 19 def get(self): 20 if self.get_current_user(): 21 self.redirect( 22 url_path_join(self.hub.server.base_url, 'home'), 23 permanent=False, 24 ) 25 return 26 27 html = self.render_template('index.html', 28 login_url=self.settings['login_url'], 29 ) 30 self.finish(html) 31 32 class HomeHandler(BaseHandler): 33 """Render the user's home page.""" 34 35 @web.authenticated 36 def get(self): 37 html = self.render_template('home.html', 38 user=self.get_current_user(), 39 ) 40 self.finish(html) 41 42 43 class AdminHandler(BaseHandler): 44 """Render the admin page.""" 45 46 @admin_only 47 def get(self): 48 html = self.render_template('admin.html', 49 user=self.get_current_user(), 50 users=self.db.query(orm.User), 51 admin_access=self.settings.get('admin_access', False), 52 ) 53 self.finish(html) 54 55 56 default_handlers = [ 57 (r'/', RootHandler), 58 (r'/home', HomeHandler), 59 (r'/admin', AdminHandler), 60 ] 61 ``` Path: `jupyterhub/apihandlers/base.py` Content: ``` 1 """Base API handlers""" 2 # Copyright (c) Jupyter Development Team. 3 # Distributed under the terms of the Modified BSD License. 
4 5 import json 6 7 from http.client import responses 8 9 from tornado import web 10 11 from ..handlers import BaseHandler 12 13 class APIHandler(BaseHandler): 14 def get_json_body(self): 15 """Return the body of the request as JSON data.""" 16 if not self.request.body: 17 return None 18 body = self.request.body.strip().decode('utf-8') 19 try: 20 model = json.loads(body) 21 except Exception: 22 self.log.debug("Bad JSON: %r", body) 23 self.log.error("Couldn't parse JSON", exc_info=True) 24 raise web.HTTPError(400, 'Invalid JSON in body of request') 25 return model 26 27 28 def write_error(self, status_code, **kwargs): 29 """Write JSON errors instead of HTML""" 30 exc_info = kwargs.get('exc_info') 31 message = '' 32 status_message = responses.get(status_code, 'Unknown Error') 33 if exc_info: 34 exception = exc_info[1] 35 # get the custom message, if defined 36 try: 37 message = exception.log_message % exception.args 38 except Exception: 39 pass 40 41 # construct the custom reason, if defined 42 reason = getattr(exception, 'reason', '') 43 if reason: 44 status_message = reason 45 46 self.write(json.dumps({ 47 'status': status_code, 48 'message': message or status_message, 49 })) 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py --- a/jupyterhub/apihandlers/base.py +++ b/jupyterhub/apihandlers/base.py @@ -42,7 +42,7 @@ reason = getattr(exception, 'reason', '') if reason: status_message = reason - + self.set_header('Content-Type', 'application/json') self.write(json.dumps({ 'status': status_code, 'message': message or status_message, diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py --- a/jupyterhub/handlers/pages.py +++ b/jupyterhub/handlers/pages.py @@ -45,10 +45,52 @@ @admin_only def get(self): + available = {'name', 'admin', 'running', 'last_activity'} + default_sort = ['admin', 'name'] + mapping = { + 'running': '_server_id' + } + default_order = { + 'name': 'asc', + 'last_activity': 'desc', + 'admin': 'desc', + 'running': 'desc', + } + sorts = self.get_arguments('sort') or default_sort + orders = self.get_arguments('order') + + for bad in set(sorts).difference(available): + self.log.warn("ignoring invalid sort: %r", bad) + sorts.remove(bad) + for bad in set(orders).difference({'asc', 'desc'}): + self.log.warn("ignoring invalid order: %r", bad) + orders.remove(bad) + + # add default sort as secondary + for s in default_sort: + if s not in sorts: + sorts.append(s) + if len(orders) < len(sorts): + for col in sorts[len(orders):]: + orders.append(default_order[col]) + else: + orders = orders[:len(sorts)] + + # this could be one incomprehensible nested list comprehension + # get User columns + cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ] + # get User.col.desc() order objects + ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ] + + users = self.db.query(orm.User).order_by(*ordered) + running = users.filter(orm.User.server != None) + html = self.render_template('admin.html', user=self.get_current_user(), - users=self.db.query(orm.User), admin_access=self.settings.get('admin_access', False), + users=users, + running=running, + sort={s:o for s,o in zip(sorts, orders)}, ) self.finish(html)
{"golden_diff": "diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py\n--- a/jupyterhub/apihandlers/base.py\n+++ b/jupyterhub/apihandlers/base.py\n@@ -42,7 +42,7 @@\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n- \n+ self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\ndiff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py\n--- a/jupyterhub/handlers/pages.py\n+++ b/jupyterhub/handlers/pages.py\n@@ -45,10 +45,52 @@\n \n @admin_only\n def get(self):\n+ available = {'name', 'admin', 'running', 'last_activity'}\n+ default_sort = ['admin', 'name']\n+ mapping = {\n+ 'running': '_server_id'\n+ }\n+ default_order = {\n+ 'name': 'asc',\n+ 'last_activity': 'desc',\n+ 'admin': 'desc',\n+ 'running': 'desc',\n+ }\n+ sorts = self.get_arguments('sort') or default_sort\n+ orders = self.get_arguments('order')\n+ \n+ for bad in set(sorts).difference(available):\n+ self.log.warn(\"ignoring invalid sort: %r\", bad)\n+ sorts.remove(bad)\n+ for bad in set(orders).difference({'asc', 'desc'}):\n+ self.log.warn(\"ignoring invalid order: %r\", bad)\n+ orders.remove(bad)\n+ \n+ # add default sort as secondary\n+ for s in default_sort:\n+ if s not in sorts:\n+ sorts.append(s)\n+ if len(orders) < len(sorts):\n+ for col in sorts[len(orders):]:\n+ orders.append(default_order[col])\n+ else:\n+ orders = orders[:len(sorts)]\n+ \n+ # this could be one incomprehensible nested list comprehension\n+ # get User columns\n+ cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n+ # get User.col.desc() order objects\n+ ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n+ \n+ users = self.db.query(orm.User).order_by(*ordered)\n+ running = users.filter(orm.User.server != None)\n+ \n html = self.render_template('admin.html',\n user=self.get_current_user(),\n- users=self.db.query(orm.User),\n admin_access=self.settings.get('admin_access', False),\n+ users=users,\n+ running=running,\n+ sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n", "issue": "Admin UI suggestions\nAs I've been using the admin UI a bit for my class, I just had a few things that I thought would be nice to have/change in it:\n- move the \"add user\" button to the top -- it is annoying with lots of users to have to scroll all the way down to find it\n- add some simple statistics at the top of the page: number of running servers, number of currently active users, etc.\n- it would be awesome to be able to sort users by the different columns -- admin, alphabetically, by last seen\n- currently, if you shut down a user's server, it causes the page to refresh which often jumps you up to the top (or just somewhere else). It would be nice if the update could be done in the background without actually reloading the page.\n\nObviously, none of these are urgent, but I think they would make the admin experience a little easier. I can open separate issues for them if so desired.\n\n", "before_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom tornado import web\n\nfrom .. 
import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n \n Currently redirects to home if logged in,\n shows big fat login button otherwise.\n \"\"\"\n def get(self):\n if self.get_current_user():\n self.redirect(\n url_path_join(self.hub.server.base_url, 'home'),\n permanent=False,\n )\n return\n \n html = self.render_template('index.html',\n login_url=self.settings['login_url'],\n )\n self.finish(html)\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('home.html',\n user=self.get_current_user(),\n )\n self.finish(html)\n\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n html = self.render_template('admin.html',\n user=self.get_current_user(),\n users=self.db.query(orm.User),\n admin_access=self.settings.get('admin_access', False),\n )\n self.finish(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}, {"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\n\nclass APIHandler(BaseHandler):\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n \n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n", "path": "jupyterhub/apihandlers/base.py"}], "after_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom tornado import web\n\nfrom .. 
import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n \n Currently redirects to home if logged in,\n shows big fat login button otherwise.\n \"\"\"\n def get(self):\n if self.get_current_user():\n self.redirect(\n url_path_join(self.hub.server.base_url, 'home'),\n permanent=False,\n )\n return\n \n html = self.render_template('index.html',\n login_url=self.settings['login_url'],\n )\n self.finish(html)\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('home.html',\n user=self.get_current_user(),\n )\n self.finish(html)\n\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n \n for bad in set(sorts).difference(available):\n self.log.warn(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warn(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n \n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n \n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n \n users = self.db.query(orm.User).order_by(*ordered)\n running = users.filter(orm.User.server != None)\n \n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}, {"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\n\nclass APIHandler(BaseHandler):\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n 
reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n", "path": "jupyterhub/apihandlers/base.py"}]}
1,315
629
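Side note on the admin-page patch in the row above: the ordering logic is easy to lose inside the Tornado handler, so here is a minimal, self-contained sketch of just the sort/order argument validation in plain Python. The function name `resolve_sorting` and the dict-free return value are illustrative only (they are not in the patch); the real handler maps the validated names onto SQLAlchemy columns via `getattr(orm.User, ...)` and `order_by(*ordered)` as shown in the diff.

```python
# Sketch of the sort/order validation added to AdminHandler.get() above.
# Pure Python: ORM columns are represented by their string names.

def resolve_sorting(sorts, orders):
    """Return (column, direction) pairs with invalid input dropped and defaults filled in."""
    available = {'name', 'admin', 'running', 'last_activity'}
    default_sort = ['admin', 'name']
    default_order = {
        'name': 'asc',
        'last_activity': 'desc',
        'admin': 'desc',
        'running': 'desc',
    }

    sorts = list(sorts) or list(default_sort)
    # Ignore anything the page is not allowed to sort by.
    sorts = [s for s in sorts if s in available]
    orders = [o for o in orders if o in ('asc', 'desc')]

    # Add the default sort columns as secondary keys.
    for s in default_sort:
        if s not in sorts:
            sorts.append(s)

    # Pad or truncate the order list so it lines up with the sort list.
    if len(orders) < len(sorts):
        orders = orders + [default_order[col] for col in sorts[len(orders):]]
    else:
        orders = orders[:len(sorts)]

    return list(zip(sorts, orders))


if __name__ == '__main__':
    # e.g. /admin?sort=last_activity with no explicit order
    print(resolve_sorting(['last_activity'], []))
    # [('last_activity', 'desc'), ('admin', 'desc'), ('name', 'asc')]
```

The same validated pairs are what the template receives as the `sort` mapping, which is how the column headers can render their current sort direction.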
gh_patches_debug_1499
rasdani/github-patches
git_diff
inventree__InvenTree-5627
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Stocktake doesn't save parts with no stock ### Please verify that this bug has NOT been raised before. - [X] I checked and didn't find a similar issue ### Describe the bug* Stocktake is ignoring active parts with 0 stock. (see https://github.com/inventree/InvenTree/blob/master/InvenTree/part/stocktake.py#L252-L254) ### Steps to Reproduce 1. Add a Part 2. Give it some Stock 3. Run stocktake 4. Sell all the Stock 5. Run stocktake again 6. In the Parts stocktake you'll see no new ("0") entry ### Expected behaviour If I have an active part and I run stocktake, I expect the Part to be noted down with "0 Stock at DateTime". ### Deployment Method - [X] Docker - [ ] Bare metal ### Version Information # Version Information: InvenTree-Version: 0.13.0 dev Django Version: 3.2.21 Commit Hash: 2b0d81f Commit Date: 2023-09-25 Database: postgresql Debug-Mode: False Deployed using Docker: True Platform: Linux-5.15.0-82-generic-x86_64-with Installer: DOC Active plugins: False ### Please verify if you can reproduce this bug on the demo site. - [X] I can reproduce this bug on the demo site. ### Relevant log output _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `InvenTree/part/stocktake.py` Content: ``` 1 """Stocktake report functionality""" 2 3 import io 4 import logging 5 import time 6 from datetime import datetime 7 8 from django.contrib.auth.models import User 9 from django.core.files.base import ContentFile 10 from django.utils.translation import gettext_lazy as _ 11 12 import tablib 13 from djmoney.contrib.exchange.models import convert_money 14 from djmoney.money import Money 15 16 import common.models 17 import InvenTree.helpers 18 import part.models 19 import stock.models 20 21 logger = logging.getLogger('inventree') 22 23 24 def perform_stocktake(target: part.models.Part, user: User, note: str = '', commit=True, **kwargs): 25 """Perform stocktake action on a single part. 26 27 arguments: 28 target: A single Part model instance 29 commit: If True (default) save the result to the database 30 user: User who requested this stocktake 31 32 kwargs: 33 exclude_external: If True, exclude stock items in external locations (default = False) 34 location: Optional StockLocation to filter results for generated report 35 36 Returns: 37 PartStocktake: A new PartStocktake model instance (for the specified Part) 38 39 Note that while we record a *total stocktake* for the Part instance which gets saved to the database, 40 the user may have requested a stocktake limited to a particular location. 41 42 In this case, the stocktake *report* will be limited to the specified location. 
43 """ 44 45 # Determine which locations are "valid" for the generated report 46 location = kwargs.get('location', None) 47 locations = location.get_descendants(include_self=True) if location else [] 48 49 # Grab all "available" stock items for the Part 50 # We do not include variant stock when performing a stocktake, 51 # otherwise the stocktake entries will be duplicated 52 stock_entries = target.stock_entries(in_stock=True, include_variants=False) 53 54 exclude_external = kwargs.get('exclude_external', False) 55 56 if exclude_external: 57 stock_entries = stock_entries.exclude(location__external=True) 58 59 # Cache min/max pricing information for this Part 60 pricing = target.pricing 61 62 if not pricing.is_valid: 63 # If pricing is not valid, let's update 64 logger.info("Pricing not valid for %s - updating", target) 65 pricing.update_pricing(cascade=False) 66 pricing.refresh_from_db() 67 68 base_currency = common.settings.currency_code_default() 69 70 # Keep track of total quantity and cost for this part 71 total_quantity = 0 72 total_cost_min = Money(0, base_currency) 73 total_cost_max = Money(0, base_currency) 74 75 # Separately, keep track of stock quantity and value within the specified location 76 location_item_count = 0 77 location_quantity = 0 78 location_cost_min = Money(0, base_currency) 79 location_cost_max = Money(0, base_currency) 80 81 for entry in stock_entries: 82 83 entry_cost_min = None 84 entry_cost_max = None 85 86 # Update price range values 87 if entry.purchase_price: 88 entry_cost_min = entry.purchase_price 89 entry_cost_max = entry.purchase_price 90 91 else: 92 # If no purchase price is available, fall back to the part pricing data 93 entry_cost_min = pricing.overall_min or pricing.overall_max 94 entry_cost_max = pricing.overall_max or pricing.overall_min 95 96 # Convert to base currency 97 try: 98 entry_cost_min = convert_money(entry_cost_min, base_currency) * entry.quantity 99 entry_cost_max = convert_money(entry_cost_max, base_currency) * entry.quantity 100 except Exception: 101 102 entry_cost_min = Money(0, base_currency) 103 entry_cost_max = Money(0, base_currency) 104 105 # Update total cost values 106 total_quantity += entry.quantity 107 total_cost_min += entry_cost_min 108 total_cost_max += entry_cost_max 109 110 # Test if this stock item is within the specified location 111 if location and entry.location not in locations: 112 continue 113 114 # Update location cost values 115 location_item_count += 1 116 location_quantity += entry.quantity 117 location_cost_min += entry_cost_min 118 location_cost_max += entry_cost_max 119 120 # Construct PartStocktake instance 121 # Note that we use the *total* values for the PartStocktake instance 122 instance = part.models.PartStocktake( 123 part=target, 124 item_count=stock_entries.count(), 125 quantity=total_quantity, 126 cost_min=total_cost_min, 127 cost_max=total_cost_max, 128 note=note, 129 user=user, 130 ) 131 132 if commit: 133 instance.save() 134 135 # Add location-specific data to the instance 136 instance.location_item_count = location_item_count 137 instance.location_quantity = location_quantity 138 instance.location_cost_min = location_cost_min 139 instance.location_cost_max = location_cost_max 140 141 return instance 142 143 144 def generate_stocktake_report(**kwargs): 145 """Generated a new stocktake report. 146 147 Note that this method should be called only by the background worker process! 148 149 Unless otherwise specified, the stocktake report is generated for *all* Part instances. 
150 Optional filters can by supplied via the kwargs 151 152 kwargs: 153 user: The user who requested this stocktake (set to None for automated stocktake) 154 part: Optional Part instance to filter by (including variant parts) 155 category: Optional PartCategory to filter results 156 location: Optional StockLocation to filter results 157 exclude_external: If True, exclude stock items in external locations (default = False) 158 generate_report: If True, generate a stocktake report from the calculated data (default=True) 159 update_parts: If True, save stocktake information against each filtered Part (default = True) 160 """ 161 162 # Determine if external locations should be excluded 163 exclude_external = kwargs.get( 164 'exclude_exernal', 165 common.models.InvenTreeSetting.get_setting('STOCKTAKE_EXCLUDE_EXTERNAL', False) 166 ) 167 168 parts = part.models.Part.objects.all() 169 user = kwargs.get('user', None) 170 171 generate_report = kwargs.get('generate_report', True) 172 update_parts = kwargs.get('update_parts', True) 173 174 # Filter by 'Part' instance 175 if p := kwargs.get('part', None): 176 variants = p.get_descendants(include_self=True) 177 parts = parts.filter( 178 pk__in=[v.pk for v in variants] 179 ) 180 181 # Filter by 'Category' instance (cascading) 182 if category := kwargs.get('category', None): 183 categories = category.get_descendants(include_self=True) 184 parts = parts.filter(category__in=categories) 185 186 # Filter by 'Location' instance (cascading) 187 # Stocktake report will be limited to parts which have stock items within this location 188 if location := kwargs.get('location', None): 189 # Extract flat list of all sublocations 190 locations = list(location.get_descendants(include_self=True)) 191 192 # Items which exist within these locations 193 items = stock.models.StockItem.objects.filter(location__in=locations) 194 195 if exclude_external: 196 items = items.exclude(location__external=True) 197 198 # List of parts which exist within these locations 199 unique_parts = items.order_by().values('part').distinct() 200 201 parts = parts.filter( 202 pk__in=[result['part'] for result in unique_parts] 203 ) 204 205 # Exit if filters removed all parts 206 n_parts = parts.count() 207 208 if n_parts == 0: 209 logger.info("No parts selected for stocktake report - exiting") 210 return 211 212 logger.info("Generating new stocktake report for %s parts", n_parts) 213 214 base_currency = common.settings.currency_code_default() 215 216 # Construct an initial dataset for the stocktake report 217 dataset = tablib.Dataset( 218 headers=[ 219 _('Part ID'), 220 _('Part Name'), 221 _('Part Description'), 222 _('Category ID'), 223 _('Category Name'), 224 _('Stock Items'), 225 _('Total Quantity'), 226 _('Total Cost Min') + f' ({base_currency})', 227 _('Total Cost Max') + f' ({base_currency})', 228 ] 229 ) 230 231 parts = parts.prefetch_related('category', 'stock_items') 232 233 # Simple profiling for this task 234 t_start = time.time() 235 236 # Keep track of each individual "stocktake" we perform. 
237 # They may be bulk-commited to the database afterwards 238 stocktake_instances = [] 239 240 total_parts = 0 241 242 # Iterate through each Part which matches the filters above 243 for p in parts: 244 245 # Create a new stocktake for this part (do not commit, this will take place later on) 246 stocktake = perform_stocktake( 247 p, user, commit=False, 248 exclude_external=exclude_external, 249 location=location, 250 ) 251 252 if stocktake.quantity == 0: 253 # Skip rows with zero total quantity 254 continue 255 256 total_parts += 1 257 258 stocktake_instances.append(stocktake) 259 260 # Add a row to the dataset 261 dataset.append([ 262 p.pk, 263 p.full_name, 264 p.description, 265 p.category.pk if p.category else '', 266 p.category.name if p.category else '', 267 stocktake.location_item_count, 268 stocktake.location_quantity, 269 InvenTree.helpers.normalize(stocktake.location_cost_min.amount), 270 InvenTree.helpers.normalize(stocktake.location_cost_max.amount), 271 ]) 272 273 # Save a new PartStocktakeReport instance 274 buffer = io.StringIO() 275 buffer.write(dataset.export('csv')) 276 277 today = datetime.now().date().isoformat() 278 filename = f"InvenTree_Stocktake_{today}.csv" 279 report_file = ContentFile(buffer.getvalue(), name=filename) 280 281 if generate_report: 282 report_instance = part.models.PartStocktakeReport.objects.create( 283 report=report_file, 284 part_count=total_parts, 285 user=user 286 ) 287 288 # Notify the requesting user 289 if user: 290 291 common.notifications.trigger_notification( 292 report_instance, 293 category='generate_stocktake_report', 294 context={ 295 'name': _('Stocktake Report Available'), 296 'message': _('A new stocktake report is available for download'), 297 }, 298 targets=[ 299 user, 300 ] 301 ) 302 303 # If 'update_parts' is set, we save stocktake entries for each individual part 304 if update_parts: 305 # Use bulk_create for efficient insertion of stocktake 306 part.models.PartStocktake.objects.bulk_create( 307 stocktake_instances, 308 batch_size=500, 309 ) 310 311 t_stocktake = time.time() - t_start 312 logger.info("Generated stocktake report for %s parts in %ss", total_parts, round(t_stocktake, 2)) 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/InvenTree/part/stocktake.py b/InvenTree/part/stocktake.py --- a/InvenTree/part/stocktake.py +++ b/InvenTree/part/stocktake.py @@ -249,10 +249,6 @@ location=location, ) - if stocktake.quantity == 0: - # Skip rows with zero total quantity - continue - total_parts += 1 stocktake_instances.append(stocktake)
{"golden_diff": "diff --git a/InvenTree/part/stocktake.py b/InvenTree/part/stocktake.py\n--- a/InvenTree/part/stocktake.py\n+++ b/InvenTree/part/stocktake.py\n@@ -249,10 +249,6 @@\n location=location,\n )\n \n- if stocktake.quantity == 0:\n- # Skip rows with zero total quantity\n- continue\n-\n total_parts += 1\n \n stocktake_instances.append(stocktake)\n", "issue": "Stocktake doesn't save parts with no stock\n### Please verify that this bug has NOT been raised before.\r\n\r\n- [X] I checked and didn't find a similar issue\r\n\r\n### Describe the bug*\r\n\r\nStocktake is ignoring active parts with 0 stock. (see https://github.com/inventree/InvenTree/blob/master/InvenTree/part/stocktake.py#L252-L254)\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add a Part\r\n2. Give it some Stock\r\n3. Run stocktake\r\n4. Sell all the Stock\r\n5. Run stocktake again\r\n6. In the Parts stocktake you'll see no new (\"0\") entry \r\n\r\n### Expected behaviour\r\n\r\nIf I have an active part and I run stocktake, I expect the Part to be noted down with \"0 Stock at DateTime\".\r\n\r\n### Deployment Method\r\n\r\n- [X] Docker\r\n- [ ] Bare metal\r\n\r\n### Version Information\r\n\r\n# Version Information:\r\nInvenTree-Version: 0.13.0 dev\r\nDjango Version: 3.2.21\r\nCommit Hash: 2b0d81f\r\nCommit Date: 2023-09-25\r\n\r\nDatabase: postgresql\r\nDebug-Mode: False\r\nDeployed using Docker: True\r\nPlatform: Linux-5.15.0-82-generic-x86_64-with\r\nInstaller: DOC\r\n\r\nActive plugins: False\r\n\r\n\r\n### Please verify if you can reproduce this bug on the demo site.\r\n\r\n- [X] I can reproduce this bug on the demo site.\r\n\r\n### Relevant log output\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"Stocktake report functionality\"\"\"\n\nimport io\nimport logging\nimport time\nfrom datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.base import ContentFile\nfrom django.utils.translation import gettext_lazy as _\n\nimport tablib\nfrom djmoney.contrib.exchange.models import convert_money\nfrom djmoney.money import Money\n\nimport common.models\nimport InvenTree.helpers\nimport part.models\nimport stock.models\n\nlogger = logging.getLogger('inventree')\n\n\ndef perform_stocktake(target: part.models.Part, user: User, note: str = '', commit=True, **kwargs):\n \"\"\"Perform stocktake action on a single part.\n\n arguments:\n target: A single Part model instance\n commit: If True (default) save the result to the database\n user: User who requested this stocktake\n\n kwargs:\n exclude_external: If True, exclude stock items in external locations (default = False)\n location: Optional StockLocation to filter results for generated report\n\n Returns:\n PartStocktake: A new PartStocktake model instance (for the specified Part)\n\n Note that while we record a *total stocktake* for the Part instance which gets saved to the database,\n the user may have requested a stocktake limited to a particular location.\n\n In this case, the stocktake *report* will be limited to the specified location.\n \"\"\"\n\n # Determine which locations are \"valid\" for the generated report\n location = kwargs.get('location', None)\n locations = location.get_descendants(include_self=True) if location else []\n\n # Grab all \"available\" stock items for the Part\n # We do not include variant stock when performing a stocktake,\n # otherwise the stocktake entries will be duplicated\n stock_entries = target.stock_entries(in_stock=True, include_variants=False)\n\n exclude_external = 
kwargs.get('exclude_external', False)\n\n if exclude_external:\n stock_entries = stock_entries.exclude(location__external=True)\n\n # Cache min/max pricing information for this Part\n pricing = target.pricing\n\n if not pricing.is_valid:\n # If pricing is not valid, let's update\n logger.info(\"Pricing not valid for %s - updating\", target)\n pricing.update_pricing(cascade=False)\n pricing.refresh_from_db()\n\n base_currency = common.settings.currency_code_default()\n\n # Keep track of total quantity and cost for this part\n total_quantity = 0\n total_cost_min = Money(0, base_currency)\n total_cost_max = Money(0, base_currency)\n\n # Separately, keep track of stock quantity and value within the specified location\n location_item_count = 0\n location_quantity = 0\n location_cost_min = Money(0, base_currency)\n location_cost_max = Money(0, base_currency)\n\n for entry in stock_entries:\n\n entry_cost_min = None\n entry_cost_max = None\n\n # Update price range values\n if entry.purchase_price:\n entry_cost_min = entry.purchase_price\n entry_cost_max = entry.purchase_price\n\n else:\n # If no purchase price is available, fall back to the part pricing data\n entry_cost_min = pricing.overall_min or pricing.overall_max\n entry_cost_max = pricing.overall_max or pricing.overall_min\n\n # Convert to base currency\n try:\n entry_cost_min = convert_money(entry_cost_min, base_currency) * entry.quantity\n entry_cost_max = convert_money(entry_cost_max, base_currency) * entry.quantity\n except Exception:\n\n entry_cost_min = Money(0, base_currency)\n entry_cost_max = Money(0, base_currency)\n\n # Update total cost values\n total_quantity += entry.quantity\n total_cost_min += entry_cost_min\n total_cost_max += entry_cost_max\n\n # Test if this stock item is within the specified location\n if location and entry.location not in locations:\n continue\n\n # Update location cost values\n location_item_count += 1\n location_quantity += entry.quantity\n location_cost_min += entry_cost_min\n location_cost_max += entry_cost_max\n\n # Construct PartStocktake instance\n # Note that we use the *total* values for the PartStocktake instance\n instance = part.models.PartStocktake(\n part=target,\n item_count=stock_entries.count(),\n quantity=total_quantity,\n cost_min=total_cost_min,\n cost_max=total_cost_max,\n note=note,\n user=user,\n )\n\n if commit:\n instance.save()\n\n # Add location-specific data to the instance\n instance.location_item_count = location_item_count\n instance.location_quantity = location_quantity\n instance.location_cost_min = location_cost_min\n instance.location_cost_max = location_cost_max\n\n return instance\n\n\ndef generate_stocktake_report(**kwargs):\n \"\"\"Generated a new stocktake report.\n\n Note that this method should be called only by the background worker process!\n\n Unless otherwise specified, the stocktake report is generated for *all* Part instances.\n Optional filters can by supplied via the kwargs\n\n kwargs:\n user: The user who requested this stocktake (set to None for automated stocktake)\n part: Optional Part instance to filter by (including variant parts)\n category: Optional PartCategory to filter results\n location: Optional StockLocation to filter results\n exclude_external: If True, exclude stock items in external locations (default = False)\n generate_report: If True, generate a stocktake report from the calculated data (default=True)\n update_parts: If True, save stocktake information against each filtered Part (default = True)\n \"\"\"\n\n # Determine if external 
locations should be excluded\n exclude_external = kwargs.get(\n 'exclude_exernal',\n common.models.InvenTreeSetting.get_setting('STOCKTAKE_EXCLUDE_EXTERNAL', False)\n )\n\n parts = part.models.Part.objects.all()\n user = kwargs.get('user', None)\n\n generate_report = kwargs.get('generate_report', True)\n update_parts = kwargs.get('update_parts', True)\n\n # Filter by 'Part' instance\n if p := kwargs.get('part', None):\n variants = p.get_descendants(include_self=True)\n parts = parts.filter(\n pk__in=[v.pk for v in variants]\n )\n\n # Filter by 'Category' instance (cascading)\n if category := kwargs.get('category', None):\n categories = category.get_descendants(include_self=True)\n parts = parts.filter(category__in=categories)\n\n # Filter by 'Location' instance (cascading)\n # Stocktake report will be limited to parts which have stock items within this location\n if location := kwargs.get('location', None):\n # Extract flat list of all sublocations\n locations = list(location.get_descendants(include_self=True))\n\n # Items which exist within these locations\n items = stock.models.StockItem.objects.filter(location__in=locations)\n\n if exclude_external:\n items = items.exclude(location__external=True)\n\n # List of parts which exist within these locations\n unique_parts = items.order_by().values('part').distinct()\n\n parts = parts.filter(\n pk__in=[result['part'] for result in unique_parts]\n )\n\n # Exit if filters removed all parts\n n_parts = parts.count()\n\n if n_parts == 0:\n logger.info(\"No parts selected for stocktake report - exiting\")\n return\n\n logger.info(\"Generating new stocktake report for %s parts\", n_parts)\n\n base_currency = common.settings.currency_code_default()\n\n # Construct an initial dataset for the stocktake report\n dataset = tablib.Dataset(\n headers=[\n _('Part ID'),\n _('Part Name'),\n _('Part Description'),\n _('Category ID'),\n _('Category Name'),\n _('Stock Items'),\n _('Total Quantity'),\n _('Total Cost Min') + f' ({base_currency})',\n _('Total Cost Max') + f' ({base_currency})',\n ]\n )\n\n parts = parts.prefetch_related('category', 'stock_items')\n\n # Simple profiling for this task\n t_start = time.time()\n\n # Keep track of each individual \"stocktake\" we perform.\n # They may be bulk-commited to the database afterwards\n stocktake_instances = []\n\n total_parts = 0\n\n # Iterate through each Part which matches the filters above\n for p in parts:\n\n # Create a new stocktake for this part (do not commit, this will take place later on)\n stocktake = perform_stocktake(\n p, user, commit=False,\n exclude_external=exclude_external,\n location=location,\n )\n\n if stocktake.quantity == 0:\n # Skip rows with zero total quantity\n continue\n\n total_parts += 1\n\n stocktake_instances.append(stocktake)\n\n # Add a row to the dataset\n dataset.append([\n p.pk,\n p.full_name,\n p.description,\n p.category.pk if p.category else '',\n p.category.name if p.category else '',\n stocktake.location_item_count,\n stocktake.location_quantity,\n InvenTree.helpers.normalize(stocktake.location_cost_min.amount),\n InvenTree.helpers.normalize(stocktake.location_cost_max.amount),\n ])\n\n # Save a new PartStocktakeReport instance\n buffer = io.StringIO()\n buffer.write(dataset.export('csv'))\n\n today = datetime.now().date().isoformat()\n filename = f\"InvenTree_Stocktake_{today}.csv\"\n report_file = ContentFile(buffer.getvalue(), name=filename)\n\n if generate_report:\n report_instance = part.models.PartStocktakeReport.objects.create(\n report=report_file,\n 
part_count=total_parts,\n user=user\n )\n\n # Notify the requesting user\n if user:\n\n common.notifications.trigger_notification(\n report_instance,\n category='generate_stocktake_report',\n context={\n 'name': _('Stocktake Report Available'),\n 'message': _('A new stocktake report is available for download'),\n },\n targets=[\n user,\n ]\n )\n\n # If 'update_parts' is set, we save stocktake entries for each individual part\n if update_parts:\n # Use bulk_create for efficient insertion of stocktake\n part.models.PartStocktake.objects.bulk_create(\n stocktake_instances,\n batch_size=500,\n )\n\n t_stocktake = time.time() - t_start\n logger.info(\"Generated stocktake report for %s parts in %ss\", total_parts, round(t_stocktake, 2))\n", "path": "InvenTree/part/stocktake.py"}], "after_files": [{"content": "\"\"\"Stocktake report functionality\"\"\"\n\nimport io\nimport logging\nimport time\nfrom datetime import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.files.base import ContentFile\nfrom django.utils.translation import gettext_lazy as _\n\nimport tablib\nfrom djmoney.contrib.exchange.models import convert_money\nfrom djmoney.money import Money\n\nimport common.models\nimport InvenTree.helpers\nimport part.models\nimport stock.models\n\nlogger = logging.getLogger('inventree')\n\n\ndef perform_stocktake(target: part.models.Part, user: User, note: str = '', commit=True, **kwargs):\n \"\"\"Perform stocktake action on a single part.\n\n arguments:\n target: A single Part model instance\n commit: If True (default) save the result to the database\n user: User who requested this stocktake\n\n kwargs:\n exclude_external: If True, exclude stock items in external locations (default = False)\n location: Optional StockLocation to filter results for generated report\n\n Returns:\n PartStocktake: A new PartStocktake model instance (for the specified Part)\n\n Note that while we record a *total stocktake* for the Part instance which gets saved to the database,\n the user may have requested a stocktake limited to a particular location.\n\n In this case, the stocktake *report* will be limited to the specified location.\n \"\"\"\n\n # Determine which locations are \"valid\" for the generated report\n location = kwargs.get('location', None)\n locations = location.get_descendants(include_self=True) if location else []\n\n # Grab all \"available\" stock items for the Part\n # We do not include variant stock when performing a stocktake,\n # otherwise the stocktake entries will be duplicated\n stock_entries = target.stock_entries(in_stock=True, include_variants=False)\n\n exclude_external = kwargs.get('exclude_external', False)\n\n if exclude_external:\n stock_entries = stock_entries.exclude(location__external=True)\n\n # Cache min/max pricing information for this Part\n pricing = target.pricing\n\n if not pricing.is_valid:\n # If pricing is not valid, let's update\n logger.info(\"Pricing not valid for %s - updating\", target)\n pricing.update_pricing(cascade=False)\n pricing.refresh_from_db()\n\n base_currency = common.settings.currency_code_default()\n\n # Keep track of total quantity and cost for this part\n total_quantity = 0\n total_cost_min = Money(0, base_currency)\n total_cost_max = Money(0, base_currency)\n\n # Separately, keep track of stock quantity and value within the specified location\n location_item_count = 0\n location_quantity = 0\n location_cost_min = Money(0, base_currency)\n location_cost_max = Money(0, base_currency)\n\n for entry in stock_entries:\n\n 
entry_cost_min = None\n entry_cost_max = None\n\n # Update price range values\n if entry.purchase_price:\n entry_cost_min = entry.purchase_price\n entry_cost_max = entry.purchase_price\n\n else:\n # If no purchase price is available, fall back to the part pricing data\n entry_cost_min = pricing.overall_min or pricing.overall_max\n entry_cost_max = pricing.overall_max or pricing.overall_min\n\n # Convert to base currency\n try:\n entry_cost_min = convert_money(entry_cost_min, base_currency) * entry.quantity\n entry_cost_max = convert_money(entry_cost_max, base_currency) * entry.quantity\n except Exception:\n\n entry_cost_min = Money(0, base_currency)\n entry_cost_max = Money(0, base_currency)\n\n # Update total cost values\n total_quantity += entry.quantity\n total_cost_min += entry_cost_min\n total_cost_max += entry_cost_max\n\n # Test if this stock item is within the specified location\n if location and entry.location not in locations:\n continue\n\n # Update location cost values\n location_item_count += 1\n location_quantity += entry.quantity\n location_cost_min += entry_cost_min\n location_cost_max += entry_cost_max\n\n # Construct PartStocktake instance\n # Note that we use the *total* values for the PartStocktake instance\n instance = part.models.PartStocktake(\n part=target,\n item_count=stock_entries.count(),\n quantity=total_quantity,\n cost_min=total_cost_min,\n cost_max=total_cost_max,\n note=note,\n user=user,\n )\n\n if commit:\n instance.save()\n\n # Add location-specific data to the instance\n instance.location_item_count = location_item_count\n instance.location_quantity = location_quantity\n instance.location_cost_min = location_cost_min\n instance.location_cost_max = location_cost_max\n\n return instance\n\n\ndef generate_stocktake_report(**kwargs):\n \"\"\"Generated a new stocktake report.\n\n Note that this method should be called only by the background worker process!\n\n Unless otherwise specified, the stocktake report is generated for *all* Part instances.\n Optional filters can by supplied via the kwargs\n\n kwargs:\n user: The user who requested this stocktake (set to None for automated stocktake)\n part: Optional Part instance to filter by (including variant parts)\n category: Optional PartCategory to filter results\n location: Optional StockLocation to filter results\n exclude_external: If True, exclude stock items in external locations (default = False)\n generate_report: If True, generate a stocktake report from the calculated data (default=True)\n update_parts: If True, save stocktake information against each filtered Part (default = True)\n \"\"\"\n\n # Determine if external locations should be excluded\n exclude_external = kwargs.get(\n 'exclude_exernal',\n common.models.InvenTreeSetting.get_setting('STOCKTAKE_EXCLUDE_EXTERNAL', False)\n )\n\n parts = part.models.Part.objects.all()\n user = kwargs.get('user', None)\n\n generate_report = kwargs.get('generate_report', True)\n update_parts = kwargs.get('update_parts', True)\n\n # Filter by 'Part' instance\n if p := kwargs.get('part', None):\n variants = p.get_descendants(include_self=True)\n parts = parts.filter(\n pk__in=[v.pk for v in variants]\n )\n\n # Filter by 'Category' instance (cascading)\n if category := kwargs.get('category', None):\n categories = category.get_descendants(include_self=True)\n parts = parts.filter(category__in=categories)\n\n # Filter by 'Location' instance (cascading)\n # Stocktake report will be limited to parts which have stock items within this location\n if location := 
kwargs.get('location', None):\n # Extract flat list of all sublocations\n locations = list(location.get_descendants(include_self=True))\n\n # Items which exist within these locations\n items = stock.models.StockItem.objects.filter(location__in=locations)\n\n if exclude_external:\n items = items.exclude(location__external=True)\n\n # List of parts which exist within these locations\n unique_parts = items.order_by().values('part').distinct()\n\n parts = parts.filter(\n pk__in=[result['part'] for result in unique_parts]\n )\n\n # Exit if filters removed all parts\n n_parts = parts.count()\n\n if n_parts == 0:\n logger.info(\"No parts selected for stocktake report - exiting\")\n return\n\n logger.info(\"Generating new stocktake report for %s parts\", n_parts)\n\n base_currency = common.settings.currency_code_default()\n\n # Construct an initial dataset for the stocktake report\n dataset = tablib.Dataset(\n headers=[\n _('Part ID'),\n _('Part Name'),\n _('Part Description'),\n _('Category ID'),\n _('Category Name'),\n _('Stock Items'),\n _('Total Quantity'),\n _('Total Cost Min') + f' ({base_currency})',\n _('Total Cost Max') + f' ({base_currency})',\n ]\n )\n\n parts = parts.prefetch_related('category', 'stock_items')\n\n # Simple profiling for this task\n t_start = time.time()\n\n # Keep track of each individual \"stocktake\" we perform.\n # They may be bulk-commited to the database afterwards\n stocktake_instances = []\n\n total_parts = 0\n\n # Iterate through each Part which matches the filters above\n for p in parts:\n\n # Create a new stocktake for this part (do not commit, this will take place later on)\n stocktake = perform_stocktake(\n p, user, commit=False,\n exclude_external=exclude_external,\n location=location,\n )\n\n total_parts += 1\n\n stocktake_instances.append(stocktake)\n\n # Add a row to the dataset\n dataset.append([\n p.pk,\n p.full_name,\n p.description,\n p.category.pk if p.category else '',\n p.category.name if p.category else '',\n stocktake.location_item_count,\n stocktake.location_quantity,\n InvenTree.helpers.normalize(stocktake.location_cost_min.amount),\n InvenTree.helpers.normalize(stocktake.location_cost_max.amount),\n ])\n\n # Save a new PartStocktakeReport instance\n buffer = io.StringIO()\n buffer.write(dataset.export('csv'))\n\n today = datetime.now().date().isoformat()\n filename = f\"InvenTree_Stocktake_{today}.csv\"\n report_file = ContentFile(buffer.getvalue(), name=filename)\n\n if generate_report:\n report_instance = part.models.PartStocktakeReport.objects.create(\n report=report_file,\n part_count=total_parts,\n user=user\n )\n\n # Notify the requesting user\n if user:\n\n common.notifications.trigger_notification(\n report_instance,\n category='generate_stocktake_report',\n context={\n 'name': _('Stocktake Report Available'),\n 'message': _('A new stocktake report is available for download'),\n },\n targets=[\n user,\n ]\n )\n\n # If 'update_parts' is set, we save stocktake entries for each individual part\n if update_parts:\n # Use bulk_create for efficient insertion of stocktake\n part.models.PartStocktake.objects.bulk_create(\n stocktake_instances,\n batch_size=500,\n )\n\n t_stocktake = time.time() - t_start\n logger.info(\"Generated stocktake report for %s parts in %ss\", total_parts, round(t_stocktake, 2))\n", "path": "InvenTree/part/stocktake.py"}]}
3,698
111
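A quick illustration of the behavioural change in the stocktake patch above: the only functional difference is that the `quantity == 0` guard is removed, so active parts whose stock has been fully sold still receive a stocktake row. The sketch below is a simplified stand-in, not InvenTree code: parts are plain dicts instead of `Part`/`PartStocktake` model instances, and the `skip_zero_quantity` flag exists only to contrast the old and new behaviour side by side.

```python
# Sketch of the change in generate_stocktake_report(): with the patch,
# a part whose total quantity is zero still gets a stocktake row instead
# of being skipped.  Parts are plain dicts here; the real code builds
# part.models.PartStocktake instances via perform_stocktake().

def build_report_rows(parts, skip_zero_quantity):
    rows = []
    for p in parts:
        quantity = sum(item['quantity'] for item in p['stock_items'])
        if skip_zero_quantity and quantity == 0:
            # Pre-patch behaviour: active parts with no stock were dropped.
            continue
        rows.append({'part': p['name'], 'quantity': quantity})
    return rows


parts = [
    {'name': 'Widget', 'stock_items': [{'quantity': 10}]},
    {'name': 'Sold-out widget', 'stock_items': []},   # all stock sold
]

print(build_report_rows(parts, skip_zero_quantity=True))    # old: 1 row
print(build_report_rows(parts, skip_zero_quantity=False))   # new: 2 rows, second with quantity 0
```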
gh_patches_debug_40919
rasdani/github-patches
git_diff
facebookresearch__hydra-141
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Task name lost with submitit and fairtask --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hydra/plugins/common/utils.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 3 import copy 4 import logging 5 import os 6 import re 7 import sys 8 from time import strftime, localtime 9 10 import six 11 from omegaconf import OmegaConf, DictConfig 12 13 # pylint: disable=C0103 14 log = logging.getLogger(__name__) 15 16 17 def configure_log(log_config, verbose=None): 18 if log_config is not None: 19 conf = log_config.to_container(resolve=True) 20 logging.config.dictConfig(conf) 21 else: 22 # default logging to stdout 23 root = logging.getLogger() 24 root.setLevel(logging.INFO) 25 handler = logging.StreamHandler(sys.stdout) 26 formatter = logging.Formatter( 27 "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s" 28 ) 29 handler.setFormatter(formatter) 30 root.addHandler(handler) 31 32 if verbose is not None: 33 if verbose == "root": 34 logging.getLogger().setLevel(logging.DEBUG) 35 for logger in verbose.split(","): 36 logging.getLogger(logger).setLevel(logging.DEBUG) 37 38 39 def save_config(cfg, filename): 40 with open(os.path.join(filename), "w") as file: 41 file.write(cfg.pretty()) 42 43 44 def get_overrides_dirname(lst, exclude_keys=[]): 45 lst = [x for x in lst if x not in exclude_keys] 46 lst.sort() 47 return re.sub(pattern="[=]", repl="=", string=",".join(lst)) 48 49 50 def filter_overrides(overrides): 51 """ 52 :param overrides: overrides list 53 :return: returning a new overrides list with all the keys starting with hydra. fitlered. 54 """ 55 return [x for x in overrides if not x.startswith("hydra.")] 56 57 58 def run_job(config, task_function, verbose, job_dir_key, job_subdir_key): 59 old_cwd = os.getcwd() 60 working_dir = str(config.select(job_dir_key)) 61 if job_subdir_key is not None: 62 # evaluate job_subdir_key lazily. 63 # this is running on the client side in sweep and contains things such as job:id which 64 # are only available there. 65 subdir = str(config.select(job_subdir_key)) 66 working_dir = os.path.join(working_dir, subdir) 67 try: 68 ret = JobReturn() 69 ret.working_dir = working_dir 70 task_cfg = copy.deepcopy(config) 71 hydra_cfg = task_cfg["hydra"] 72 del task_cfg["hydra"] 73 ret.cfg = task_cfg 74 ret.hydra_cfg = copy.deepcopy(HydraConfig()) 75 ret.overrides = config.hydra.overrides.task.to_container() 76 if not os.path.exists(working_dir): 77 os.makedirs(working_dir) 78 os.chdir(working_dir) 79 configure_log(config.hydra.job_logging, verbose) 80 81 save_config(task_cfg, "config.yaml") 82 save_config(hydra_cfg, "hydra.yaml") 83 save_config(config.hydra.overrides.task, "overrides.yaml") 84 ret.return_value = task_function(task_cfg) 85 ret.task_name = JobRuntime().get("name") 86 return ret 87 finally: 88 os.chdir(old_cwd) 89 90 91 def get_valid_filename(s): 92 s = str(s).strip().replace(" ", "_") 93 return re.sub(r"(?u)[^-\w.]", "", s) 94 95 96 def setup_globals(): 97 try: 98 OmegaConf.register_resolver( 99 "now", lambda pattern: strftime(pattern, localtime()) 100 ) 101 102 def job_error(x): 103 raise Exception( 104 "job:{} is no longer available. use hydra.job.{}".format(x, x) 105 ) 106 107 OmegaConf.register_resolver("job", job_error) 108 109 except AssertionError: 110 # calling it again in no_workers mode will throw. 
safe to ignore. 111 pass 112 113 114 class JobReturn: 115 def __init__(self): 116 self.overrides = None 117 self.return_value = None 118 self.cfg = None 119 self.hydra_cfg = None 120 self.working_dir = None 121 self.task_name = None 122 123 124 class Singleton(type): 125 _instances = {} 126 127 def __call__(cls, *args, **kwargs): 128 if cls not in cls._instances: 129 cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) 130 return cls._instances[cls] 131 132 133 @six.add_metaclass(Singleton) 134 class JobRuntime: 135 def __init__(self): 136 self.conf = OmegaConf.create() 137 self.set("name", "UNKNOWN_NAME") 138 139 def get(self, key): 140 ret = self.conf.select(key) 141 if ret is None: 142 raise KeyError("Key not found in {}: {}".format(type(self).__name__, key)) 143 return ret 144 145 def set(self, key, value): 146 log.debug("Setting {}:{}={}".format(type(self).__name__, key, value)) 147 self.conf[key] = value 148 149 150 @six.add_metaclass(Singleton) 151 class HydraConfig(DictConfig): 152 def __init__(self): 153 super(HydraConfig, self).__init__(content={}) 154 self.hydra = None 155 156 def set_config(self, cfg): 157 try: 158 OmegaConf.set_readonly(self, False) 159 self.hydra = copy.deepcopy(cfg.hydra) 160 finally: 161 OmegaConf.set_readonly(self, True) 162 ``` Path: `plugins/submitit/hydra_plugins/submitit/submitit_launcher.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 import logging 3 import os 4 5 import hydra._internal.utils 6 import hydra.plugins.common.utils 7 from hydra.plugins import Launcher 8 9 from omegaconf import open_dict 10 11 # pylint: disable=C0103 12 log = logging.getLogger(__name__) 13 14 15 class SubmititLauncher(Launcher): 16 def __init__(self, queue, folder, queue_parameters, conda_file=None): 17 self.queue = queue 18 self.queue_parameters = queue_parameters 19 self.folder = folder 20 self.conda_file = conda_file 21 self.config = None 22 self.task_function = None 23 self.verbose = None 24 self.sweep_configs = None 25 self.config_loader = None 26 27 def setup(self, config, config_loader, task_function, verbose): 28 self.config = config 29 self.config_loader = config_loader 30 self.task_function = task_function 31 self.verbose = verbose 32 33 def launch_job(self, sweep_overrides, job_dir_key, job_num): 34 hydra.plugins.common.utils.configure_log(None, self.verbose) 35 hydra.plugins.common.utils.setup_globals() 36 sweep_config = self.config_loader.load_sweep_config( 37 self.config, sweep_overrides 38 ) 39 with open_dict(sweep_config): 40 # Populate new job variables 41 if "SLURM_JOB_ID" in os.environ: 42 sweep_config.hydra.job.id = os.environ["SLURM_JOB_ID"] 43 elif "CHRONOS_JOB_ID" in os.environ: 44 sweep_config.hydra.job.id = os.environ["CHRONOS_JOB_ID"] 45 else: 46 sweep_config.hydra.job.id = "unknown" 47 48 sweep_config.hydra.job.num = job_num 49 sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname( 50 sweep_config.hydra.overrides.task 51 ) 52 53 return hydra.plugins.common.utils.run_job( 54 config=sweep_config, 55 task_function=self.task_function, 56 verbose=self.verbose, 57 job_dir_key=job_dir_key, 58 job_subdir_key="hydra.sweep.subdir", 59 ) 60 61 def launch(self, job_overrides): 62 # lazy import to ensurue plugin discovery remains fast 63 import submitit 64 65 num_jobs = len(job_overrides) 66 assert num_jobs > 0 67 with open_dict(self.config): 68 self.config.hydra.job.num_jobs = num_jobs 69 70 if self.queue == "auto": 71 executor = submitit.AutoExecutor( 72 
folder=self.folder, conda_file=self.conda_file 73 ) 74 elif self.queue == "slurm": 75 executor = submitit.SlurmExecutor(folder=self.folder) 76 elif self.queue == "chronos": 77 executor = submitit.ChronosExecutor( 78 folder=self.folder, conda_file=self.conda_file 79 ) 80 elif self.queue == "local": 81 executor = submitit.LocalExecutor(folder=self.folder) 82 else: 83 raise RuntimeError("Unsupported queue type {}".format(self.queue)) 84 85 executor.update_parameters(**self.queue_parameters[self.queue]) 86 87 log.info("Sweep output dir : {}".format(self.config.hydra.sweep.dir)) 88 path_str = str(self.config.hydra.sweep.dir) 89 os.makedirs(path_str, exist_ok=True) 90 if "mode" in self.config.hydra.sweep: 91 mode = int(str(self.config.hydra.sweep.mode), 8) 92 os.chmod(path_str, mode=mode) 93 94 jobs = [] 95 for job_num in range(num_jobs): 96 sweep_override = list(job_overrides[job_num]) 97 log.info( 98 "\t#{} : {}".format( 99 job_num, 100 " ".join( 101 hydra.plugins.common.utils.filter_overrides(sweep_override) 102 ), 103 ) 104 ) 105 job = executor.submit( 106 self.launch_job, sweep_override, "hydra.sweep.dir", job_num 107 ) 108 jobs.append(job) 109 110 return [j.results() for j in jobs] 111 ``` Path: `plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 3 import asyncio 4 import logging 5 import os 6 7 from omegaconf import open_dict 8 9 import hydra.plugins.common.utils 10 from hydra import utils 11 from hydra.plugins import Launcher 12 13 # TODO: initialize logger before importing fairtask until comments at 14 # https://github.com/fairinternal/fairtask/pull/23 are addressed 15 log = logging.getLogger(__name__) # noqa: E402 16 17 18 class FAIRTaskLauncher(Launcher): 19 def __init__(self, queue, queues, no_workers=False): 20 self.queue_name = queue 21 self.queues = queues 22 self.config = None 23 self.task_function = None 24 self.verbose = None 25 self.sweep_configs = None 26 self.no_workers = no_workers 27 self.config_loader = None 28 29 def setup(self, config, config_loader, task_function, verbose): 30 self.config = config 31 self.config_loader = config_loader 32 self.task_function = task_function 33 self.verbose = verbose 34 35 def launch_job(self, sweep_overrides, job_dir_key, job_num): 36 # stdout logging until we get the file logging going, logs will be in slurm job log files 37 hydra.plugins.common.utils.configure_log(None, self.verbose) 38 hydra.plugins.common.utils.setup_globals() 39 sweep_config = self.config_loader.load_sweep_config( 40 self.config, sweep_overrides 41 ) 42 43 with open_dict(sweep_config): 44 # Populate new job variables 45 sweep_config.hydra.job.id = ( 46 "${env:SLURM_JOB_ID}" 47 if "SLURM_JOB_ID" in os.environ 48 else "_UNKNOWN_SLURM_ID_" 49 ) 50 sweep_config.hydra.job.num = job_num 51 sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname( 52 sweep_config.hydra.overrides.task 53 ) 54 55 return hydra.plugins.common.utils.run_job( 56 config=sweep_config, 57 task_function=self.task_function, 58 verbose=self.verbose, 59 job_dir_key=job_dir_key, 60 job_subdir_key="hydra.sweep.subdir", 61 ) 62 63 async def run_sweep(self, queue, job_overrides): 64 # load lazily to ensure plugin discovery is fast. 
65 from fairtask import gatherl 66 67 log.info( 68 "Launching {} jobs to {} queue".format(len(job_overrides), self.queue_name) 69 ) 70 num_jobs = len(job_overrides) 71 queue = queue.task(self.queue_name) 72 runs = [] 73 for job_num in range(num_jobs): 74 sweep_override = list(job_overrides[job_num]) 75 log.info( 76 "\t#{} : {}".format( 77 job_num, 78 " ".join( 79 hydra.plugins.common.utils.filter_overrides(sweep_override) 80 ), 81 ) 82 ) 83 runs.append( 84 queue(self.launch_job)(sweep_override, "hydra.sweep.dir", job_num) 85 ) 86 return await gatherl(runs) 87 88 def create_queue(self, num_jobs): 89 # load lazily to ensure plugin discovery is fast. 90 from fairtask import TaskQueues 91 92 assert num_jobs > 0 93 # num_jobs is needed to instantiate the queue below 94 with open_dict(self.config): 95 self.config.hydra.job.num_jobs = num_jobs 96 queues = {} 97 for queue_name, queue_conf in self.queues.items(): 98 queues[queue_name] = utils.instantiate(queue_conf) 99 100 # if no_workers == True, then turn off all queue functionality 101 # and run everything synchronously (good for debugging) 102 return TaskQueues(queues, no_workers=self.no_workers) 103 104 def launch(self, job_overrides): 105 log.info("Sweep output dir : {}".format(self.config.hydra.sweep.dir)) 106 os.makedirs(str(self.config.hydra.sweep.dir), exist_ok=True) 107 loop = asyncio.get_event_loop() 108 with self.create_queue(num_jobs=len(job_overrides)) as queue: 109 return loop.run_until_complete(self.run_sweep(queue, job_overrides)) 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hydra/plugins/common/utils.py b/hydra/plugins/common/utils.py --- a/hydra/plugins/common/utils.py +++ b/hydra/plugins/common/utils.py @@ -129,6 +129,14 @@ cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] + @staticmethod + def get_state(): + return Singleton._instances + + @staticmethod + def set_state(instances): + Singleton._instances = instances + @six.add_metaclass(Singleton) class JobRuntime: diff --git a/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py b/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py --- a/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py +++ b/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py @@ -4,11 +4,10 @@ import logging import os -from omegaconf import open_dict - import hydra.plugins.common.utils from hydra import utils from hydra.plugins import Launcher +from omegaconf import open_dict # TODO: initialize logger before importing fairtask until comments at # https://github.com/fairinternal/fairtask/pull/23 are addressed @@ -32,7 +31,9 @@ self.task_function = task_function self.verbose = verbose - def launch_job(self, sweep_overrides, job_dir_key, job_num): + def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state): + hydra.plugins.common.utils.Singleton.set_state(singleton_state) + # stdout logging until we get the file logging going, logs will be in slurm job log files hydra.plugins.common.utils.configure_log(None, self.verbose) hydra.plugins.common.utils.setup_globals() @@ -81,7 +82,12 @@ ) ) runs.append( - queue(self.launch_job)(sweep_override, "hydra.sweep.dir", job_num) + queue(self.launch_job)( + sweep_override, + "hydra.sweep.dir", + job_num, + hydra.plugins.common.utils.Singleton.get_state(), + ) ) return await gatherl(runs) diff --git a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py --- a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py +++ b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py @@ -30,7 +30,8 @@ self.task_function = task_function self.verbose = verbose - def launch_job(self, sweep_overrides, job_dir_key, job_num): + def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state): + hydra.plugins.common.utils.Singleton.set_state(singleton_state) hydra.plugins.common.utils.configure_log(None, self.verbose) hydra.plugins.common.utils.setup_globals() sweep_config = self.config_loader.load_sweep_config( @@ -103,7 +104,11 @@ ) ) job = executor.submit( - self.launch_job, sweep_override, "hydra.sweep.dir", job_num + self.launch_job, + sweep_override, + "hydra.sweep.dir", + job_num, + hydra.plugins.common.utils.Singleton.get_state(), ) jobs.append(job)
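For context on why the patch above threads `Singleton.get_state()` through to `launch_job()`: submitit and fairtask execute the job function in a separate worker process, and the `JobRuntime` singleton that stores the task name does not travel with the pickled call, so `run_job()` reads back the default `UNKNOWN_NAME`. The sketch below reproduces that failure mode and the fix with a stripped-down `Singleton`/`JobRuntime` (plain dict config instead of OmegaConf, Python 3 `metaclass=` syntax instead of `six.add_metaclass`, and the worker process simulated by clearing the instance cache in the same interpreter); it is an illustration of the mechanism, not Hydra's actual code.

```python
# Sketch of the task-name bug and the get_state()/set_state() fix above.

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

    @staticmethod
    def get_state():
        return Singleton._instances

    @staticmethod
    def set_state(instances):
        Singleton._instances = instances


class JobRuntime(metaclass=Singleton):
    # Simplified: the real class stores its data in an OmegaConf config.
    def __init__(self):
        self.conf = {'name': 'UNKNOWN_NAME'}

    def get(self, key):
        return self.conf[key]

    def set(self, key, value):
        self.conf[key] = value


# --- launcher process ---------------------------------------------------
JobRuntime().set('name', 'my_app')
state = Singleton.get_state()        # now passed along to launch_job() by the patch

# --- worker process (simulated by clearing the instance cache) ----------
Singleton._instances = {}
print(JobRuntime().get('name'))      # UNKNOWN_NAME -> the reported bug

Singleton._instances = {}
Singleton.set_state(state)           # first thing launch_job() does after the patch
print(JobRuntime().get('name'))      # my_app -> task name restored
```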
{"golden_diff": "diff --git a/hydra/plugins/common/utils.py b/hydra/plugins/common/utils.py\n--- a/hydra/plugins/common/utils.py\n+++ b/hydra/plugins/common/utils.py\n@@ -129,6 +129,14 @@\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n \n+ @staticmethod\n+ def get_state():\n+ return Singleton._instances\n+\n+ @staticmethod\n+ def set_state(instances):\n+ Singleton._instances = instances\n+\n \n @six.add_metaclass(Singleton)\n class JobRuntime:\ndiff --git a/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py b/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py\n--- a/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py\n+++ b/plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py\n@@ -4,11 +4,10 @@\n import logging\n import os\n \n-from omegaconf import open_dict\n-\n import hydra.plugins.common.utils\n from hydra import utils\n from hydra.plugins import Launcher\n+from omegaconf import open_dict\n \n # TODO: initialize logger before importing fairtask until comments at\n # https://github.com/fairinternal/fairtask/pull/23 are addressed\n@@ -32,7 +31,9 @@\n self.task_function = task_function\n self.verbose = verbose\n \n- def launch_job(self, sweep_overrides, job_dir_key, job_num):\n+ def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state):\n+ hydra.plugins.common.utils.Singleton.set_state(singleton_state)\n+\n # stdout logging until we get the file logging going, logs will be in slurm job log files\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n@@ -81,7 +82,12 @@\n )\n )\n runs.append(\n- queue(self.launch_job)(sweep_override, \"hydra.sweep.dir\", job_num)\n+ queue(self.launch_job)(\n+ sweep_override,\n+ \"hydra.sweep.dir\",\n+ job_num,\n+ hydra.plugins.common.utils.Singleton.get_state(),\n+ )\n )\n return await gatherl(runs)\n \ndiff --git a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n--- a/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n+++ b/plugins/submitit/hydra_plugins/submitit/submitit_launcher.py\n@@ -30,7 +30,8 @@\n self.task_function = task_function\n self.verbose = verbose\n \n- def launch_job(self, sweep_overrides, job_dir_key, job_num):\n+ def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state):\n+ hydra.plugins.common.utils.Singleton.set_state(singleton_state)\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n@@ -103,7 +104,11 @@\n )\n )\n job = executor.submit(\n- self.launch_job, sweep_override, \"hydra.sweep.dir\", job_num\n+ self.launch_job,\n+ sweep_override,\n+ \"hydra.sweep.dir\",\n+ job_num,\n+ hydra.plugins.common.utils.Singleton.get_state(),\n )\n jobs.append(job)\n", "issue": "Task name lost with submitit and fairtask\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nfrom time import strftime, localtime\n\nimport six\nfrom omegaconf import OmegaConf, DictConfig\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(log_config, verbose=None):\n if log_config is not None:\n conf = log_config.to_container(resolve=True)\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n if verbose is not None:\n if verbose == \"root\":\n logging.getLogger().setLevel(logging.DEBUG)\n for logger in verbose.split(\",\"):\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef save_config(cfg, filename):\n with open(os.path.join(filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef get_overrides_dirname(lst, exclude_keys=[]):\n lst = [x for x in lst if x not in exclude_keys]\n lst.sort()\n return re.sub(pattern=\"[=]\", repl=\"=\", string=\",\".join(lst))\n\n\ndef filter_overrides(overrides):\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. fitlered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(config, task_function, verbose, job_dir_key, job_subdir_key):\n old_cwd = os.getcwd()\n working_dir = str(config.select(job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(config.select(job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n hydra_cfg = task_cfg[\"hydra\"]\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = copy.deepcopy(HydraConfig())\n ret.overrides = config.hydra.overrides.task.to_container()\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)\n os.chdir(working_dir)\n configure_log(config.hydra.job_logging, verbose)\n\n save_config(task_cfg, \"config.yaml\")\n save_config(hydra_cfg, \"hydra.yaml\")\n save_config(config.hydra.overrides.task, \"overrides.yaml\")\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime().get(\"name\")\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s):\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals():\n try:\n OmegaConf.register_resolver(\n \"now\", lambda pattern: strftime(pattern, localtime())\n )\n\n def job_error(x):\n raise Exception(\n \"job:{} is no longer available. use hydra.job.{}\".format(x, x)\n )\n\n OmegaConf.register_resolver(\"job\", job_error)\n\n except AssertionError:\n # calling it again in no_workers mode will throw. 
safe to ignore.\n pass\n\n\nclass JobReturn:\n def __init__(self):\n self.overrides = None\n self.return_value = None\n self.cfg = None\n self.hydra_cfg = None\n self.working_dir = None\n self.task_name = None\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\[email protected]_metaclass(Singleton)\nclass JobRuntime:\n def __init__(self):\n self.conf = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key):\n ret = self.conf.select(key)\n if ret is None:\n raise KeyError(\"Key not found in {}: {}\".format(type(self).__name__, key))\n return ret\n\n def set(self, key, value):\n log.debug(\"Setting {}:{}={}\".format(type(self).__name__, key, value))\n self.conf[key] = value\n\n\[email protected]_metaclass(Singleton)\nclass HydraConfig(DictConfig):\n def __init__(self):\n super(HydraConfig, self).__init__(content={})\n self.hydra = None\n\n def set_config(self, cfg):\n try:\n OmegaConf.set_readonly(self, False)\n self.hydra = copy.deepcopy(cfg.hydra)\n finally:\n OmegaConf.set_readonly(self, True)\n", "path": "hydra/plugins/common/utils.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nimport os\n\nimport hydra._internal.utils\nimport hydra.plugins.common.utils\nfrom hydra.plugins import Launcher\n\nfrom omegaconf import open_dict\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\nclass SubmititLauncher(Launcher):\n def __init__(self, queue, folder, queue_parameters, conda_file=None):\n self.queue = queue\n self.queue_parameters = queue_parameters\n self.folder = folder\n self.conda_file = conda_file\n self.config = None\n self.task_function = None\n self.verbose = None\n self.sweep_configs = None\n self.config_loader = None\n\n def setup(self, config, config_loader, task_function, verbose):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n self.verbose = verbose\n\n def launch_job(self, sweep_overrides, job_dir_key, job_num):\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n self.config, sweep_overrides\n )\n with open_dict(sweep_config):\n # Populate new job variables\n if \"SLURM_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"SLURM_JOB_ID\"]\n elif \"CHRONOS_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"CHRONOS_JOB_ID\"]\n else:\n sweep_config.hydra.job.id = \"unknown\"\n\n sweep_config.hydra.job.num = job_num\n sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n\n return hydra.plugins.common.utils.run_job(\n config=sweep_config,\n task_function=self.task_function,\n verbose=self.verbose,\n job_dir_key=job_dir_key,\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n def launch(self, job_overrides):\n # lazy import to ensurue plugin discovery remains fast\n import submitit\n\n num_jobs = len(job_overrides)\n assert num_jobs > 0\n with open_dict(self.config):\n self.config.hydra.job.num_jobs = num_jobs\n\n if self.queue == \"auto\":\n executor = submitit.AutoExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"slurm\":\n executor = submitit.SlurmExecutor(folder=self.folder)\n elif self.queue == \"chronos\":\n 
executor = submitit.ChronosExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"local\":\n executor = submitit.LocalExecutor(folder=self.folder)\n else:\n raise RuntimeError(\"Unsupported queue type {}\".format(self.queue))\n\n executor.update_parameters(**self.queue_parameters[self.queue])\n\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n path_str = str(self.config.hydra.sweep.dir)\n os.makedirs(path_str, exist_ok=True)\n if \"mode\" in self.config.hydra.sweep:\n mode = int(str(self.config.hydra.sweep.mode), 8)\n os.chmod(path_str, mode=mode)\n\n jobs = []\n for job_num in range(num_jobs):\n sweep_override = list(job_overrides[job_num])\n log.info(\n \"\\t#{} : {}\".format(\n job_num,\n \" \".join(\n hydra.plugins.common.utils.filter_overrides(sweep_override)\n ),\n )\n )\n job = executor.submit(\n self.launch_job, sweep_override, \"hydra.sweep.dir\", job_num\n )\n jobs.append(job)\n\n return [j.results() for j in jobs]\n", "path": "plugins/submitit/hydra_plugins/submitit/submitit_launcher.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport asyncio\nimport logging\nimport os\n\nfrom omegaconf import open_dict\n\nimport hydra.plugins.common.utils\nfrom hydra import utils\nfrom hydra.plugins import Launcher\n\n# TODO: initialize logger before importing fairtask until comments at\n# https://github.com/fairinternal/fairtask/pull/23 are addressed\nlog = logging.getLogger(__name__) # noqa: E402\n\n\nclass FAIRTaskLauncher(Launcher):\n def __init__(self, queue, queues, no_workers=False):\n self.queue_name = queue\n self.queues = queues\n self.config = None\n self.task_function = None\n self.verbose = None\n self.sweep_configs = None\n self.no_workers = no_workers\n self.config_loader = None\n\n def setup(self, config, config_loader, task_function, verbose):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n self.verbose = verbose\n\n def launch_job(self, sweep_overrides, job_dir_key, job_num):\n # stdout logging until we get the file logging going, logs will be in slurm job log files\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n self.config, sweep_overrides\n )\n\n with open_dict(sweep_config):\n # Populate new job variables\n sweep_config.hydra.job.id = (\n \"${env:SLURM_JOB_ID}\"\n if \"SLURM_JOB_ID\" in os.environ\n else \"_UNKNOWN_SLURM_ID_\"\n )\n sweep_config.hydra.job.num = job_num\n sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n\n return hydra.plugins.common.utils.run_job(\n config=sweep_config,\n task_function=self.task_function,\n verbose=self.verbose,\n job_dir_key=job_dir_key,\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n async def run_sweep(self, queue, job_overrides):\n # load lazily to ensure plugin discovery is fast.\n from fairtask import gatherl\n\n log.info(\n \"Launching {} jobs to {} queue\".format(len(job_overrides), self.queue_name)\n )\n num_jobs = len(job_overrides)\n queue = queue.task(self.queue_name)\n runs = []\n for job_num in range(num_jobs):\n sweep_override = list(job_overrides[job_num])\n log.info(\n \"\\t#{} : {}\".format(\n job_num,\n \" \".join(\n hydra.plugins.common.utils.filter_overrides(sweep_override)\n ),\n )\n )\n runs.append(\n queue(self.launch_job)(sweep_override, \"hydra.sweep.dir\", 
job_num)\n )\n return await gatherl(runs)\n\n def create_queue(self, num_jobs):\n # load lazily to ensure plugin discovery is fast.\n from fairtask import TaskQueues\n\n assert num_jobs > 0\n # num_jobs is needed to instantiate the queue below\n with open_dict(self.config):\n self.config.hydra.job.num_jobs = num_jobs\n queues = {}\n for queue_name, queue_conf in self.queues.items():\n queues[queue_name] = utils.instantiate(queue_conf)\n\n # if no_workers == True, then turn off all queue functionality\n # and run everything synchronously (good for debugging)\n return TaskQueues(queues, no_workers=self.no_workers)\n\n def launch(self, job_overrides):\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n os.makedirs(str(self.config.hydra.sweep.dir), exist_ok=True)\n loop = asyncio.get_event_loop()\n with self.create_queue(num_jobs=len(job_overrides)) as queue:\n return loop.run_until_complete(self.run_sweep(queue, job_overrides))\n", "path": "plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nfrom time import strftime, localtime\n\nimport six\nfrom omegaconf import OmegaConf, DictConfig\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(log_config, verbose=None):\n if log_config is not None:\n conf = log_config.to_container(resolve=True)\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n if verbose is not None:\n if verbose == \"root\":\n logging.getLogger().setLevel(logging.DEBUG)\n for logger in verbose.split(\",\"):\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef save_config(cfg, filename):\n with open(os.path.join(filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef get_overrides_dirname(lst, exclude_keys=[]):\n lst = [x for x in lst if x not in exclude_keys]\n lst.sort()\n return re.sub(pattern=\"[=]\", repl=\"=\", string=\",\".join(lst))\n\n\ndef filter_overrides(overrides):\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. 
fitlered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(config, task_function, verbose, job_dir_key, job_subdir_key):\n old_cwd = os.getcwd()\n working_dir = str(config.select(job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(config.select(job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n hydra_cfg = task_cfg[\"hydra\"]\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = copy.deepcopy(HydraConfig())\n ret.overrides = config.hydra.overrides.task.to_container()\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)\n os.chdir(working_dir)\n configure_log(config.hydra.job_logging, verbose)\n\n save_config(task_cfg, \"config.yaml\")\n save_config(hydra_cfg, \"hydra.yaml\")\n save_config(config.hydra.overrides.task, \"overrides.yaml\")\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime().get(\"name\")\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s):\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals():\n try:\n OmegaConf.register_resolver(\n \"now\", lambda pattern: strftime(pattern, localtime())\n )\n\n def job_error(x):\n raise Exception(\n \"job:{} is no longer available. use hydra.job.{}\".format(x, x)\n )\n\n OmegaConf.register_resolver(\"job\", job_error)\n\n except AssertionError:\n # calling it again in no_workers mode will throw. safe to ignore.\n pass\n\n\nclass JobReturn:\n def __init__(self):\n self.overrides = None\n self.return_value = None\n self.cfg = None\n self.hydra_cfg = None\n self.working_dir = None\n self.task_name = None\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n @staticmethod\n def get_state():\n return Singleton._instances\n\n @staticmethod\n def set_state(instances):\n Singleton._instances = instances\n\n\[email protected]_metaclass(Singleton)\nclass JobRuntime:\n def __init__(self):\n self.conf = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key):\n ret = self.conf.select(key)\n if ret is None:\n raise KeyError(\"Key not found in {}: {}\".format(type(self).__name__, key))\n return ret\n\n def set(self, key, value):\n log.debug(\"Setting {}:{}={}\".format(type(self).__name__, key, value))\n self.conf[key] = value\n\n\[email protected]_metaclass(Singleton)\nclass HydraConfig(DictConfig):\n def __init__(self):\n super(HydraConfig, self).__init__(content={})\n self.hydra = None\n\n def set_config(self, cfg):\n try:\n OmegaConf.set_readonly(self, False)\n self.hydra = copy.deepcopy(cfg.hydra)\n finally:\n OmegaConf.set_readonly(self, True)\n", "path": "hydra/plugins/common/utils.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nimport os\n\nimport hydra._internal.utils\nimport hydra.plugins.common.utils\nfrom hydra.plugins import Launcher\n\nfrom omegaconf import open_dict\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\nclass SubmititLauncher(Launcher):\n def __init__(self, queue, folder, queue_parameters, conda_file=None):\n self.queue = queue\n self.queue_parameters = queue_parameters\n self.folder = folder\n self.conda_file = conda_file\n self.config = None\n self.task_function = None\n self.verbose = None\n self.sweep_configs = None\n self.config_loader = None\n\n def setup(self, config, config_loader, task_function, verbose):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n self.verbose = verbose\n\n def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state):\n hydra.plugins.common.utils.Singleton.set_state(singleton_state)\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n self.config, sweep_overrides\n )\n with open_dict(sweep_config):\n # Populate new job variables\n if \"SLURM_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"SLURM_JOB_ID\"]\n elif \"CHRONOS_JOB_ID\" in os.environ:\n sweep_config.hydra.job.id = os.environ[\"CHRONOS_JOB_ID\"]\n else:\n sweep_config.hydra.job.id = \"unknown\"\n\n sweep_config.hydra.job.num = job_num\n sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n\n return hydra.plugins.common.utils.run_job(\n config=sweep_config,\n task_function=self.task_function,\n verbose=self.verbose,\n job_dir_key=job_dir_key,\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n def launch(self, job_overrides):\n # lazy import to ensurue plugin discovery remains fast\n import submitit\n\n num_jobs = len(job_overrides)\n assert num_jobs > 0\n with open_dict(self.config):\n self.config.hydra.job.num_jobs = num_jobs\n\n if self.queue == \"auto\":\n executor = submitit.AutoExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"slurm\":\n executor = submitit.SlurmExecutor(folder=self.folder)\n elif self.queue == \"chronos\":\n executor = submitit.ChronosExecutor(\n folder=self.folder, conda_file=self.conda_file\n )\n elif self.queue == \"local\":\n executor = submitit.LocalExecutor(folder=self.folder)\n else:\n raise RuntimeError(\"Unsupported queue type {}\".format(self.queue))\n\n executor.update_parameters(**self.queue_parameters[self.queue])\n\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n path_str = str(self.config.hydra.sweep.dir)\n os.makedirs(path_str, exist_ok=True)\n if \"mode\" in self.config.hydra.sweep:\n mode = int(str(self.config.hydra.sweep.mode), 8)\n os.chmod(path_str, mode=mode)\n\n jobs = []\n for job_num in range(num_jobs):\n sweep_override = list(job_overrides[job_num])\n log.info(\n \"\\t#{} : {}\".format(\n job_num,\n \" \".join(\n hydra.plugins.common.utils.filter_overrides(sweep_override)\n ),\n )\n )\n job = executor.submit(\n self.launch_job,\n sweep_override,\n \"hydra.sweep.dir\",\n job_num,\n hydra.plugins.common.utils.Singleton.get_state(),\n )\n jobs.append(job)\n\n return [j.results() for j in jobs]\n", "path": "plugins/submitit/hydra_plugins/submitit/submitit_launcher.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport asyncio\nimport logging\nimport os\n\nimport hydra.plugins.common.utils\nfrom hydra import utils\nfrom hydra.plugins import Launcher\nfrom omegaconf import open_dict\n\n# TODO: initialize logger before importing fairtask until comments at\n# https://github.com/fairinternal/fairtask/pull/23 are addressed\nlog = logging.getLogger(__name__) # noqa: E402\n\n\nclass FAIRTaskLauncher(Launcher):\n def __init__(self, queue, queues, no_workers=False):\n self.queue_name = queue\n self.queues = queues\n self.config = None\n self.task_function = None\n self.verbose = None\n self.sweep_configs = None\n self.no_workers = no_workers\n self.config_loader = None\n\n def setup(self, config, config_loader, task_function, verbose):\n self.config = config\n self.config_loader = config_loader\n self.task_function = task_function\n self.verbose = verbose\n\n def launch_job(self, sweep_overrides, job_dir_key, job_num, singleton_state):\n hydra.plugins.common.utils.Singleton.set_state(singleton_state)\n\n # stdout logging until we get the file logging going, logs will be in slurm job log files\n hydra.plugins.common.utils.configure_log(None, self.verbose)\n hydra.plugins.common.utils.setup_globals()\n sweep_config = self.config_loader.load_sweep_config(\n self.config, sweep_overrides\n )\n\n with open_dict(sweep_config):\n # Populate new job variables\n sweep_config.hydra.job.id = (\n \"${env:SLURM_JOB_ID}\"\n if \"SLURM_JOB_ID\" in os.environ\n else \"_UNKNOWN_SLURM_ID_\"\n )\n sweep_config.hydra.job.num = job_num\n sweep_config.hydra.job.override_dirname = hydra.plugins.common.utils.get_overrides_dirname(\n sweep_config.hydra.overrides.task\n )\n\n return hydra.plugins.common.utils.run_job(\n config=sweep_config,\n task_function=self.task_function,\n verbose=self.verbose,\n job_dir_key=job_dir_key,\n job_subdir_key=\"hydra.sweep.subdir\",\n )\n\n async def run_sweep(self, queue, job_overrides):\n # load lazily to ensure plugin discovery is fast.\n from fairtask import gatherl\n\n log.info(\n \"Launching {} jobs to {} queue\".format(len(job_overrides), self.queue_name)\n )\n num_jobs = len(job_overrides)\n queue = queue.task(self.queue_name)\n runs = []\n for job_num in range(num_jobs):\n sweep_override = list(job_overrides[job_num])\n log.info(\n \"\\t#{} : {}\".format(\n job_num,\n \" \".join(\n hydra.plugins.common.utils.filter_overrides(sweep_override)\n ),\n )\n )\n runs.append(\n queue(self.launch_job)(\n sweep_override,\n \"hydra.sweep.dir\",\n job_num,\n hydra.plugins.common.utils.Singleton.get_state(),\n )\n )\n return await gatherl(runs)\n\n def create_queue(self, num_jobs):\n # load lazily to ensure plugin discovery is fast.\n from fairtask import TaskQueues\n\n assert num_jobs > 0\n # num_jobs is needed to instantiate the queue below\n with open_dict(self.config):\n self.config.hydra.job.num_jobs = num_jobs\n queues = {}\n for queue_name, queue_conf in self.queues.items():\n queues[queue_name] = utils.instantiate(queue_conf)\n\n # if no_workers == True, then turn off all queue functionality\n # and run everything synchronously (good for debugging)\n return TaskQueues(queues, no_workers=self.no_workers)\n\n def launch(self, job_overrides):\n log.info(\"Sweep output dir : {}\".format(self.config.hydra.sweep.dir))\n os.makedirs(str(self.config.hydra.sweep.dir), exist_ok=True)\n loop = asyncio.get_event_loop()\n with self.create_queue(num_jobs=len(job_overrides)) as queue:\n return loop.run_until_complete(self.run_sweep(queue, job_overrides))\n", "path": 
"plugins/fairtask/hydra_plugins/fairtask/fairtask_launcher.py"}]}
num_tokens: 3998
num_tokens_diff: 794
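The hydra golden diff above fixes the lost task name by snapshotting singleton state in the launching process and restoring it inside each remote job before anything else runs. The sketch below shows that hand-off pattern in isolation; the class and function names are illustrative stand-ins rather than hydra's actual API, and the state is passed in-process here instead of through a real fairtask or submitit queue.

```python
import copy


class Singleton(type):
    """Metaclass that keeps one instance per class and exposes that registry."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

    @staticmethod
    def get_state():
        return Singleton._instances

    @staticmethod
    def set_state(instances):
        Singleton._instances = instances


class JobRuntime(metaclass=Singleton):
    def __init__(self):
        self.name = "UNKNOWN_NAME"


def launch_job(job_num, singleton_state):
    # Runs on the worker side: restore the parent's singletons first,
    # so JobRuntime() resolves to the instance configured by the parent.
    Singleton.set_state(singleton_state)
    return job_num, JobRuntime().name


if __name__ == "__main__":
    JobRuntime().name = "my_task"              # set in the launching process
    state = copy.copy(Singleton.get_state())   # snapshot shipped to each job
    print(launch_job(0, state))                # -> (0, 'my_task')
```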
problem_id: gh_patches_debug_15429
source: rasdani/github-patches
task_type: git_diff
in_source_id: ipython__ipython-10264
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TAB key does not indent In the git version of IPython, type ``` def foo(a): pass<ENTER> ``` The new line is not correctly indented, and the tab key does not insert 4 spaces. /cc @Carreau @takluyver --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/terminal/shortcuts.py` Content: ``` 1 import signal 2 import sys 3 4 from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER 5 from prompt_toolkit.filters import (HasFocus, HasSelection, Condition, 6 ViInsertMode, EmacsInsertMode, HasCompletions) 7 from prompt_toolkit.filters.cli import ViMode, ViNavigationMode 8 from prompt_toolkit.keys import Keys 9 from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline 10 11 from IPython.utils.decorators import undoc 12 13 @Condition 14 def cursor_in_leading_ws(cli): 15 before = cli.application.buffer.document.current_line_before_cursor 16 return (not before) or before.isspace() 17 18 def register_ipython_shortcuts(registry, shell): 19 """Set up the prompt_toolkit keyboard shortcuts for IPython""" 20 insert_mode = ViInsertMode() | EmacsInsertMode() 21 22 # Ctrl+J == Enter, seemingly 23 registry.add_binding(Keys.ControlJ, 24 filter=(HasFocus(DEFAULT_BUFFER) 25 & ~HasSelection() 26 & insert_mode 27 ))(newline_or_execute_outer(shell)) 28 29 registry.add_binding(Keys.ControlBackslash)(force_exit) 30 31 registry.add_binding(Keys.ControlP, 32 filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER) 33 ))(previous_history_or_previous_completion) 34 35 registry.add_binding(Keys.ControlN, 36 filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER) 37 ))(next_history_or_next_completion) 38 39 registry.add_binding(Keys.ControlG, 40 filter=(HasFocus(DEFAULT_BUFFER) & HasCompletions() 41 ))(dismiss_completion) 42 43 registry.add_binding(Keys.ControlC, filter=HasFocus(DEFAULT_BUFFER) 44 )(reset_buffer) 45 46 registry.add_binding(Keys.ControlC, filter=HasFocus(SEARCH_BUFFER) 47 )(reset_search_buffer) 48 49 supports_suspend = Condition(lambda cli: hasattr(signal, 'SIGTSTP')) 50 registry.add_binding(Keys.ControlZ, filter=supports_suspend 51 )(suspend_to_bg) 52 53 # Ctrl+I == Tab 54 registry.add_binding(Keys.ControlI, 55 filter=(HasFocus(DEFAULT_BUFFER) 56 & ~HasSelection() 57 & insert_mode 58 & cursor_in_leading_ws 59 ))(indent_buffer) 60 61 registry.add_binding(Keys.ControlO, 62 filter=(HasFocus(DEFAULT_BUFFER) 63 & EmacsInsertMode()))(newline_with_copy_margin) 64 65 registry.add_binding(Keys.F2, 66 filter=HasFocus(DEFAULT_BUFFER) 67 )(open_input_in_editor) 68 69 registry.add_binding('v', 70 filter=HasFocus(DEFAULT_BUFFER) & ViNavigationMode() 71 )(open_input_in_editor) 72 73 if shell.display_completions == 'readlinelike': 74 registry.add_binding(Keys.ControlI, 75 filter=(HasFocus(DEFAULT_BUFFER) 76 & ~HasSelection() 77 & insert_mode 78 & ~cursor_in_leading_ws 79 ))(display_completions_like_readline) 80 81 if sys.platform == 'win32': 82 registry.add_binding(Keys.ControlV, 83 filter=( 84 HasFocus( 85 DEFAULT_BUFFER) & ~ViMode() 86 ))(win_paste) 87 88 89 def newline_or_execute_outer(shell): 90 def newline_or_execute(event): 91 """When the user presses return, insert a newline or execute the code.""" 92 b = event.current_buffer 93 d = b.document 94 95 if b.complete_state: 96 cc = b.complete_state.current_completion 97 if cc: 98 b.apply_completion(cc) 99 else: 100 b.cancel_completion() 
101 return 102 103 if not (d.on_last_line or d.cursor_position_row >= d.line_count 104 - d.empty_line_count_at_the_end()): 105 b.newline() 106 return 107 108 status, indent = shell.input_splitter.check_complete(d.text + '\n') 109 110 if (status != 'incomplete') and b.accept_action.is_returnable: 111 b.accept_action.validate_and_handle(event.cli, b) 112 else: 113 b.insert_text('\n' + (' ' * (indent or 0))) 114 return newline_or_execute 115 116 117 def previous_history_or_previous_completion(event): 118 """ 119 Control-P in vi edit mode on readline is history next, unlike default prompt toolkit. 120 121 If completer is open this still select previous completion. 122 """ 123 event.current_buffer.auto_up() 124 125 126 def next_history_or_next_completion(event): 127 """ 128 Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit. 129 130 If completer is open this still select next completion. 131 """ 132 event.current_buffer.auto_down() 133 134 135 def dismiss_completion(event): 136 b = event.current_buffer 137 if b.complete_state: 138 b.cancel_completion() 139 140 141 def reset_buffer(event): 142 b = event.current_buffer 143 if b.complete_state: 144 b.cancel_completion() 145 else: 146 b.reset() 147 148 149 def reset_search_buffer(event): 150 if event.current_buffer.document.text: 151 event.current_buffer.reset() 152 else: 153 event.cli.push_focus(DEFAULT_BUFFER) 154 155 def suspend_to_bg(event): 156 event.cli.suspend_to_background() 157 158 def force_exit(event): 159 """ 160 Force exit (with a non-zero return value) 161 """ 162 sys.exit("Quit") 163 164 def indent_buffer(event): 165 event.current_buffer.insert_text(' ' * 4) 166 167 def newline_with_copy_margin(event): 168 """ 169 Preserve margin and cursor position when using 170 Control-O to insert a newline in EMACS mode 171 """ 172 b = event.current_buffer 173 cursor_start_pos = b.document.cursor_position_col 174 b.newline(copy_margin=True) 175 b.cursor_up(count=1) 176 cursor_end_pos = b.document.cursor_position_col 177 if cursor_start_pos != cursor_end_pos: 178 pos_diff = cursor_start_pos - cursor_end_pos 179 b.cursor_right(count=pos_diff) 180 181 def open_input_in_editor(event): 182 event.cli.current_buffer.tempfile_suffix = ".py" 183 event.cli.current_buffer.open_in_editor(event.cli) 184 185 186 if sys.platform == 'win32': 187 from IPython.core.error import TryNext 188 from IPython.lib.clipboard import (ClipboardEmpty, 189 win32_clipboard_get, 190 tkinter_clipboard_get) 191 192 @undoc 193 def win_paste(event): 194 try: 195 text = win32_clipboard_get() 196 except TryNext: 197 try: 198 text = tkinter_clipboard_get() 199 except (TryNext, ClipboardEmpty): 200 return 201 except ClipboardEmpty: 202 return 203 event.current_buffer.insert_text(text.replace('\t', ' ' * 4)) 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/terminal/shortcuts.py b/IPython/terminal/shortcuts.py --- a/IPython/terminal/shortcuts.py +++ b/IPython/terminal/shortcuts.py @@ -100,12 +100,14 @@ b.cancel_completion() return - if not (d.on_last_line or d.cursor_position_row >= d.line_count - - d.empty_line_count_at_the_end()): - b.newline() - return + before_text = d.text[:d.cursor_position] + status, indent = shell.input_splitter.check_complete(before_text + '\n') - status, indent = shell.input_splitter.check_complete(d.text + '\n') + if not (d.on_last_line or + d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end() + ): + b.insert_text('\n' + (' ' * (indent or 0))) + return if (status != 'incomplete') and b.accept_action.is_returnable: b.accept_action.validate_and_handle(event.cli, b)
{"golden_diff": "diff --git a/IPython/terminal/shortcuts.py b/IPython/terminal/shortcuts.py\n--- a/IPython/terminal/shortcuts.py\n+++ b/IPython/terminal/shortcuts.py\n@@ -100,12 +100,14 @@\n b.cancel_completion()\n return\n \n- if not (d.on_last_line or d.cursor_position_row >= d.line_count\n- - d.empty_line_count_at_the_end()):\n- b.newline()\n- return\n+ before_text = d.text[:d.cursor_position]\n+ status, indent = shell.input_splitter.check_complete(before_text + '\\n')\n \n- status, indent = shell.input_splitter.check_complete(d.text + '\\n')\n+ if not (d.on_last_line or\n+ d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end()\n+ ):\n+ b.insert_text('\\n' + (' ' * (indent or 0)))\n+ return\n \n if (status != 'incomplete') and b.accept_action.is_returnable:\n b.accept_action.validate_and_handle(event.cli, b)\n", "issue": "TAB key does not indent\nIn the git version of IPython, type\n\n```\ndef foo(a):\n pass<ENTER>\n```\n\nThe new line is not correctly indented, and the tab key does not insert 4 spaces.\n\n/cc @Carreau @takluyver \n\n", "before_files": [{"content": "import signal\nimport sys\n\nfrom prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER\nfrom prompt_toolkit.filters import (HasFocus, HasSelection, Condition,\n ViInsertMode, EmacsInsertMode, HasCompletions)\nfrom prompt_toolkit.filters.cli import ViMode, ViNavigationMode\nfrom prompt_toolkit.keys import Keys\nfrom prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline\n\nfrom IPython.utils.decorators import undoc\n\n@Condition\ndef cursor_in_leading_ws(cli):\n before = cli.application.buffer.document.current_line_before_cursor\n return (not before) or before.isspace()\n\ndef register_ipython_shortcuts(registry, shell):\n \"\"\"Set up the prompt_toolkit keyboard shortcuts for IPython\"\"\"\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n # Ctrl+J == Enter, seemingly\n registry.add_binding(Keys.ControlJ,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n ))(newline_or_execute_outer(shell))\n\n registry.add_binding(Keys.ControlBackslash)(force_exit)\n\n registry.add_binding(Keys.ControlP,\n filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER)\n ))(previous_history_or_previous_completion)\n\n registry.add_binding(Keys.ControlN,\n filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER)\n ))(next_history_or_next_completion)\n\n registry.add_binding(Keys.ControlG,\n filter=(HasFocus(DEFAULT_BUFFER) & HasCompletions()\n ))(dismiss_completion)\n\n registry.add_binding(Keys.ControlC, filter=HasFocus(DEFAULT_BUFFER)\n )(reset_buffer)\n\n registry.add_binding(Keys.ControlC, filter=HasFocus(SEARCH_BUFFER)\n )(reset_search_buffer)\n\n supports_suspend = Condition(lambda cli: hasattr(signal, 'SIGTSTP'))\n registry.add_binding(Keys.ControlZ, filter=supports_suspend\n )(suspend_to_bg)\n\n # Ctrl+I == Tab\n registry.add_binding(Keys.ControlI,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n & cursor_in_leading_ws\n ))(indent_buffer)\n\n registry.add_binding(Keys.ControlO,\n filter=(HasFocus(DEFAULT_BUFFER)\n & EmacsInsertMode()))(newline_with_copy_margin)\n\n registry.add_binding(Keys.F2,\n filter=HasFocus(DEFAULT_BUFFER)\n )(open_input_in_editor)\n\n registry.add_binding('v',\n filter=HasFocus(DEFAULT_BUFFER) & ViNavigationMode()\n )(open_input_in_editor)\n\n if shell.display_completions == 'readlinelike':\n registry.add_binding(Keys.ControlI,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n & ~cursor_in_leading_ws\n 
))(display_completions_like_readline)\n\n if sys.platform == 'win32':\n registry.add_binding(Keys.ControlV,\n filter=(\n HasFocus(\n DEFAULT_BUFFER) & ~ViMode()\n ))(win_paste)\n\n\ndef newline_or_execute_outer(shell):\n def newline_or_execute(event):\n \"\"\"When the user presses return, insert a newline or execute the code.\"\"\"\n b = event.current_buffer\n d = b.document\n\n if b.complete_state:\n cc = b.complete_state.current_completion\n if cc:\n b.apply_completion(cc)\n else:\n b.cancel_completion()\n return\n\n if not (d.on_last_line or d.cursor_position_row >= d.line_count\n - d.empty_line_count_at_the_end()):\n b.newline()\n return\n\n status, indent = shell.input_splitter.check_complete(d.text + '\\n')\n\n if (status != 'incomplete') and b.accept_action.is_returnable:\n b.accept_action.validate_and_handle(event.cli, b)\n else:\n b.insert_text('\\n' + (' ' * (indent or 0)))\n return newline_or_execute\n\n\ndef previous_history_or_previous_completion(event):\n \"\"\"\n Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.\n\n If completer is open this still select previous completion.\n \"\"\"\n event.current_buffer.auto_up()\n\n\ndef next_history_or_next_completion(event):\n \"\"\"\n Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.\n\n If completer is open this still select next completion.\n \"\"\"\n event.current_buffer.auto_down()\n\n\ndef dismiss_completion(event):\n b = event.current_buffer\n if b.complete_state:\n b.cancel_completion()\n\n\ndef reset_buffer(event):\n b = event.current_buffer\n if b.complete_state:\n b.cancel_completion()\n else:\n b.reset()\n\n\ndef reset_search_buffer(event):\n if event.current_buffer.document.text:\n event.current_buffer.reset()\n else:\n event.cli.push_focus(DEFAULT_BUFFER)\n\ndef suspend_to_bg(event):\n event.cli.suspend_to_background()\n\ndef force_exit(event):\n \"\"\"\n Force exit (with a non-zero return value)\n \"\"\"\n sys.exit(\"Quit\")\n\ndef indent_buffer(event):\n event.current_buffer.insert_text(' ' * 4)\n\ndef newline_with_copy_margin(event):\n \"\"\"\n Preserve margin and cursor position when using\n Control-O to insert a newline in EMACS mode\n \"\"\"\n b = event.current_buffer\n cursor_start_pos = b.document.cursor_position_col\n b.newline(copy_margin=True)\n b.cursor_up(count=1)\n cursor_end_pos = b.document.cursor_position_col\n if cursor_start_pos != cursor_end_pos:\n pos_diff = cursor_start_pos - cursor_end_pos\n b.cursor_right(count=pos_diff)\n\ndef open_input_in_editor(event):\n event.cli.current_buffer.tempfile_suffix = \".py\"\n event.cli.current_buffer.open_in_editor(event.cli)\n\n\nif sys.platform == 'win32':\n from IPython.core.error import TryNext\n from IPython.lib.clipboard import (ClipboardEmpty,\n win32_clipboard_get,\n tkinter_clipboard_get)\n\n @undoc\n def win_paste(event):\n try:\n text = win32_clipboard_get()\n except TryNext:\n try:\n text = tkinter_clipboard_get()\n except (TryNext, ClipboardEmpty):\n return\n except ClipboardEmpty:\n return\n event.current_buffer.insert_text(text.replace('\\t', ' ' * 4))\n", "path": "IPython/terminal/shortcuts.py"}], "after_files": [{"content": "import signal\nimport sys\n\nfrom prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER\nfrom prompt_toolkit.filters import (HasFocus, HasSelection, Condition,\n ViInsertMode, EmacsInsertMode, HasCompletions)\nfrom prompt_toolkit.filters.cli import ViMode, ViNavigationMode\nfrom prompt_toolkit.keys import Keys\nfrom 
prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline\n\nfrom IPython.utils.decorators import undoc\n\n@Condition\ndef cursor_in_leading_ws(cli):\n before = cli.application.buffer.document.current_line_before_cursor\n return (not before) or before.isspace()\n\ndef register_ipython_shortcuts(registry, shell):\n \"\"\"Set up the prompt_toolkit keyboard shortcuts for IPython\"\"\"\n insert_mode = ViInsertMode() | EmacsInsertMode()\n\n # Ctrl+J == Enter, seemingly\n registry.add_binding(Keys.ControlJ,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n ))(newline_or_execute_outer(shell))\n\n registry.add_binding(Keys.ControlBackslash)(force_exit)\n\n registry.add_binding(Keys.ControlP,\n filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER)\n ))(previous_history_or_previous_completion)\n\n registry.add_binding(Keys.ControlN,\n filter=(ViInsertMode() & HasFocus(DEFAULT_BUFFER)\n ))(next_history_or_next_completion)\n\n registry.add_binding(Keys.ControlG,\n filter=(HasFocus(DEFAULT_BUFFER) & HasCompletions()\n ))(dismiss_completion)\n\n registry.add_binding(Keys.ControlC, filter=HasFocus(DEFAULT_BUFFER)\n )(reset_buffer)\n\n registry.add_binding(Keys.ControlC, filter=HasFocus(SEARCH_BUFFER)\n )(reset_search_buffer)\n\n supports_suspend = Condition(lambda cli: hasattr(signal, 'SIGTSTP'))\n registry.add_binding(Keys.ControlZ, filter=supports_suspend\n )(suspend_to_bg)\n\n # Ctrl+I == Tab\n registry.add_binding(Keys.ControlI,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n & cursor_in_leading_ws\n ))(indent_buffer)\n\n registry.add_binding(Keys.ControlO,\n filter=(HasFocus(DEFAULT_BUFFER)\n & EmacsInsertMode()))(newline_with_copy_margin)\n\n registry.add_binding(Keys.F2,\n filter=HasFocus(DEFAULT_BUFFER)\n )(open_input_in_editor)\n\n registry.add_binding('v',\n filter=HasFocus(DEFAULT_BUFFER) & ViNavigationMode()\n )(open_input_in_editor)\n\n if shell.display_completions == 'readlinelike':\n registry.add_binding(Keys.ControlI,\n filter=(HasFocus(DEFAULT_BUFFER)\n & ~HasSelection()\n & insert_mode\n & ~cursor_in_leading_ws\n ))(display_completions_like_readline)\n\n if sys.platform == 'win32':\n registry.add_binding(Keys.ControlV,\n filter=(\n HasFocus(\n DEFAULT_BUFFER) & ~ViMode()\n ))(win_paste)\n\n\ndef newline_or_execute_outer(shell):\n def newline_or_execute(event):\n \"\"\"When the user presses return, insert a newline or execute the code.\"\"\"\n b = event.current_buffer\n d = b.document\n\n if b.complete_state:\n cc = b.complete_state.current_completion\n if cc:\n b.apply_completion(cc)\n else:\n b.cancel_completion()\n return\n\n before_text = d.text[:d.cursor_position]\n status, indent = shell.input_splitter.check_complete(before_text + '\\n')\n\n if not (d.on_last_line or\n d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end()\n ):\n b.insert_text('\\n' + (' ' * (indent or 0)))\n return\n\n if (status != 'incomplete') and b.accept_action.is_returnable:\n b.accept_action.validate_and_handle(event.cli, b)\n else:\n b.insert_text('\\n' + (' ' * (indent or 0)))\n return newline_or_execute\n\n\ndef previous_history_or_previous_completion(event):\n \"\"\"\n Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.\n\n If completer is open this still select previous completion.\n \"\"\"\n event.current_buffer.auto_up()\n\n\ndef next_history_or_next_completion(event):\n \"\"\"\n Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.\n\n If 
completer is open this still select next completion.\n \"\"\"\n event.current_buffer.auto_down()\n\n\ndef dismiss_completion(event):\n b = event.current_buffer\n if b.complete_state:\n b.cancel_completion()\n\n\ndef reset_buffer(event):\n b = event.current_buffer\n if b.complete_state:\n b.cancel_completion()\n else:\n b.reset()\n\n\ndef reset_search_buffer(event):\n if event.current_buffer.document.text:\n event.current_buffer.reset()\n else:\n event.cli.push_focus(DEFAULT_BUFFER)\n\ndef suspend_to_bg(event):\n event.cli.suspend_to_background()\n\ndef force_exit(event):\n \"\"\"\n Force exit (with a non-zero return value)\n \"\"\"\n sys.exit(\"Quit\")\n\ndef indent_buffer(event):\n event.current_buffer.insert_text(' ' * 4)\n\ndef newline_with_copy_margin(event):\n \"\"\"\n Preserve margin and cursor position when using\n Control-O to insert a newline in EMACS mode\n \"\"\"\n b = event.current_buffer\n cursor_start_pos = b.document.cursor_position_col\n b.newline(copy_margin=True)\n b.cursor_up(count=1)\n cursor_end_pos = b.document.cursor_position_col\n if cursor_start_pos != cursor_end_pos:\n pos_diff = cursor_start_pos - cursor_end_pos\n b.cursor_right(count=pos_diff)\n\ndef open_input_in_editor(event):\n event.cli.current_buffer.tempfile_suffix = \".py\"\n event.cli.current_buffer.open_in_editor(event.cli)\n\n\nif sys.platform == 'win32':\n from IPython.core.error import TryNext\n from IPython.lib.clipboard import (ClipboardEmpty,\n win32_clipboard_get,\n tkinter_clipboard_get)\n\n @undoc\n def win_paste(event):\n try:\n text = win32_clipboard_get()\n except TryNext:\n try:\n text = tkinter_clipboard_get()\n except (TryNext, ClipboardEmpty):\n return\n except ClipboardEmpty:\n return\n event.current_buffer.insert_text(text.replace('\\t', ' ' * 4))\n", "path": "IPython/terminal/shortcuts.py"}]}
2,179
241
gh_patches_debug_23562
rasdani/github-patches
git_diff
internetarchive__openlibrary-6807
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- librarian merge queue fixes Closes #6807 - allows flexible sorting with ?order=asc or desc -- piggy backs on #6785 - adds total counts to Open and Closed - removes "All" - fixes bug where page? persists when switching modes -- fixes **half** of #6782 (i.e. mode part, not submitter!) <!-- What does this PR achieve? [feature|hotfix|fix|refactor] --> ### Stakeholders <!-- @ tag stakeholders of this bug --> @jimchamp <!-- Attribution Disclaimer: By proposing this pull request, I affirm to have made a best-effort and exercised my discretion to make sure relevant sections of this code which substantially leverage code suggestions, code generation, or code snippets from sources (e.g. Stack Overflow, GitHub) have been annotated with basic attribution so reviewers & contributors may have confidence and access to the correct context to evaluate and use this code. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/plugins/upstream/edits.py` Content: ``` 1 """Librarian Edits 2 """ 3 4 import json 5 import web 6 7 from openlibrary import accounts 8 from openlibrary.core.edits import CommunityEditsQueue, get_status_for_view 9 from infogami.utils import delegate 10 from infogami.utils.view import render_template 11 12 13 def create_request(olids: str, username: str, comment: str = None): 14 work_ids = olids.split(',') 15 return CommunityEditsQueue.submit_work_merge_request( 16 work_ids, 17 submitter=username, 18 comment=comment, 19 ) 20 21 22 class community_edits_queue(delegate.page): 23 path = '/merges' 24 25 def POST(self): 26 def response(status='ok', **kwargs): 27 return {'status': status, **kwargs} 28 29 i = web.input( 30 work_ids="", # Comma-separated OLIDs (OL1W,OL2W,OL3W,...,OL111W) 31 rtype="merge-works", 32 mrid=None, 33 action=None, # create, approve, decline, comment, unassign, create-merged 34 comment=None, 35 ) 36 user = accounts.get_current_user() 37 username = user['key'].split('/')[-1] 38 if i.mrid: # We are updating an existing merge request 39 if i.action == 'comment': 40 if i.comment: 41 CommunityEditsQueue.comment_request(i.mrid, username, i.comment) 42 return delegate.RawText( 43 json.dumps(response()), content_type="application/json" 44 ) 45 else: 46 return delegate.RawText( 47 json.dumps( 48 response( 49 status='error', error='No comment sent in request.' 
50 ) 51 ) 52 ) 53 elif i.action == 'claim': 54 result = CommunityEditsQueue.assign_request(i.mrid, username) 55 return delegate.RawText( 56 json.dumps(response(**result)), content_type="application/json" 57 ) 58 elif i.action == 'unassign': 59 CommunityEditsQueue.unassign_request(i.mrid) 60 status = get_status_for_view(CommunityEditsQueue.STATUS['PENDING']) 61 return delegate.RawText(json.dumps(response(newStatus=status))) 62 else: 63 if i.action == "decline": 64 status = CommunityEditsQueue.STATUS['DECLINED'] 65 elif i.action == 'approve': 66 status = CommunityEditsQueue.STATUS['MERGED'] 67 CommunityEditsQueue.update_request_status( 68 i.mrid, status, username, comment=i.comment 69 ) 70 return delegate.RawText( 71 json.dumps(response()), content_type="application/json" 72 ) 73 elif i.rtype == "merge-works": 74 if i.action == 'create': 75 result = create_request(i.work_ids, username, i.comment) 76 resp = ( 77 response(id=result) 78 if result 79 else response( 80 status='error', 81 error='A request to merge these works has already been submitted.', 82 ) 83 ) 84 return delegate.RawText( 85 json.dumps(resp), content_type="application/json" 86 ) 87 elif i.action == 'create-merged': 88 result = CommunityEditsQueue.submit_work_merge_request( 89 i.work_ids.split(','), 90 submitter=username, 91 reviewer=username, 92 status=CommunityEditsQueue.STATUS['MERGED'], 93 ) 94 return delegate.RawText( 95 json.dumps(response(id=result)), content_type='application/json' 96 ) 97 98 def GET(self): 99 i = web.input(page=1, limit=25, mode="open", submitter=None, reviewer=None) 100 merge_requests = CommunityEditsQueue.get_requests( 101 page=int(i.page), 102 limit=int(i.limit), 103 mode=i.mode, 104 submitter=i.submitter, 105 reviewer=i.reviewer, 106 order='created desc', 107 ).list() 108 109 total_found = CommunityEditsQueue.get_counts_by_mode( 110 mode=i.mode, submitter=i.submitter, reviewer=i.reviewer 111 ) 112 return render_template( 113 'merge_queue/merge_queue', 114 total_found, 115 merge_requests=merge_requests, 116 ) 117 118 def extract_olids(self, url): 119 query_string = url.split('?')[1] 120 split_params = query_string.split('&') 121 params = {} 122 for p in split_params: 123 kv = p.split('=') 124 params[kv[0]] = kv[1] 125 return params['records'].split(',') 126 127 128 class ui_partials(delegate.page): 129 path = '/merges/partials' 130 131 def GET(self): 132 i = web.input(type=None, comment='') 133 if i.type == 'comment': 134 component = render_template('merge_queue/comment', comment_str=i.comment) 135 return delegate.RawText(component) 136 137 138 def setup(): 139 pass 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/plugins/upstream/edits.py b/openlibrary/plugins/upstream/edits.py --- a/openlibrary/plugins/upstream/edits.py +++ b/openlibrary/plugins/upstream/edits.py @@ -96,19 +96,26 @@ ) def GET(self): - i = web.input(page=1, limit=25, mode="open", submitter=None, reviewer=None) + i = web.input( + page=1, limit=25, mode="open", submitter=None, reviewer=None, order='desc' + ) merge_requests = CommunityEditsQueue.get_requests( page=int(i.page), limit=int(i.limit), mode=i.mode, submitter=i.submitter, reviewer=i.reviewer, - order='created desc', + order=f'created {i.order}', ).list() - total_found = CommunityEditsQueue.get_counts_by_mode( - mode=i.mode, submitter=i.submitter, reviewer=i.reviewer - ) + total_found = { + "open": CommunityEditsQueue.get_counts_by_mode( + mode='open', submitter=i.submitter, reviewer=i.reviewer + ), + "closed": CommunityEditsQueue.get_counts_by_mode( + mode='closed', submitter=i.submitter, reviewer=i.reviewer + ), + } return render_template( 'merge_queue/merge_queue', total_found,
{"golden_diff": "diff --git a/openlibrary/plugins/upstream/edits.py b/openlibrary/plugins/upstream/edits.py\n--- a/openlibrary/plugins/upstream/edits.py\n+++ b/openlibrary/plugins/upstream/edits.py\n@@ -96,19 +96,26 @@\n )\n \n def GET(self):\n- i = web.input(page=1, limit=25, mode=\"open\", submitter=None, reviewer=None)\n+ i = web.input(\n+ page=1, limit=25, mode=\"open\", submitter=None, reviewer=None, order='desc'\n+ )\n merge_requests = CommunityEditsQueue.get_requests(\n page=int(i.page),\n limit=int(i.limit),\n mode=i.mode,\n submitter=i.submitter,\n reviewer=i.reviewer,\n- order='created desc',\n+ order=f'created {i.order}',\n ).list()\n \n- total_found = CommunityEditsQueue.get_counts_by_mode(\n- mode=i.mode, submitter=i.submitter, reviewer=i.reviewer\n- )\n+ total_found = {\n+ \"open\": CommunityEditsQueue.get_counts_by_mode(\n+ mode='open', submitter=i.submitter, reviewer=i.reviewer\n+ ),\n+ \"closed\": CommunityEditsQueue.get_counts_by_mode(\n+ mode='closed', submitter=i.submitter, reviewer=i.reviewer\n+ ),\n+ }\n return render_template(\n 'merge_queue/merge_queue',\n total_found,\n", "issue": "librarian merge queue fixes\nCloses #6807\r\n\r\n- allows flexible sorting with ?order=asc or desc -- piggy backs on #6785 \r\n- adds total counts to Open and Closed\r\n- removes \"All\"\r\n- fixes bug where page? persists when switching modes -- fixes **half** of #6782 (i.e. mode part, not submitter!)\r\n\r\n\r\n<!-- What does this PR achieve? [feature|hotfix|fix|refactor] -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@jimchamp \r\n\r\n<!-- Attribution Disclaimer: By proposing this pull request, I affirm to have made a best-effort and exercised my discretion to make sure relevant sections of this code which substantially leverage code suggestions, code generation, or code snippets from sources (e.g. Stack Overflow, GitHub) have been annotated with basic attribution so reviewers & contributors may have confidence and access to the correct context to evaluate and use this code. 
-->\r\n\n", "before_files": [{"content": "\"\"\"Librarian Edits\n\"\"\"\n\nimport json\nimport web\n\nfrom openlibrary import accounts\nfrom openlibrary.core.edits import CommunityEditsQueue, get_status_for_view\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template\n\n\ndef create_request(olids: str, username: str, comment: str = None):\n work_ids = olids.split(',')\n return CommunityEditsQueue.submit_work_merge_request(\n work_ids,\n submitter=username,\n comment=comment,\n )\n\n\nclass community_edits_queue(delegate.page):\n path = '/merges'\n\n def POST(self):\n def response(status='ok', **kwargs):\n return {'status': status, **kwargs}\n\n i = web.input(\n work_ids=\"\", # Comma-separated OLIDs (OL1W,OL2W,OL3W,...,OL111W)\n rtype=\"merge-works\",\n mrid=None,\n action=None, # create, approve, decline, comment, unassign, create-merged\n comment=None,\n )\n user = accounts.get_current_user()\n username = user['key'].split('/')[-1]\n if i.mrid: # We are updating an existing merge request\n if i.action == 'comment':\n if i.comment:\n CommunityEditsQueue.comment_request(i.mrid, username, i.comment)\n return delegate.RawText(\n json.dumps(response()), content_type=\"application/json\"\n )\n else:\n return delegate.RawText(\n json.dumps(\n response(\n status='error', error='No comment sent in request.'\n )\n )\n )\n elif i.action == 'claim':\n result = CommunityEditsQueue.assign_request(i.mrid, username)\n return delegate.RawText(\n json.dumps(response(**result)), content_type=\"application/json\"\n )\n elif i.action == 'unassign':\n CommunityEditsQueue.unassign_request(i.mrid)\n status = get_status_for_view(CommunityEditsQueue.STATUS['PENDING'])\n return delegate.RawText(json.dumps(response(newStatus=status)))\n else:\n if i.action == \"decline\":\n status = CommunityEditsQueue.STATUS['DECLINED']\n elif i.action == 'approve':\n status = CommunityEditsQueue.STATUS['MERGED']\n CommunityEditsQueue.update_request_status(\n i.mrid, status, username, comment=i.comment\n )\n return delegate.RawText(\n json.dumps(response()), content_type=\"application/json\"\n )\n elif i.rtype == \"merge-works\":\n if i.action == 'create':\n result = create_request(i.work_ids, username, i.comment)\n resp = (\n response(id=result)\n if result\n else response(\n status='error',\n error='A request to merge these works has already been submitted.',\n )\n )\n return delegate.RawText(\n json.dumps(resp), content_type=\"application/json\"\n )\n elif i.action == 'create-merged':\n result = CommunityEditsQueue.submit_work_merge_request(\n i.work_ids.split(','),\n submitter=username,\n reviewer=username,\n status=CommunityEditsQueue.STATUS['MERGED'],\n )\n return delegate.RawText(\n json.dumps(response(id=result)), content_type='application/json'\n )\n\n def GET(self):\n i = web.input(page=1, limit=25, mode=\"open\", submitter=None, reviewer=None)\n merge_requests = CommunityEditsQueue.get_requests(\n page=int(i.page),\n limit=int(i.limit),\n mode=i.mode,\n submitter=i.submitter,\n reviewer=i.reviewer,\n order='created desc',\n ).list()\n\n total_found = CommunityEditsQueue.get_counts_by_mode(\n mode=i.mode, submitter=i.submitter, reviewer=i.reviewer\n )\n return render_template(\n 'merge_queue/merge_queue',\n total_found,\n merge_requests=merge_requests,\n )\n\n def extract_olids(self, url):\n query_string = url.split('?')[1]\n split_params = query_string.split('&')\n params = {}\n for p in split_params:\n kv = p.split('=')\n params[kv[0]] = kv[1]\n return 
params['records'].split(',')\n\n\nclass ui_partials(delegate.page):\n path = '/merges/partials'\n\n def GET(self):\n i = web.input(type=None, comment='')\n if i.type == 'comment':\n component = render_template('merge_queue/comment', comment_str=i.comment)\n return delegate.RawText(component)\n\n\ndef setup():\n pass\n", "path": "openlibrary/plugins/upstream/edits.py"}], "after_files": [{"content": "\"\"\"Librarian Edits\n\"\"\"\n\nimport json\nimport web\n\nfrom openlibrary import accounts\nfrom openlibrary.core.edits import CommunityEditsQueue, get_status_for_view\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template\n\n\ndef create_request(olids: str, username: str, comment: str = None):\n work_ids = olids.split(',')\n return CommunityEditsQueue.submit_work_merge_request(\n work_ids,\n submitter=username,\n comment=comment,\n )\n\n\nclass community_edits_queue(delegate.page):\n path = '/merges'\n\n def POST(self):\n def response(status='ok', **kwargs):\n return {'status': status, **kwargs}\n\n i = web.input(\n work_ids=\"\", # Comma-separated OLIDs (OL1W,OL2W,OL3W,...,OL111W)\n rtype=\"merge-works\",\n mrid=None,\n action=None, # create, approve, decline, comment, unassign, create-merged\n comment=None,\n )\n user = accounts.get_current_user()\n username = user['key'].split('/')[-1]\n if i.mrid: # We are updating an existing merge request\n if i.action == 'comment':\n if i.comment:\n CommunityEditsQueue.comment_request(i.mrid, username, i.comment)\n return delegate.RawText(\n json.dumps(response()), content_type=\"application/json\"\n )\n else:\n return delegate.RawText(\n json.dumps(\n response(\n status='error', error='No comment sent in request.'\n )\n )\n )\n elif i.action == 'claim':\n result = CommunityEditsQueue.assign_request(i.mrid, username)\n return delegate.RawText(\n json.dumps(response(**result)), content_type=\"application/json\"\n )\n elif i.action == 'unassign':\n CommunityEditsQueue.unassign_request(i.mrid)\n status = get_status_for_view(CommunityEditsQueue.STATUS['PENDING'])\n return delegate.RawText(json.dumps(response(newStatus=status)))\n else:\n if i.action == \"decline\":\n status = CommunityEditsQueue.STATUS['DECLINED']\n elif i.action == 'approve':\n status = CommunityEditsQueue.STATUS['MERGED']\n CommunityEditsQueue.update_request_status(\n i.mrid, status, username, comment=i.comment\n )\n return delegate.RawText(\n json.dumps(response()), content_type=\"application/json\"\n )\n elif i.rtype == \"merge-works\":\n if i.action == 'create':\n result = create_request(i.work_ids, username, i.comment)\n resp = (\n response(id=result)\n if result\n else response(\n status='error',\n error='A request to merge these works has already been submitted.',\n )\n )\n return delegate.RawText(\n json.dumps(resp), content_type=\"application/json\"\n )\n elif i.action == 'create-merged':\n result = CommunityEditsQueue.submit_work_merge_request(\n i.work_ids.split(','),\n submitter=username,\n reviewer=username,\n status=CommunityEditsQueue.STATUS['MERGED'],\n )\n return delegate.RawText(\n json.dumps(response(id=result)), content_type='application/json'\n )\n\n def GET(self):\n i = web.input(\n page=1, limit=25, mode=\"open\", submitter=None, reviewer=None, order='desc'\n )\n merge_requests = CommunityEditsQueue.get_requests(\n page=int(i.page),\n limit=int(i.limit),\n mode=i.mode,\n submitter=i.submitter,\n reviewer=i.reviewer,\n order=f'created {i.order}',\n ).list()\n\n total_found = {\n \"open\": CommunityEditsQueue.get_counts_by_mode(\n 
mode='open', submitter=i.submitter, reviewer=i.reviewer\n ),\n \"closed\": CommunityEditsQueue.get_counts_by_mode(\n mode='closed', submitter=i.submitter, reviewer=i.reviewer\n ),\n }\n return render_template(\n 'merge_queue/merge_queue',\n total_found,\n merge_requests=merge_requests,\n )\n\n def extract_olids(self, url):\n query_string = url.split('?')[1]\n split_params = query_string.split('&')\n params = {}\n for p in split_params:\n kv = p.split('=')\n params[kv[0]] = kv[1]\n return params['records'].split(',')\n\n\nclass ui_partials(delegate.page):\n path = '/merges/partials'\n\n def GET(self):\n i = web.input(type=None, comment='')\n if i.type == 'comment':\n component = render_template('merge_queue/comment', comment_str=i.comment)\n return delegate.RawText(component)\n\n\ndef setup():\n pass\n", "path": "openlibrary/plugins/upstream/edits.py"}]}
1,755
323
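The record above patches Open Library's merge-request queue so the sort direction comes from a query parameter and both "open" and "closed" totals are computed up front. A minimal, hypothetical sketch of that pattern follows; `build_queue_view` and the `fake_counts` stand-in are invented for illustration and are not code from the Open Library repository.

```python
# Editorial sketch of the pattern in the diff above: validate a user-supplied sort
# direction and pre-compute per-mode totals. Names here are hypothetical.

def build_queue_view(get_counts, submitter=None, reviewer=None, order="desc"):
    """Return the ORDER BY clause and per-mode totals for a merge-request queue."""
    # Guard against arbitrary input reaching the query layer.
    direction = order.lower()
    if direction not in ("asc", "desc"):
        direction = "desc"
    order_clause = f"created {direction}"

    # Compute both totals up front so a template can render "Open (n)" and "Closed (m)".
    totals = {
        "open": get_counts(mode="open", submitter=submitter, reviewer=reviewer),
        "closed": get_counts(mode="closed", submitter=submitter, reviewer=reviewer),
    }
    return order_clause, totals


if __name__ == "__main__":
    # Stand-in for CommunityEditsQueue.get_counts_by_mode in the real code.
    fake_counts = lambda mode, submitter, reviewer: {"open": 12, "closed": 30}[mode]
    print(build_queue_view(fake_counts, order="ASC"))
```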
gh_patches_debug_15287
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-642
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Expand Environment Variables in Cookiecutter Configuration File I set my cookiecutterrc file via an environment variable, like this: ``` export COOKIECUTTER_CONFIG="$XDG_CONFIG_HOME/cookiecutter/cookiecutterrc" ``` In my cookiecutterrc, I'd like to use those same environment variables to set paths, however they don't currently expand: ``` default_context: full_name: "Nathan Farrar" email: "[email protected]" github_username: "nfarrar" cookiecutters_dir: "$XDG_CACHE_HOME/cookiecutter/template" replay_dir: "$XDG_CACHE_HOME/cookiecutter/replay" abbreviations: pp: https://github.com/audreyr/cookiecutter-pypackage.git gh: https://github.com/{0}.git bb: https://bitbucket.org/{0} ``` For example: ``` $ cookiecutter pp $ ls ~/ ... drwxr-xr-x 3 nfarrar staff 102 Feb 28 07:37 '$XDG_CACHE_HOME' ... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/config.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.config 6 ------------------- 7 8 Global configuration handling 9 """ 10 11 from __future__ import unicode_literals 12 import copy 13 import logging 14 import os 15 import io 16 17 import poyo 18 19 from .exceptions import ConfigDoesNotExistException 20 from .exceptions import InvalidConfiguration 21 22 23 logger = logging.getLogger(__name__) 24 25 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc') 26 27 DEFAULT_CONFIG = { 28 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'), 29 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'), 30 'default_context': {} 31 } 32 33 34 def get_config(config_path): 35 """ 36 Retrieve the config from the specified path, returning it as a config dict. 37 """ 38 39 if not os.path.exists(config_path): 40 raise ConfigDoesNotExistException 41 42 logger.debug('config_path is {0}'.format(config_path)) 43 with io.open(config_path, encoding='utf-8') as file_handle: 44 try: 45 yaml_dict = poyo.parse_string(file_handle.read()) 46 except poyo.exceptions.PoyoException as e: 47 raise InvalidConfiguration( 48 'Unable to parse YAML file {}. Error: {}' 49 ''.format(config_path, e) 50 ) 51 52 config_dict = copy.copy(DEFAULT_CONFIG) 53 config_dict.update(yaml_dict) 54 55 return config_dict 56 57 58 def get_user_config(config_file=USER_CONFIG_PATH): 59 """Retrieve the config from a file or return the defaults if None is 60 passed. If an environment variable `COOKIECUTTER_CONFIG` is set up, try 61 to load its value. Otherwise fall back to a default file or config. 62 """ 63 # Do NOT load a config. Return defaults instead. 64 if config_file is None: 65 return copy.copy(DEFAULT_CONFIG) 66 67 # Load the given config file 68 if config_file and config_file is not USER_CONFIG_PATH: 69 return get_config(config_file) 70 71 try: 72 # Does the user set up a config environment variable? 73 env_config_file = os.environ['COOKIECUTTER_CONFIG'] 74 except KeyError: 75 # Load an optional user config if it exists 76 # otherwise return the defaults 77 if os.path.exists(USER_CONFIG_PATH): 78 return get_config(USER_CONFIG_PATH) 79 else: 80 return copy.copy(DEFAULT_CONFIG) 81 else: 82 # There is a config environment variable. Try to load it. 83 # Do not check for existence, so invalid file paths raise an error. 
84 return get_config(env_config_file) 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/config.py b/cookiecutter/config.py --- a/cookiecutter/config.py +++ b/cookiecutter/config.py @@ -31,6 +31,13 @@ } +def _expand_path(path): + """Expand both environment variables and user home in the given path.""" + path = os.path.expandvars(path) + path = os.path.expanduser(path) + return path + + def get_config(config_path): """ Retrieve the config from the specified path, returning it as a config dict. @@ -52,6 +59,12 @@ config_dict = copy.copy(DEFAULT_CONFIG) config_dict.update(yaml_dict) + raw_replay_dir = config_dict['replay_dir'] + config_dict['replay_dir'] = _expand_path(raw_replay_dir) + + raw_cookies_dir = config_dict['cookiecutters_dir'] + config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir) + return config_dict
{"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -31,6 +31,13 @@\n }\n \n \n+def _expand_path(path):\n+ \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n+ path = os.path.expandvars(path)\n+ path = os.path.expanduser(path)\n+ return path\n+\n+\n def get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n@@ -52,6 +59,12 @@\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n \n+ raw_replay_dir = config_dict['replay_dir']\n+ config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n+\n+ raw_cookies_dir = config_dict['cookiecutters_dir']\n+ config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n+\n return config_dict\n", "issue": "Expand Environment Variables in Cookiecutter Configuration File\nI set my cookiecutterrc file via an environment variable, like this:\n\n```\nexport COOKIECUTTER_CONFIG=\"$XDG_CONFIG_HOME/cookiecutter/cookiecutterrc\"\n```\n\nIn my cookiecutterrc, I'd like to use those same environment variables to set paths, however they don't currently expand:\n\n```\ndefault_context:\n full_name: \"Nathan Farrar\"\n email: \"[email protected]\"\n github_username: \"nfarrar\"\ncookiecutters_dir: \"$XDG_CACHE_HOME/cookiecutter/template\"\nreplay_dir: \"$XDG_CACHE_HOME/cookiecutter/replay\"\nabbreviations:\n pp: https://github.com/audreyr/cookiecutter-pypackage.git\n gh: https://github.com/{0}.git\n bb: https://bitbucket.org/{0}\n```\n\nFor example:\n\n```\n$ cookiecutter pp\n$ ls ~/\n...\ndrwxr-xr-x 3 nfarrar staff 102 Feb 28 07:37 '$XDG_CACHE_HOME'\n...\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.config\n-------------------\n\nGlobal configuration handling\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {}\n}\n\n\ndef get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n \"\"\"\n\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n return config_dict\n\n\ndef get_user_config(config_file=USER_CONFIG_PATH):\n \"\"\"Retrieve the config from a file or return the defaults if None is\n passed. If an environment variable `COOKIECUTTER_CONFIG` is set up, try\n to load its value. Otherwise fall back to a default file or config.\n \"\"\"\n # Do NOT load a config. 
Return defaults instead.\n if config_file is None:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.config\n-------------------\n\nGlobal configuration handling\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {}\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef get_config(config_path):\n \"\"\"\n Retrieve the config from the specified path, returning it as a config dict.\n \"\"\"\n\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=USER_CONFIG_PATH):\n \"\"\"Retrieve the config from a file or return the defaults if None is\n passed. If an environment variable `COOKIECUTTER_CONFIG` is set up, try\n to load its value. Otherwise fall back to a default file or config.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if config_file is None:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}]}
1,210
226
gh_patches_debug_60945
rasdani/github-patches
git_diff
Netflix__lemur-766
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Set lemur to log to stdout When running lemur inside docker I would like to have it log everything to `stdout` so that I can forward logs to splunk. At the moment `lemur.config.py` has a `LEMUR_LOG` parameter that expects a filename. Is there a way to configure lemur to log to stdout instead of a file? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lemur/factory.py` Content: ``` 1 """ 2 .. module: lemur.factory 3 :platform: Unix 4 :synopsis: This module contains all the needed functions to allow 5 the factory app creation. 6 7 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more 8 :license: Apache, see LICENSE for more details. 9 .. moduleauthor:: Kevin Glisson <[email protected]> 10 11 """ 12 import os 13 import imp 14 import errno 15 import pkg_resources 16 17 from logging import Formatter, StreamHandler 18 from logging.handlers import RotatingFileHandler 19 20 from flask import Flask 21 from lemur.common.health import mod as health 22 from lemur.extensions import db, migrate, principal, smtp_mail, metrics 23 24 25 DEFAULT_BLUEPRINTS = ( 26 health, 27 ) 28 29 API_VERSION = 1 30 31 32 def create_app(app_name=None, blueprints=None, config=None): 33 """ 34 Lemur application factory 35 36 :param config: 37 :param app_name: 38 :param blueprints: 39 :return: 40 """ 41 if not blueprints: 42 blueprints = DEFAULT_BLUEPRINTS 43 else: 44 blueprints = blueprints + DEFAULT_BLUEPRINTS 45 46 if not app_name: 47 app_name = __name__ 48 49 app = Flask(app_name) 50 configure_app(app, config) 51 configure_blueprints(app, blueprints) 52 configure_extensions(app) 53 configure_logging(app) 54 install_plugins(app) 55 56 @app.teardown_appcontext 57 def teardown(exception=None): 58 if db.session: 59 db.session.remove() 60 61 return app 62 63 64 def from_file(file_path, silent=False): 65 """ 66 Updates the values in the config from a Python file. This function 67 behaves as if the file was imported as module with the 68 69 :param file_path: 70 :param silent: 71 """ 72 d = imp.new_module('config') 73 d.__file__ = file_path 74 try: 75 with open(file_path) as config_file: 76 exec(compile(config_file.read(), # nosec: config file safe 77 file_path, 'exec'), d.__dict__) 78 except IOError as e: 79 if silent and e.errno in (errno.ENOENT, errno.EISDIR): 80 return False 81 e.strerror = 'Unable to load configuration file (%s)' % e.strerror 82 raise 83 return d 84 85 86 def configure_app(app, config=None): 87 """ 88 Different ways of configuration 89 90 :param app: 91 :param config: 92 :return: 93 """ 94 # respect the config first 95 if config and config != 'None': 96 app.config['CONFIG_PATH'] = config 97 app.config.from_object(from_file(config)) 98 else: 99 try: 100 app.config.from_envvar("LEMUR_CONF") 101 except RuntimeError: 102 # look in default paths 103 if os.path.isfile(os.path.expanduser("~/.lemur/lemur.conf.py")): 104 app.config.from_object(from_file(os.path.expanduser("~/.lemur/lemur.conf.py"))) 105 else: 106 app.config.from_object(from_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default.conf.py'))) 107 108 # we don't use this 109 app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False 110 111 112 def configure_extensions(app): 113 """ 114 Attaches and configures any needed flask extensions 115 to our app. 
116 117 :param app: 118 """ 119 db.init_app(app) 120 migrate.init_app(app, db) 121 principal.init_app(app) 122 smtp_mail.init_app(app) 123 metrics.init_app(app) 124 125 126 def configure_blueprints(app, blueprints): 127 """ 128 We prefix our APIs with their given version so that we can support 129 multiple concurrent API versions. 130 131 :param app: 132 :param blueprints: 133 """ 134 for blueprint in blueprints: 135 app.register_blueprint(blueprint, url_prefix="/api/{0}".format(API_VERSION)) 136 137 138 def configure_logging(app): 139 """ 140 Sets up application wide logging. 141 142 :param app: 143 """ 144 handler = RotatingFileHandler(app.config.get('LOG_FILE', 'lemur.log'), maxBytes=10000000, backupCount=100) 145 146 handler.setFormatter(Formatter( 147 '%(asctime)s %(levelname)s: %(message)s ' 148 '[in %(pathname)s:%(lineno)d]' 149 )) 150 151 handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG')) 152 app.logger.setLevel(app.config.get('LOG_LEVEL', 'DEBUG')) 153 app.logger.addHandler(handler) 154 155 stream_handler = StreamHandler() 156 stream_handler.setLevel(app.config.get('LOG_LEVEL')) 157 app.logger.addHandler(stream_handler) 158 159 160 def install_plugins(app): 161 """ 162 Installs new issuers that are not currently bundled with Lemur. 163 164 :param app: 165 :return: 166 """ 167 from lemur.plugins import plugins 168 from lemur.plugins.base import register 169 # entry_points={ 170 # 'lemur.plugins': [ 171 # 'verisign = lemur_verisign.plugin:VerisignPlugin' 172 # ], 173 # }, 174 for ep in pkg_resources.iter_entry_points('lemur.plugins'): 175 try: 176 plugin = ep.load() 177 except Exception: 178 import traceback 179 app.logger.error("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc())) 180 else: 181 register(plugin) 182 183 # ensure that we have some way to notify 184 with app.app_context(): 185 try: 186 slug = app.config.get("LEMUR_DEFAULT_NOTIFICATION_PLUGIN", "email-notification") 187 plugins.get(slug) 188 except KeyError: 189 raise Exception("Unable to location notification plugin: {slug}. Ensure that LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.".format(slug=slug)) 190 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lemur/factory.py b/lemur/factory.py --- a/lemur/factory.py +++ b/lemur/factory.py @@ -153,7 +153,7 @@ app.logger.addHandler(handler) stream_handler = StreamHandler() - stream_handler.setLevel(app.config.get('LOG_LEVEL')) + stream_handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG')) app.logger.addHandler(stream_handler)
{"golden_diff": "diff --git a/lemur/factory.py b/lemur/factory.py\n--- a/lemur/factory.py\n+++ b/lemur/factory.py\n@@ -153,7 +153,7 @@\n app.logger.addHandler(handler)\n \n stream_handler = StreamHandler()\n- stream_handler.setLevel(app.config.get('LOG_LEVEL'))\n+ stream_handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.addHandler(stream_handler)\n", "issue": "Set lemur to log to stdout\nWhen running lemur inside docker I would like to have it log everything to `stdout` so that I can forward logs to splunk. At the moment `lemur.config.py` has a `LEMUR_LOG` parameter that expects a filename. Is there a way to configure lemur to log to stdout instead of a file?\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.factory\n :platform: Unix\n :synopsis: This module contains all the needed functions to allow\n the factory app creation.\n\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport os\nimport imp\nimport errno\nimport pkg_resources\n\nfrom logging import Formatter, StreamHandler\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\nfrom lemur.common.health import mod as health\nfrom lemur.extensions import db, migrate, principal, smtp_mail, metrics\n\n\nDEFAULT_BLUEPRINTS = (\n health,\n)\n\nAPI_VERSION = 1\n\n\ndef create_app(app_name=None, blueprints=None, config=None):\n \"\"\"\n Lemur application factory\n\n :param config:\n :param app_name:\n :param blueprints:\n :return:\n \"\"\"\n if not blueprints:\n blueprints = DEFAULT_BLUEPRINTS\n else:\n blueprints = blueprints + DEFAULT_BLUEPRINTS\n\n if not app_name:\n app_name = __name__\n\n app = Flask(app_name)\n configure_app(app, config)\n configure_blueprints(app, blueprints)\n configure_extensions(app)\n configure_logging(app)\n install_plugins(app)\n\n @app.teardown_appcontext\n def teardown(exception=None):\n if db.session:\n db.session.remove()\n\n return app\n\n\ndef from_file(file_path, silent=False):\n \"\"\"\n Updates the values in the config from a Python file. 
This function\n behaves as if the file was imported as module with the\n\n :param file_path:\n :param silent:\n \"\"\"\n d = imp.new_module('config')\n d.__file__ = file_path\n try:\n with open(file_path) as config_file:\n exec(compile(config_file.read(), # nosec: config file safe\n file_path, 'exec'), d.__dict__)\n except IOError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n e.strerror = 'Unable to load configuration file (%s)' % e.strerror\n raise\n return d\n\n\ndef configure_app(app, config=None):\n \"\"\"\n Different ways of configuration\n\n :param app:\n :param config:\n :return:\n \"\"\"\n # respect the config first\n if config and config != 'None':\n app.config['CONFIG_PATH'] = config\n app.config.from_object(from_file(config))\n else:\n try:\n app.config.from_envvar(\"LEMUR_CONF\")\n except RuntimeError:\n # look in default paths\n if os.path.isfile(os.path.expanduser(\"~/.lemur/lemur.conf.py\")):\n app.config.from_object(from_file(os.path.expanduser(\"~/.lemur/lemur.conf.py\")))\n else:\n app.config.from_object(from_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default.conf.py')))\n\n # we don't use this\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\ndef configure_extensions(app):\n \"\"\"\n Attaches and configures any needed flask extensions\n to our app.\n\n :param app:\n \"\"\"\n db.init_app(app)\n migrate.init_app(app, db)\n principal.init_app(app)\n smtp_mail.init_app(app)\n metrics.init_app(app)\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"\n We prefix our APIs with their given version so that we can support\n multiple concurrent API versions.\n\n :param app:\n :param blueprints:\n \"\"\"\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=\"/api/{0}\".format(API_VERSION))\n\n\ndef configure_logging(app):\n \"\"\"\n Sets up application wide logging.\n\n :param app:\n \"\"\"\n handler = RotatingFileHandler(app.config.get('LOG_FILE', 'lemur.log'), maxBytes=10000000, backupCount=100)\n\n handler.setFormatter(Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n ))\n\n handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.addHandler(handler)\n\n stream_handler = StreamHandler()\n stream_handler.setLevel(app.config.get('LOG_LEVEL'))\n app.logger.addHandler(stream_handler)\n\n\ndef install_plugins(app):\n \"\"\"\n Installs new issuers that are not currently bundled with Lemur.\n\n :param app:\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.plugins.base import register\n # entry_points={\n # 'lemur.plugins': [\n # 'verisign = lemur_verisign.plugin:VerisignPlugin'\n # ],\n # },\n for ep in pkg_resources.iter_entry_points('lemur.plugins'):\n try:\n plugin = ep.load()\n except Exception:\n import traceback\n app.logger.error(\"Failed to load plugin %r:\\n%s\\n\" % (ep.name, traceback.format_exc()))\n else:\n register(plugin)\n\n # ensure that we have some way to notify\n with app.app_context():\n try:\n slug = app.config.get(\"LEMUR_DEFAULT_NOTIFICATION_PLUGIN\", \"email-notification\")\n plugins.get(slug)\n except KeyError:\n raise Exception(\"Unable to location notification plugin: {slug}. Ensure that LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.\".format(slug=slug))\n", "path": "lemur/factory.py"}], "after_files": [{"content": "\"\"\"\n.. 
module: lemur.factory\n :platform: Unix\n :synopsis: This module contains all the needed functions to allow\n the factory app creation.\n\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport os\nimport imp\nimport errno\nimport pkg_resources\n\nfrom logging import Formatter, StreamHandler\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\nfrom lemur.common.health import mod as health\nfrom lemur.extensions import db, migrate, principal, smtp_mail, metrics\n\n\nDEFAULT_BLUEPRINTS = (\n health,\n)\n\nAPI_VERSION = 1\n\n\ndef create_app(app_name=None, blueprints=None, config=None):\n \"\"\"\n Lemur application factory\n\n :param config:\n :param app_name:\n :param blueprints:\n :return:\n \"\"\"\n if not blueprints:\n blueprints = DEFAULT_BLUEPRINTS\n else:\n blueprints = blueprints + DEFAULT_BLUEPRINTS\n\n if not app_name:\n app_name = __name__\n\n app = Flask(app_name)\n configure_app(app, config)\n configure_blueprints(app, blueprints)\n configure_extensions(app)\n configure_logging(app)\n install_plugins(app)\n\n @app.teardown_appcontext\n def teardown(exception=None):\n if db.session:\n db.session.remove()\n\n return app\n\n\ndef from_file(file_path, silent=False):\n \"\"\"\n Updates the values in the config from a Python file. This function\n behaves as if the file was imported as module with the\n\n :param file_path:\n :param silent:\n \"\"\"\n d = imp.new_module('config')\n d.__file__ = file_path\n try:\n with open(file_path) as config_file:\n exec(compile(config_file.read(), # nosec: config file safe\n file_path, 'exec'), d.__dict__)\n except IOError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n e.strerror = 'Unable to load configuration file (%s)' % e.strerror\n raise\n return d\n\n\ndef configure_app(app, config=None):\n \"\"\"\n Different ways of configuration\n\n :param app:\n :param config:\n :return:\n \"\"\"\n # respect the config first\n if config and config != 'None':\n app.config['CONFIG_PATH'] = config\n app.config.from_object(from_file(config))\n else:\n try:\n app.config.from_envvar(\"LEMUR_CONF\")\n except RuntimeError:\n # look in default paths\n if os.path.isfile(os.path.expanduser(\"~/.lemur/lemur.conf.py\")):\n app.config.from_object(from_file(os.path.expanduser(\"~/.lemur/lemur.conf.py\")))\n else:\n app.config.from_object(from_file(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'default.conf.py')))\n\n # we don't use this\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n\ndef configure_extensions(app):\n \"\"\"\n Attaches and configures any needed flask extensions\n to our app.\n\n :param app:\n \"\"\"\n db.init_app(app)\n migrate.init_app(app, db)\n principal.init_app(app)\n smtp_mail.init_app(app)\n metrics.init_app(app)\n\n\ndef configure_blueprints(app, blueprints):\n \"\"\"\n We prefix our APIs with their given version so that we can support\n multiple concurrent API versions.\n\n :param app:\n :param blueprints:\n \"\"\"\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=\"/api/{0}\".format(API_VERSION))\n\n\ndef configure_logging(app):\n \"\"\"\n Sets up application wide logging.\n\n :param app:\n \"\"\"\n handler = RotatingFileHandler(app.config.get('LOG_FILE', 'lemur.log'), maxBytes=10000000, backupCount=100)\n\n handler.setFormatter(Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]'\n 
))\n\n handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.addHandler(handler)\n\n stream_handler = StreamHandler()\n stream_handler.setLevel(app.config.get('LOG_LEVEL', 'DEBUG'))\n app.logger.addHandler(stream_handler)\n\n\ndef install_plugins(app):\n \"\"\"\n Installs new issuers that are not currently bundled with Lemur.\n\n :param app:\n :return:\n \"\"\"\n from lemur.plugins import plugins\n from lemur.plugins.base import register\n # entry_points={\n # 'lemur.plugins': [\n # 'verisign = lemur_verisign.plugin:VerisignPlugin'\n # ],\n # },\n for ep in pkg_resources.iter_entry_points('lemur.plugins'):\n try:\n plugin = ep.load()\n except Exception:\n import traceback\n app.logger.error(\"Failed to load plugin %r:\\n%s\\n\" % (ep.name, traceback.format_exc()))\n else:\n register(plugin)\n\n # ensure that we have some way to notify\n with app.app_context():\n try:\n slug = app.config.get(\"LEMUR_DEFAULT_NOTIFICATION_PLUGIN\", \"email-notification\")\n plugins.get(slug)\n except KeyError:\n raise Exception(\"Unable to location notification plugin: {slug}. Ensure that LEMUR_DEFAULT_NOTIFICATION_PLUGIN is set to a valid and installed notification plugin.\".format(slug=slug))\n", "path": "lemur/factory.py"}]}
2,021
98
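The lemur record above adds a `'DEBUG'` fallback when reading `LOG_LEVEL` for the stdout `StreamHandler`. The sketch below shows, outside of Flask, why that fallback matters; the `config` dict is a hypothetical stand-in for `app.config`.

```python
# Editorial sketch: a stdout StreamHandler whose level lookup has a safe default.
# Handler.setLevel() rejects None, so config.get("LOG_LEVEL") without a fallback breaks
# stdout logging whenever LOG_LEVEL is unset.
import logging
import sys

config = {}  # imagine LOG_LEVEL was never configured

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
    "%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"))
# config.get("LOG_LEVEL") would return None here and make setLevel() raise;
# the fallback keeps logs flowing to stdout out of the box.
handler.setLevel(config.get("LOG_LEVEL", "DEBUG"))

log = logging.getLogger("demo")
log.setLevel(config.get("LOG_LEVEL", "DEBUG"))
log.addHandler(handler)
log.info("logs now reach stdout even when LOG_LEVEL is unset")
```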
gh_patches_debug_29313
rasdani/github-patches
git_diff
bokeh__bokeh-7934
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- flask_gunicorn_embed.py does not work with Tornado 5 ref: https://github.com/bokeh/bokeh/blob/master/examples/howto/server_embed/flask_gunicorn_embed.py Running as is gets: ``` Exception in thread Thread-1: Traceback (most recent call last): File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py", line 916, in _bootstrap_inner self.run() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py", line 864, in run self._target(*self._args, **self._kwargs) File "/Users/bryanv/work/bokeh/examples/howto/server_embed/flask_gunicorn_embed.py", line 72, in bk_worker server.start() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/server.py", line 149, in start self._tornado.start() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/tornado.py", line 372, in start self._stats_job.start() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py", line 1185, in start self.io_loop = IOLoop.current() File "/Use rs/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py", line 282, in current loop = asyncio.get_event_loop() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py", line 694, in get_event_loop return get_event_loop_policy().get_event_loop() File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py", line 602, in get_event_loop % threading.current_thread().name) RuntimeError: There is no current event loop in thread 'Thread-1'. ``` Tried changing worker to ``` def bk_worker(): io_loop = IOLoop()) server = BaseServer(io_loop, bokeh_tornado, bokeh_http) server.start() server.io_loop.start() ``` but then the http requests to the `HTTPServer` just hang (the workers are getting executed the right number of times though) cc @bdarnell any quick ideas? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/howto/server_embed/flask_gunicorn_embed.py` Content: ``` 1 from flask import Flask, render_template 2 3 from tornado.httpserver import HTTPServer 4 from tornado.ioloop import IOLoop 5 6 from bokeh.application import Application 7 from bokeh.application.handlers import FunctionHandler 8 from bokeh.embed import server_document 9 from bokeh.layouts import column 10 from bokeh.models import ColumnDataSource, Slider 11 from bokeh.plotting import figure 12 from bokeh.server.server import BaseServer 13 from bokeh.server.tornado import BokehTornado 14 from bokeh.server.util import bind_sockets 15 from bokeh.themes import Theme 16 17 if __name__ == '__main__': 18 print('This script is intended to be run with gunicorn. 
e.g.') 19 print() 20 print(' gunicorn -w 4 flask_gunicorn_embed:app') 21 print() 22 print('will start the app on four processes') 23 import sys 24 sys.exit() 25 26 from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature 27 28 app = Flask(__name__) 29 30 def modify_doc(doc): 31 df = sea_surface_temperature.copy() 32 source = ColumnDataSource(data=df) 33 34 plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)', 35 title="Sea Surface Temperature at 43.18, -70.43") 36 plot.line('time', 'temperature', source=source) 37 38 def callback(attr, old, new): 39 if new == 0: 40 data = df 41 else: 42 data = df.rolling('{0}D'.format(new)).mean() 43 source.data = ColumnDataSource(data=data).data 44 45 slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days") 46 slider.on_change('value', callback) 47 48 doc.add_root(column(slider, plot)) 49 50 doc.theme = Theme(filename="theme.yaml") 51 52 # can't use shortcuts here, since we are passing to low level BokehTornado 53 bkapp = Application(FunctionHandler(modify_doc)) 54 55 bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"]) 56 bokeh_http = HTTPServer(bokeh_tornado) 57 58 # This is so that if this app is run using something like "gunicorn -w 4" then 59 # each process will listen on its own port 60 sockets, port = bind_sockets("localhost", 0) 61 bokeh_http.add_sockets(sockets) 62 63 @app.route('/', methods=['GET']) 64 def bkapp_page(): 65 script = server_document('http://localhost:%d/bkapp' % port) 66 return render_template("embed.html", script=script, template="Flask") 67 68 def bk_worker(): 69 io_loop = IOLoop.current() 70 server = BaseServer(io_loop, bokeh_tornado, bokeh_http) 71 server.start() 72 server.io_loop.start() 73 74 from threading import Thread 75 Thread(target=bk_worker).start() 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py --- a/examples/howto/server_embed/flask_gunicorn_embed.py +++ b/examples/howto/server_embed/flask_gunicorn_embed.py @@ -1,3 +1,8 @@ +try: + import asyncio +except ImportError: + raise RuntimeError("This example requries Python3 / asyncio") + from flask import Flask, render_template from tornado.httpserver import HTTPServer @@ -52,13 +57,9 @@ # can't use shortcuts here, since we are passing to low level BokehTornado bkapp = Application(FunctionHandler(modify_doc)) -bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"]) -bokeh_http = HTTPServer(bokeh_tornado) - # This is so that if this app is run using something like "gunicorn -w 4" then # each process will listen on its own port sockets, port = bind_sockets("localhost", 0) -bokeh_http.add_sockets(sockets) @app.route('/', methods=['GET']) def bkapp_page(): @@ -66,8 +67,13 @@ return render_template("embed.html", script=script, template="Flask") def bk_worker(): - io_loop = IOLoop.current() - server = BaseServer(io_loop, bokeh_tornado, bokeh_http) + asyncio.set_event_loop(asyncio.new_event_loop()) + + bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"]) + bokeh_http = HTTPServer(bokeh_tornado) + bokeh_http.add_sockets(sockets) + + server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http) server.start() server.io_loop.start()
{"golden_diff": "diff --git a/examples/howto/server_embed/flask_gunicorn_embed.py b/examples/howto/server_embed/flask_gunicorn_embed.py\n--- a/examples/howto/server_embed/flask_gunicorn_embed.py\n+++ b/examples/howto/server_embed/flask_gunicorn_embed.py\n@@ -1,3 +1,8 @@\n+try:\n+ import asyncio\n+except ImportError:\n+ raise RuntimeError(\"This example requries Python3 / asyncio\")\n+\n from flask import Flask, render_template\n \n from tornado.httpserver import HTTPServer\n@@ -52,13 +57,9 @@\n # can't use shortcuts here, since we are passing to low level BokehTornado\n bkapp = Application(FunctionHandler(modify_doc))\n \n-bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n-bokeh_http = HTTPServer(bokeh_tornado)\n-\n # This is so that if this app is run using something like \"gunicorn -w 4\" then\n # each process will listen on its own port\n sockets, port = bind_sockets(\"localhost\", 0)\n-bokeh_http.add_sockets(sockets)\n \n @app.route('/', methods=['GET'])\n def bkapp_page():\n@@ -66,8 +67,13 @@\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n \n def bk_worker():\n- io_loop = IOLoop.current()\n- server = BaseServer(io_loop, bokeh_tornado, bokeh_http)\n+ asyncio.set_event_loop(asyncio.new_event_loop())\n+\n+ bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n+ bokeh_http = HTTPServer(bokeh_tornado)\n+ bokeh_http.add_sockets(sockets)\n+\n+ server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n", "issue": "flask_gunicorn_embed.py does not work with Tornado 5\nref: https://github.com/bokeh/bokeh/blob/master/examples/howto/server_embed/flask_gunicorn_embed.py\r\n\r\nRunning as is gets:\r\n```\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/Users/bryanv/work/bokeh/examples/howto/server_embed/flask_gunicorn_embed.py\", line 72, in bk_worker\r\n server.start()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/server.py\", line 149, in start\r\n self._tornado.start()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/tornado.py\", line 372, in start\r\n self._stats_job.start()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py\", line 1185, in start\r\n self.io_loop = IOLoop.current()\r\n File \"/Use\r\nrs/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py\", line 282, in current\r\n loop = asyncio.get_event_loop()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py\", line 694, in get_event_loop\r\n return get_event_loop_policy().get_event_loop()\r\n File \"/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py\", line 602, in get_event_loop\r\n % threading.current_thread().name)\r\nRuntimeError: There is no current event loop in thread 'Thread-1'.\r\n```\r\n\r\nTried changing worker to \r\n```\r\ndef bk_worker():\r\n io_loop = IOLoop())\r\n server = BaseServer(io_loop, bokeh_tornado, bokeh_http)\r\n server.start()\r\n server.io_loop.start()\r\n```\r\n\r\n\r\nbut then the http requests to the `HTTPServer` just hang (the workers are getting executed the right number of times 
though)\r\n\r\ncc @bdarnell any quick ideas?\n", "before_files": [{"content": "from flask import Flask, render_template\n\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.server.server import BaseServer\nfrom bokeh.server.tornado import BokehTornado\nfrom bokeh.server.util import bind_sockets\nfrom bokeh.themes import Theme\n\nif __name__ == '__main__':\n print('This script is intended to be run with gunicorn. e.g.')\n print()\n print(' gunicorn -w 4 flask_gunicorn_embed:app')\n print()\n print('will start the app on four processes')\n import sys\n sys.exit()\n\nfrom bokeh.sampledata.sea_surface_temperature import sea_surface_temperature\n\napp = Flask(__name__)\n\ndef modify_doc(doc):\n df = sea_surface_temperature.copy()\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource(data=data).data\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(filename=\"theme.yaml\")\n\n# can't use shortcuts here, since we are passing to low level BokehTornado\nbkapp = Application(FunctionHandler(modify_doc))\n\nbokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\nbokeh_http = HTTPServer(bokeh_tornado)\n\n# This is so that if this app is run using something like \"gunicorn -w 4\" then\n# each process will listen on its own port\nsockets, port = bind_sockets(\"localhost\", 0)\nbokeh_http.add_sockets(sockets)\n\[email protected]('/', methods=['GET'])\ndef bkapp_page():\n script = server_document('http://localhost:%d/bkapp' % port)\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n\ndef bk_worker():\n io_loop = IOLoop.current()\n server = BaseServer(io_loop, bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n\nfrom threading import Thread\nThread(target=bk_worker).start()\n", "path": "examples/howto/server_embed/flask_gunicorn_embed.py"}], "after_files": [{"content": "try:\n import asyncio\nexcept ImportError:\n raise RuntimeError(\"This example requries Python3 / asyncio\")\n\nfrom flask import Flask, render_template\n\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.embed import server_document\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, Slider\nfrom bokeh.plotting import figure\nfrom bokeh.server.server import BaseServer\nfrom bokeh.server.tornado import BokehTornado\nfrom bokeh.server.util import bind_sockets\nfrom bokeh.themes import Theme\n\nif __name__ == '__main__':\n print('This script is intended to be run with gunicorn. 
e.g.')\n print()\n print(' gunicorn -w 4 flask_gunicorn_embed:app')\n print()\n print('will start the app on four processes')\n import sys\n sys.exit()\n\nfrom bokeh.sampledata.sea_surface_temperature import sea_surface_temperature\n\napp = Flask(__name__)\n\ndef modify_doc(doc):\n df = sea_surface_temperature.copy()\n source = ColumnDataSource(data=df)\n\n plot = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',\n title=\"Sea Surface Temperature at 43.18, -70.43\")\n plot.line('time', 'temperature', source=source)\n\n def callback(attr, old, new):\n if new == 0:\n data = df\n else:\n data = df.rolling('{0}D'.format(new)).mean()\n source.data = ColumnDataSource(data=data).data\n\n slider = Slider(start=0, end=30, value=0, step=1, title=\"Smoothing by N Days\")\n slider.on_change('value', callback)\n\n doc.add_root(column(slider, plot))\n\n doc.theme = Theme(filename=\"theme.yaml\")\n\n# can't use shortcuts here, since we are passing to low level BokehTornado\nbkapp = Application(FunctionHandler(modify_doc))\n\n# This is so that if this app is run using something like \"gunicorn -w 4\" then\n# each process will listen on its own port\nsockets, port = bind_sockets(\"localhost\", 0)\n\[email protected]('/', methods=['GET'])\ndef bkapp_page():\n script = server_document('http://localhost:%d/bkapp' % port)\n return render_template(\"embed.html\", script=script, template=\"Flask\")\n\ndef bk_worker():\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=[\"localhost:8000\"])\n bokeh_http = HTTPServer(bokeh_tornado)\n bokeh_http.add_sockets(sockets)\n\n server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)\n server.start()\n server.io_loop.start()\n\nfrom threading import Thread\nThread(target=bk_worker).start()\n", "path": "examples/howto/server_embed/flask_gunicorn_embed.py"}]}
1,638
428
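The bokeh record above works around Tornado 5's asyncio integration by giving the background worker thread its own event loop before anything calls `IOLoop.current()`. Below is a stripped-down sketch of just that pattern; the timed callbacks are placeholders and none of the Bokeh server objects are involved.

```python
# Editorial sketch: Tornado 5 sits on asyncio, so a non-main thread must install its own
# asyncio event loop before IOLoop.current() can work there.
import asyncio
from threading import Thread

from tornado.ioloop import IOLoop


def worker():
    # Without this line, IOLoop.current() raises
    # "RuntimeError: There is no current event loop in thread ...".
    asyncio.set_event_loop(asyncio.new_event_loop())

    loop = IOLoop.current()
    loop.call_later(0.1, lambda: print("running on the worker's own IOLoop"))
    loop.call_later(0.2, loop.stop)   # stop so the demo thread exits cleanly
    loop.start()


Thread(target=worker).start()
```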
gh_patches_debug_19411
rasdani/github-patches
git_diff
MongoEngine__mongoengine-1668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'readPreference' param in config URI was ignored in mongoengine.register_connection I have a config string `'mongodb://mongodb01.test.vpc,mongodb02.test.vpc,mongodb03.test.vpc/prod?readPreference=secondaryPreferred'` it was parsed to dict below by pymongo.uri_parser.parse_uri ``` {'collection': None, 'database': 'prod', 'nodelist': [('mongodb01.test.vpc', 27017), ('mongodb02.test.vpc', 27017), ('mongodb03.test.vpc', 27017)], 'options': {'readpreference': 'secondaryPreferred'}, 'password': None, 'username': None} ``` but mongoengine only read in 3 params if I read correctly, ignored 'readpreference' ``` if 'replicaset' in uri_options: conn_settings['replicaSet'] = uri_options['replicaset'] if 'authsource' in uri_options: conn_settings['authentication_source'] = uri_options['authsource'] if 'authmechanism' in uri_options: conn_settings['authentication_mechanism'] = uri_options['authmechanism'] ``` thus made my config not functioning as I needed. I don't know if this is a bug or a feature. To achieve my goal do I have to explicitly invoke `connect(..., read_preference=secondaryPreferred)`? Isn't controlling readpreference in a config string be more flexible? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mongoengine/connection.py` Content: ``` 1 from pymongo import MongoClient, ReadPreference, uri_parser 2 import six 3 4 from mongoengine.python_support import IS_PYMONGO_3 5 6 __all__ = ['MongoEngineConnectionError', 'connect', 'register_connection', 7 'DEFAULT_CONNECTION_NAME'] 8 9 10 DEFAULT_CONNECTION_NAME = 'default' 11 12 if IS_PYMONGO_3: 13 READ_PREFERENCE = ReadPreference.PRIMARY 14 else: 15 from pymongo import MongoReplicaSetClient 16 READ_PREFERENCE = False 17 18 19 class MongoEngineConnectionError(Exception): 20 """Error raised when the database connection can't be established or 21 when a connection with a requested alias can't be retrieved. 22 """ 23 pass 24 25 26 _connection_settings = {} 27 _connections = {} 28 _dbs = {} 29 30 31 def register_connection(alias, name=None, host=None, port=None, 32 read_preference=READ_PREFERENCE, 33 username=None, password=None, 34 authentication_source=None, 35 authentication_mechanism=None, 36 **kwargs): 37 """Add a connection. 38 39 :param alias: the name that will be used to refer to this connection 40 throughout MongoEngine 41 :param name: the name of the specific database to use 42 :param host: the host name of the :program:`mongod` instance to connect to 43 :param port: the port that the :program:`mongod` instance is running on 44 :param read_preference: The read preference for the collection 45 ** Added pymongo 2.1 46 :param username: username to authenticate with 47 :param password: password to authenticate with 48 :param authentication_source: database to authenticate against 49 :param authentication_mechanism: database authentication mechanisms. 50 By default, use SCRAM-SHA-1 with MongoDB 3.0 and later, 51 MONGODB-CR (MongoDB Challenge Response protocol) for older servers. 52 :param is_mock: explicitly use mongomock for this connection 53 (can also be done by using `mongomock://` as db host prefix) 54 :param kwargs: ad-hoc parameters to be passed into the pymongo driver, 55 for example maxpoolsize, tz_aware, etc. See the documentation 56 for pymongo's `MongoClient` for a full list. 57 58 .. 
versionchanged:: 0.10.6 - added mongomock support 59 """ 60 conn_settings = { 61 'name': name or 'test', 62 'host': host or 'localhost', 63 'port': port or 27017, 64 'read_preference': read_preference, 65 'username': username, 66 'password': password, 67 'authentication_source': authentication_source, 68 'authentication_mechanism': authentication_mechanism 69 } 70 71 conn_host = conn_settings['host'] 72 73 # Host can be a list or a string, so if string, force to a list. 74 if isinstance(conn_host, six.string_types): 75 conn_host = [conn_host] 76 77 resolved_hosts = [] 78 for entity in conn_host: 79 80 # Handle Mongomock 81 if entity.startswith('mongomock://'): 82 conn_settings['is_mock'] = True 83 # `mongomock://` is not a valid url prefix and must be replaced by `mongodb://` 84 resolved_hosts.append(entity.replace('mongomock://', 'mongodb://', 1)) 85 86 # Handle URI style connections, only updating connection params which 87 # were explicitly specified in the URI. 88 elif '://' in entity: 89 uri_dict = uri_parser.parse_uri(entity) 90 resolved_hosts.append(entity) 91 92 if uri_dict.get('database'): 93 conn_settings['name'] = uri_dict.get('database') 94 95 for param in ('read_preference', 'username', 'password'): 96 if uri_dict.get(param): 97 conn_settings[param] = uri_dict[param] 98 99 uri_options = uri_dict['options'] 100 if 'replicaset' in uri_options: 101 conn_settings['replicaSet'] = uri_options['replicaset'] 102 if 'authsource' in uri_options: 103 conn_settings['authentication_source'] = uri_options['authsource'] 104 if 'authmechanism' in uri_options: 105 conn_settings['authentication_mechanism'] = uri_options['authmechanism'] 106 else: 107 resolved_hosts.append(entity) 108 conn_settings['host'] = resolved_hosts 109 110 # Deprecated parameters that should not be passed on 111 kwargs.pop('slaves', None) 112 kwargs.pop('is_slave', None) 113 114 conn_settings.update(kwargs) 115 _connection_settings[alias] = conn_settings 116 117 118 def disconnect(alias=DEFAULT_CONNECTION_NAME): 119 """Close the connection with a given alias.""" 120 if alias in _connections: 121 get_connection(alias=alias).close() 122 del _connections[alias] 123 if alias in _dbs: 124 del _dbs[alias] 125 126 127 def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False): 128 """Return a connection with a given alias.""" 129 130 # Connect to the database if not already connected 131 if reconnect: 132 disconnect(alias) 133 134 # If the requested alias already exists in the _connections list, return 135 # it immediately. 136 if alias in _connections: 137 return _connections[alias] 138 139 # Validate that the requested alias exists in the _connection_settings. 140 # Raise MongoEngineConnectionError if it doesn't. 141 if alias not in _connection_settings: 142 if alias == DEFAULT_CONNECTION_NAME: 143 msg = 'You have not defined a default connection' 144 else: 145 msg = 'Connection with alias "%s" has not been defined' % alias 146 raise MongoEngineConnectionError(msg) 147 148 def _clean_settings(settings_dict): 149 # set literal more efficient than calling set function 150 irrelevant_fields_set = { 151 'name', 'username', 'password', 152 'authentication_source', 'authentication_mechanism' 153 } 154 return { 155 k: v for k, v in settings_dict.items() 156 if k not in irrelevant_fields_set 157 } 158 159 # Retrieve a copy of the connection settings associated with the requested 160 # alias and remove the database name and authentication info (we don't 161 # care about them at this point). 
162 conn_settings = _clean_settings(_connection_settings[alias].copy()) 163 164 # Determine if we should use PyMongo's or mongomock's MongoClient. 165 is_mock = conn_settings.pop('is_mock', False) 166 if is_mock: 167 try: 168 import mongomock 169 except ImportError: 170 raise RuntimeError('You need mongomock installed to mock ' 171 'MongoEngine.') 172 connection_class = mongomock.MongoClient 173 else: 174 connection_class = MongoClient 175 176 # For replica set connections with PyMongo 2.x, use 177 # MongoReplicaSetClient. 178 # TODO remove this once we stop supporting PyMongo 2.x. 179 if 'replicaSet' in conn_settings and not IS_PYMONGO_3: 180 connection_class = MongoReplicaSetClient 181 conn_settings['hosts_or_uri'] = conn_settings.pop('host', None) 182 183 # hosts_or_uri has to be a string, so if 'host' was provided 184 # as a list, join its parts and separate them by ',' 185 if isinstance(conn_settings['hosts_or_uri'], list): 186 conn_settings['hosts_or_uri'] = ','.join( 187 conn_settings['hosts_or_uri']) 188 189 # Discard port since it can't be used on MongoReplicaSetClient 190 conn_settings.pop('port', None) 191 192 # Iterate over all of the connection settings and if a connection with 193 # the same parameters is already established, use it instead of creating 194 # a new one. 195 existing_connection = None 196 connection_settings_iterator = ( 197 (db_alias, settings.copy()) 198 for db_alias, settings in _connection_settings.items() 199 ) 200 for db_alias, connection_settings in connection_settings_iterator: 201 connection_settings = _clean_settings(connection_settings) 202 if conn_settings == connection_settings and _connections.get(db_alias): 203 existing_connection = _connections[db_alias] 204 break 205 206 # If an existing connection was found, assign it to the new alias 207 if existing_connection: 208 _connections[alias] = existing_connection 209 else: 210 # Otherwise, create the new connection for this alias. Raise 211 # MongoEngineConnectionError if it can't be established. 212 try: 213 _connections[alias] = connection_class(**conn_settings) 214 except Exception as e: 215 raise MongoEngineConnectionError( 216 'Cannot connect to database %s :\n%s' % (alias, e)) 217 218 return _connections[alias] 219 220 221 def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False): 222 if reconnect: 223 disconnect(alias) 224 225 if alias not in _dbs: 226 conn = get_connection(alias) 227 conn_settings = _connection_settings[alias] 228 db = conn[conn_settings['name']] 229 auth_kwargs = {'source': conn_settings['authentication_source']} 230 if conn_settings['authentication_mechanism'] is not None: 231 auth_kwargs['mechanism'] = conn_settings['authentication_mechanism'] 232 # Authenticate if necessary 233 if conn_settings['username'] and (conn_settings['password'] or 234 conn_settings['authentication_mechanism'] == 'MONGODB-X509'): 235 db.authenticate(conn_settings['username'], conn_settings['password'], **auth_kwargs) 236 _dbs[alias] = db 237 return _dbs[alias] 238 239 240 def connect(db=None, alias=DEFAULT_CONNECTION_NAME, **kwargs): 241 """Connect to the database specified by the 'db' argument. 242 243 Connection settings may be provided here as well if the database is not 244 running on the default port on localhost. If authentication is needed, 245 provide username and password arguments as well. 246 247 Multiple databases are supported by using aliases. Provide a separate 248 `alias` to connect to a different instance of :program:`mongod`. 
249 250 See the docstring for `register_connection` for more details about all 251 supported kwargs. 252 253 .. versionchanged:: 0.6 - added multiple database support. 254 """ 255 if alias not in _connections: 256 register_connection(alias, db, **kwargs) 257 258 return get_connection(alias) 259 260 261 # Support old naming convention 262 _get_connection = get_connection 263 _get_db = get_db 264 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mongoengine/connection.py b/mongoengine/connection.py --- a/mongoengine/connection.py +++ b/mongoengine/connection.py @@ -103,6 +103,18 @@ conn_settings['authentication_source'] = uri_options['authsource'] if 'authmechanism' in uri_options: conn_settings['authentication_mechanism'] = uri_options['authmechanism'] + if IS_PYMONGO_3 and 'readpreference' in uri_options: + read_preferences = ( + ReadPreference.NEAREST, + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED) + read_pf_mode = uri_options['readpreference'].lower() + for preference in read_preferences: + if preference.name.lower() == read_pf_mode: + conn_settings['read_preference'] = preference + break else: resolved_hosts.append(entity) conn_settings['host'] = resolved_hosts
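To make the effect of this patch concrete, here is a minimal usage sketch. It is not part of the original report: it assumes the patch above is applied and PyMongo 3.x is installed, and the hosts/database names are placeholders taken from the issue.

```python
# Sketch only: checks that a readPreference passed in the URI now reaches the client.
from mongoengine import connect
from pymongo import ReadPreference

client = connect(
    host="mongodb://mongodb01.test.vpc,mongodb02.test.vpc,mongodb03.test.vpc/prod"
         "?readPreference=secondaryPreferred"
)

# Before the patch the parsed option was silently dropped and the default PRIMARY
# was used; with the patch the parsed preference is forwarded to the MongoClient.
print(client.read_preference == ReadPreference.SECONDARY_PREFERRED)  # expected: True
```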
{"golden_diff": "diff --git a/mongoengine/connection.py b/mongoengine/connection.py\n--- a/mongoengine/connection.py\n+++ b/mongoengine/connection.py\n@@ -103,6 +103,18 @@\n conn_settings['authentication_source'] = uri_options['authsource']\n if 'authmechanism' in uri_options:\n conn_settings['authentication_mechanism'] = uri_options['authmechanism']\n+ if IS_PYMONGO_3 and 'readpreference' in uri_options:\n+ read_preferences = (\n+ ReadPreference.NEAREST,\n+ ReadPreference.PRIMARY,\n+ ReadPreference.PRIMARY_PREFERRED,\n+ ReadPreference.SECONDARY,\n+ ReadPreference.SECONDARY_PREFERRED)\n+ read_pf_mode = uri_options['readpreference'].lower()\n+ for preference in read_preferences:\n+ if preference.name.lower() == read_pf_mode:\n+ conn_settings['read_preference'] = preference\n+ break\n else:\n resolved_hosts.append(entity)\n conn_settings['host'] = resolved_hosts\n", "issue": "'readPreference' param in config URI was ignored in mongoengine.register_connection\nI have a config string `'mongodb://mongodb01.test.vpc,mongodb02.test.vpc,mongodb03.test.vpc/prod?readPreference=secondaryPreferred'`\r\n\r\nit was parsed to dict below by pymongo.uri_parser.parse_uri\r\n```\r\n{'collection': None,\r\n 'database': 'prod',\r\n 'nodelist': [('mongodb01.test.vpc', 27017),\r\n ('mongodb02.test.vpc', 27017),\r\n ('mongodb03.test.vpc', 27017)],\r\n 'options': {'readpreference': 'secondaryPreferred'},\r\n 'password': None,\r\n 'username': None}\r\n```\r\nbut mongoengine only read in 3 params if I read correctly, ignored 'readpreference'\r\n```\r\n if 'replicaset' in uri_options:\r\n conn_settings['replicaSet'] = uri_options['replicaset']\r\n if 'authsource' in uri_options:\r\n conn_settings['authentication_source'] = uri_options['authsource']\r\n if 'authmechanism' in uri_options:\r\n conn_settings['authentication_mechanism'] = uri_options['authmechanism']\r\n```\r\nthus made my config not functioning as I needed.\r\n\r\n I don't know if this is a bug or a feature. To achieve my goal do I have to explicitly invoke `connect(..., read_preference=secondaryPreferred)`? 
Isn't controlling readpreference in a config string be more flexible?\n", "before_files": [{"content": "from pymongo import MongoClient, ReadPreference, uri_parser\nimport six\n\nfrom mongoengine.python_support import IS_PYMONGO_3\n\n__all__ = ['MongoEngineConnectionError', 'connect', 'register_connection',\n 'DEFAULT_CONNECTION_NAME']\n\n\nDEFAULT_CONNECTION_NAME = 'default'\n\nif IS_PYMONGO_3:\n READ_PREFERENCE = ReadPreference.PRIMARY\nelse:\n from pymongo import MongoReplicaSetClient\n READ_PREFERENCE = False\n\n\nclass MongoEngineConnectionError(Exception):\n \"\"\"Error raised when the database connection can't be established or\n when a connection with a requested alias can't be retrieved.\n \"\"\"\n pass\n\n\n_connection_settings = {}\n_connections = {}\n_dbs = {}\n\n\ndef register_connection(alias, name=None, host=None, port=None,\n read_preference=READ_PREFERENCE,\n username=None, password=None,\n authentication_source=None,\n authentication_mechanism=None,\n **kwargs):\n \"\"\"Add a connection.\n\n :param alias: the name that will be used to refer to this connection\n throughout MongoEngine\n :param name: the name of the specific database to use\n :param host: the host name of the :program:`mongod` instance to connect to\n :param port: the port that the :program:`mongod` instance is running on\n :param read_preference: The read preference for the collection\n ** Added pymongo 2.1\n :param username: username to authenticate with\n :param password: password to authenticate with\n :param authentication_source: database to authenticate against\n :param authentication_mechanism: database authentication mechanisms.\n By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,\n MONGODB-CR (MongoDB Challenge Response protocol) for older servers.\n :param is_mock: explicitly use mongomock for this connection\n (can also be done by using `mongomock://` as db host prefix)\n :param kwargs: ad-hoc parameters to be passed into the pymongo driver,\n for example maxpoolsize, tz_aware, etc. See the documentation\n for pymongo's `MongoClient` for a full list.\n\n .. 
versionchanged:: 0.10.6 - added mongomock support\n \"\"\"\n conn_settings = {\n 'name': name or 'test',\n 'host': host or 'localhost',\n 'port': port or 27017,\n 'read_preference': read_preference,\n 'username': username,\n 'password': password,\n 'authentication_source': authentication_source,\n 'authentication_mechanism': authentication_mechanism\n }\n\n conn_host = conn_settings['host']\n\n # Host can be a list or a string, so if string, force to a list.\n if isinstance(conn_host, six.string_types):\n conn_host = [conn_host]\n\n resolved_hosts = []\n for entity in conn_host:\n\n # Handle Mongomock\n if entity.startswith('mongomock://'):\n conn_settings['is_mock'] = True\n # `mongomock://` is not a valid url prefix and must be replaced by `mongodb://`\n resolved_hosts.append(entity.replace('mongomock://', 'mongodb://', 1))\n\n # Handle URI style connections, only updating connection params which\n # were explicitly specified in the URI.\n elif '://' in entity:\n uri_dict = uri_parser.parse_uri(entity)\n resolved_hosts.append(entity)\n\n if uri_dict.get('database'):\n conn_settings['name'] = uri_dict.get('database')\n\n for param in ('read_preference', 'username', 'password'):\n if uri_dict.get(param):\n conn_settings[param] = uri_dict[param]\n\n uri_options = uri_dict['options']\n if 'replicaset' in uri_options:\n conn_settings['replicaSet'] = uri_options['replicaset']\n if 'authsource' in uri_options:\n conn_settings['authentication_source'] = uri_options['authsource']\n if 'authmechanism' in uri_options:\n conn_settings['authentication_mechanism'] = uri_options['authmechanism']\n else:\n resolved_hosts.append(entity)\n conn_settings['host'] = resolved_hosts\n\n # Deprecated parameters that should not be passed on\n kwargs.pop('slaves', None)\n kwargs.pop('is_slave', None)\n\n conn_settings.update(kwargs)\n _connection_settings[alias] = conn_settings\n\n\ndef disconnect(alias=DEFAULT_CONNECTION_NAME):\n \"\"\"Close the connection with a given alias.\"\"\"\n if alias in _connections:\n get_connection(alias=alias).close()\n del _connections[alias]\n if alias in _dbs:\n del _dbs[alias]\n\n\ndef get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False):\n \"\"\"Return a connection with a given alias.\"\"\"\n\n # Connect to the database if not already connected\n if reconnect:\n disconnect(alias)\n\n # If the requested alias already exists in the _connections list, return\n # it immediately.\n if alias in _connections:\n return _connections[alias]\n\n # Validate that the requested alias exists in the _connection_settings.\n # Raise MongoEngineConnectionError if it doesn't.\n if alias not in _connection_settings:\n if alias == DEFAULT_CONNECTION_NAME:\n msg = 'You have not defined a default connection'\n else:\n msg = 'Connection with alias \"%s\" has not been defined' % alias\n raise MongoEngineConnectionError(msg)\n\n def _clean_settings(settings_dict):\n # set literal more efficient than calling set function\n irrelevant_fields_set = {\n 'name', 'username', 'password',\n 'authentication_source', 'authentication_mechanism'\n }\n return {\n k: v for k, v in settings_dict.items()\n if k not in irrelevant_fields_set\n }\n\n # Retrieve a copy of the connection settings associated with the requested\n # alias and remove the database name and authentication info (we don't\n # care about them at this point).\n conn_settings = _clean_settings(_connection_settings[alias].copy())\n\n # Determine if we should use PyMongo's or mongomock's MongoClient.\n is_mock = conn_settings.pop('is_mock', 
False)\n if is_mock:\n try:\n import mongomock\n except ImportError:\n raise RuntimeError('You need mongomock installed to mock '\n 'MongoEngine.')\n connection_class = mongomock.MongoClient\n else:\n connection_class = MongoClient\n\n # For replica set connections with PyMongo 2.x, use\n # MongoReplicaSetClient.\n # TODO remove this once we stop supporting PyMongo 2.x.\n if 'replicaSet' in conn_settings and not IS_PYMONGO_3:\n connection_class = MongoReplicaSetClient\n conn_settings['hosts_or_uri'] = conn_settings.pop('host', None)\n\n # hosts_or_uri has to be a string, so if 'host' was provided\n # as a list, join its parts and separate them by ','\n if isinstance(conn_settings['hosts_or_uri'], list):\n conn_settings['hosts_or_uri'] = ','.join(\n conn_settings['hosts_or_uri'])\n\n # Discard port since it can't be used on MongoReplicaSetClient\n conn_settings.pop('port', None)\n\n # Iterate over all of the connection settings and if a connection with\n # the same parameters is already established, use it instead of creating\n # a new one.\n existing_connection = None\n connection_settings_iterator = (\n (db_alias, settings.copy())\n for db_alias, settings in _connection_settings.items()\n )\n for db_alias, connection_settings in connection_settings_iterator:\n connection_settings = _clean_settings(connection_settings)\n if conn_settings == connection_settings and _connections.get(db_alias):\n existing_connection = _connections[db_alias]\n break\n\n # If an existing connection was found, assign it to the new alias\n if existing_connection:\n _connections[alias] = existing_connection\n else:\n # Otherwise, create the new connection for this alias. Raise\n # MongoEngineConnectionError if it can't be established.\n try:\n _connections[alias] = connection_class(**conn_settings)\n except Exception as e:\n raise MongoEngineConnectionError(\n 'Cannot connect to database %s :\\n%s' % (alias, e))\n\n return _connections[alias]\n\n\ndef get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):\n if reconnect:\n disconnect(alias)\n\n if alias not in _dbs:\n conn = get_connection(alias)\n conn_settings = _connection_settings[alias]\n db = conn[conn_settings['name']]\n auth_kwargs = {'source': conn_settings['authentication_source']}\n if conn_settings['authentication_mechanism'] is not None:\n auth_kwargs['mechanism'] = conn_settings['authentication_mechanism']\n # Authenticate if necessary\n if conn_settings['username'] and (conn_settings['password'] or\n conn_settings['authentication_mechanism'] == 'MONGODB-X509'):\n db.authenticate(conn_settings['username'], conn_settings['password'], **auth_kwargs)\n _dbs[alias] = db\n return _dbs[alias]\n\n\ndef connect(db=None, alias=DEFAULT_CONNECTION_NAME, **kwargs):\n \"\"\"Connect to the database specified by the 'db' argument.\n\n Connection settings may be provided here as well if the database is not\n running on the default port on localhost. If authentication is needed,\n provide username and password arguments as well.\n\n Multiple databases are supported by using aliases. Provide a separate\n `alias` to connect to a different instance of :program:`mongod`.\n\n See the docstring for `register_connection` for more details about all\n supported kwargs.\n\n .. 
versionchanged:: 0.6 - added multiple database support.\n \"\"\"\n if alias not in _connections:\n register_connection(alias, db, **kwargs)\n\n return get_connection(alias)\n\n\n# Support old naming convention\n_get_connection = get_connection\n_get_db = get_db\n", "path": "mongoengine/connection.py"}], "after_files": [{"content": "from pymongo import MongoClient, ReadPreference, uri_parser\nimport six\n\nfrom mongoengine.python_support import IS_PYMONGO_3\n\n__all__ = ['MongoEngineConnectionError', 'connect', 'register_connection',\n 'DEFAULT_CONNECTION_NAME']\n\n\nDEFAULT_CONNECTION_NAME = 'default'\n\nif IS_PYMONGO_3:\n READ_PREFERENCE = ReadPreference.PRIMARY\nelse:\n from pymongo import MongoReplicaSetClient\n READ_PREFERENCE = False\n\n\nclass MongoEngineConnectionError(Exception):\n \"\"\"Error raised when the database connection can't be established or\n when a connection with a requested alias can't be retrieved.\n \"\"\"\n pass\n\n\n_connection_settings = {}\n_connections = {}\n_dbs = {}\n\n\ndef register_connection(alias, name=None, host=None, port=None,\n read_preference=READ_PREFERENCE,\n username=None, password=None,\n authentication_source=None,\n authentication_mechanism=None,\n **kwargs):\n \"\"\"Add a connection.\n\n :param alias: the name that will be used to refer to this connection\n throughout MongoEngine\n :param name: the name of the specific database to use\n :param host: the host name of the :program:`mongod` instance to connect to\n :param port: the port that the :program:`mongod` instance is running on\n :param read_preference: The read preference for the collection\n ** Added pymongo 2.1\n :param username: username to authenticate with\n :param password: password to authenticate with\n :param authentication_source: database to authenticate against\n :param authentication_mechanism: database authentication mechanisms.\n By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,\n MONGODB-CR (MongoDB Challenge Response protocol) for older servers.\n :param is_mock: explicitly use mongomock for this connection\n (can also be done by using `mongomock://` as db host prefix)\n :param kwargs: ad-hoc parameters to be passed into the pymongo driver,\n for example maxpoolsize, tz_aware, etc. See the documentation\n for pymongo's `MongoClient` for a full list.\n\n .. 
versionchanged:: 0.10.6 - added mongomock support\n \"\"\"\n conn_settings = {\n 'name': name or 'test',\n 'host': host or 'localhost',\n 'port': port or 27017,\n 'read_preference': read_preference,\n 'username': username,\n 'password': password,\n 'authentication_source': authentication_source,\n 'authentication_mechanism': authentication_mechanism\n }\n\n conn_host = conn_settings['host']\n\n # Host can be a list or a string, so if string, force to a list.\n if isinstance(conn_host, six.string_types):\n conn_host = [conn_host]\n\n resolved_hosts = []\n for entity in conn_host:\n\n # Handle Mongomock\n if entity.startswith('mongomock://'):\n conn_settings['is_mock'] = True\n # `mongomock://` is not a valid url prefix and must be replaced by `mongodb://`\n resolved_hosts.append(entity.replace('mongomock://', 'mongodb://', 1))\n\n # Handle URI style connections, only updating connection params which\n # were explicitly specified in the URI.\n elif '://' in entity:\n uri_dict = uri_parser.parse_uri(entity)\n resolved_hosts.append(entity)\n\n if uri_dict.get('database'):\n conn_settings['name'] = uri_dict.get('database')\n\n for param in ('read_preference', 'username', 'password'):\n if uri_dict.get(param):\n conn_settings[param] = uri_dict[param]\n\n uri_options = uri_dict['options']\n if 'replicaset' in uri_options:\n conn_settings['replicaSet'] = uri_options['replicaset']\n if 'authsource' in uri_options:\n conn_settings['authentication_source'] = uri_options['authsource']\n if 'authmechanism' in uri_options:\n conn_settings['authentication_mechanism'] = uri_options['authmechanism']\n if IS_PYMONGO_3 and 'readpreference' in uri_options:\n read_preferences = (\n ReadPreference.NEAREST,\n ReadPreference.PRIMARY,\n ReadPreference.PRIMARY_PREFERRED,\n ReadPreference.SECONDARY,\n ReadPreference.SECONDARY_PREFERRED)\n read_pf_mode = uri_options['readpreference'].lower()\n for preference in read_preferences:\n if preference.name.lower() == read_pf_mode:\n conn_settings['read_preference'] = preference\n break\n else:\n resolved_hosts.append(entity)\n conn_settings['host'] = resolved_hosts\n\n # Deprecated parameters that should not be passed on\n kwargs.pop('slaves', None)\n kwargs.pop('is_slave', None)\n\n conn_settings.update(kwargs)\n _connection_settings[alias] = conn_settings\n\n\ndef disconnect(alias=DEFAULT_CONNECTION_NAME):\n \"\"\"Close the connection with a given alias.\"\"\"\n if alias in _connections:\n get_connection(alias=alias).close()\n del _connections[alias]\n if alias in _dbs:\n del _dbs[alias]\n\n\ndef get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False):\n \"\"\"Return a connection with a given alias.\"\"\"\n\n # Connect to the database if not already connected\n if reconnect:\n disconnect(alias)\n\n # If the requested alias already exists in the _connections list, return\n # it immediately.\n if alias in _connections:\n return _connections[alias]\n\n # Validate that the requested alias exists in the _connection_settings.\n # Raise MongoEngineConnectionError if it doesn't.\n if alias not in _connection_settings:\n if alias == DEFAULT_CONNECTION_NAME:\n msg = 'You have not defined a default connection'\n else:\n msg = 'Connection with alias \"%s\" has not been defined' % alias\n raise MongoEngineConnectionError(msg)\n\n def _clean_settings(settings_dict):\n # set literal more efficient than calling set function\n irrelevant_fields_set = {\n 'name', 'username', 'password',\n 'authentication_source', 'authentication_mechanism'\n }\n return {\n k: v for k, v in 
settings_dict.items()\n if k not in irrelevant_fields_set\n }\n\n # Retrieve a copy of the connection settings associated with the requested\n # alias and remove the database name and authentication info (we don't\n # care about them at this point).\n conn_settings = _clean_settings(_connection_settings[alias].copy())\n\n # Determine if we should use PyMongo's or mongomock's MongoClient.\n is_mock = conn_settings.pop('is_mock', False)\n if is_mock:\n try:\n import mongomock\n except ImportError:\n raise RuntimeError('You need mongomock installed to mock '\n 'MongoEngine.')\n connection_class = mongomock.MongoClient\n else:\n connection_class = MongoClient\n\n # For replica set connections with PyMongo 2.x, use\n # MongoReplicaSetClient.\n # TODO remove this once we stop supporting PyMongo 2.x.\n if 'replicaSet' in conn_settings and not IS_PYMONGO_3:\n connection_class = MongoReplicaSetClient\n conn_settings['hosts_or_uri'] = conn_settings.pop('host', None)\n\n # hosts_or_uri has to be a string, so if 'host' was provided\n # as a list, join its parts and separate them by ','\n if isinstance(conn_settings['hosts_or_uri'], list):\n conn_settings['hosts_or_uri'] = ','.join(\n conn_settings['hosts_or_uri'])\n\n # Discard port since it can't be used on MongoReplicaSetClient\n conn_settings.pop('port', None)\n\n # Iterate over all of the connection settings and if a connection with\n # the same parameters is already established, use it instead of creating\n # a new one.\n existing_connection = None\n connection_settings_iterator = (\n (db_alias, settings.copy())\n for db_alias, settings in _connection_settings.items()\n )\n for db_alias, connection_settings in connection_settings_iterator:\n connection_settings = _clean_settings(connection_settings)\n if conn_settings == connection_settings and _connections.get(db_alias):\n existing_connection = _connections[db_alias]\n break\n\n # If an existing connection was found, assign it to the new alias\n if existing_connection:\n _connections[alias] = existing_connection\n else:\n # Otherwise, create the new connection for this alias. Raise\n # MongoEngineConnectionError if it can't be established.\n try:\n _connections[alias] = connection_class(**conn_settings)\n except Exception as e:\n raise MongoEngineConnectionError(\n 'Cannot connect to database %s :\\n%s' % (alias, e))\n\n return _connections[alias]\n\n\ndef get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False):\n if reconnect:\n disconnect(alias)\n\n if alias not in _dbs:\n conn = get_connection(alias)\n conn_settings = _connection_settings[alias]\n db = conn[conn_settings['name']]\n auth_kwargs = {'source': conn_settings['authentication_source']}\n if conn_settings['authentication_mechanism'] is not None:\n auth_kwargs['mechanism'] = conn_settings['authentication_mechanism']\n # Authenticate if necessary\n if conn_settings['username'] and (conn_settings['password'] or\n conn_settings['authentication_mechanism'] == 'MONGODB-X509'):\n db.authenticate(conn_settings['username'], conn_settings['password'], **auth_kwargs)\n _dbs[alias] = db\n return _dbs[alias]\n\n\ndef connect(db=None, alias=DEFAULT_CONNECTION_NAME, **kwargs):\n \"\"\"Connect to the database specified by the 'db' argument.\n\n Connection settings may be provided here as well if the database is not\n running on the default port on localhost. If authentication is needed,\n provide username and password arguments as well.\n\n Multiple databases are supported by using aliases. 
Provide a separate\n `alias` to connect to a different instance of :program:`mongod`.\n\n See the docstring for `register_connection` for more details about all\n supported kwargs.\n\n .. versionchanged:: 0.6 - added multiple database support.\n \"\"\"\n if alias not in _connections:\n register_connection(alias, db, **kwargs)\n\n return get_connection(alias)\n\n\n# Support old naming convention\n_get_connection = get_connection\n_get_db = get_db\n", "path": "mongoengine/connection.py"}]}
num_tokens: 3,443
num_tokens_diff: 227

problem_id: gh_patches_debug_9231
source: rasdani/github-patches
task_type: git_diff
in_source_id: privacyidea__privacyidea-2615
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- no serial in polling on /ttype/push We may have a bug in push polling. This exception occurs on polling https://gist.github.com/laclaro/743618d11f61f8a817e273db6b804a9a This may be related to #2534. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `privacyidea/api/ttype.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # http://www.privacyidea.org 4 # (c) Cornelius Kölbel, privacyidea.org 5 # 6 # 2015-09-01 Cornelius Kölbel, <[email protected]> 7 # Initial writeup 8 # 9 # This code is free software; you can redistribute it and/or 10 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE 11 # License as published by the Free Software Foundation; either 12 # version 3 of the License, or any later version. 13 # 14 # This code is distributed in the hope that it will be useful, 15 # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 # GNU AFFERO GENERAL PUBLIC LICENSE for more details. 18 # 19 # You should have received a copy of the GNU Affero General Public 20 # License along with this program. If not, see <http://www.gnu.org/licenses/>. 21 # 22 """ 23 This API endpoint is a generic endpoint that can be used by any token 24 type. 25 26 The tokentype needs to implement a classmethod *api_endpoint* and can then be 27 called by /ttype/<tokentype>. 28 This way, each tokentype can create its own API without the need to change 29 the core API. 30 31 The TiQR Token uses this API to implement its special functionalities. See 32 :ref:`code_tiqr_token`. 33 """ 34 from flask import (Blueprint, 35 request) 36 from .lib.utils import getParam 37 from ..lib.log import log_with 38 from flask import g, jsonify, current_app 39 import logging 40 from privacyidea.api.lib.utils import get_all_params 41 from privacyidea.lib.policy import PolicyClass 42 from privacyidea.lib.audit import getAudit 43 from privacyidea.lib.config import (get_token_class, get_from_config, 44 SYSCONF, ensure_no_config_object) 45 from privacyidea.lib.user import get_user_from_param 46 from privacyidea.lib.utils import get_client_ip 47 import json 48 49 log = logging.getLogger(__name__) 50 51 ttype_blueprint = Blueprint('ttype_blueprint', __name__) 52 53 54 @ttype_blueprint.before_request 55 def before_request(): 56 """ 57 This is executed before the request 58 """ 59 ensure_no_config_object() 60 request.all_data = get_all_params(request.values, request.data) 61 privacyidea_server = current_app.config.get("PI_AUDIT_SERVERNAME") or \ 62 request.host 63 # Create a policy_object, that reads the database audit settings 64 # and contains the complete policy definition during the request. 65 # This audit_object can be used in the postpolicy and prepolicy and it 66 # can be passed to the innerpolicies. 67 g.policy_object = PolicyClass() 68 g.audit_object = getAudit(current_app.config) 69 # access_route contains the ip adresses of all clients, hops and proxies. 
70 g.client_ip = get_client_ip(request, 71 get_from_config(SYSCONF.OVERRIDECLIENT)) 72 g.audit_object.log({"success": False, 73 "action_detail": "", 74 "client": g.client_ip, 75 "client_user_agent": request.user_agent.browser, 76 "privacyidea_server": privacyidea_server, 77 "action": "{0!s} {1!s}".format(request.method, request.url_rule), 78 "info": ""}) 79 80 81 @ttype_blueprint.route('/<ttype>', methods=['POST', 'GET']) 82 @log_with(log) 83 def token(ttype=None): 84 """ 85 This is a special token function. Each token type can define an 86 additional API call, that does not need authentication on the REST API 87 level. 88 89 :return: Token Type dependent 90 """ 91 tokenc = get_token_class(ttype) 92 res = tokenc.api_endpoint(request, g) 93 serial = getParam(request.all_data, "serial") 94 user = get_user_from_param(request.all_data) 95 g.audit_object.log({"success": 1, 96 "user": user.login, 97 "realm": user.realm, 98 "serial": serial, 99 "token_type": ttype}) 100 if res[0] == "json": 101 return jsonify(res[1]) 102 elif res[0] in ["html", "plain"]: 103 return current_app.response_class(res[1], mimetype="text/{0!s}".format(res[0])) 104 elif len(res) == 2: 105 return current_app.response_class(json.dumps(res[1]), 106 mimetype="application/{0!s}".format(res[0])) 107 else: 108 return current_app.response_class(res[1], mimetype="application/octet-binary", 109 headers=res[2]) 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py --- a/privacyidea/api/ttype.py +++ b/privacyidea/api/ttype.py @@ -69,6 +69,7 @@ # access_route contains the ip adresses of all clients, hops and proxies. g.client_ip = get_client_ip(request, get_from_config(SYSCONF.OVERRIDECLIENT)) + g.serial = getParam(request.all_data, "serial") or None g.audit_object.log({"success": False, "action_detail": "", "client": g.client_ip,
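As a hedged illustration (not taken from the privacyidea test suite, and parameter names other than `serial` are simplified): a push token polling request carries the token serial as a request parameter, and with the patch `before_request()` now stores it on `flask.g` before the rest of the request handling runs.

```python
# Hypothetical polling call against a privacyidea server; values are placeholders.
import requests

resp = requests.get(
    "https://pi.example.com/ttype/push",
    params={"serial": "PIPU0001A2B3", "timestamp": "...", "signature": "..."},
)
# With g.serial populated in before_request(), code that later expects g.serial
# (see the exception in the linked gist) should no longer fail during polling.
print(resp.status_code)
```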
{"golden_diff": "diff --git a/privacyidea/api/ttype.py b/privacyidea/api/ttype.py\n--- a/privacyidea/api/ttype.py\n+++ b/privacyidea/api/ttype.py\n@@ -69,6 +69,7 @@\n # access_route contains the ip adresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n+ g.serial = getParam(request.all_data, \"serial\") or None\n g.audit_object.log({\"success\": False,\n \"action_detail\": \"\",\n \"client\": g.client_ip,\n", "issue": "no serial in polling on /ttype/push\nWe may have a bug in push polling. This exception occurs on polling\r\n\r\nhttps://gist.github.com/laclaro/743618d11f61f8a817e273db6b804a9a\r\n\r\nThis may be related to #2534.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) Cornelius K\u00f6lbel, privacyidea.org\n#\n# 2015-09-01 Cornelius K\u00f6lbel, <[email protected]>\n# Initial writeup\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis API endpoint is a generic endpoint that can be used by any token\ntype.\n\nThe tokentype needs to implement a classmethod *api_endpoint* and can then be\ncalled by /ttype/<tokentype>.\nThis way, each tokentype can create its own API without the need to change\nthe core API.\n\nThe TiQR Token uses this API to implement its special functionalities. 
See\n:ref:`code_tiqr_token`.\n\"\"\"\nfrom flask import (Blueprint,\n request)\nfrom .lib.utils import getParam\nfrom ..lib.log import log_with\nfrom flask import g, jsonify, current_app\nimport logging\nfrom privacyidea.api.lib.utils import get_all_params\nfrom privacyidea.lib.policy import PolicyClass\nfrom privacyidea.lib.audit import getAudit\nfrom privacyidea.lib.config import (get_token_class, get_from_config,\n SYSCONF, ensure_no_config_object)\nfrom privacyidea.lib.user import get_user_from_param\nfrom privacyidea.lib.utils import get_client_ip\nimport json\n\nlog = logging.getLogger(__name__)\n\nttype_blueprint = Blueprint('ttype_blueprint', __name__)\n\n\n@ttype_blueprint.before_request\ndef before_request():\n \"\"\"\n This is executed before the request\n \"\"\"\n ensure_no_config_object()\n request.all_data = get_all_params(request.values, request.data)\n privacyidea_server = current_app.config.get(\"PI_AUDIT_SERVERNAME\") or \\\n request.host\n # Create a policy_object, that reads the database audit settings\n # and contains the complete policy definition during the request.\n # This audit_object can be used in the postpolicy and prepolicy and it\n # can be passed to the innerpolicies.\n g.policy_object = PolicyClass()\n g.audit_object = getAudit(current_app.config)\n # access_route contains the ip adresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n g.audit_object.log({\"success\": False,\n \"action_detail\": \"\",\n \"client\": g.client_ip,\n \"client_user_agent\": request.user_agent.browser,\n \"privacyidea_server\": privacyidea_server,\n \"action\": \"{0!s} {1!s}\".format(request.method, request.url_rule),\n \"info\": \"\"})\n\n\n@ttype_blueprint.route('/<ttype>', methods=['POST', 'GET'])\n@log_with(log)\ndef token(ttype=None):\n \"\"\"\n This is a special token function. Each token type can define an\n additional API call, that does not need authentication on the REST API\n level.\n\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n g.audit_object.log({\"success\": 1,\n \"user\": user.login,\n \"realm\": user.realm,\n \"serial\": serial,\n \"token_type\": ttype})\n if res[0] == \"json\":\n return jsonify(res[1])\n elif res[0] in [\"html\", \"plain\"]:\n return current_app.response_class(res[1], mimetype=\"text/{0!s}\".format(res[0]))\n elif len(res) == 2:\n return current_app.response_class(json.dumps(res[1]),\n mimetype=\"application/{0!s}\".format(res[0]))\n else:\n return current_app.response_class(res[1], mimetype=\"application/octet-binary\",\n headers=res[2])\n", "path": "privacyidea/api/ttype.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) Cornelius K\u00f6lbel, privacyidea.org\n#\n# 2015-09-01 Cornelius K\u00f6lbel, <[email protected]>\n# Initial writeup\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis API endpoint is a generic endpoint that can be used by any token\ntype.\n\nThe tokentype needs to implement a classmethod *api_endpoint* and can then be\ncalled by /ttype/<tokentype>.\nThis way, each tokentype can create its own API without the need to change\nthe core API.\n\nThe TiQR Token uses this API to implement its special functionalities. See\n:ref:`code_tiqr_token`.\n\"\"\"\nfrom flask import (Blueprint,\n request)\nfrom .lib.utils import getParam\nfrom ..lib.log import log_with\nfrom flask import g, jsonify, current_app\nimport logging\nfrom privacyidea.api.lib.utils import get_all_params\nfrom privacyidea.lib.policy import PolicyClass\nfrom privacyidea.lib.audit import getAudit\nfrom privacyidea.lib.config import (get_token_class, get_from_config,\n SYSCONF, ensure_no_config_object)\nfrom privacyidea.lib.user import get_user_from_param\nfrom privacyidea.lib.utils import get_client_ip\nimport json\n\nlog = logging.getLogger(__name__)\n\nttype_blueprint = Blueprint('ttype_blueprint', __name__)\n\n\n@ttype_blueprint.before_request\ndef before_request():\n \"\"\"\n This is executed before the request\n \"\"\"\n ensure_no_config_object()\n request.all_data = get_all_params(request.values, request.data)\n privacyidea_server = current_app.config.get(\"PI_AUDIT_SERVERNAME\") or \\\n request.host\n # Create a policy_object, that reads the database audit settings\n # and contains the complete policy definition during the request.\n # This audit_object can be used in the postpolicy and prepolicy and it\n # can be passed to the innerpolicies.\n g.policy_object = PolicyClass()\n g.audit_object = getAudit(current_app.config)\n # access_route contains the ip adresses of all clients, hops and proxies.\n g.client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n g.serial = getParam(request.all_data, \"serial\") or None\n g.audit_object.log({\"success\": False,\n \"action_detail\": \"\",\n \"client\": g.client_ip,\n \"client_user_agent\": request.user_agent.browser,\n \"privacyidea_server\": privacyidea_server,\n \"action\": \"{0!s} {1!s}\".format(request.method, request.url_rule),\n \"info\": \"\"})\n\n\n@ttype_blueprint.route('/<ttype>', methods=['POST', 'GET'])\n@log_with(log)\ndef token(ttype=None):\n \"\"\"\n This is a special token function. Each token type can define an\n additional API call, that does not need authentication on the REST API\n level.\n\n :return: Token Type dependent\n \"\"\"\n tokenc = get_token_class(ttype)\n res = tokenc.api_endpoint(request, g)\n serial = getParam(request.all_data, \"serial\")\n user = get_user_from_param(request.all_data)\n g.audit_object.log({\"success\": 1,\n \"user\": user.login,\n \"realm\": user.realm,\n \"serial\": serial,\n \"token_type\": ttype})\n if res[0] == \"json\":\n return jsonify(res[1])\n elif res[0] in [\"html\", \"plain\"]:\n return current_app.response_class(res[1], mimetype=\"text/{0!s}\".format(res[0]))\n elif len(res) == 2:\n return current_app.response_class(json.dumps(res[1]),\n mimetype=\"application/{0!s}\".format(res[0]))\n else:\n return current_app.response_class(res[1], mimetype=\"application/octet-binary\",\n headers=res[2])\n", "path": "privacyidea/api/ttype.py"}]}
num_tokens: 1,534
num_tokens_diff: 134

problem_id: gh_patches_debug_6576
source: rasdani/github-patches
task_type: git_diff
in_source_id: ephios-dev__ephios-757
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make minors identifiable on event detail page As an Einsatzleiter, I want to quickly grasp which participants are younger than 18 years. For that purpose, I want to have the participation boxes on the event detail page/shift box to display a small warning/indication, e.g. a red corner or similar. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ephios/core/signup/participants.py` Content: ``` 1 import dataclasses 2 import functools 3 from datetime import date 4 from typing import Optional 5 6 from django.contrib.auth import get_user_model 7 from django.db.models import QuerySet 8 from django.urls import reverse 9 from django.utils.safestring import mark_safe 10 from django.utils.translation import gettext_lazy as _ 11 12 from ephios.core.models import AbstractParticipation, LocalParticipation, Qualification 13 from ephios.core.models.events import PlaceholderParticipation 14 15 16 @dataclasses.dataclass(frozen=True) 17 class AbstractParticipant: 18 first_name: str 19 last_name: str 20 qualifications: QuerySet = dataclasses.field(hash=False) 21 date_of_birth: Optional[date] 22 email: Optional[str] # if set to None, no notifications are sent 23 24 def get_age(self, today: date = None): 25 if self.date_of_birth is None: 26 return None 27 today, born = today or date.today(), self.date_of_birth 28 return today.year - born.year - ((today.month, today.day) < (born.month, born.day)) 29 30 def __str__(self): 31 return f"{self.first_name} {self.last_name}" 32 33 def new_participation(self, shift): 34 raise NotImplementedError 35 36 def participation_for(self, shift): 37 """Return the participation object for a shift. 
Return None if it does not exist.""" 38 raise NotImplementedError 39 40 def all_participations(self): 41 """Return all participations for this participant""" 42 raise NotImplementedError 43 44 @functools.lru_cache(maxsize=64) 45 def collect_all_qualifications(self) -> set: 46 return Qualification.collect_all_included_qualifications(self.qualifications) 47 48 def has_qualifications(self, qualifications): 49 return set(qualifications) <= self.collect_all_qualifications() 50 51 def reverse_signup_action(self, shift): 52 raise NotImplementedError 53 54 def reverse_event_detail(self, event): 55 raise NotImplementedError 56 57 @property 58 def icon(self): 59 return mark_safe('<span class="fa fa-user"></span>') 60 61 62 @dataclasses.dataclass(frozen=True) 63 class LocalUserParticipant(AbstractParticipant): 64 user: get_user_model() 65 66 def new_participation(self, shift): 67 return LocalParticipation(shift=shift, user=self.user) 68 69 def participation_for(self, shift): 70 try: 71 return LocalParticipation.objects.get(shift=shift, user=self.user) 72 except LocalParticipation.DoesNotExist: 73 return None 74 75 def all_participations(self): 76 return LocalParticipation.objects.filter(user=self.user) 77 78 def reverse_signup_action(self, shift): 79 return reverse("core:signup_action", kwargs=dict(pk=shift.pk)) 80 81 def reverse_event_detail(self, event): 82 return event.get_absolute_url() 83 84 85 @dataclasses.dataclass(frozen=True) 86 class PlaceholderParticipant(AbstractParticipant): 87 def new_participation(self, shift): 88 return PlaceholderParticipation( 89 shift=shift, first_name=self.first_name, last_name=self.last_name 90 ) 91 92 def participation_for(self, shift): 93 try: 94 return PlaceholderParticipation.objects.get( 95 shift=shift, first_name=self.first_name, last_name=self.last_name 96 ) 97 except PlaceholderParticipation.DoesNotExist: 98 return None 99 100 def all_participations(self): 101 return AbstractParticipation.objects.none() 102 103 def reverse_signup_action(self, shift): 104 raise NotImplementedError 105 106 def reverse_event_detail(self, event): 107 raise NotImplementedError 108 109 @property 110 def icon(self): 111 return mark_safe( 112 f'<span class="fa fa-user-tag" data-toggle="tooltip" data-placement="left" title="{_("Placeholder")}"></span>' 113 ) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ephios/core/signup/participants.py b/ephios/core/signup/participants.py --- a/ephios/core/signup/participants.py +++ b/ephios/core/signup/participants.py @@ -27,6 +27,12 @@ today, born = today or date.today(), self.date_of_birth return today.year - born.year - ((today.month, today.day) < (born.month, born.day)) + @property + def is_minor(self): + if age := self.get_age(): + return age < 18 + return False + def __str__(self): return f"{self.first_name} {self.last_name}"
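A minimal sketch of how the new property behaves. Constructing a participant directly with a plain list of qualifications is for illustration only; in ephios this field is normally a QuerySet.

```python
from datetime import date

from ephios.core.signup.participants import PlaceholderParticipant

participant = PlaceholderParticipant(
    first_name="Jane",
    last_name="Doe",
    qualifications=[],          # illustrative stand-in for a QuerySet
    date_of_birth=date(2008, 5, 1),
    email=None,
)

print(participant.get_age(today=date(2022, 6, 1)))  # 14
print(participant.is_minor)  # True while the participant is under 18
```

Templates and views rendering the shift boxes on the event detail page can then branch on the participant's `is_minor` flag to show the requested warning marker.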
{"golden_diff": "diff --git a/ephios/core/signup/participants.py b/ephios/core/signup/participants.py\n--- a/ephios/core/signup/participants.py\n+++ b/ephios/core/signup/participants.py\n@@ -27,6 +27,12 @@\n today, born = today or date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n \n+ @property\n+ def is_minor(self):\n+ if age := self.get_age():\n+ return age < 18\n+ return False\n+\n def __str__(self):\n return f\"{self.first_name} {self.last_name}\"\n", "issue": "Make minors identifiable on event detail page\nAs an Einsatzleiter, I want to quickly grasp which participants are younger than 18 years. For that purpose, I want to have the participation boxes on the event detail page/shift box to display a small warning/indication, e.g. a red corner or similar.\n", "before_files": [{"content": "import dataclasses\nimport functools\nfrom datetime import date\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import QuerySet\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom ephios.core.models import AbstractParticipation, LocalParticipation, Qualification\nfrom ephios.core.models.events import PlaceholderParticipation\n\n\[email protected](frozen=True)\nclass AbstractParticipant:\n first_name: str\n last_name: str\n qualifications: QuerySet = dataclasses.field(hash=False)\n date_of_birth: Optional[date]\n email: Optional[str] # if set to None, no notifications are sent\n\n def get_age(self, today: date = None):\n if self.date_of_birth is None:\n return None\n today, born = today or date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def new_participation(self, shift):\n raise NotImplementedError\n\n def participation_for(self, shift):\n \"\"\"Return the participation object for a shift. 
Return None if it does not exist.\"\"\"\n raise NotImplementedError\n\n def all_participations(self):\n \"\"\"Return all participations for this participant\"\"\"\n raise NotImplementedError\n\n @functools.lru_cache(maxsize=64)\n def collect_all_qualifications(self) -> set:\n return Qualification.collect_all_included_qualifications(self.qualifications)\n\n def has_qualifications(self, qualifications):\n return set(qualifications) <= self.collect_all_qualifications()\n\n def reverse_signup_action(self, shift):\n raise NotImplementedError\n\n def reverse_event_detail(self, event):\n raise NotImplementedError\n\n @property\n def icon(self):\n return mark_safe('<span class=\"fa fa-user\"></span>')\n\n\[email protected](frozen=True)\nclass LocalUserParticipant(AbstractParticipant):\n user: get_user_model()\n\n def new_participation(self, shift):\n return LocalParticipation(shift=shift, user=self.user)\n\n def participation_for(self, shift):\n try:\n return LocalParticipation.objects.get(shift=shift, user=self.user)\n except LocalParticipation.DoesNotExist:\n return None\n\n def all_participations(self):\n return LocalParticipation.objects.filter(user=self.user)\n\n def reverse_signup_action(self, shift):\n return reverse(\"core:signup_action\", kwargs=dict(pk=shift.pk))\n\n def reverse_event_detail(self, event):\n return event.get_absolute_url()\n\n\[email protected](frozen=True)\nclass PlaceholderParticipant(AbstractParticipant):\n def new_participation(self, shift):\n return PlaceholderParticipation(\n shift=shift, first_name=self.first_name, last_name=self.last_name\n )\n\n def participation_for(self, shift):\n try:\n return PlaceholderParticipation.objects.get(\n shift=shift, first_name=self.first_name, last_name=self.last_name\n )\n except PlaceholderParticipation.DoesNotExist:\n return None\n\n def all_participations(self):\n return AbstractParticipation.objects.none()\n\n def reverse_signup_action(self, shift):\n raise NotImplementedError\n\n def reverse_event_detail(self, event):\n raise NotImplementedError\n\n @property\n def icon(self):\n return mark_safe(\n f'<span class=\"fa fa-user-tag\" data-toggle=\"tooltip\" data-placement=\"left\" title=\"{_(\"Placeholder\")}\"></span>'\n )\n", "path": "ephios/core/signup/participants.py"}], "after_files": [{"content": "import dataclasses\nimport functools\nfrom datetime import date\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import QuerySet\nfrom django.urls import reverse\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nfrom ephios.core.models import AbstractParticipation, LocalParticipation, Qualification\nfrom ephios.core.models.events import PlaceholderParticipation\n\n\[email protected](frozen=True)\nclass AbstractParticipant:\n first_name: str\n last_name: str\n qualifications: QuerySet = dataclasses.field(hash=False)\n date_of_birth: Optional[date]\n email: Optional[str] # if set to None, no notifications are sent\n\n def get_age(self, today: date = None):\n if self.date_of_birth is None:\n return None\n today, born = today or date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n @property\n def is_minor(self):\n if age := self.get_age():\n return age < 18\n return False\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def new_participation(self, shift):\n raise NotImplementedError\n\n def participation_for(self, shift):\n 
\"\"\"Return the participation object for a shift. Return None if it does not exist.\"\"\"\n raise NotImplementedError\n\n def all_participations(self):\n \"\"\"Return all participations for this participant\"\"\"\n raise NotImplementedError\n\n @functools.lru_cache(maxsize=64)\n def collect_all_qualifications(self) -> set:\n return Qualification.collect_all_included_qualifications(self.qualifications)\n\n def has_qualifications(self, qualifications):\n return set(qualifications) <= self.collect_all_qualifications()\n\n def reverse_signup_action(self, shift):\n raise NotImplementedError\n\n def reverse_event_detail(self, event):\n raise NotImplementedError\n\n @property\n def icon(self):\n return mark_safe('<span class=\"fa fa-user\"></span>')\n\n\[email protected](frozen=True)\nclass LocalUserParticipant(AbstractParticipant):\n user: get_user_model()\n\n def new_participation(self, shift):\n return LocalParticipation(shift=shift, user=self.user)\n\n def participation_for(self, shift):\n try:\n return LocalParticipation.objects.get(shift=shift, user=self.user)\n except LocalParticipation.DoesNotExist:\n return None\n\n def all_participations(self):\n return LocalParticipation.objects.filter(user=self.user)\n\n def reverse_signup_action(self, shift):\n return reverse(\"core:signup_action\", kwargs=dict(pk=shift.pk))\n\n def reverse_event_detail(self, event):\n return event.get_absolute_url()\n\n\[email protected](frozen=True)\nclass PlaceholderParticipant(AbstractParticipant):\n def new_participation(self, shift):\n return PlaceholderParticipation(\n shift=shift, first_name=self.first_name, last_name=self.last_name\n )\n\n def participation_for(self, shift):\n try:\n return PlaceholderParticipation.objects.get(\n shift=shift, first_name=self.first_name, last_name=self.last_name\n )\n except PlaceholderParticipation.DoesNotExist:\n return None\n\n def all_participations(self):\n return AbstractParticipation.objects.none()\n\n def reverse_signup_action(self, shift):\n raise NotImplementedError\n\n def reverse_event_detail(self, event):\n raise NotImplementedError\n\n @property\n def icon(self):\n return mark_safe(\n f'<span class=\"fa fa-user-tag\" data-toggle=\"tooltip\" data-placement=\"left\" title=\"{_(\"Placeholder\")}\"></span>'\n )\n", "path": "ephios/core/signup/participants.py"}]}
num_tokens: 1,327
num_tokens_diff: 152

problem_id: gh_patches_debug_37692
source: rasdani/github-patches
task_type: git_diff
in_source_id: astronomer__astro-sdk-325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Astro Build's Integration Test breaking on 0.8.1 broken on - 0.8.1 , but works with 0.7.0. In this test dag, task_5 joins tables from task_3 (snowflake) and task_4 (postgres). The task’s print statement shows the joined table, suggesting successful ingestion and transformation. However, the error below suggests the the postgres output conn is expected to contain the database element of the snowflake connection. In other words, the task fails becausepostgres_conn doesn’t have the database attribute associated with snowflake_conn. ``` import time from datetime import datetime import pandas as pd from airflow.decorators import dag, task from airflow.models import Variable from airflow.utils import timezone from airflow.utils.dates import days_ago from astro import dataframe as df from astro import sql as aql from astro.sql.table import Table, TempTable @df() def task_1_func(): return pd.DataFrame({'a':[1,2,3]}) @aql.transform(conn_id='postgres_conn') def task_2_func(execution_date: Table): return """SELECT * FROM actor WHERE startdate < '{{ execution_date }}'""" @aql.transform(conn_id='snowflake_conn') def task_3_func(): return """SELECT * FROM "ASTROBUILD"."BUILDSCHEMA"."MYTABLE" LIMIT 10;""" @aql.transform(conn_id='postgres_conn') def task_4_func(): return """SELECT * FROM actor LIMIT 10;""" @df(conn_id='postgres_conn') def task_5_func(task_3: pd.DataFrame, task_4: pd.DataFrame): df=task_3.join(task_4) print(df) return df @dag(schedule_interval='0 0 * * *', start_date=datetime(2022, 4, 15, 11, 28, 8), catchup=False, tags=['tag_1', 'tag_1']) def dag_1(): task_1 = task_1_func() task_2 = task_2_func(output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_2'), execution_date=Table(conn_id='postgres_conn', table_name='execution_date')) task_3 = task_3_func(output_table=Table(conn_id='snowflake_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_3')) task_4 = task_4_func(output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_4')) task_5 = task_5_func(task_3, task_4, output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_5')) dag_obj = dag_1() ``` Error: ``` INFO - Using connection to: id: postgres_conn. Host: 127.0.0.1, Port: 8999, Schema: postgres, Login: postgres, Password: ***, extra: {} *** psycopg2.OperationalError: connection to server at "127.0.0.1", port 8999 failed: FATAL: database "ASTROBUILD" does not exist ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/astro/utils/table_handler.py` Content: ``` 1 import inspect 2 from typing import Optional 3 4 import pandas 5 6 from astro.settings import SCHEMA 7 from astro.sql.table import Table 8 9 10 class TableHandler: 11 def _set_variables_from_first_table(self): 12 """ 13 When we create our SQL operation, we run with the assumption that the first table given is the "main table". 14 This means that a user doesn't need to define default conn_id, database, etc. in the function unless they want 15 to create default values. 
16 """ 17 first_table: Optional[Table] = None 18 if self.op_args: 19 table_index = [x for x, t in enumerate(self.op_args) if type(t) == Table] 20 if table_index: 21 first_table = self.op_args[table_index[0]] 22 elif not first_table: 23 table_kwargs = [ 24 x 25 for x in inspect.signature(self.python_callable).parameters.values() 26 if ( 27 x.annotation == Table 28 and type(self.op_kwargs[x.name]) == Table 29 or x.annotation == pandas.DataFrame 30 and type(self.op_kwargs[x.name]) == Table 31 ) 32 ] 33 if table_kwargs: 34 first_table = self.op_kwargs[table_kwargs[0].name] 35 36 # If there is no first table via op_ags or kwargs, we check the parameters 37 elif not first_table: 38 if self.parameters: 39 param_tables = [t for t in self.parameters.values() if type(t) == Table] 40 if param_tables: 41 first_table = param_tables[0] 42 43 if first_table: 44 self.conn_id = first_table.conn_id or self.conn_id 45 self.database = first_table.database or self.database 46 self.schema = first_table.schema or self.schema 47 self.warehouse = first_table.warehouse or self.warehouse 48 self.role = first_table.role or self.role 49 50 def populate_output_table(self): 51 self.output_table.conn_id = self.output_table.conn_id or self.conn_id 52 self.output_table.database = self.output_table.database or self.database 53 self.output_table.warehouse = self.output_table.warehouse or self.warehouse 54 self.output_table.schema = self.output_table.schema or SCHEMA 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/astro/utils/table_handler.py b/src/astro/utils/table_handler.py --- a/src/astro/utils/table_handler.py +++ b/src/astro/utils/table_handler.py @@ -16,28 +16,51 @@ """ first_table: Optional[Table] = None if self.op_args: - table_index = [x for x, t in enumerate(self.op_args) if type(t) == Table] - if table_index: + table_index = [ + x for x, t in enumerate(self.op_args) if isinstance(t, Table) + ] + conn_id_set = {x.conn_id for x in self.op_args if isinstance(x, Table)} + # Check to see if all tables belong to same conn_id. Otherwise, we this can go wrong for cases + # 1. When we have tables from different DBs. + # 2. When we have tables from different conn_id, since they can be configured with different + # database/schema etc. + if table_index and len(conn_id_set) == 1: first_table = self.op_args[table_index[0]] - elif not first_table: + + if not first_table and self.op_kwargs and self.python_callable: table_kwargs = [ x for x in inspect.signature(self.python_callable).parameters.values() if ( x.annotation == Table - and type(self.op_kwargs[x.name]) == Table + and isinstance(self.op_kwargs[x.name], Table) or x.annotation == pandas.DataFrame - and type(self.op_kwargs[x.name]) == Table + and isinstance(self.op_kwargs[x.name], Table) ) ] - if table_kwargs: + conn_id_set = { + self.op_kwargs[x.name].conn_id + for x in inspect.signature(self.python_callable).parameters.values() + if ( + x.annotation == Table + and isinstance(self.op_kwargs[x.name], Table) + or x.annotation == pandas.DataFrame + and isinstance(self.op_kwargs[x.name], Table) + ) + } + if table_kwargs and len(conn_id_set) == 1: first_table = self.op_kwargs[table_kwargs[0].name] # If there is no first table via op_ags or kwargs, we check the parameters - elif not first_table: + if not first_table and self.parameters: if self.parameters: - param_tables = [t for t in self.parameters.values() if type(t) == Table] - if param_tables: + param_tables = [ + t for t in self.parameters.values() if isinstance(t, Table) + ] + conn_id_set = { + t.conn_id for t in self.parameters.values() if isinstance(t, Table) + } + if param_tables and len(conn_id_set) == 1: first_table = param_tables[0] if first_table:
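To see what the stricter check buys, here is a hedged sketch of the failing scenario from the issue. The table and connection names are the ones used in the example DAG; the snippet is illustrative and not a test from the repository.

```python
from astro.sql.table import Table

snowflake_tbl = Table(table_name="tmp_astro_dag_1_task_3",
                      conn_id="snowflake_conn", schema="tmp_astro")
postgres_tbl = Table(table_name="tmp_astro_dag_1_task_4",
                     conn_id="postgres_conn", schema="tmp_astro")

conn_ids = {t.conn_id for t in (snowflake_tbl, postgres_tbl)}
# The patched handler only adopts conn_id/database/schema defaults from a
# "first table" when every table argument shares a single conn_id, so the
# postgres output table no longer inherits the Snowflake database
# ("ASTROBUILD") by accident.
print(len(conn_ids) == 1)  # False -> keep the defaults supplied by the operator
```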
{"golden_diff": "diff --git a/src/astro/utils/table_handler.py b/src/astro/utils/table_handler.py\n--- a/src/astro/utils/table_handler.py\n+++ b/src/astro/utils/table_handler.py\n@@ -16,28 +16,51 @@\n \"\"\"\n first_table: Optional[Table] = None\n if self.op_args:\n- table_index = [x for x, t in enumerate(self.op_args) if type(t) == Table]\n- if table_index:\n+ table_index = [\n+ x for x, t in enumerate(self.op_args) if isinstance(t, Table)\n+ ]\n+ conn_id_set = {x.conn_id for x in self.op_args if isinstance(x, Table)}\n+ # Check to see if all tables belong to same conn_id. Otherwise, we this can go wrong for cases\n+ # 1. When we have tables from different DBs.\n+ # 2. When we have tables from different conn_id, since they can be configured with different\n+ # database/schema etc.\n+ if table_index and len(conn_id_set) == 1:\n first_table = self.op_args[table_index[0]]\n- elif not first_table:\n+\n+ if not first_table and self.op_kwargs and self.python_callable:\n table_kwargs = [\n x\n for x in inspect.signature(self.python_callable).parameters.values()\n if (\n x.annotation == Table\n- and type(self.op_kwargs[x.name]) == Table\n+ and isinstance(self.op_kwargs[x.name], Table)\n or x.annotation == pandas.DataFrame\n- and type(self.op_kwargs[x.name]) == Table\n+ and isinstance(self.op_kwargs[x.name], Table)\n )\n ]\n- if table_kwargs:\n+ conn_id_set = {\n+ self.op_kwargs[x.name].conn_id\n+ for x in inspect.signature(self.python_callable).parameters.values()\n+ if (\n+ x.annotation == Table\n+ and isinstance(self.op_kwargs[x.name], Table)\n+ or x.annotation == pandas.DataFrame\n+ and isinstance(self.op_kwargs[x.name], Table)\n+ )\n+ }\n+ if table_kwargs and len(conn_id_set) == 1:\n first_table = self.op_kwargs[table_kwargs[0].name]\n \n # If there is no first table via op_ags or kwargs, we check the parameters\n- elif not first_table:\n+ if not first_table and self.parameters:\n if self.parameters:\n- param_tables = [t for t in self.parameters.values() if type(t) == Table]\n- if param_tables:\n+ param_tables = [\n+ t for t in self.parameters.values() if isinstance(t, Table)\n+ ]\n+ conn_id_set = {\n+ t.conn_id for t in self.parameters.values() if isinstance(t, Table)\n+ }\n+ if param_tables and len(conn_id_set) == 1:\n first_table = param_tables[0]\n \n if first_table:\n", "issue": "Astro Build's Integration Test breaking on 0.8.1\nbroken on - 0.8.1 , but works with 0.7.0.\n\nIn this test dag, task_5 joins tables from task_3 (snowflake) and task_4 (postgres). The task\u2019s print statement shows the joined table, suggesting successful ingestion and transformation. 
However, the error below suggests the the postgres output conn is expected to contain the database element of the snowflake connection.\nIn other words, the task fails becausepostgres_conn doesn\u2019t have the database attribute associated with snowflake_conn.\n\n```\nimport time\nfrom datetime import datetime\n\nimport pandas as pd\nfrom airflow.decorators import dag, task\nfrom airflow.models import Variable\nfrom airflow.utils import timezone\nfrom airflow.utils.dates import days_ago\nfrom astro import dataframe as df\nfrom astro import sql as aql\nfrom astro.sql.table import Table, TempTable\n\n@df()\ndef task_1_func():\n return pd.DataFrame({'a':[1,2,3]})\n\[email protected](conn_id='postgres_conn')\ndef task_2_func(execution_date: Table):\n return \"\"\"SELECT * FROM actor WHERE startdate < '{{ execution_date }}'\"\"\"\n\[email protected](conn_id='snowflake_conn')\ndef task_3_func():\n return \"\"\"SELECT * FROM \"ASTROBUILD\".\"BUILDSCHEMA\".\"MYTABLE\" LIMIT 10;\"\"\"\n\[email protected](conn_id='postgres_conn')\ndef task_4_func():\n return \"\"\"SELECT * FROM actor LIMIT 10;\"\"\"\n\n@df(conn_id='postgres_conn')\ndef task_5_func(task_3: pd.DataFrame, task_4: pd.DataFrame):\n df=task_3.join(task_4)\n print(df)\n return df\n\n@dag(schedule_interval='0 0 * * *', start_date=datetime(2022, 4, 15, 11, 28, 8), catchup=False, tags=['tag_1', 'tag_1'])\ndef dag_1():\n task_1 = task_1_func()\n task_2 = task_2_func(output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_2'), execution_date=Table(conn_id='postgres_conn', table_name='execution_date'))\n task_3 = task_3_func(output_table=Table(conn_id='snowflake_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_3'))\n task_4 = task_4_func(output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_4'))\n task_5 = task_5_func(task_3, task_4, output_table=Table(conn_id='postgres_conn', schema='tmp_astro', table_name='tmp_astro_dag_1_task_5'))\n\ndag_obj = dag_1()\n```\n\nError:\n\n```\nINFO - Using connection to: id: postgres_conn. Host: 127.0.0.1, Port: 8999, Schema: postgres, Login: postgres, Password: ***, extra: {}\n*** psycopg2.OperationalError: connection to server at \"127.0.0.1\", port 8999 failed: FATAL: database \"ASTROBUILD\" does not exist\n```\n", "before_files": [{"content": "import inspect\nfrom typing import Optional\n\nimport pandas\n\nfrom astro.settings import SCHEMA\nfrom astro.sql.table import Table\n\n\nclass TableHandler:\n def _set_variables_from_first_table(self):\n \"\"\"\n When we create our SQL operation, we run with the assumption that the first table given is the \"main table\".\n This means that a user doesn't need to define default conn_id, database, etc. 
in the function unless they want\n to create default values.\n \"\"\"\n first_table: Optional[Table] = None\n if self.op_args:\n table_index = [x for x, t in enumerate(self.op_args) if type(t) == Table]\n if table_index:\n first_table = self.op_args[table_index[0]]\n elif not first_table:\n table_kwargs = [\n x\n for x in inspect.signature(self.python_callable).parameters.values()\n if (\n x.annotation == Table\n and type(self.op_kwargs[x.name]) == Table\n or x.annotation == pandas.DataFrame\n and type(self.op_kwargs[x.name]) == Table\n )\n ]\n if table_kwargs:\n first_table = self.op_kwargs[table_kwargs[0].name]\n\n # If there is no first table via op_ags or kwargs, we check the parameters\n elif not first_table:\n if self.parameters:\n param_tables = [t for t in self.parameters.values() if type(t) == Table]\n if param_tables:\n first_table = param_tables[0]\n\n if first_table:\n self.conn_id = first_table.conn_id or self.conn_id\n self.database = first_table.database or self.database\n self.schema = first_table.schema or self.schema\n self.warehouse = first_table.warehouse or self.warehouse\n self.role = first_table.role or self.role\n\n def populate_output_table(self):\n self.output_table.conn_id = self.output_table.conn_id or self.conn_id\n self.output_table.database = self.output_table.database or self.database\n self.output_table.warehouse = self.output_table.warehouse or self.warehouse\n self.output_table.schema = self.output_table.schema or SCHEMA\n", "path": "src/astro/utils/table_handler.py"}], "after_files": [{"content": "import inspect\nfrom typing import Optional\n\nimport pandas\n\nfrom astro.settings import SCHEMA\nfrom astro.sql.table import Table\n\n\nclass TableHandler:\n def _set_variables_from_first_table(self):\n \"\"\"\n When we create our SQL operation, we run with the assumption that the first table given is the \"main table\".\n This means that a user doesn't need to define default conn_id, database, etc. in the function unless they want\n to create default values.\n \"\"\"\n first_table: Optional[Table] = None\n if self.op_args:\n table_index = [\n x for x, t in enumerate(self.op_args) if isinstance(t, Table)\n ]\n conn_id_set = {x.conn_id for x in self.op_args if isinstance(x, Table)}\n # Check to see if all tables belong to same conn_id. Otherwise, we this can go wrong for cases\n # 1. When we have tables from different DBs.\n # 2. 
When we have tables from different conn_id, since they can be configured with different\n # database/schema etc.\n if table_index and len(conn_id_set) == 1:\n first_table = self.op_args[table_index[0]]\n\n if not first_table and self.op_kwargs and self.python_callable:\n table_kwargs = [\n x\n for x in inspect.signature(self.python_callable).parameters.values()\n if (\n x.annotation == Table\n and isinstance(self.op_kwargs[x.name], Table)\n or x.annotation == pandas.DataFrame\n and isinstance(self.op_kwargs[x.name], Table)\n )\n ]\n conn_id_set = {\n self.op_kwargs[x.name].conn_id\n for x in inspect.signature(self.python_callable).parameters.values()\n if (\n x.annotation == Table\n and isinstance(self.op_kwargs[x.name], Table)\n or x.annotation == pandas.DataFrame\n and isinstance(self.op_kwargs[x.name], Table)\n )\n }\n if table_kwargs and len(conn_id_set) == 1:\n first_table = self.op_kwargs[table_kwargs[0].name]\n\n # If there is no first table via op_ags or kwargs, we check the parameters\n if not first_table and self.parameters:\n if self.parameters:\n param_tables = [\n t for t in self.parameters.values() if isinstance(t, Table)\n ]\n conn_id_set = {\n t.conn_id for t in self.parameters.values() if isinstance(t, Table)\n }\n if param_tables and len(conn_id_set) == 1:\n first_table = param_tables[0]\n\n if first_table:\n self.conn_id = first_table.conn_id or self.conn_id\n self.database = first_table.database or self.database\n self.schema = first_table.schema or self.schema\n self.warehouse = first_table.warehouse or self.warehouse\n self.role = first_table.role or self.role\n\n def populate_output_table(self):\n self.output_table.conn_id = self.output_table.conn_id or self.conn_id\n self.output_table.database = self.output_table.database or self.database\n self.output_table.warehouse = self.output_table.warehouse or self.warehouse\n self.output_table.schema = self.output_table.schema or SCHEMA\n", "path": "src/astro/utils/table_handler.py"}]}
num_tokens: 1,539
num_tokens_diff: 635

problem_id: gh_patches_debug_14014
source: rasdani/github-patches
task_type: git_diff
in_source_id: sanic-org__sanic-1857
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nightly build fails due to websockets version not matching setup.py on setup.py: >=0.7.0,<0.9 on tox.ini: >=0.7.0,<0.8 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/websocket.py` Content: ``` 1 from typing import ( 2 Any, 3 Awaitable, 4 Callable, 5 Dict, 6 MutableMapping, 7 Optional, 8 Union, 9 ) 10 11 from httptools import HttpParserUpgrade # type: ignore 12 from websockets import ( # type: ignore 13 ConnectionClosed, 14 InvalidHandshake, 15 WebSocketCommonProtocol, 16 handshake, 17 ) 18 19 from sanic.exceptions import InvalidUsage 20 from sanic.server import HttpProtocol 21 22 23 __all__ = ["ConnectionClosed", "WebSocketProtocol", "WebSocketConnection"] 24 25 ASIMessage = MutableMapping[str, Any] 26 27 28 class WebSocketProtocol(HttpProtocol): 29 def __init__( 30 self, 31 *args, 32 websocket_timeout=10, 33 websocket_max_size=None, 34 websocket_max_queue=None, 35 websocket_read_limit=2 ** 16, 36 websocket_write_limit=2 ** 16, 37 **kwargs 38 ): 39 super().__init__(*args, **kwargs) 40 self.websocket = None 41 # self.app = None 42 self.websocket_timeout = websocket_timeout 43 self.websocket_max_size = websocket_max_size 44 self.websocket_max_queue = websocket_max_queue 45 self.websocket_read_limit = websocket_read_limit 46 self.websocket_write_limit = websocket_write_limit 47 48 # timeouts make no sense for websocket routes 49 def request_timeout_callback(self): 50 if self.websocket is None: 51 super().request_timeout_callback() 52 53 def response_timeout_callback(self): 54 if self.websocket is None: 55 super().response_timeout_callback() 56 57 def keep_alive_timeout_callback(self): 58 if self.websocket is None: 59 super().keep_alive_timeout_callback() 60 61 def connection_lost(self, exc): 62 if self.websocket is not None: 63 self.websocket.connection_lost(exc) 64 super().connection_lost(exc) 65 66 def data_received(self, data): 67 if self.websocket is not None: 68 # pass the data to the websocket protocol 69 self.websocket.data_received(data) 70 else: 71 try: 72 super().data_received(data) 73 except HttpParserUpgrade: 74 # this is okay, it just indicates we've got an upgrade request 75 pass 76 77 def write_response(self, response): 78 if self.websocket is not None: 79 # websocket requests do not write a response 80 self.transport.close() 81 else: 82 super().write_response(response) 83 84 async def websocket_handshake(self, request, subprotocols=None): 85 # let the websockets package do the handshake with the client 86 headers = {} 87 88 try: 89 key = handshake.check_request(request.headers) 90 handshake.build_response(headers, key) 91 except InvalidHandshake: 92 raise InvalidUsage("Invalid websocket request") 93 94 subprotocol = None 95 if subprotocols and "Sec-Websocket-Protocol" in request.headers: 96 # select a subprotocol 97 client_subprotocols = [ 98 p.strip() 99 for p in request.headers["Sec-Websocket-Protocol"].split(",") 100 ] 101 for p in client_subprotocols: 102 if p in subprotocols: 103 subprotocol = p 104 headers["Sec-Websocket-Protocol"] = subprotocol 105 break 106 107 # write the 101 response back to the client 108 rv = b"HTTP/1.1 101 Switching Protocols\r\n" 109 for k, v in headers.items(): 110 rv += k.encode("utf-8") + b": " + v.encode("utf-8") + b"\r\n" 111 rv += b"\r\n" 112 request.transport.write(rv) 113 114 # hook up the websocket protocol 115 
self.websocket = WebSocketCommonProtocol( 116 timeout=self.websocket_timeout, 117 max_size=self.websocket_max_size, 118 max_queue=self.websocket_max_queue, 119 read_limit=self.websocket_read_limit, 120 write_limit=self.websocket_write_limit, 121 ) 122 # Following two lines are required for websockets 8.x 123 self.websocket.is_client = False 124 self.websocket.side = "server" 125 self.websocket.subprotocol = subprotocol 126 self.websocket.connection_made(request.transport) 127 self.websocket.connection_open() 128 return self.websocket 129 130 131 class WebSocketConnection: 132 133 # TODO 134 # - Implement ping/pong 135 136 def __init__( 137 self, 138 send: Callable[[ASIMessage], Awaitable[None]], 139 receive: Callable[[], Awaitable[ASIMessage]], 140 ) -> None: 141 self._send = send 142 self._receive = receive 143 144 async def send(self, data: Union[str, bytes], *args, **kwargs) -> None: 145 message: Dict[str, Union[str, bytes]] = {"type": "websocket.send"} 146 147 if isinstance(data, bytes): 148 message.update({"bytes": data}) 149 else: 150 message.update({"text": str(data)}) 151 152 await self._send(message) 153 154 async def recv(self, *args, **kwargs) -> Optional[str]: 155 message = await self._receive() 156 157 if message["type"] == "websocket.receive": 158 return message["text"] 159 elif message["type"] == "websocket.disconnect": 160 pass 161 162 return None 163 164 receive = recv 165 166 async def accept(self) -> None: 167 await self._send({"type": "websocket.accept", "subprotocol": ""}) 168 169 async def close(self) -> None: 170 pass 171 ``` Path: `setup.py` Content: ``` 1 """ 2 Sanic 3 """ 4 import codecs 5 import os 6 import re 7 import sys 8 from distutils.util import strtobool 9 10 from setuptools import setup 11 from setuptools.command.test import test as TestCommand 12 13 14 class PyTest(TestCommand): 15 """ 16 Provide a Test runner to be used from setup.py to run unit tests 17 """ 18 19 user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] 20 21 def initialize_options(self): 22 TestCommand.initialize_options(self) 23 self.pytest_args = "" 24 25 def run_tests(self): 26 import shlex 27 import pytest 28 29 errno = pytest.main(shlex.split(self.pytest_args)) 30 sys.exit(errno) 31 32 33 def open_local(paths, mode="r", encoding="utf8"): 34 path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths) 35 36 return codecs.open(path, mode, encoding) 37 38 39 with open_local(["sanic", "__version__.py"], encoding="latin1") as fp: 40 try: 41 version = re.findall(r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M)[0] 42 except IndexError: 43 raise RuntimeError("Unable to determine version.") 44 45 with open_local(["README.rst"]) as rm: 46 long_description = rm.read() 47 48 setup_kwargs = { 49 "name": "sanic", 50 "version": version, 51 "url": "http://github.com/huge-success/sanic/", 52 "license": "MIT", 53 "author": "Sanic Community", 54 "author_email": "[email protected]", 55 "description": ( 56 "A web server and web framework that's written to go fast. Build fast. Run fast." 
57 ), 58 "long_description": long_description, 59 "packages": ["sanic"], 60 "platforms": "any", 61 "python_requires": ">=3.6", 62 "classifiers": [ 63 "Development Status :: 4 - Beta", 64 "Environment :: Web Environment", 65 "License :: OSI Approved :: MIT License", 66 "Programming Language :: Python :: 3.6", 67 "Programming Language :: Python :: 3.7", 68 "Programming Language :: Python :: 3.8", 69 ], 70 "entry_points": {"console_scripts": ["sanic = sanic.__main__:main"]}, 71 } 72 73 env_dependency = '; sys_platform != "win32" ' 'and implementation_name == "cpython"' 74 ujson = "ujson>=1.35" + env_dependency 75 uvloop = "uvloop>=0.5.3" + env_dependency 76 77 requirements = [ 78 "httptools>=0.0.10", 79 uvloop, 80 ujson, 81 "aiofiles>=0.3.0", 82 "websockets>=7.0,<9.0", 83 "multidict>=4.0,<5.0", 84 "httpx==0.11.1", 85 ] 86 87 tests_require = [ 88 "pytest==5.2.1", 89 "multidict>=4.0,<5.0", 90 "gunicorn", 91 "pytest-cov", 92 "httpcore==0.3.0", 93 "beautifulsoup4", 94 uvloop, 95 ujson, 96 "pytest-sanic", 97 "pytest-sugar", 98 "pytest-benchmark", 99 ] 100 101 docs_require = [ 102 "sphinx>=2.1.2", 103 "sphinx_rtd_theme", 104 "recommonmark>=0.5.0", 105 "docutils", 106 "pygments", 107 ] 108 109 dev_require = tests_require + [ 110 "aiofiles", 111 "tox", 112 "black", 113 "flake8", 114 "bandit", 115 "towncrier", 116 ] 117 118 all_require = dev_require + docs_require 119 120 if strtobool(os.environ.get("SANIC_NO_UJSON", "no")): 121 print("Installing without uJSON") 122 requirements.remove(ujson) 123 tests_require.remove(ujson) 124 125 # 'nt' means windows OS 126 if strtobool(os.environ.get("SANIC_NO_UVLOOP", "no")): 127 print("Installing without uvLoop") 128 requirements.remove(uvloop) 129 tests_require.remove(uvloop) 130 131 extras_require = { 132 "test": tests_require, 133 "dev": dev_require, 134 "docs": docs_require, 135 "all": all_require, 136 } 137 138 setup_kwargs["install_requires"] = requirements 139 setup_kwargs["tests_require"] = tests_require 140 setup_kwargs["extras_require"] = extras_require 141 setup_kwargs["cmdclass"] = {"test": PyTest} 142 setup(**setup_kwargs) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/websocket.py b/sanic/websocket.py --- a/sanic/websocket.py +++ b/sanic/websocket.py @@ -113,7 +113,7 @@ # hook up the websocket protocol self.websocket = WebSocketCommonProtocol( - timeout=self.websocket_timeout, + close_timeout=self.websocket_timeout, max_size=self.websocket_max_size, max_queue=self.websocket_max_queue, read_limit=self.websocket_read_limit, diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -79,7 +79,7 @@ uvloop, ujson, "aiofiles>=0.3.0", - "websockets>=7.0,<9.0", + "websockets>=8.1,<9.0", "multidict>=4.0,<5.0", "httpx==0.11.1", ]
{"golden_diff": "diff --git a/sanic/websocket.py b/sanic/websocket.py\n--- a/sanic/websocket.py\n+++ b/sanic/websocket.py\n@@ -113,7 +113,7 @@\n \n # hook up the websocket protocol\n self.websocket = WebSocketCommonProtocol(\n- timeout=self.websocket_timeout,\n+ close_timeout=self.websocket_timeout,\n max_size=self.websocket_max_size,\n max_queue=self.websocket_max_queue,\n read_limit=self.websocket_read_limit,\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -79,7 +79,7 @@\n uvloop,\n ujson,\n \"aiofiles>=0.3.0\",\n- \"websockets>=7.0,<9.0\",\n+ \"websockets>=8.1,<9.0\",\n \"multidict>=4.0,<5.0\",\n \"httpx==0.11.1\",\n ]\n", "issue": "Nightly build fails due to websockets version not matching setup.py\non setup.py: >=0.7.0,<0.9\r\non tox.ini: >=0.7.0,<0.8\n", "before_files": [{"content": "from typing import (\n Any,\n Awaitable,\n Callable,\n Dict,\n MutableMapping,\n Optional,\n Union,\n)\n\nfrom httptools import HttpParserUpgrade # type: ignore\nfrom websockets import ( # type: ignore\n ConnectionClosed,\n InvalidHandshake,\n WebSocketCommonProtocol,\n handshake,\n)\n\nfrom sanic.exceptions import InvalidUsage\nfrom sanic.server import HttpProtocol\n\n\n__all__ = [\"ConnectionClosed\", \"WebSocketProtocol\", \"WebSocketConnection\"]\n\nASIMessage = MutableMapping[str, Any]\n\n\nclass WebSocketProtocol(HttpProtocol):\n def __init__(\n self,\n *args,\n websocket_timeout=10,\n websocket_max_size=None,\n websocket_max_queue=None,\n websocket_read_limit=2 ** 16,\n websocket_write_limit=2 ** 16,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.websocket = None\n # self.app = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_max_queue = websocket_max_queue\n self.websocket_read_limit = websocket_read_limit\n self.websocket_write_limit = websocket_write_limit\n\n # timeouts make no sense for websocket routes\n def request_timeout_callback(self):\n if self.websocket is None:\n super().request_timeout_callback()\n\n def response_timeout_callback(self):\n if self.websocket is None:\n super().response_timeout_callback()\n\n def keep_alive_timeout_callback(self):\n if self.websocket is None:\n super().keep_alive_timeout_callback()\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n # pass the data to the websocket protocol\n self.websocket.data_received(data)\n else:\n try:\n super().data_received(data)\n except HttpParserUpgrade:\n # this is okay, it just indicates we've got an upgrade request\n pass\n\n def write_response(self, response):\n if self.websocket is not None:\n # websocket requests do not write a response\n self.transport.close()\n else:\n super().write_response(response)\n\n async def websocket_handshake(self, request, subprotocols=None):\n # let the websockets package do the handshake with the client\n headers = {}\n\n try:\n key = handshake.check_request(request.headers)\n handshake.build_response(headers, key)\n except InvalidHandshake:\n raise InvalidUsage(\"Invalid websocket request\")\n\n subprotocol = None\n if subprotocols and \"Sec-Websocket-Protocol\" in request.headers:\n # select a subprotocol\n client_subprotocols = [\n p.strip()\n for p in request.headers[\"Sec-Websocket-Protocol\"].split(\",\")\n ]\n for p in client_subprotocols:\n if p in subprotocols:\n subprotocol = p\n headers[\"Sec-Websocket-Protocol\"] = 
subprotocol\n break\n\n # write the 101 response back to the client\n rv = b\"HTTP/1.1 101 Switching Protocols\\r\\n\"\n for k, v in headers.items():\n rv += k.encode(\"utf-8\") + b\": \" + v.encode(\"utf-8\") + b\"\\r\\n\"\n rv += b\"\\r\\n\"\n request.transport.write(rv)\n\n # hook up the websocket protocol\n self.websocket = WebSocketCommonProtocol(\n timeout=self.websocket_timeout,\n max_size=self.websocket_max_size,\n max_queue=self.websocket_max_queue,\n read_limit=self.websocket_read_limit,\n write_limit=self.websocket_write_limit,\n )\n # Following two lines are required for websockets 8.x\n self.websocket.is_client = False\n self.websocket.side = \"server\"\n self.websocket.subprotocol = subprotocol\n self.websocket.connection_made(request.transport)\n self.websocket.connection_open()\n return self.websocket\n\n\nclass WebSocketConnection:\n\n # TODO\n # - Implement ping/pong\n\n def __init__(\n self,\n send: Callable[[ASIMessage], Awaitable[None]],\n receive: Callable[[], Awaitable[ASIMessage]],\n ) -> None:\n self._send = send\n self._receive = receive\n\n async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:\n message: Dict[str, Union[str, bytes]] = {\"type\": \"websocket.send\"}\n\n if isinstance(data, bytes):\n message.update({\"bytes\": data})\n else:\n message.update({\"text\": str(data)})\n\n await self._send(message)\n\n async def recv(self, *args, **kwargs) -> Optional[str]:\n message = await self._receive()\n\n if message[\"type\"] == \"websocket.receive\":\n return message[\"text\"]\n elif message[\"type\"] == \"websocket.disconnect\":\n pass\n\n return None\n\n receive = recv\n\n async def accept(self) -> None:\n await self._send({\"type\": \"websocket.accept\", \"subprotocol\": \"\"})\n\n async def close(self) -> None:\n pass\n", "path": "sanic/websocket.py"}, {"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nimport sys\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n \"\"\"\n Provide a Test runner to be used from setup.py to run unit tests\n \"\"\"\n\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def run_tests(self):\n import shlex\n import pytest\n\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ndef open_local(paths, mode=\"r\", encoding=\"utf8\"):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local([\"sanic\", \"__version__.py\"], encoding=\"latin1\") as fp:\n try:\n version = re.findall(r\"^__version__ = \\\"([^']+)\\\"\\r?$\", fp.read(), re.M)[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nwith open_local([\"README.rst\"]) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n \"name\": \"sanic\",\n \"version\": version,\n \"url\": \"http://github.com/huge-success/sanic/\",\n \"license\": \"MIT\",\n \"author\": \"Sanic Community\",\n \"author_email\": \"[email protected]\",\n \"description\": (\n \"A web server and web framework that's written to go fast. Build fast. 
Run fast.\"\n ),\n \"long_description\": long_description,\n \"packages\": [\"sanic\"],\n \"platforms\": \"any\",\n \"python_requires\": \">=3.6\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n \"entry_points\": {\"console_scripts\": [\"sanic = sanic.__main__:main\"]},\n}\n\nenv_dependency = '; sys_platform != \"win32\" ' 'and implementation_name == \"cpython\"'\nujson = \"ujson>=1.35\" + env_dependency\nuvloop = \"uvloop>=0.5.3\" + env_dependency\n\nrequirements = [\n \"httptools>=0.0.10\",\n uvloop,\n ujson,\n \"aiofiles>=0.3.0\",\n \"websockets>=7.0,<9.0\",\n \"multidict>=4.0,<5.0\",\n \"httpx==0.11.1\",\n]\n\ntests_require = [\n \"pytest==5.2.1\",\n \"multidict>=4.0,<5.0\",\n \"gunicorn\",\n \"pytest-cov\",\n \"httpcore==0.3.0\",\n \"beautifulsoup4\",\n uvloop,\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n \"pytest-benchmark\",\n]\n\ndocs_require = [\n \"sphinx>=2.1.2\",\n \"sphinx_rtd_theme\",\n \"recommonmark>=0.5.0\",\n \"docutils\",\n \"pygments\",\n]\n\ndev_require = tests_require + [\n \"aiofiles\",\n \"tox\",\n \"black\",\n \"flake8\",\n \"bandit\",\n \"towncrier\",\n]\n\nall_require = dev_require + docs_require\n\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n tests_require.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n tests_require.remove(uvloop)\n\nextras_require = {\n \"test\": tests_require,\n \"dev\": dev_require,\n \"docs\": docs_require,\n \"all\": all_require,\n}\n\nsetup_kwargs[\"install_requires\"] = requirements\nsetup_kwargs[\"tests_require\"] = tests_require\nsetup_kwargs[\"extras_require\"] = extras_require\nsetup_kwargs[\"cmdclass\"] = {\"test\": PyTest}\nsetup(**setup_kwargs)\n", "path": "setup.py"}], "after_files": [{"content": "from typing import (\n Any,\n Awaitable,\n Callable,\n Dict,\n MutableMapping,\n Optional,\n Union,\n)\n\nfrom httptools import HttpParserUpgrade # type: ignore\nfrom websockets import ( # type: ignore\n ConnectionClosed,\n InvalidHandshake,\n WebSocketCommonProtocol,\n handshake,\n)\n\nfrom sanic.exceptions import InvalidUsage\nfrom sanic.server import HttpProtocol\n\n\n__all__ = [\"ConnectionClosed\", \"WebSocketProtocol\", \"WebSocketConnection\"]\n\nASIMessage = MutableMapping[str, Any]\n\n\nclass WebSocketProtocol(HttpProtocol):\n def __init__(\n self,\n *args,\n websocket_timeout=10,\n websocket_max_size=None,\n websocket_max_queue=None,\n websocket_read_limit=2 ** 16,\n websocket_write_limit=2 ** 16,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.websocket = None\n # self.app = None\n self.websocket_timeout = websocket_timeout\n self.websocket_max_size = websocket_max_size\n self.websocket_max_queue = websocket_max_queue\n self.websocket_read_limit = websocket_read_limit\n self.websocket_write_limit = websocket_write_limit\n\n # timeouts make no sense for websocket routes\n def request_timeout_callback(self):\n if self.websocket is None:\n super().request_timeout_callback()\n\n def response_timeout_callback(self):\n if self.websocket is None:\n super().response_timeout_callback()\n\n def keep_alive_timeout_callback(self):\n if self.websocket is None:\n 
super().keep_alive_timeout_callback()\n\n def connection_lost(self, exc):\n if self.websocket is not None:\n self.websocket.connection_lost(exc)\n super().connection_lost(exc)\n\n def data_received(self, data):\n if self.websocket is not None:\n # pass the data to the websocket protocol\n self.websocket.data_received(data)\n else:\n try:\n super().data_received(data)\n except HttpParserUpgrade:\n # this is okay, it just indicates we've got an upgrade request\n pass\n\n def write_response(self, response):\n if self.websocket is not None:\n # websocket requests do not write a response\n self.transport.close()\n else:\n super().write_response(response)\n\n async def websocket_handshake(self, request, subprotocols=None):\n # let the websockets package do the handshake with the client\n headers = {}\n\n try:\n key = handshake.check_request(request.headers)\n handshake.build_response(headers, key)\n except InvalidHandshake:\n raise InvalidUsage(\"Invalid websocket request\")\n\n subprotocol = None\n if subprotocols and \"Sec-Websocket-Protocol\" in request.headers:\n # select a subprotocol\n client_subprotocols = [\n p.strip()\n for p in request.headers[\"Sec-Websocket-Protocol\"].split(\",\")\n ]\n for p in client_subprotocols:\n if p in subprotocols:\n subprotocol = p\n headers[\"Sec-Websocket-Protocol\"] = subprotocol\n break\n\n # write the 101 response back to the client\n rv = b\"HTTP/1.1 101 Switching Protocols\\r\\n\"\n for k, v in headers.items():\n rv += k.encode(\"utf-8\") + b\": \" + v.encode(\"utf-8\") + b\"\\r\\n\"\n rv += b\"\\r\\n\"\n request.transport.write(rv)\n\n # hook up the websocket protocol\n self.websocket = WebSocketCommonProtocol(\n close_timeout=self.websocket_timeout,\n max_size=self.websocket_max_size,\n max_queue=self.websocket_max_queue,\n read_limit=self.websocket_read_limit,\n write_limit=self.websocket_write_limit,\n )\n # Following two lines are required for websockets 8.x\n self.websocket.is_client = False\n self.websocket.side = \"server\"\n self.websocket.subprotocol = subprotocol\n self.websocket.connection_made(request.transport)\n self.websocket.connection_open()\n return self.websocket\n\n\nclass WebSocketConnection:\n\n # TODO\n # - Implement ping/pong\n\n def __init__(\n self,\n send: Callable[[ASIMessage], Awaitable[None]],\n receive: Callable[[], Awaitable[ASIMessage]],\n ) -> None:\n self._send = send\n self._receive = receive\n\n async def send(self, data: Union[str, bytes], *args, **kwargs) -> None:\n message: Dict[str, Union[str, bytes]] = {\"type\": \"websocket.send\"}\n\n if isinstance(data, bytes):\n message.update({\"bytes\": data})\n else:\n message.update({\"text\": str(data)})\n\n await self._send(message)\n\n async def recv(self, *args, **kwargs) -> Optional[str]:\n message = await self._receive()\n\n if message[\"type\"] == \"websocket.receive\":\n return message[\"text\"]\n elif message[\"type\"] == \"websocket.disconnect\":\n pass\n\n return None\n\n receive = recv\n\n async def accept(self) -> None:\n await self._send({\"type\": \"websocket.accept\", \"subprotocol\": \"\"})\n\n async def close(self) -> None:\n pass\n", "path": "sanic/websocket.py"}, {"content": "\"\"\"\nSanic\n\"\"\"\nimport codecs\nimport os\nimport re\nimport sys\nfrom distutils.util import strtobool\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n \"\"\"\n Provide a Test runner to be used from setup.py to run unit tests\n \"\"\"\n\n user_options = [(\"pytest-args=\", \"a\", \"Arguments to 
pass to pytest\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = \"\"\n\n def run_tests(self):\n import shlex\n import pytest\n\n errno = pytest.main(shlex.split(self.pytest_args))\n sys.exit(errno)\n\n\ndef open_local(paths, mode=\"r\", encoding=\"utf8\"):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths)\n\n return codecs.open(path, mode, encoding)\n\n\nwith open_local([\"sanic\", \"__version__.py\"], encoding=\"latin1\") as fp:\n try:\n version = re.findall(r\"^__version__ = \\\"([^']+)\\\"\\r?$\", fp.read(), re.M)[0]\n except IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\nwith open_local([\"README.rst\"]) as rm:\n long_description = rm.read()\n\nsetup_kwargs = {\n \"name\": \"sanic\",\n \"version\": version,\n \"url\": \"http://github.com/huge-success/sanic/\",\n \"license\": \"MIT\",\n \"author\": \"Sanic Community\",\n \"author_email\": \"[email protected]\",\n \"description\": (\n \"A web server and web framework that's written to go fast. Build fast. Run fast.\"\n ),\n \"long_description\": long_description,\n \"packages\": [\"sanic\"],\n \"platforms\": \"any\",\n \"python_requires\": \">=3.6\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n \"entry_points\": {\"console_scripts\": [\"sanic = sanic.__main__:main\"]},\n}\n\nenv_dependency = '; sys_platform != \"win32\" ' 'and implementation_name == \"cpython\"'\nujson = \"ujson>=1.35\" + env_dependency\nuvloop = \"uvloop>=0.5.3\" + env_dependency\n\nrequirements = [\n \"httptools>=0.0.10\",\n uvloop,\n ujson,\n \"aiofiles>=0.3.0\",\n \"websockets>=8.1,<9.0\",\n \"multidict>=4.0,<5.0\",\n \"httpx==0.11.1\",\n]\n\ntests_require = [\n \"pytest==5.2.1\",\n \"multidict>=4.0,<5.0\",\n \"gunicorn\",\n \"pytest-cov\",\n \"httpcore==0.3.0\",\n \"beautifulsoup4\",\n uvloop,\n ujson,\n \"pytest-sanic\",\n \"pytest-sugar\",\n \"pytest-benchmark\",\n]\n\ndocs_require = [\n \"sphinx>=2.1.2\",\n \"sphinx_rtd_theme\",\n \"recommonmark>=0.5.0\",\n \"docutils\",\n \"pygments\",\n]\n\ndev_require = tests_require + [\n \"aiofiles\",\n \"tox\",\n \"black\",\n \"flake8\",\n \"bandit\",\n \"towncrier\",\n]\n\nall_require = dev_require + docs_require\n\nif strtobool(os.environ.get(\"SANIC_NO_UJSON\", \"no\")):\n print(\"Installing without uJSON\")\n requirements.remove(ujson)\n tests_require.remove(ujson)\n\n# 'nt' means windows OS\nif strtobool(os.environ.get(\"SANIC_NO_UVLOOP\", \"no\")):\n print(\"Installing without uvLoop\")\n requirements.remove(uvloop)\n tests_require.remove(uvloop)\n\nextras_require = {\n \"test\": tests_require,\n \"dev\": dev_require,\n \"docs\": docs_require,\n \"all\": all_require,\n}\n\nsetup_kwargs[\"install_requires\"] = requirements\nsetup_kwargs[\"tests_require\"] = tests_require\nsetup_kwargs[\"extras_require\"] = extras_require\nsetup_kwargs[\"cmdclass\"] = {\"test\": PyTest}\nsetup(**setup_kwargs)\n", "path": "setup.py"}]}
num_tokens: 3,133
num_tokens_diff: 209

problem_id: gh_patches_debug_29685
source: rasdani/github-patches
task_type: git_diff
in_source_id: avocado-framework__avocado-4644
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Functional test for requirements resolver fails easily The PASS or FAIL result for the functional tests for the requirement resolver is very much a hit/miss, but it's somewhat easy to reproduce with: ``` $ export CI=1 $ dnf remove hello $ dnf clean all $ avocado run --test-runner=nrunner selftests/functional/test_requirements.py JOB ID : 35d4cf58034c04eb47be0276197f8ae5f17af82b JOB LOG : /home/cleber/avocado/job-results/job-2021-05-26T23.55-35d4cf5/job.log (3/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_success: STARTED (2/4) selftests/functional/test_requirements.py:BasicTest.test_single_fail: STARTED (1/4) selftests/functional/test_requirements.py:BasicTest.test_single_success: STARTED (4/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_fails: STARTED (2/4) selftests/functional/test_requirements.py:BasicTest.test_single_fail: PASS (4.75 s) (1/4) selftests/functional/test_requirements.py:BasicTest.test_single_success: PASS (5.29 s) (3/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_success: FAIL (14.22 s) (4/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_fails: PASS (20.34 s) RESULTS : PASS 3 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0 JOB HTML : /home/cleber/avocado/job-results/job-2021-05-26T23.55-35d4cf5/results.html JOB TIME : 21.52 s ``` The reason for the failure is that, multiple tests contain: ``` :avocado: requirement={"type": "package", "name": "hello"} ``` And because the nrunner will run those tests in parallel, it will also run those requirements in parallel. But, given that dnf will hold a systemwide lock, the others can fail. This is reproducible if you try to run multiple `avocado-runner-requirement-package` in parallel manually, such as this which succeeds: ``` {'status': 'running', 'time': 1345096.780925351} {'status': 'running', 'time': 1345097.281267814} {'type': 'stdout', 'log': b'Package(s) hello installed successfully', 'status': 'running', 'time': 1345097.281664319} {'type': 'stderr', 'log': b'', 'status': 'running', 'time': 1345097.281689778} {'result': 'pass', 'status': 'finished', 'time': 1345097.281706051} ``` And this one that ends up failing: ``` {'status': 'running', 'time': 1345097.18655851} {'status': 'running', 'time': 1345097.687243183} {'status': 'running', 'time': 1345098.187887105} {'type': 'stdout', 'log': b'', 'status': 'running', 'time': 1345098.188174427} {'type': 'stderr', 'log': b'Failed to install hello. Check the package(s) name and if sudo permission is granted.', 'status': 'running', 'time': 1345098.188197785} {'result': 'error', 'status': 'finished', 'time': 1345098.188217684} ``` This issue became apparent because of the problems addressed in #4619 which causes test errors in nrunner to really be signaled as an exit code. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `avocado/core/runners/requirement_package.py` Content: ``` 1 import time 2 from multiprocessing import Process, SimpleQueue 3 4 from ...utils.software_manager.main import MESSAGES 5 from ...utils.software_manager.manager import SoftwareManager 6 from .. 
import nrunner 7 8 9 class RequirementPackageRunner(nrunner.BaseRunner): 10 """Runner for requirements of type package 11 12 This runner handles, the installation, verification and removal of 13 packages using the avocado-software-manager. 14 15 Runnable attributes usage: 16 17 * kind: 'requirement-package' 18 19 * uri: not used 20 21 * args: not used 22 23 * kwargs: 24 - name: the package name (required) 25 - action: one of 'install', 'check', or 'remove' (optional, defaults 26 to 'install') 27 """ 28 29 @staticmethod 30 def _check(software_manager, package): 31 if software_manager.check_installed(package): 32 result = 'pass' 33 stdout = MESSAGES['check-installed']['success'] % package 34 stderr = '' 35 else: 36 result = 'error' 37 stdout = '' 38 stderr = MESSAGES['check-installed']['fail'] % package 39 return result, stdout, stderr 40 41 @staticmethod 42 def _install(software_manager, cmd, package): 43 result = 'pass' 44 stderr = '' 45 if not software_manager.check_installed(package): 46 if software_manager.install(package): 47 stdout = MESSAGES[cmd]['success'] % package 48 else: 49 result = 'error' 50 stdout = '' 51 stderr = MESSAGES[cmd]['fail'] % package 52 else: 53 stdout = MESSAGES['check-installed']['success'] % package 54 return result, stdout, stderr 55 56 @staticmethod 57 def _remove(software_manager, cmd, package): 58 result = 'pass' 59 stderr = '' 60 if software_manager.check_installed(package): 61 if software_manager.remove(package): 62 stdout = MESSAGES[cmd]['success'] % package 63 else: 64 result = 'error' 65 stdout = '' 66 stderr = MESSAGES[cmd]['fail'] % package 67 else: 68 stdout = MESSAGES['check-installed']['fail'] % package 69 return result, stdout, stderr 70 71 def _run_software_manager(self, cmd, package, queue): 72 software_manager = SoftwareManager() 73 74 if not software_manager.is_capable(): 75 output = {'result': 'error', 76 'stdout': '', 77 'stderr': ('Package manager not supported or not' 78 ' available.')} 79 queue.put(output) 80 81 if cmd == 'install': 82 result, stdout, stderr = self._install(software_manager, cmd, 83 package) 84 85 elif cmd == 'remove': 86 result, stdout, stderr = self._remove(software_manager, cmd, 87 package) 88 89 elif cmd == 'check': 90 result, stdout, stderr = self._check(software_manager, package) 91 92 output = {'result': result, 93 'stdout': stdout, 94 'stderr': stderr} 95 queue.put(output) 96 97 def run(self): 98 yield self.prepare_status('started') 99 # check if there is a valid 'action' argument 100 cmd = self.runnable.kwargs.get('action', 'install') 101 # avoid invalid arguments 102 if cmd not in ['install', 'check', 'remove']: 103 stderr = ("Invalid action %s. 
Use one of 'install', 'check' or" 104 " 'remove'" % cmd) 105 yield self.prepare_status('running', 106 {'type': 'stderr', 'log': stderr}) 107 yield self.prepare_status('finished', {'result': 'error'}) 108 return 109 110 package = self.runnable.kwargs.get('name') 111 # if package was passed correctly, run avocado-software-manager 112 if package is not None: 113 # let's spawn it to another process to be able to update the 114 # status messages and avoid the software-manager to lock this 115 # process 116 queue = SimpleQueue() 117 process = Process(target=self._run_software_manager, 118 args=(cmd, package, queue)) 119 process.start() 120 121 while queue.empty(): 122 time.sleep(nrunner.RUNNER_RUN_STATUS_INTERVAL) 123 yield self.prepare_status('running') 124 125 output = queue.get() 126 result = output['result'] 127 stdout = output['stdout'] 128 stderr = output['stderr'] 129 else: 130 # Otherwise, log the missing package name 131 result = 'error' 132 stdout = '' 133 stderr = ('Package name should be passed as kwargs using' 134 ' name="package_name".') 135 136 yield self.prepare_status('running', 137 {'type': 'stdout', 138 'log': stdout.encode()}) 139 yield self.prepare_status('running', 140 {'type': 'stderr', 141 'log': stderr.encode()}) 142 yield self.prepare_status('finished', {'result': result}) 143 144 145 class RunnerApp(nrunner.BaseRunnerApp): 146 PROG_NAME = 'avocado-runner-requirement-package' 147 PROG_DESCRIPTION = ('nrunner application for requirements of type package') 148 RUNNABLE_KINDS_CAPABLE = {'requirement-package': RequirementPackageRunner} 149 150 151 def main(): 152 nrunner.main(RunnerApp) 153 154 155 if __name__ == '__main__': 156 main() 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/avocado/core/runners/requirement_package.py b/avocado/core/runners/requirement_package.py --- a/avocado/core/runners/requirement_package.py +++ b/avocado/core/runners/requirement_package.py @@ -46,9 +46,14 @@ if software_manager.install(package): stdout = MESSAGES[cmd]['success'] % package else: - result = 'error' - stdout = '' - stderr = MESSAGES[cmd]['fail'] % package + # check if the error is a false negative because of package + # installation collision + if software_manager.check_installed(package): + stdout = MESSAGES[cmd]['success'] % package + else: + result = 'error' + stdout = '' + stderr = MESSAGES[cmd]['fail'] % package else: stdout = MESSAGES['check-installed']['success'] % package return result, stdout, stderr @@ -61,9 +66,14 @@ if software_manager.remove(package): stdout = MESSAGES[cmd]['success'] % package else: - result = 'error' - stdout = '' - stderr = MESSAGES[cmd]['fail'] % package + # check if the error is a false negative because of package + # installation collision + if not software_manager.check_installed(package): + stdout = MESSAGES[cmd]['success'] % package + else: + result = 'error' + stdout = '' + stderr = MESSAGES[cmd]['fail'] % package else: stdout = MESSAGES['check-installed']['fail'] % package return result, stdout, stderr
{"golden_diff": "diff --git a/avocado/core/runners/requirement_package.py b/avocado/core/runners/requirement_package.py\n--- a/avocado/core/runners/requirement_package.py\n+++ b/avocado/core/runners/requirement_package.py\n@@ -46,9 +46,14 @@\n if software_manager.install(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n- result = 'error'\n- stdout = ''\n- stderr = MESSAGES[cmd]['fail'] % package\n+ # check if the error is a false negative because of package\n+ # installation collision\n+ if software_manager.check_installed(package):\n+ stdout = MESSAGES[cmd]['success'] % package\n+ else:\n+ result = 'error'\n+ stdout = ''\n+ stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['success'] % package\n return result, stdout, stderr\n@@ -61,9 +66,14 @@\n if software_manager.remove(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n- result = 'error'\n- stdout = ''\n- stderr = MESSAGES[cmd]['fail'] % package\n+ # check if the error is a false negative because of package\n+ # installation collision\n+ if not software_manager.check_installed(package):\n+ stdout = MESSAGES[cmd]['success'] % package\n+ else:\n+ result = 'error'\n+ stdout = ''\n+ stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['fail'] % package\n return result, stdout, stderr\n", "issue": "Functional test for requirements resolver fails easily\nThe PASS or FAIL result for the functional tests for the requirement resolver is very much a hit/miss, but it's somewhat easy to reproduce with:\r\n\r\n```\r\n$ export CI=1\r\n$ dnf remove hello\r\n$ dnf clean all\r\n$ avocado run --test-runner=nrunner selftests/functional/test_requirements.py \r\nJOB ID : 35d4cf58034c04eb47be0276197f8ae5f17af82b\r\nJOB LOG : /home/cleber/avocado/job-results/job-2021-05-26T23.55-35d4cf5/job.log\r\n (3/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_success: STARTED\r\n (2/4) selftests/functional/test_requirements.py:BasicTest.test_single_fail: STARTED\r\n (1/4) selftests/functional/test_requirements.py:BasicTest.test_single_success: STARTED\r\n (4/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_fails: STARTED\r\n (2/4) selftests/functional/test_requirements.py:BasicTest.test_single_fail: PASS (4.75 s)\r\n (1/4) selftests/functional/test_requirements.py:BasicTest.test_single_success: PASS (5.29 s)\r\n (3/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_success: FAIL (14.22 s)\r\n (4/4) selftests/functional/test_requirements.py:BasicTest.test_multiple_fails: PASS (20.34 s)\r\nRESULTS : PASS 3 | ERROR 0 | FAIL 1 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0\r\nJOB HTML : /home/cleber/avocado/job-results/job-2021-05-26T23.55-35d4cf5/results.html\r\nJOB TIME : 21.52 s\r\n```\r\n\r\nThe reason for the failure is that, multiple tests contain:\r\n\r\n```\r\n:avocado: requirement={\"type\": \"package\", \"name\": \"hello\"}\r\n```\r\n\r\nAnd because the nrunner will run those tests in parallel, it will also run those requirements in parallel. But, given that dnf will hold a systemwide lock, the others can fail. 
This is reproducible if you try to run multiple `avocado-runner-requirement-package` in parallel manually, such as this which succeeds:\r\n\r\n```\r\n{'status': 'running', 'time': 1345096.780925351}\r\n{'status': 'running', 'time': 1345097.281267814}\r\n{'type': 'stdout', 'log': b'Package(s) hello installed successfully', 'status': 'running', 'time': 1345097.281664319}\r\n{'type': 'stderr', 'log': b'', 'status': 'running', 'time': 1345097.281689778}\r\n{'result': 'pass', 'status': 'finished', 'time': 1345097.281706051}\r\n```\r\n\r\nAnd this one that ends up failing:\r\n\r\n```\r\n{'status': 'running', 'time': 1345097.18655851}\r\n{'status': 'running', 'time': 1345097.687243183}\r\n{'status': 'running', 'time': 1345098.187887105}\r\n{'type': 'stdout', 'log': b'', 'status': 'running', 'time': 1345098.188174427}\r\n{'type': 'stderr', 'log': b'Failed to install hello. Check the package(s) name and if sudo permission is granted.', 'status': 'running', 'time': 1345098.188197785}\r\n{'result': 'error', 'status': 'finished', 'time': 1345098.188217684}\r\n```\r\n\r\nThis issue became apparent because of the problems addressed in #4619 which causes test errors in nrunner to really be signaled as an exit code.\n", "before_files": [{"content": "import time\nfrom multiprocessing import Process, SimpleQueue\n\nfrom ...utils.software_manager.main import MESSAGES\nfrom ...utils.software_manager.manager import SoftwareManager\nfrom .. import nrunner\n\n\nclass RequirementPackageRunner(nrunner.BaseRunner):\n \"\"\"Runner for requirements of type package\n\n This runner handles, the installation, verification and removal of\n packages using the avocado-software-manager.\n\n Runnable attributes usage:\n\n * kind: 'requirement-package'\n\n * uri: not used\n\n * args: not used\n\n * kwargs:\n - name: the package name (required)\n - action: one of 'install', 'check', or 'remove' (optional, defaults\n to 'install')\n \"\"\"\n\n @staticmethod\n def _check(software_manager, package):\n if software_manager.check_installed(package):\n result = 'pass'\n stdout = MESSAGES['check-installed']['success'] % package\n stderr = ''\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES['check-installed']['fail'] % package\n return result, stdout, stderr\n\n @staticmethod\n def _install(software_manager, cmd, package):\n result = 'pass'\n stderr = ''\n if not software_manager.check_installed(package):\n if software_manager.install(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['success'] % package\n return result, stdout, stderr\n\n @staticmethod\n def _remove(software_manager, cmd, package):\n result = 'pass'\n stderr = ''\n if software_manager.check_installed(package):\n if software_manager.remove(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['fail'] % package\n return result, stdout, stderr\n\n def _run_software_manager(self, cmd, package, queue):\n software_manager = SoftwareManager()\n\n if not software_manager.is_capable():\n output = {'result': 'error',\n 'stdout': '',\n 'stderr': ('Package manager not supported or not'\n ' available.')}\n queue.put(output)\n\n if cmd == 'install':\n result, stdout, stderr = self._install(software_manager, cmd,\n package)\n\n elif cmd == 'remove':\n result, stdout, stderr = self._remove(software_manager, 
cmd,\n package)\n\n elif cmd == 'check':\n result, stdout, stderr = self._check(software_manager, package)\n\n output = {'result': result,\n 'stdout': stdout,\n 'stderr': stderr}\n queue.put(output)\n\n def run(self):\n yield self.prepare_status('started')\n # check if there is a valid 'action' argument\n cmd = self.runnable.kwargs.get('action', 'install')\n # avoid invalid arguments\n if cmd not in ['install', 'check', 'remove']:\n stderr = (\"Invalid action %s. Use one of 'install', 'check' or\"\n \" 'remove'\" % cmd)\n yield self.prepare_status('running',\n {'type': 'stderr', 'log': stderr})\n yield self.prepare_status('finished', {'result': 'error'})\n return\n\n package = self.runnable.kwargs.get('name')\n # if package was passed correctly, run avocado-software-manager\n if package is not None:\n # let's spawn it to another process to be able to update the\n # status messages and avoid the software-manager to lock this\n # process\n queue = SimpleQueue()\n process = Process(target=self._run_software_manager,\n args=(cmd, package, queue))\n process.start()\n\n while queue.empty():\n time.sleep(nrunner.RUNNER_RUN_STATUS_INTERVAL)\n yield self.prepare_status('running')\n\n output = queue.get()\n result = output['result']\n stdout = output['stdout']\n stderr = output['stderr']\n else:\n # Otherwise, log the missing package name\n result = 'error'\n stdout = ''\n stderr = ('Package name should be passed as kwargs using'\n ' name=\"package_name\".')\n\n yield self.prepare_status('running',\n {'type': 'stdout',\n 'log': stdout.encode()})\n yield self.prepare_status('running',\n {'type': 'stderr',\n 'log': stderr.encode()})\n yield self.prepare_status('finished', {'result': result})\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-requirement-package'\n PROG_DESCRIPTION = ('nrunner application for requirements of type package')\n RUNNABLE_KINDS_CAPABLE = {'requirement-package': RequirementPackageRunner}\n\n\ndef main():\n nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/requirement_package.py"}], "after_files": [{"content": "import time\nfrom multiprocessing import Process, SimpleQueue\n\nfrom ...utils.software_manager.main import MESSAGES\nfrom ...utils.software_manager.manager import SoftwareManager\nfrom .. 
import nrunner\n\n\nclass RequirementPackageRunner(nrunner.BaseRunner):\n \"\"\"Runner for requirements of type package\n\n This runner handles, the installation, verification and removal of\n packages using the avocado-software-manager.\n\n Runnable attributes usage:\n\n * kind: 'requirement-package'\n\n * uri: not used\n\n * args: not used\n\n * kwargs:\n - name: the package name (required)\n - action: one of 'install', 'check', or 'remove' (optional, defaults\n to 'install')\n \"\"\"\n\n @staticmethod\n def _check(software_manager, package):\n if software_manager.check_installed(package):\n result = 'pass'\n stdout = MESSAGES['check-installed']['success'] % package\n stderr = ''\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES['check-installed']['fail'] % package\n return result, stdout, stderr\n\n @staticmethod\n def _install(software_manager, cmd, package):\n result = 'pass'\n stderr = ''\n if not software_manager.check_installed(package):\n if software_manager.install(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n # check if the error is a false negative because of package\n # installation collision\n if software_manager.check_installed(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['success'] % package\n return result, stdout, stderr\n\n @staticmethod\n def _remove(software_manager, cmd, package):\n result = 'pass'\n stderr = ''\n if software_manager.check_installed(package):\n if software_manager.remove(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n # check if the error is a false negative because of package\n # installation collision\n if not software_manager.check_installed(package):\n stdout = MESSAGES[cmd]['success'] % package\n else:\n result = 'error'\n stdout = ''\n stderr = MESSAGES[cmd]['fail'] % package\n else:\n stdout = MESSAGES['check-installed']['fail'] % package\n return result, stdout, stderr\n\n def _run_software_manager(self, cmd, package, queue):\n software_manager = SoftwareManager()\n\n if not software_manager.is_capable():\n output = {'result': 'error',\n 'stdout': '',\n 'stderr': ('Package manager not supported or not'\n ' available.')}\n queue.put(output)\n\n if cmd == 'install':\n result, stdout, stderr = self._install(software_manager, cmd,\n package)\n\n elif cmd == 'remove':\n result, stdout, stderr = self._remove(software_manager, cmd,\n package)\n\n elif cmd == 'check':\n result, stdout, stderr = self._check(software_manager, package)\n\n output = {'result': result,\n 'stdout': stdout,\n 'stderr': stderr}\n queue.put(output)\n\n def run(self):\n yield self.prepare_status('started')\n # check if there is a valid 'action' argument\n cmd = self.runnable.kwargs.get('action', 'install')\n # avoid invalid arguments\n if cmd not in ['install', 'check', 'remove']:\n stderr = (\"Invalid action %s. 
Use one of 'install', 'check' or\"\n \" 'remove'\" % cmd)\n yield self.prepare_status('running',\n {'type': 'stderr', 'log': stderr})\n yield self.prepare_status('finished', {'result': 'error'})\n return\n\n package = self.runnable.kwargs.get('name')\n # if package was passed correctly, run avocado-software-manager\n if package is not None:\n # let's spawn it to another process to be able to update the\n # status messages and avoid the software-manager to lock this\n # process\n queue = SimpleQueue()\n process = Process(target=self._run_software_manager,\n args=(cmd, package, queue))\n process.start()\n\n while queue.empty():\n time.sleep(nrunner.RUNNER_RUN_STATUS_INTERVAL)\n yield self.prepare_status('running')\n\n output = queue.get()\n result = output['result']\n stdout = output['stdout']\n stderr = output['stderr']\n else:\n # Otherwise, log the missing package name\n result = 'error'\n stdout = ''\n stderr = ('Package name should be passed as kwargs using'\n ' name=\"package_name\".')\n\n yield self.prepare_status('running',\n {'type': 'stdout',\n 'log': stdout.encode()})\n yield self.prepare_status('running',\n {'type': 'stderr',\n 'log': stderr.encode()})\n yield self.prepare_status('finished', {'result': result})\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-requirement-package'\n PROG_DESCRIPTION = ('nrunner application for requirements of type package')\n RUNNABLE_KINDS_CAPABLE = {'requirement-package': RequirementPackageRunner}\n\n\ndef main():\n nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/requirement_package.py"}]}
2,704
381
gh_patches_debug_64110
rasdani/github-patches
git_diff
projectmesa__mesa-561
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update tests to use pytest, not nose Update tests to use pytest, not nose. nose is not maintained anymore. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 import re 4 5 from setuptools import setup, find_packages 6 from codecs import open 7 8 requires = [ 9 'click', 10 'cookiecutter', 11 'jupyter', 12 'networkx', 13 'numpy', 14 'pandas', 15 'tornado >= 4.2, < 5.0.0', 16 'tqdm', 17 ] 18 19 extras_require = { 20 'dev': [ 21 'coverage', 22 'flake8', 23 'nose', 24 'sphinx', 25 ], 26 'docs': [ 27 'sphinx', 28 ] 29 } 30 31 version = '' 32 with open('mesa/__init__.py', 'r') as fd: 33 version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', 34 fd.read(), re.MULTILINE).group(1) 35 36 with open('README.rst', 'rb', encoding='utf-8') as f: 37 readme = f.read() 38 39 setup( 40 name='Mesa', 41 version=version, 42 description="Agent-based modeling (ABM) in Python 3+", 43 long_description=readme, 44 author='Project Mesa Team', 45 author_email='[email protected]', 46 url='https://github.com/projectmesa/mesa', 47 packages=find_packages(), 48 package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*', 49 'visualization/templates/fonts/*', 'visualization/templates/js/*'], 50 'cookiecutter-mesa': ['cookiecutter-mesa/*']}, 51 include_package_data=True, 52 install_requires=requires, 53 extras_require=extras_require, 54 keywords='agent based modeling model ABM simulation multi-agent', 55 license='Apache 2.0', 56 zip_safe=False, 57 classifiers=[ 58 'Topic :: Scientific/Engineering', 59 'Topic :: Scientific/Engineering :: Artificial Life', 60 'Topic :: Scientific/Engineering :: Artificial Intelligence', 61 'Intended Audience :: Science/Research', 62 'Programming Language :: Python :: 3 :: Only', 63 'License :: OSI Approved :: Apache Software License', 64 'Operating System :: OS Independent', 65 'Development Status :: 3 - Alpha', 66 'Natural Language :: English', 67 ], 68 entry_points=''' 69 [console_scripts] 70 mesa=mesa.main:cli 71 ''', 72 ) 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,8 @@ 'dev': [ 'coverage', 'flake8', - 'nose', + 'pytest', + 'pytest-cov', 'sphinx', ], 'docs': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,8 @@\n 'dev': [\n 'coverage',\n 'flake8',\n- 'nose',\n+ 'pytest',\n+ 'pytest-cov',\n 'sphinx',\n ],\n 'docs': [\n", "issue": "Update tests to use pytest, not nose\nUpdate tests to use pytest, not nose. nose is not maintained anymore. \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\n 'click',\n 'cookiecutter',\n 'jupyter',\n 'networkx',\n 'numpy',\n 'pandas',\n 'tornado >= 4.2, < 5.0.0',\n 'tqdm',\n]\n\nextras_require = {\n 'dev': [\n 'coverage',\n 'flake8',\n 'nose',\n 'sphinx',\n ],\n 'docs': [\n 'sphinx',\n ]\n}\n\nversion = ''\nwith open('mesa/__init__.py', 'r') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nwith open('README.rst', 'rb', encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name='Mesa',\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author='Project Mesa Team',\n author_email='[email protected]',\n url='https://github.com/projectmesa/mesa',\n packages=find_packages(),\n package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*',\n 'visualization/templates/fonts/*', 'visualization/templates/js/*'],\n 'cookiecutter-mesa': ['cookiecutter-mesa/*']},\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords='agent based modeling model ABM simulation multi-agent',\n license='Apache 2.0',\n zip_safe=False,\n classifiers=[\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Life',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n ],\n entry_points='''\n [console_scripts]\n mesa=mesa.main:cli\n ''',\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\n 'click',\n 'cookiecutter',\n 'jupyter',\n 'networkx',\n 'numpy',\n 'pandas',\n 'tornado >= 4.2, < 5.0.0',\n 'tqdm',\n]\n\nextras_require = {\n 'dev': [\n 'coverage',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'sphinx',\n ],\n 'docs': [\n 'sphinx',\n ]\n}\n\nversion = ''\nwith open('mesa/__init__.py', 'r') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nwith open('README.rst', 'rb', encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name='Mesa',\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author='Project Mesa Team',\n author_email='[email protected]',\n url='https://github.com/projectmesa/mesa',\n packages=find_packages(),\n package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*',\n 'visualization/templates/fonts/*', 'visualization/templates/js/*'],\n 'cookiecutter-mesa': ['cookiecutter-mesa/*']},\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords='agent based modeling model ABM simulation multi-agent',\n license='Apache 2.0',\n 
zip_safe=False,\n classifiers=[\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Life',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Development Status :: 3 - Alpha',\n 'Natural Language :: English',\n ],\n entry_points='''\n [console_scripts]\n mesa=mesa.main:cli\n ''',\n)\n", "path": "setup.py"}]}
920
76
gh_patches_debug_20766
rasdani/github-patches
git_diff
microsoft__botbuilder-python-2069
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Recommended change to 3.8.6 or above https://github.com/microsoft/botbuilder-python/blob/7b064bb9f916afc10e931f3713183f57e1d7ca47/libraries/botbuilder-integration-aiohttp/setup.py#L13 I have a conflict when introducing llamaindex, which requires version 3.8.6 or higher! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-integration-aiohttp/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" 8 REQUIRES = [ 9 "botbuilder-schema==4.15.0", 10 "botframework-connector==4.15.0", 11 "botbuilder-core==4.15.0", 12 "yarl>=1.8.1", 13 "aiohttp==3.8.5", 14 ] 15 16 root = os.path.abspath(os.path.dirname(__file__)) 17 18 with open(os.path.join(root, "botbuilder", "integration", "aiohttp", "about.py")) as f: 19 package_info = {} 20 info = f.read() 21 exec(info, package_info) 22 23 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 24 long_description = f.read() 25 26 setup( 27 name=package_info["__title__"], 28 version=package_info["__version__"], 29 url=package_info["__uri__"], 30 author=package_info["__author__"], 31 description=package_info["__description__"], 32 keywords=[ 33 "BotBuilderIntegrationAiohttp", 34 "bots", 35 "ai", 36 "botframework", 37 "botbuilder", 38 ], 39 long_description=long_description, 40 long_description_content_type="text/x-rst", 41 license=package_info["__license__"], 42 packages=[ 43 "botbuilder.integration.aiohttp", 44 "botbuilder.integration.aiohttp.skills", 45 "botbuilder.integration.aiohttp.streaming", 46 ], 47 install_requires=REQUIRES, 48 classifiers=[ 49 "Programming Language :: Python :: 3.7", 50 "Intended Audience :: Developers", 51 "License :: OSI Approved :: MIT License", 52 "Operating System :: OS Independent", 53 "Development Status :: 5 - Production/Stable", 54 "Topic :: Scientific/Engineering :: Artificial Intelligence", 55 ], 56 ) 57 ``` Path: `libraries/botbuilder-ai/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 
3 4 import os 5 from setuptools import setup 6 7 REQUIRES = [ 8 "azure-cognitiveservices-language-luis==0.2.0", 9 "botbuilder-schema==4.15.0", 10 "botbuilder-core==4.15.0", 11 "aiohttp==3.8.5", 12 ] 13 14 TESTS_REQUIRES = ["aiounittest>=1.1.0"] 15 16 root = os.path.abspath(os.path.dirname(__file__)) 17 18 with open(os.path.join(root, "botbuilder", "ai", "about.py")) as f: 19 package_info = {} 20 info = f.read() 21 exec(info, package_info) 22 23 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 24 long_description = f.read() 25 26 setup( 27 name=package_info["__title__"], 28 version=package_info["__version__"], 29 url=package_info["__uri__"], 30 author=package_info["__author__"], 31 description=package_info["__description__"], 32 keywords="botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder", 33 long_description=long_description, 34 long_description_content_type="text/x-rst", 35 license=package_info["__license__"], 36 packages=[ 37 "botbuilder.ai", 38 "botbuilder.ai.qna", 39 "botbuilder.ai.luis", 40 "botbuilder.ai.qna.models", 41 "botbuilder.ai.qna.utils", 42 "botbuilder.ai.qna.dialogs", 43 ], 44 install_requires=REQUIRES + TESTS_REQUIRES, 45 tests_require=TESTS_REQUIRES, 46 include_package_data=True, 47 classifiers=[ 48 "Programming Language :: Python :: 3.7", 49 "Intended Audience :: Developers", 50 "License :: OSI Approved :: MIT License", 51 "Operating System :: OS Independent", 52 "Development Status :: 5 - Production/Stable", 53 "Topic :: Scientific/Engineering :: Artificial Intelligence", 54 ], 55 ) 56 ``` Path: `libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 REQUIRES = [ 8 "applicationinsights>=0.11.9", 9 "aiohttp==3.8.5", 10 "botbuilder-schema==4.15.0", 11 "botframework-connector==4.15.0", 12 "botbuilder-core==4.15.0", 13 "botbuilder-applicationinsights==4.15.0", 14 ] 15 TESTS_REQUIRES = [ 16 "aiounittest==1.3.0", 17 ] 18 19 root = os.path.abspath(os.path.dirname(__file__)) 20 21 with open( 22 os.path.join( 23 root, "botbuilder", "integration", "applicationinsights", "aiohttp", "about.py" 24 ) 25 ) as f: 26 package_info = {} 27 info = f.read() 28 exec(info, package_info) 29 30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 31 long_description = f.read() 32 33 setup( 34 name=package_info["__title__"], 35 version=package_info["__version__"], 36 url=package_info["__uri__"], 37 author=package_info["__author__"], 38 description=package_info["__description__"], 39 keywords=[ 40 "BotBuilderApplicationInsights", 41 "bots", 42 "ai", 43 "botframework", 44 "botbuilder", 45 "aiohttp", 46 ], 47 long_description=long_description, 48 long_description_content_type="text/x-rst", 49 license=package_info["__license__"], 50 packages=["botbuilder.integration.applicationinsights.aiohttp"], 51 install_requires=REQUIRES + TESTS_REQUIRES, 52 tests_require=TESTS_REQUIRES, 53 include_package_data=True, 54 classifiers=[ 55 "Programming Language :: Python :: 3.7", 56 "Intended Audience :: Developers", 57 "License :: OSI Approved :: MIT License", 58 "Operating System :: OS Independent", 59 "Development Status :: 5 - Production/Stable", 60 "Topic :: Scientific/Engineering :: Artificial Intelligence", 61 ], 62 ) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py --- a/libraries/botbuilder-ai/setup.py +++ b/libraries/botbuilder-ai/setup.py @@ -8,7 +8,7 @@ "azure-cognitiveservices-language-luis==0.2.0", "botbuilder-schema==4.15.0", "botbuilder-core==4.15.0", - "aiohttp==3.8.5", + "aiohttp==3.9.3", ] TESTS_REQUIRES = ["aiounittest>=1.1.0"] diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py --- a/libraries/botbuilder-integration-aiohttp/setup.py +++ b/libraries/botbuilder-integration-aiohttp/setup.py @@ -10,7 +10,7 @@ "botframework-connector==4.15.0", "botbuilder-core==4.15.0", "yarl>=1.8.1", - "aiohttp==3.8.5", + "aiohttp==3.9.3", ] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py --- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py +++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py @@ -6,7 +6,7 @@ REQUIRES = [ "applicationinsights>=0.11.9", - "aiohttp==3.8.5", + "aiohttp==3.9.3", "botbuilder-schema==4.15.0", "botframework-connector==4.15.0", "botbuilder-core==4.15.0",
{"golden_diff": "diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py\n--- a/libraries/botbuilder-ai/setup.py\n+++ b/libraries/botbuilder-ai/setup.py\n@@ -8,7 +8,7 @@\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema==4.15.0\",\n \"botbuilder-core==4.15.0\",\n- \"aiohttp==3.8.5\",\n+ \"aiohttp==3.9.3\",\n ]\n \n TESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\ndiff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-aiohttp/setup.py\n@@ -10,7 +10,7 @@\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"yarl>=1.8.1\",\n- \"aiohttp==3.8.5\",\n+ \"aiohttp==3.9.3\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\ndiff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n@@ -6,7 +6,7 @@\n \n REQUIRES = [\n \"applicationinsights>=0.11.9\",\n- \"aiohttp==3.8.5\",\n+ \"aiohttp==3.9.3\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n", "issue": "Recommended change to 3.8.6 or above\nhttps://github.com/microsoft/botbuilder-python/blob/7b064bb9f916afc10e931f3713183f57e1d7ca47/libraries/botbuilder-integration-aiohttp/setup.py#L13\r\n\r\nI have a conflict when introducing llamaindex, which requires version 3.8.6 or higher!\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"yarl>=1.8.1\",\n \"aiohttp==3.8.5\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"integration\", \"aiohttp\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderIntegrationAiohttp\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.integration.aiohttp\",\n \"botbuilder.integration.aiohttp.skills\",\n \"botbuilder.integration.aiohttp.streaming\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-aiohttp/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"aiohttp==3.8.5\",\n]\n\nTESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"ai\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.ai\",\n \"botbuilder.ai.qna\",\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n \"botbuilder.ai.qna.dialogs\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-ai/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"aiohttp==3.8.5\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"botbuilder-applicationinsights==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft 
Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"yarl>=1.8.1\",\n \"aiohttp==3.9.3\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"integration\", \"aiohttp\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderIntegrationAiohttp\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.integration.aiohttp\",\n \"botbuilder.integration.aiohttp.skills\",\n \"botbuilder.integration.aiohttp.streaming\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-aiohttp/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"azure-cognitiveservices-language-luis==0.2.0\",\n \"botbuilder-schema==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"aiohttp==3.9.3\",\n]\n\nTESTS_REQUIRES = [\"aiounittest>=1.1.0\"]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"ai\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=\"botbuilder-ai LUIS QnAMaker bots ai botframework botbuilder\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.ai\",\n \"botbuilder.ai.qna\",\n \"botbuilder.ai.luis\",\n \"botbuilder.ai.qna.models\",\n \"botbuilder.ai.qna.utils\",\n \"botbuilder.ai.qna.dialogs\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-ai/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"aiohttp==3.9.3\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n \"botbuilder-applicationinsights==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}]}
2,055
460
gh_patches_debug_33101
rasdani/github-patches
git_diff
pypa__virtualenv-1578
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- virtualenv 20: is the symlink hack really worth it? I did some timing and it seems like the trouble it causes is not really worth it -- at the very least I'd like an option which copies instead of symlinks Here's some timing I did to try and guage the differences -- since there's no options I could find I toggled this line to `if False` to get my "copy" data: https://github.com/pypa/virtualenv/blob/8c2985c2946e767bb6f74a7e22f51add17b38987/src/virtualenv/seed/via_app_data/via_app_data.py#L92 ### with symlinks my platform for this example is relatively low powered, a 2015 MBP ```console $ rm -rf vvv; time virtualenv vvv real 0m0.128s user 0m0.107s sys 0m0.023s $ rm -rf vvv; time virtualenv vvv real 0m0.128s user 0m0.118s sys 0m0.012s $ rm -rf vvv; time virtualenv vvv real 0m0.123s user 0m0.121s sys 0m0.004s $ rm -rf vvv; time virtualenv vvv real 0m0.119s user 0m0.117s sys 0m0.004s $ rm -rf vvv; time virtualenv vvv real 0m0.127s user 0m0.109s sys 0m0.020s ``` disk usage: ```console $ du -hs vvv 128K vvv ``` problems this can cause: ```console $ # copied to same path on other machine $ ./vvv/bin/python -c 'import setuptools' Traceback (most recent call last): File "<string>", line 1, in <module> ModuleNotFoundError: No module named 'setuptools' $ ./vvv/bin/pip --help Traceback (most recent call last): File "./vvv/bin/pip", line 6, in <module> from pip._internal.cli.main import main ModuleNotFoundError: No module named 'pip' ``` ### with copies ```console $ rm -rf vvv; time virtualenv vvv real 0m0.179s user 0m0.155s sys 0m0.050s $ rm -rf vvv; time virtualenv vvv real 0m0.185s user 0m0.158s sys 0m0.050s $ rm -rf vvv; time virtualenv vvv real 0m0.183s user 0m0.160s sys 0m0.048s $ rm -rf vvv; time virtualenv vvv real 0m0.172s user 0m0.162s sys 0m0.035s $ rm -rf vvv; time virtualenv vvv real 0m0.181s user 0m0.142s sys 0m0.065s ``` ```console $ du -hs vvv 7.5M vvv ``` ### trade off so we're looking at ~60ms of time overhead -- which (imo) isn't that much -- the disk usage is another concern but we're still taking that usage one way or another ### other considerations hardlinks would be another consideration -- it would alleviate the problems I have with symlinks (caches, using virtualenv as a deployment mechanism, etc.) -- I'd have to do some implementation work to verify that case --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/virtualenv/seed/via_app_data/via_app_data.py` Content: ``` 1 """Bootstrap""" 2 from __future__ import absolute_import, unicode_literals 3 4 import logging 5 import shutil 6 from contextlib import contextmanager 7 from threading import Lock, Thread 8 9 import six 10 11 from virtualenv.dirs import default_data_dir 12 from virtualenv.seed.embed.base_embed import BaseEmbed 13 from virtualenv.seed.embed.wheels.acquire import get_wheels 14 15 from .pip_install.copy import CopyPipInstall 16 from .pip_install.symlink import SymlinkPipInstall 17 18 19 class FromAppData(BaseEmbed): 20 def __init__(self, options): 21 super(FromAppData, self).__init__(options) 22 self.clear = options.clear_app_data 23 self.app_data_dir = default_data_dir() / "seed-v1" 24 self.symlinks = getattr(options, "copies", False) is False 25 26 @classmethod 27 def add_parser_arguments(cls, parser, interpreter): 28 super(FromAppData, cls).add_parser_arguments(parser, interpreter) 29 parser.add_argument( 30 "--clear-app-data", 31 dest="clear_app_data", 32 action="store_true", 33 help="clear the app data folder of seed images ({})".format((default_data_dir() / "seed-v1").path), 34 default=False, 35 ) 36 37 def run(self, creator): 38 if not self.enabled: 39 return 40 base_cache = self.app_data_dir / creator.interpreter.version_release_str 41 with self._get_seed_wheels(creator, base_cache) as name_to_whl: 42 pip_version = name_to_whl["pip"].stem.split("-")[1] 43 installer_class = self.installer_class(pip_version) 44 45 def _install(name, wheel): 46 logging.debug("install %s from wheel %s via %s", name, wheel, installer_class.__name__) 47 image_folder = base_cache.path / "image" / installer_class.__name__ / wheel.stem 48 installer = installer_class(wheel, creator, image_folder) 49 if self.clear: 50 installer.clear() 51 if not installer.has_image(): 52 installer.build_image() 53 installer.install() 54 55 threads = list(Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items()) 56 for thread in threads: 57 thread.start() 58 for thread in threads: 59 thread.join() 60 61 @contextmanager 62 def _get_seed_wheels(self, creator, base_cache): 63 with base_cache.lock_for_key("wheels"): 64 wheels_to = base_cache.path / "wheels" 65 if self.clear and wheels_to.exists(): 66 shutil.rmtree(six.ensure_text(str(wheels_to))) 67 wheels_to.mkdir(parents=True, exist_ok=True) 68 name_to_whl, lock = {}, Lock() 69 70 def _get(package, version): 71 result = get_wheels( 72 creator.interpreter.version_release_str, 73 wheels_to, 74 self.extra_search_dir, 75 self.download, 76 {package: version}, 77 ) 78 with lock: 79 name_to_whl.update(result) 80 81 threads = list(Thread(target=_get, args=(pkg, v)) for pkg, v in self.package_version().items()) 82 for thread in threads: 83 thread.start() 84 for thread in threads: 85 thread.join() 86 87 yield name_to_whl 88 89 def installer_class(self, pip_version): 90 if self.symlinks: 91 # symlink support requires pip 19.3+ 92 pip_version_int = tuple(int(i) for i in pip_version.split(".")[0:2]) 93 if pip_version_int >= (19, 3): 94 return SymlinkPipInstall 95 return CopyPipInstall 96 97 def __unicode__(self): 98 return super(FromAppData, self).__unicode__() + " app_data_dir={}".format(self.app_data_dir.path) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/virtualenv/seed/via_app_data/via_app_data.py b/src/virtualenv/seed/via_app_data/via_app_data.py --- a/src/virtualenv/seed/via_app_data/via_app_data.py +++ b/src/virtualenv/seed/via_app_data/via_app_data.py @@ -9,6 +9,7 @@ import six from virtualenv.dirs import default_data_dir +from virtualenv.info import fs_supports_symlink from virtualenv.seed.embed.base_embed import BaseEmbed from virtualenv.seed.embed.wheels.acquire import get_wheels @@ -21,7 +22,7 @@ super(FromAppData, self).__init__(options) self.clear = options.clear_app_data self.app_data_dir = default_data_dir() / "seed-v1" - self.symlinks = getattr(options, "copies", False) is False + self.symlinks = options.symlink_app_data @classmethod def add_parser_arguments(cls, parser, interpreter): @@ -33,6 +34,16 @@ help="clear the app data folder of seed images ({})".format((default_data_dir() / "seed-v1").path), default=False, ) + can_symlink = fs_supports_symlink() + parser.add_argument( + "--symlink-app-data", + dest="symlink_app_data", + action="store_true" if can_symlink else "store_false", + help="{} symlink the python packages from the app-data folder (requires seed pip>=19.3)".format( + "" if can_symlink else "not supported - " + ), + default=False, + ) def run(self, creator): if not self.enabled: @@ -95,4 +106,6 @@ return CopyPipInstall def __unicode__(self): - return super(FromAppData, self).__unicode__() + " app_data_dir={}".format(self.app_data_dir.path) + return super(FromAppData, self).__unicode__() + " app_data_dir={} via={}".format( + self.app_data_dir.path, "symlink" if self.symlinks else "copy" + )
{"golden_diff": "diff --git a/src/virtualenv/seed/via_app_data/via_app_data.py b/src/virtualenv/seed/via_app_data/via_app_data.py\n--- a/src/virtualenv/seed/via_app_data/via_app_data.py\n+++ b/src/virtualenv/seed/via_app_data/via_app_data.py\n@@ -9,6 +9,7 @@\n import six\n \n from virtualenv.dirs import default_data_dir\n+from virtualenv.info import fs_supports_symlink\n from virtualenv.seed.embed.base_embed import BaseEmbed\n from virtualenv.seed.embed.wheels.acquire import get_wheels\n \n@@ -21,7 +22,7 @@\n super(FromAppData, self).__init__(options)\n self.clear = options.clear_app_data\n self.app_data_dir = default_data_dir() / \"seed-v1\"\n- self.symlinks = getattr(options, \"copies\", False) is False\n+ self.symlinks = options.symlink_app_data\n \n @classmethod\n def add_parser_arguments(cls, parser, interpreter):\n@@ -33,6 +34,16 @@\n help=\"clear the app data folder of seed images ({})\".format((default_data_dir() / \"seed-v1\").path),\n default=False,\n )\n+ can_symlink = fs_supports_symlink()\n+ parser.add_argument(\n+ \"--symlink-app-data\",\n+ dest=\"symlink_app_data\",\n+ action=\"store_true\" if can_symlink else \"store_false\",\n+ help=\"{} symlink the python packages from the app-data folder (requires seed pip>=19.3)\".format(\n+ \"\" if can_symlink else \"not supported - \"\n+ ),\n+ default=False,\n+ )\n \n def run(self, creator):\n if not self.enabled:\n@@ -95,4 +106,6 @@\n return CopyPipInstall\n \n def __unicode__(self):\n- return super(FromAppData, self).__unicode__() + \" app_data_dir={}\".format(self.app_data_dir.path)\n+ return super(FromAppData, self).__unicode__() + \" app_data_dir={} via={}\".format(\n+ self.app_data_dir.path, \"symlink\" if self.symlinks else \"copy\"\n+ )\n", "issue": "virtualenv 20: is the symlink hack really worth it?\nI did some timing and it seems like the trouble it causes is not really worth it -- at the very least I'd like an option which copies instead of symlinks\r\n\r\nHere's some timing I did to try and guage the differences -- since there's no options I could find I toggled this line to `if False` to get my \"copy\" data: https://github.com/pypa/virtualenv/blob/8c2985c2946e767bb6f74a7e22f51add17b38987/src/virtualenv/seed/via_app_data/via_app_data.py#L92\r\n\r\n### with symlinks\r\n\r\nmy platform for this example is relatively low powered, a 2015 MBP\r\n\r\n```console\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.128s\r\nuser\t0m0.107s\r\nsys\t0m0.023s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.128s\r\nuser\t0m0.118s\r\nsys\t0m0.012s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.123s\r\nuser\t0m0.121s\r\nsys\t0m0.004s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.119s\r\nuser\t0m0.117s\r\nsys\t0m0.004s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.127s\r\nuser\t0m0.109s\r\nsys\t0m0.020s\r\n```\r\n\r\ndisk usage:\r\n\r\n```console\r\n$ du -hs vvv\r\n128K\tvvv\r\n```\r\n\r\nproblems this can cause:\r\n\r\n```console\r\n$ # copied to same path on other machine\r\n$ ./vvv/bin/python -c 'import setuptools'\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nModuleNotFoundError: No module named 'setuptools'\r\n$ ./vvv/bin/pip --help\r\nTraceback (most recent call last):\r\n File \"./vvv/bin/pip\", line 6, in <module>\r\n from pip._internal.cli.main import main\r\nModuleNotFoundError: No module named 'pip'\r\n```\r\n\r\n### with copies\r\n\r\n```console\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.179s\r\nuser\t0m0.155s\r\nsys\t0m0.050s\r\n$ rm -rf vvv; time 
virtualenv vvv\r\n\r\nreal\t0m0.185s\r\nuser\t0m0.158s\r\nsys\t0m0.050s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.183s\r\nuser\t0m0.160s\r\nsys\t0m0.048s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.172s\r\nuser\t0m0.162s\r\nsys\t0m0.035s\r\n$ rm -rf vvv; time virtualenv vvv\r\n\r\nreal\t0m0.181s\r\nuser\t0m0.142s\r\nsys\t0m0.065s\r\n```\r\n\r\n```console\r\n$ du -hs vvv\r\n7.5M\tvvv\r\n```\r\n\r\n### trade off\r\n\r\nso we're looking at ~60ms of time overhead -- which (imo) isn't that much -- the disk usage is another concern but we're still taking that usage one way or another\r\n\r\n### other considerations\r\n\r\nhardlinks would be another consideration -- it would alleviate the problems I have with symlinks (caches, using virtualenv as a deployment mechanism, etc.) -- I'd have to do some implementation work to verify that case\n", "before_files": [{"content": "\"\"\"Bootstrap\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport shutil\nfrom contextlib import contextmanager\nfrom threading import Lock, Thread\n\nimport six\n\nfrom virtualenv.dirs import default_data_dir\nfrom virtualenv.seed.embed.base_embed import BaseEmbed\nfrom virtualenv.seed.embed.wheels.acquire import get_wheels\n\nfrom .pip_install.copy import CopyPipInstall\nfrom .pip_install.symlink import SymlinkPipInstall\n\n\nclass FromAppData(BaseEmbed):\n def __init__(self, options):\n super(FromAppData, self).__init__(options)\n self.clear = options.clear_app_data\n self.app_data_dir = default_data_dir() / \"seed-v1\"\n self.symlinks = getattr(options, \"copies\", False) is False\n\n @classmethod\n def add_parser_arguments(cls, parser, interpreter):\n super(FromAppData, cls).add_parser_arguments(parser, interpreter)\n parser.add_argument(\n \"--clear-app-data\",\n dest=\"clear_app_data\",\n action=\"store_true\",\n help=\"clear the app data folder of seed images ({})\".format((default_data_dir() / \"seed-v1\").path),\n default=False,\n )\n\n def run(self, creator):\n if not self.enabled:\n return\n base_cache = self.app_data_dir / creator.interpreter.version_release_str\n with self._get_seed_wheels(creator, base_cache) as name_to_whl:\n pip_version = name_to_whl[\"pip\"].stem.split(\"-\")[1]\n installer_class = self.installer_class(pip_version)\n\n def _install(name, wheel):\n logging.debug(\"install %s from wheel %s via %s\", name, wheel, installer_class.__name__)\n image_folder = base_cache.path / \"image\" / installer_class.__name__ / wheel.stem\n installer = installer_class(wheel, creator, image_folder)\n if self.clear:\n installer.clear()\n if not installer.has_image():\n installer.build_image()\n installer.install()\n\n threads = list(Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items())\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n @contextmanager\n def _get_seed_wheels(self, creator, base_cache):\n with base_cache.lock_for_key(\"wheels\"):\n wheels_to = base_cache.path / \"wheels\"\n if self.clear and wheels_to.exists():\n shutil.rmtree(six.ensure_text(str(wheels_to)))\n wheels_to.mkdir(parents=True, exist_ok=True)\n name_to_whl, lock = {}, Lock()\n\n def _get(package, version):\n result = get_wheels(\n creator.interpreter.version_release_str,\n wheels_to,\n self.extra_search_dir,\n self.download,\n {package: version},\n )\n with lock:\n name_to_whl.update(result)\n\n threads = list(Thread(target=_get, args=(pkg, v)) for pkg, v in self.package_version().items())\n for thread in threads:\n 
thread.start()\n for thread in threads:\n thread.join()\n\n yield name_to_whl\n\n def installer_class(self, pip_version):\n if self.symlinks:\n # symlink support requires pip 19.3+\n pip_version_int = tuple(int(i) for i in pip_version.split(\".\")[0:2])\n if pip_version_int >= (19, 3):\n return SymlinkPipInstall\n return CopyPipInstall\n\n def __unicode__(self):\n return super(FromAppData, self).__unicode__() + \" app_data_dir={}\".format(self.app_data_dir.path)\n", "path": "src/virtualenv/seed/via_app_data/via_app_data.py"}], "after_files": [{"content": "\"\"\"Bootstrap\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport logging\nimport shutil\nfrom contextlib import contextmanager\nfrom threading import Lock, Thread\n\nimport six\n\nfrom virtualenv.dirs import default_data_dir\nfrom virtualenv.info import fs_supports_symlink\nfrom virtualenv.seed.embed.base_embed import BaseEmbed\nfrom virtualenv.seed.embed.wheels.acquire import get_wheels\n\nfrom .pip_install.copy import CopyPipInstall\nfrom .pip_install.symlink import SymlinkPipInstall\n\n\nclass FromAppData(BaseEmbed):\n def __init__(self, options):\n super(FromAppData, self).__init__(options)\n self.clear = options.clear_app_data\n self.app_data_dir = default_data_dir() / \"seed-v1\"\n self.symlinks = options.symlink_app_data\n\n @classmethod\n def add_parser_arguments(cls, parser, interpreter):\n super(FromAppData, cls).add_parser_arguments(parser, interpreter)\n parser.add_argument(\n \"--clear-app-data\",\n dest=\"clear_app_data\",\n action=\"store_true\",\n help=\"clear the app data folder of seed images ({})\".format((default_data_dir() / \"seed-v1\").path),\n default=False,\n )\n can_symlink = fs_supports_symlink()\n parser.add_argument(\n \"--symlink-app-data\",\n dest=\"symlink_app_data\",\n action=\"store_true\" if can_symlink else \"store_false\",\n help=\"{} symlink the python packages from the app-data folder (requires seed pip>=19.3)\".format(\n \"\" if can_symlink else \"not supported - \"\n ),\n default=False,\n )\n\n def run(self, creator):\n if not self.enabled:\n return\n base_cache = self.app_data_dir / creator.interpreter.version_release_str\n with self._get_seed_wheels(creator, base_cache) as name_to_whl:\n pip_version = name_to_whl[\"pip\"].stem.split(\"-\")[1]\n installer_class = self.installer_class(pip_version)\n\n def _install(name, wheel):\n logging.debug(\"install %s from wheel %s via %s\", name, wheel, installer_class.__name__)\n image_folder = base_cache.path / \"image\" / installer_class.__name__ / wheel.stem\n installer = installer_class(wheel, creator, image_folder)\n if self.clear:\n installer.clear()\n if not installer.has_image():\n installer.build_image()\n installer.install()\n\n threads = list(Thread(target=_install, args=(n, w)) for n, w in name_to_whl.items())\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n @contextmanager\n def _get_seed_wheels(self, creator, base_cache):\n with base_cache.lock_for_key(\"wheels\"):\n wheels_to = base_cache.path / \"wheels\"\n if self.clear and wheels_to.exists():\n shutil.rmtree(six.ensure_text(str(wheels_to)))\n wheels_to.mkdir(parents=True, exist_ok=True)\n name_to_whl, lock = {}, Lock()\n\n def _get(package, version):\n result = get_wheels(\n creator.interpreter.version_release_str,\n wheels_to,\n self.extra_search_dir,\n self.download,\n {package: version},\n )\n with lock:\n name_to_whl.update(result)\n\n threads = list(Thread(target=_get, args=(pkg, v)) for pkg, v in 
self.package_version().items())\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n yield name_to_whl\n\n def installer_class(self, pip_version):\n if self.symlinks:\n # symlink support requires pip 19.3+\n pip_version_int = tuple(int(i) for i in pip_version.split(\".\")[0:2])\n if pip_version_int >= (19, 3):\n return SymlinkPipInstall\n return CopyPipInstall\n\n def __unicode__(self):\n return super(FromAppData, self).__unicode__() + \" app_data_dir={} via={}\".format(\n self.app_data_dir.path, \"symlink\" if self.symlinks else \"copy\"\n )\n", "path": "src/virtualenv/seed/via_app_data/via_app_data.py"}]}
2,162
491
gh_patches_debug_41142
rasdani/github-patches
git_diff
yt-dlp__yt-dlp-5677
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Reddit] Unable to download videos inside Reddit text posts. ### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE - [X] I understand that I will be **blocked** if I remove or skip any mandatory\* field ### Checklist - [X] I'm reporting a broken site - [X] I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit) - [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details - [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command) - [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates - [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue) - [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required ### Region United States ### Provide a description that is worded well enough to be understood yt-dlp doesn't download any videos on Reddit text posts that contain multiple videos, it should download all videos. The command `yt-dlp "https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/"` returns the following: ``` [Reddit] wzqkxp: Downloading JSON metadata ERROR: [Reddit] wzqkxp: No media found ``` ### Provide verbose output that clearly demonstrates the problem - [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`) - [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below ### Complete Verbose Output ```shell yt-dlp -vU "https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/" [debug] Command-line config: ['-vU', 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/'] [debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8 (No ANSI), error utf-8, screen utf-8 (No ANSI) [debug] yt-dlp version 2022.11.11 [8b64402] (pip) [debug] Python 3.9.13 (CPython arm64 64bit) - macOS-13.0.1-arm64-arm-64bit (OpenSSL 1.1.1o 3 May 2022) [debug] exe versions: ffmpeg 5.0.1 (setts), ffprobe 5.0.1, rtmpdump 2.4 [debug] Optional libraries: Cryptodome-3.14.1, brotli-None, certifi-2022.09.24, mutagen-1.45.1, secretstorage-3.3.3, sqlite3-2.6.0, websockets-10.1 [debug] Proxy map: {} [debug] Loaded 1723 extractors [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest Latest version: 2022.11.11, Current version: 2022.11.11 yt-dlp is up to date (2022.11.11) [debug] [Reddit] Extracting URL: https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/ [Reddit] wzqkxp: Downloading JSON metadata ERROR: [Reddit] wzqkxp: No media found File "/opt/homebrew/lib/python3.9/site-packages/yt_dlp/extractor/common.py", line 674, in extract ie_result = self._real_extract(url) File "/opt/homebrew/lib/python3.9/site-packages/yt_dlp/extractor/reddit.py", line 107, in _real_extract raise 
ExtractorError('No media found', expected=True) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `yt_dlp/extractor/reddit.py` Content: ``` 1 import random 2 from urllib.parse import urlparse 3 4 from .common import InfoExtractor 5 from ..utils import ( 6 ExtractorError, 7 int_or_none, 8 float_or_none, 9 try_get, 10 unescapeHTML, 11 url_or_none, 12 traverse_obj 13 ) 14 15 16 class RedditIE(InfoExtractor): 17 _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))' 18 _TESTS = [{ 19 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', 20 'info_dict': { 21 'id': 'zv89llsvexdz', 22 'ext': 'mp4', 23 'display_id': '6rrwyj', 24 'title': 'That small heart attack.', 25 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 26 'thumbnails': 'count:4', 27 'timestamp': 1501941939, 28 'upload_date': '20170805', 29 'uploader': 'Antw87', 30 'duration': 12, 31 'like_count': int, 32 'dislike_count': int, 33 'comment_count': int, 34 'age_limit': 0, 35 }, 36 'params': { 37 'skip_download': True, 38 }, 39 }, { 40 # 1080p fallback format 41 'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/', 42 'md5': '8b5902cfda3006bf90faea7adf765a49', 43 'info_dict': { 44 'id': 'gyh95hiqc0b11', 45 'ext': 'mp4', 46 'display_id': '90bu6w', 47 'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead', 48 'thumbnail': r're:^https?://.*\.(?:jpg|png)', 49 'thumbnails': 'count:7', 50 'timestamp': 1532051078, 51 'upload_date': '20180720', 52 'uploader': 'FootLoosePickleJuice', 53 'duration': 14, 54 'like_count': int, 55 'dislike_count': int, 56 'comment_count': int, 57 'age_limit': 0, 58 }, 59 }, { 60 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 61 'only_matching': True, 62 }, { 63 # imgur 64 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 65 'only_matching': True, 66 }, { 67 # imgur @ old reddit 68 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', 69 'only_matching': True, 70 }, { 71 # streamable 72 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', 73 'only_matching': True, 74 }, { 75 # youtube 76 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', 77 'only_matching': True, 78 }, { 79 # reddit video @ nm reddit 80 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/', 81 'only_matching': True, 82 }, { 83 'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/', 84 'only_matching': True, 85 }] 86 87 @staticmethod 88 def _gen_session_id(): 89 id_length = 16 90 rand_max = 1 << (id_length * 4) 91 return '%0.*x' % (id_length, random.randrange(rand_max)) 92 93 def _real_extract(self, url): 94 subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id') 95 96 self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id()) 97 self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D') 98 data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False) 99 if not data: 100 # Fall back to old.reddit.com in case the requested subdomain fails 101 data = 
self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id) 102 data = data[0]['data']['children'][0]['data'] 103 video_url = data['url'] 104 105 # Avoid recursing into the same reddit URL 106 if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: 107 raise ExtractorError('No media found', expected=True) 108 109 over_18 = data.get('over_18') 110 if over_18 is True: 111 age_limit = 18 112 elif over_18 is False: 113 age_limit = 0 114 else: 115 age_limit = None 116 117 thumbnails = [] 118 119 def add_thumbnail(src): 120 if not isinstance(src, dict): 121 return 122 thumbnail_url = url_or_none(src.get('url')) 123 if not thumbnail_url: 124 return 125 thumbnails.append({ 126 'url': unescapeHTML(thumbnail_url), 127 'width': int_or_none(src.get('width')), 128 'height': int_or_none(src.get('height')), 129 }) 130 131 for image in try_get(data, lambda x: x['preview']['images']) or []: 132 if not isinstance(image, dict): 133 continue 134 add_thumbnail(image.get('source')) 135 resolutions = image.get('resolutions') 136 if isinstance(resolutions, list): 137 for resolution in resolutions: 138 add_thumbnail(resolution) 139 140 info = { 141 'title': data.get('title'), 142 'thumbnails': thumbnails, 143 'timestamp': float_or_none(data.get('created_utc')), 144 'uploader': data.get('author'), 145 'like_count': int_or_none(data.get('ups')), 146 'dislike_count': int_or_none(data.get('downs')), 147 'comment_count': int_or_none(data.get('num_comments')), 148 'age_limit': age_limit, 149 } 150 151 # Check if media is hosted on reddit: 152 reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False) 153 if reddit_video: 154 playlist_urls = [ 155 try_get(reddit_video, lambda x: unescapeHTML(x[y])) 156 for y in ('dash_url', 'hls_url') 157 ] 158 159 # Update video_id 160 display_id = video_id 161 video_id = self._search_regex( 162 r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'], 163 'video_id', default=display_id) 164 165 dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd' 166 hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8' 167 168 formats = [{ 169 'url': unescapeHTML(reddit_video['fallback_url']), 170 'height': int_or_none(reddit_video.get('height')), 171 'width': int_or_none(reddit_video.get('width')), 172 'tbr': int_or_none(reddit_video.get('bitrate_kbps')), 173 'acodec': 'none', 174 'vcodec': 'h264', 175 'ext': 'mp4', 176 'format_id': 'fallback', 177 'format_note': 'DASH video, mp4_dash', 178 }] 179 formats.extend(self._extract_m3u8_formats( 180 hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)) 181 formats.extend(self._extract_mpd_formats( 182 dash_playlist_url, display_id, mpd_id='dash', fatal=False)) 183 184 return { 185 **info, 186 'id': video_id, 187 'display_id': display_id, 188 'formats': formats, 189 'duration': int_or_none(reddit_video.get('duration')), 190 } 191 192 parsed_url = urlparse(video_url) 193 if parsed_url.netloc == 'v.redd.it': 194 self.raise_no_formats('This video is processing', expected=True, video_id=video_id) 195 return { 196 **info, 197 'id': parsed_url.path.split('/')[1], 198 'display_id': video_id, 199 } 200 201 # Not hosted on reddit, must continue extraction 202 return { 203 **info, 204 'display_id': video_id, 205 '_type': 'url_transparent', 206 'url': video_url, 207 } 208 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format 
fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/yt_dlp/extractor/reddit.py b/yt_dlp/extractor/reddit.py --- a/yt_dlp/extractor/reddit.py +++ b/yt_dlp/extractor/reddit.py @@ -1,15 +1,15 @@ import random -from urllib.parse import urlparse +import urllib.parse from .common import InfoExtractor from ..utils import ( ExtractorError, - int_or_none, float_or_none, + int_or_none, + traverse_obj, try_get, unescapeHTML, url_or_none, - traverse_obj ) @@ -56,6 +56,14 @@ 'comment_count': int, 'age_limit': 0, }, + }, { + # videos embedded in reddit text post + 'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/', + 'playlist_count': 2, + 'info_dict': { + 'id': 'wzqkxp', + 'title': 'md5:72d3d19402aa11eff5bd32fc96369b37', + }, }, { 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', 'only_matching': True, @@ -102,10 +110,6 @@ data = data[0]['data']['children'][0]['data'] video_url = data['url'] - # Avoid recursing into the same reddit URL - if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: - raise ExtractorError('No media found', expected=True) - over_18 = data.get('over_18') if over_18 is True: age_limit = 18 @@ -148,6 +152,32 @@ 'age_limit': age_limit, } + parsed_url = urllib.parse.urlparse(video_url) + + # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL + if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path: + entries = [] + for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict): + if not media.get('id') or media.get('e') != 'RedditVideo': + continue + formats = [] + if media.get('hlsUrl'): + formats.extend(self._extract_m3u8_formats( + unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False)) + if media.get('dashUrl'): + formats.extend(self._extract_mpd_formats( + unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False)) + if formats: + entries.append({ + 'id': media['id'], + 'display_id': video_id, + 'formats': formats, + **info, + }) + if entries: + return self.playlist_result(entries, video_id, info.get('title')) + raise ExtractorError('No media found', expected=True) + # Check if media is hosted on reddit: reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False) if reddit_video: @@ -189,7 +219,6 @@ 'duration': int_or_none(reddit_video.get('duration')), } - parsed_url = urlparse(video_url) if parsed_url.netloc == 'v.redd.it': self.raise_no_formats('This video is processing', expected=True, video_id=video_id) return {
{"golden_diff": "diff --git a/yt_dlp/extractor/reddit.py b/yt_dlp/extractor/reddit.py\n--- a/yt_dlp/extractor/reddit.py\n+++ b/yt_dlp/extractor/reddit.py\n@@ -1,15 +1,15 @@\n import random\n-from urllib.parse import urlparse\n+import urllib.parse\n \n from .common import InfoExtractor\n from ..utils import (\n ExtractorError,\n- int_or_none,\n float_or_none,\n+ int_or_none,\n+ traverse_obj,\n try_get,\n unescapeHTML,\n url_or_none,\n- traverse_obj\n )\n \n \n@@ -56,6 +56,14 @@\n 'comment_count': int,\n 'age_limit': 0,\n },\n+ }, {\n+ # videos embedded in reddit text post\n+ 'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',\n+ 'playlist_count': 2,\n+ 'info_dict': {\n+ 'id': 'wzqkxp',\n+ 'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',\n+ },\n }, {\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',\n 'only_matching': True,\n@@ -102,10 +110,6 @@\n data = data[0]['data']['children'][0]['data']\n video_url = data['url']\n \n- # Avoid recursing into the same reddit URL\n- if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:\n- raise ExtractorError('No media found', expected=True)\n-\n over_18 = data.get('over_18')\n if over_18 is True:\n age_limit = 18\n@@ -148,6 +152,32 @@\n 'age_limit': age_limit,\n }\n \n+ parsed_url = urllib.parse.urlparse(video_url)\n+\n+ # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL\n+ if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:\n+ entries = []\n+ for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):\n+ if not media.get('id') or media.get('e') != 'RedditVideo':\n+ continue\n+ formats = []\n+ if media.get('hlsUrl'):\n+ formats.extend(self._extract_m3u8_formats(\n+ unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))\n+ if media.get('dashUrl'):\n+ formats.extend(self._extract_mpd_formats(\n+ unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))\n+ if formats:\n+ entries.append({\n+ 'id': media['id'],\n+ 'display_id': video_id,\n+ 'formats': formats,\n+ **info,\n+ })\n+ if entries:\n+ return self.playlist_result(entries, video_id, info.get('title'))\n+ raise ExtractorError('No media found', expected=True)\n+\n # Check if media is hosted on reddit:\n reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)\n if reddit_video:\n@@ -189,7 +219,6 @@\n 'duration': int_or_none(reddit_video.get('duration')),\n }\n \n- parsed_url = urlparse(video_url)\n if parsed_url.netloc == 'v.redd.it':\n self.raise_no_formats('This video is processing', expected=True, video_id=video_id)\n return {\n", "issue": "[Reddit] Unable to download videos inside Reddit text posts.\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\r\n\r\n- [X] I understand that I will be **blocked** if I remove or skip any mandatory\\* field\r\n\r\n### Checklist\r\n\r\n- [X] I'm reporting a broken site\r\n- [X] I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\r\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\r\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\r\n- [X] I've searched 
the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\r\n\r\n### Region\r\n\r\nUnited States\r\n\r\n### Provide a description that is worded well enough to be understood\r\n\r\nyt-dlp doesn't download any videos on Reddit text posts that contain multiple videos, it should download all videos. The command `yt-dlp \"https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/\"` returns the following:\r\n\r\n```\r\n[Reddit] wzqkxp: Downloading JSON metadata\r\nERROR: [Reddit] wzqkxp: No media found\r\n```\r\n\r\n### Provide verbose output that clearly demonstrates the problem\r\n\r\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\r\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\r\n\r\n### Complete Verbose Output\r\n\r\n```shell\r\nyt-dlp -vU \"https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/\"\r\n\r\n[debug] Command-line config: ['-vU', 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8 (No ANSI), error utf-8, screen utf-8 (No ANSI)\r\n[debug] yt-dlp version 2022.11.11 [8b64402] (pip)\r\n[debug] Python 3.9.13 (CPython arm64 64bit) - macOS-13.0.1-arm64-arm-64bit (OpenSSL 1.1.1o 3 May 2022)\r\n[debug] exe versions: ffmpeg 5.0.1 (setts), ffprobe 5.0.1, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.14.1, brotli-None, certifi-2022.09.24, mutagen-1.45.1, secretstorage-3.3.3, sqlite3-2.6.0, websockets-10.1\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1723 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nLatest version: 2022.11.11, Current version: 2022.11.11\r\nyt-dlp is up to date (2022.11.11)\r\n[debug] [Reddit] Extracting URL: https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/\r\n[Reddit] wzqkxp: Downloading JSON metadata\r\nERROR: [Reddit] wzqkxp: No media found\r\n File \"/opt/homebrew/lib/python3.9/site-packages/yt_dlp/extractor/common.py\", line 674, in extract\r\n ie_result = self._real_extract(url)\r\n File \"/opt/homebrew/lib/python3.9/site-packages/yt_dlp/extractor/reddit.py\", line 107, in _real_extract\r\n raise ExtractorError('No media found', expected=True)\r\n```\r\n\n", "before_files": [{"content": "import random\nfrom urllib.parse import urlparse\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n float_or_none,\n try_get,\n unescapeHTML,\n url_or_none,\n traverse_obj\n)\n\n\nclass RedditIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<subdomain>[^/]+\\.)?reddit(?:media)?\\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'\n _TESTS = [{\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'display_id': '6rrwyj',\n 'title': 'That small heart attack.',\n 'thumbnail': r're:^https?://.*\\.(?:jpg|png)',\n 'thumbnails': 'count:4',\n 
'timestamp': 1501941939,\n 'upload_date': '20170805',\n 'uploader': 'Antw87',\n 'duration': 12,\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n # 1080p fallback format\n 'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',\n 'md5': '8b5902cfda3006bf90faea7adf765a49',\n 'info_dict': {\n 'id': 'gyh95hiqc0b11',\n 'ext': 'mp4',\n 'display_id': '90bu6w',\n 'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',\n 'thumbnail': r're:^https?://.*\\.(?:jpg|png)',\n 'thumbnails': 'count:7',\n 'timestamp': 1532051078,\n 'upload_date': '20180720',\n 'uploader': 'FootLoosePickleJuice',\n 'duration': 14,\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n }, {\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',\n 'only_matching': True,\n }, {\n # imgur\n 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # imgur @ old reddit\n 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # streamable\n 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',\n 'only_matching': True,\n }, {\n # youtube\n 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',\n 'only_matching': True,\n }, {\n # reddit video @ nm reddit\n 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',\n 'only_matching': True,\n }, {\n 'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _gen_session_id():\n id_length = 16\n rand_max = 1 << (id_length * 4)\n return '%0.*x' % (id_length, random.randrange(rand_max))\n\n def _real_extract(self, url):\n subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')\n\n self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())\n self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')\n data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)\n if not data:\n # Fall back to old.reddit.com in case the requested subdomain fails\n data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)\n data = data[0]['data']['children'][0]['data']\n video_url = data['url']\n\n # Avoid recursing into the same reddit URL\n if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:\n raise ExtractorError('No media found', expected=True)\n\n over_18 = data.get('over_18')\n if over_18 is True:\n age_limit = 18\n elif over_18 is False:\n age_limit = 0\n else:\n age_limit = None\n\n thumbnails = []\n\n def add_thumbnail(src):\n if not isinstance(src, dict):\n return\n thumbnail_url = url_or_none(src.get('url'))\n if not thumbnail_url:\n return\n thumbnails.append({\n 'url': unescapeHTML(thumbnail_url),\n 'width': int_or_none(src.get('width')),\n 'height': int_or_none(src.get('height')),\n })\n\n for image in try_get(data, lambda x: x['preview']['images']) or []:\n if not isinstance(image, dict):\n continue\n add_thumbnail(image.get('source'))\n resolutions = image.get('resolutions')\n if isinstance(resolutions, list):\n for resolution in resolutions:\n 
add_thumbnail(resolution)\n\n info = {\n 'title': data.get('title'),\n 'thumbnails': thumbnails,\n 'timestamp': float_or_none(data.get('created_utc')),\n 'uploader': data.get('author'),\n 'like_count': int_or_none(data.get('ups')),\n 'dislike_count': int_or_none(data.get('downs')),\n 'comment_count': int_or_none(data.get('num_comments')),\n 'age_limit': age_limit,\n }\n\n # Check if media is hosted on reddit:\n reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)\n if reddit_video:\n playlist_urls = [\n try_get(reddit_video, lambda x: unescapeHTML(x[y]))\n for y in ('dash_url', 'hls_url')\n ]\n\n # Update video_id\n display_id = video_id\n video_id = self._search_regex(\n r'https?://v\\.redd\\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],\n 'video_id', default=display_id)\n\n dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'\n hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'\n\n formats = [{\n 'url': unescapeHTML(reddit_video['fallback_url']),\n 'height': int_or_none(reddit_video.get('height')),\n 'width': int_or_none(reddit_video.get('width')),\n 'tbr': int_or_none(reddit_video.get('bitrate_kbps')),\n 'acodec': 'none',\n 'vcodec': 'h264',\n 'ext': 'mp4',\n 'format_id': 'fallback',\n 'format_note': 'DASH video, mp4_dash',\n }]\n formats.extend(self._extract_m3u8_formats(\n hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False))\n formats.extend(self._extract_mpd_formats(\n dash_playlist_url, display_id, mpd_id='dash', fatal=False))\n\n return {\n **info,\n 'id': video_id,\n 'display_id': display_id,\n 'formats': formats,\n 'duration': int_or_none(reddit_video.get('duration')),\n }\n\n parsed_url = urlparse(video_url)\n if parsed_url.netloc == 'v.redd.it':\n self.raise_no_formats('This video is processing', expected=True, video_id=video_id)\n return {\n **info,\n 'id': parsed_url.path.split('/')[1],\n 'display_id': video_id,\n }\n\n # Not hosted on reddit, must continue extraction\n return {\n **info,\n 'display_id': video_id,\n '_type': 'url_transparent',\n 'url': video_url,\n }\n", "path": "yt_dlp/extractor/reddit.py"}], "after_files": [{"content": "import random\nimport urllib.parse\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n float_or_none,\n int_or_none,\n traverse_obj,\n try_get,\n unescapeHTML,\n url_or_none,\n)\n\n\nclass RedditIE(InfoExtractor):\n _VALID_URL = r'https?://(?P<subdomain>[^/]+\\.)?reddit(?:media)?\\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'\n _TESTS = [{\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',\n 'info_dict': {\n 'id': 'zv89llsvexdz',\n 'ext': 'mp4',\n 'display_id': '6rrwyj',\n 'title': 'That small heart attack.',\n 'thumbnail': r're:^https?://.*\\.(?:jpg|png)',\n 'thumbnails': 'count:4',\n 'timestamp': 1501941939,\n 'upload_date': '20170805',\n 'uploader': 'Antw87',\n 'duration': 12,\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n # 1080p fallback format\n 'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',\n 'md5': '8b5902cfda3006bf90faea7adf765a49',\n 'info_dict': {\n 'id': 'gyh95hiqc0b11',\n 'ext': 'mp4',\n 'display_id': '90bu6w',\n 'title': 'Heat index was 110 degrees so we offered him a cold drink. 
He went for a full body soak instead',\n 'thumbnail': r're:^https?://.*\\.(?:jpg|png)',\n 'thumbnails': 'count:7',\n 'timestamp': 1532051078,\n 'upload_date': '20180720',\n 'uploader': 'FootLoosePickleJuice',\n 'duration': 14,\n 'like_count': int,\n 'dislike_count': int,\n 'comment_count': int,\n 'age_limit': 0,\n },\n }, {\n # videos embedded in reddit text post\n 'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',\n 'playlist_count': 2,\n 'info_dict': {\n 'id': 'wzqkxp',\n 'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',\n },\n }, {\n 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',\n 'only_matching': True,\n }, {\n # imgur\n 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # imgur @ old reddit\n 'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',\n 'only_matching': True,\n }, {\n # streamable\n 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',\n 'only_matching': True,\n }, {\n # youtube\n 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',\n 'only_matching': True,\n }, {\n # reddit video @ nm reddit\n 'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',\n 'only_matching': True,\n }, {\n 'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _gen_session_id():\n id_length = 16\n rand_max = 1 << (id_length * 4)\n return '%0.*x' % (id_length, random.randrange(rand_max))\n\n def _real_extract(self, url):\n subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')\n\n self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())\n self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')\n data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)\n if not data:\n # Fall back to old.reddit.com in case the requested subdomain fails\n data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)\n data = data[0]['data']['children'][0]['data']\n video_url = data['url']\n\n over_18 = data.get('over_18')\n if over_18 is True:\n age_limit = 18\n elif over_18 is False:\n age_limit = 0\n else:\n age_limit = None\n\n thumbnails = []\n\n def add_thumbnail(src):\n if not isinstance(src, dict):\n return\n thumbnail_url = url_or_none(src.get('url'))\n if not thumbnail_url:\n return\n thumbnails.append({\n 'url': unescapeHTML(thumbnail_url),\n 'width': int_or_none(src.get('width')),\n 'height': int_or_none(src.get('height')),\n })\n\n for image in try_get(data, lambda x: x['preview']['images']) or []:\n if not isinstance(image, dict):\n continue\n add_thumbnail(image.get('source'))\n resolutions = image.get('resolutions')\n if isinstance(resolutions, list):\n for resolution in resolutions:\n add_thumbnail(resolution)\n\n info = {\n 'title': data.get('title'),\n 'thumbnails': thumbnails,\n 'timestamp': float_or_none(data.get('created_utc')),\n 'uploader': data.get('author'),\n 'like_count': int_or_none(data.get('ups')),\n 'dislike_count': int_or_none(data.get('downs')),\n 'comment_count': int_or_none(data.get('num_comments')),\n 'age_limit': age_limit,\n }\n\n parsed_url = urllib.parse.urlparse(video_url)\n\n # Check for embeds in text posts, or else 
raise to avoid recursing into the same reddit URL\n if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:\n entries = []\n for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):\n if not media.get('id') or media.get('e') != 'RedditVideo':\n continue\n formats = []\n if media.get('hlsUrl'):\n formats.extend(self._extract_m3u8_formats(\n unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))\n if media.get('dashUrl'):\n formats.extend(self._extract_mpd_formats(\n unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))\n if formats:\n entries.append({\n 'id': media['id'],\n 'display_id': video_id,\n 'formats': formats,\n **info,\n })\n if entries:\n return self.playlist_result(entries, video_id, info.get('title'))\n raise ExtractorError('No media found', expected=True)\n\n # Check if media is hosted on reddit:\n reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)\n if reddit_video:\n playlist_urls = [\n try_get(reddit_video, lambda x: unescapeHTML(x[y]))\n for y in ('dash_url', 'hls_url')\n ]\n\n # Update video_id\n display_id = video_id\n video_id = self._search_regex(\n r'https?://v\\.redd\\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],\n 'video_id', default=display_id)\n\n dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'\n hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'\n\n formats = [{\n 'url': unescapeHTML(reddit_video['fallback_url']),\n 'height': int_or_none(reddit_video.get('height')),\n 'width': int_or_none(reddit_video.get('width')),\n 'tbr': int_or_none(reddit_video.get('bitrate_kbps')),\n 'acodec': 'none',\n 'vcodec': 'h264',\n 'ext': 'mp4',\n 'format_id': 'fallback',\n 'format_note': 'DASH video, mp4_dash',\n }]\n formats.extend(self._extract_m3u8_formats(\n hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False))\n formats.extend(self._extract_mpd_formats(\n dash_playlist_url, display_id, mpd_id='dash', fatal=False))\n\n return {\n **info,\n 'id': video_id,\n 'display_id': display_id,\n 'formats': formats,\n 'duration': int_or_none(reddit_video.get('duration')),\n }\n\n if parsed_url.netloc == 'v.redd.it':\n self.raise_no_formats('This video is processing', expected=True, video_id=video_id)\n return {\n **info,\n 'id': parsed_url.path.split('/')[1],\n 'display_id': video_id,\n }\n\n # Not hosted on reddit, must continue extraction\n return {\n **info,\n 'display_id': video_id,\n '_type': 'url_transparent',\n 'url': video_url,\n }\n", "path": "yt_dlp/extractor/reddit.py"}]}
num_tokens: 3,955
num_tokens_diff: 865
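The reddit.py patch in the record above finds videos embedded in text posts by walking the post's `media_metadata` map and keeping only `RedditVideo` entries. Below is a minimal standalone sketch of that walk, written without yt-dlp's extractor helpers; the sample payload at the bottom is a hypothetical, trimmed-down illustration, not a real API response.

```python
# Sketch: collect HLS/DASH playlist URLs for videos embedded in a Reddit
# text post, mirroring the media_metadata walk in the patch above.
from html import unescape


def embedded_reddit_videos(post_data):
    """Yield (media_id, hls_url, dash_url) for RedditVideo entries."""
    for media in (post_data.get('media_metadata') or {}).values():
        if not isinstance(media, dict) or media.get('e') != 'RedditVideo':
            continue
        if not media.get('id'):
            continue
        # Reddit HTML-escapes URLs inside the JSON payload (&amp; and friends).
        hls = unescape(media['hlsUrl']) if media.get('hlsUrl') else None
        dash = unescape(media['dashUrl']) if media.get('dashUrl') else None
        if hls or dash:
            yield media['id'], hls, dash


if __name__ == '__main__':
    sample_post = {  # hypothetical payload for illustration only
        'media_metadata': {
            'abc123': {
                'e': 'RedditVideo',
                'id': 'abc123',
                'hlsUrl': 'https://v.redd.it/abc123/HLSPlaylist.m3u8?a=1&amp;b=2',
                'dashUrl': 'https://v.redd.it/abc123/DASHPlaylist.mpd',
            },
            'img456': {'e': 'Image', 'id': 'img456'},
        },
    }
    for media_id, hls, dash in embedded_reddit_videos(sample_post):
        print(media_id, hls, dash)
```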
problem_id: gh_patches_debug_3606
source: rasdani/github-patches
task_type: git_diff
in_source_id: OBOFoundry__OBOFoundry.github.io-802
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- travis on master failing, due to metadata violations from new jsonschema checks There are two things wrong: - the validate script assumes a util/reports folder - hp is failing; we already know that hp has a custom license and this should be reported elsewhere and is not a schema violation --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `util/validate-metadata.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import ast 4 import sys 5 import json 6 import jsonschema 7 import re 8 9 # file paths 10 data_file = "../registry/ontologies.jsonld" 11 schema_file = "metadata-schema.json" 12 schema_lite_file = "metadata-schema-lite.json" 13 report_file = "reports/metadata-violations.csv" 14 15 # ultra-escaped regex strings 16 email_sub = 'does not match \'\\^\\[\\^@\\]\\+\\$\'' 17 fmt_sub = ('does not match \'\\^\\[0\\-9A\\-Za\\-z\\-_\\\\\\\\/\\]\\+' 18 '\\\\\\\\.\\(owl\\|obo\\|json\\|omn\\|ofn\\|owx\\|ttl\\|owl' 19 '\\\\\\\\.gz\\)\\$\'') 20 21 def validate(): 22 """ 23 Validate registry metadata. 24 """ 25 print("--- validating metadata against {0} ---".format(schema_file)) 26 data = load_data() 27 schema = load_schema() 28 # validate each object 29 errors = {} 30 for item in data["ontologies"]: 31 if 'is_obsolete' in item and item["is_obsolete"] is True: 32 continue 33 # skip any 'validate: false' ontologies 34 if 'validate' in item and item["validate"] is False: 35 continue 36 ont_id = item["id"] 37 try: 38 jsonschema.validate(item, schema) 39 except jsonschema.exceptions.ValidationError as ve: 40 print("ERROR in {0}".format(ont_id)) 41 errors[ont_id] = format_msg(ve) 42 if errors: 43 write_errors(errors) 44 else: 45 print("SUCCESS - no errors found in metadata") 46 sys.exit(0) 47 48 def format_msg(ve): 49 """ 50 Format exception message from jsonchema.validate(...). 51 """ 52 # replace u characters 53 replace_u = re.sub('u\'', '\'', ve.message) 54 # replace scary regex strings 55 replace_email = re.sub( 56 email_sub, 'is not valid for \'contact.label\'', replace_u) 57 msg = re.sub(fmt_sub, 'is not valid for \'products.id\'', replace_email) 58 59 # check if output is for license error 60 is_license = re.search('({\'url\'.+?\'label\'.+?})', msg) 61 if is_license: 62 return format_license_msg(is_license.group(1)) 63 64 # check if output is for list error 65 is_list = re.search('(\\[.+?\\]) is not of type \'string\'', msg) 66 if is_list: 67 return format_list_msg(is_list.group(1), ve) 68 69 # otherwise return the message 70 return msg 71 72 def format_license_msg(substr): 73 """ 74 Format an exception message for a license issue. 75 """ 76 # process to dict 77 d = json.loads(substr.replace('\'', '"')) 78 url = d['url'] 79 label = d['label'] 80 return '\'{0}\' <{1}> is not valid for \'license\''.format(label, url) 81 82 def format_list_msg(substr, ve): 83 """ 84 Format an exception for an unexpected list. 85 """ 86 l = json.loads(substr.replace('\'', '"')) 87 # use the full message to find the violating property 88 prop_find = re.search('On instance\\[(\'.+?\')\\]', str(ve)) 89 if prop_find: 90 prop = prop_find.group(1) 91 return '{0} expects one value, got {1}'.format(prop, len(l)) 92 else: 93 return substr 94 95 def load_schema(): 96 """ 97 Load the schema to validate against. 
98 """ 99 # read the schema 100 with open(schema_file) as f: 101 schema = json.load(f) 102 return schema 103 104 def load_data(): 105 """ 106 Load the data to validate. 107 """ 108 # read the JSON-LD data 109 with open(data_file) as f: 110 data = json.load(f) 111 return data 112 113 def write_errors(errors): 114 """ 115 Write validation errors to a user-friendly report. 116 """ 117 with open(report_file, 'w+') as f: 118 f.write("ID,ERROR\n") 119 for ont_id, msg in errors.items(): 120 f.write('"' + ont_id + '","' + msg + '"\n') 121 print( 122 "VALIDATION FAILED: {0} errors - see {1} for details".format( 123 len(errors), report_file)) 124 sys.exit(1) 125 126 # run the process! 127 if __name__ == '__main__': 128 validate() 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/util/validate-metadata.py b/util/validate-metadata.py --- a/util/validate-metadata.py +++ b/util/validate-metadata.py @@ -7,9 +7,9 @@ import re # file paths -data_file = "../registry/ontologies.jsonld" -schema_file = "metadata-schema.json" -schema_lite_file = "metadata-schema-lite.json" +data_file = "registry/ontologies.jsonld" +schema_file = "util/metadata-schema.json" +schema_lite_file = "util/metadata-schema-lite.json" report_file = "reports/metadata-violations.csv" # ultra-escaped regex strings
{"golden_diff": "diff --git a/util/validate-metadata.py b/util/validate-metadata.py\n--- a/util/validate-metadata.py\n+++ b/util/validate-metadata.py\n@@ -7,9 +7,9 @@\n import re\n \n # file paths\n-data_file = \"../registry/ontologies.jsonld\"\n-schema_file = \"metadata-schema.json\"\n-schema_lite_file = \"metadata-schema-lite.json\"\n+data_file = \"registry/ontologies.jsonld\"\n+schema_file = \"util/metadata-schema.json\"\n+schema_lite_file = \"util/metadata-schema-lite.json\"\n report_file = \"reports/metadata-violations.csv\"\n \n # ultra-escaped regex strings\n", "issue": "travis on master failing, due to metadata violations from new jsonschema checks\nThere are two things wrong:\r\n\r\n - the validate script assumes a util/reports folder\r\n - hp is failing; we already know that hp has a custom license and this should be reported elsewhere and is not a schema violation\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport ast\nimport sys\nimport json\nimport jsonschema\nimport re\n\n# file paths\ndata_file = \"../registry/ontologies.jsonld\"\nschema_file = \"metadata-schema.json\"\nschema_lite_file = \"metadata-schema-lite.json\"\nreport_file = \"reports/metadata-violations.csv\"\n\n# ultra-escaped regex strings\nemail_sub = 'does not match \\'\\\\^\\\\[\\\\^@\\\\]\\\\+\\\\$\\''\nfmt_sub = ('does not match \\'\\\\^\\\\[0\\\\-9A\\\\-Za\\\\-z\\\\-_\\\\\\\\\\\\\\\\/\\\\]\\\\+'\n '\\\\\\\\\\\\\\\\.\\\\(owl\\\\|obo\\\\|json\\\\|omn\\\\|ofn\\\\|owx\\\\|ttl\\\\|owl'\n '\\\\\\\\\\\\\\\\.gz\\\\)\\\\$\\'')\n\ndef validate():\n\t\"\"\"\n\tValidate registry metadata.\n\t\"\"\"\n\tprint(\"--- validating metadata against {0} ---\".format(schema_file))\n\tdata = load_data()\n\tschema = load_schema()\n\t# validate each object\n\terrors = {}\n\tfor item in data[\"ontologies\"]:\n\t\tif 'is_obsolete' in item and item[\"is_obsolete\"] is True:\n\t\t\tcontinue\n\t\t# skip any 'validate: false' ontologies\n\t\tif 'validate' in item and item[\"validate\"] is False:\n\t\t\tcontinue\n\t\tont_id = item[\"id\"]\n\t\ttry:\n\t\t\tjsonschema.validate(item, schema)\n\t\texcept jsonschema.exceptions.ValidationError as ve:\n\t\t\tprint(\"ERROR in {0}\".format(ont_id))\n\t\t\terrors[ont_id] = format_msg(ve)\n\tif errors:\n\t\twrite_errors(errors)\n\telse:\n\t\tprint(\"SUCCESS - no errors found in metadata\")\n\t\tsys.exit(0)\n\ndef format_msg(ve):\n\t\"\"\"\n\tFormat exception message from jsonchema.validate(...).\n\t\"\"\"\n\t# replace u characters\n\treplace_u = re.sub('u\\'', '\\'', ve.message)\n\t# replace scary regex strings\n\treplace_email = re.sub(\n\t\temail_sub, 'is not valid for \\'contact.label\\'', replace_u)\n\tmsg = re.sub(fmt_sub, 'is not valid for \\'products.id\\'', replace_email)\n\n\t# check if output is for license error\n\tis_license = re.search('({\\'url\\'.+?\\'label\\'.+?})', msg)\n\tif is_license:\n\t\treturn format_license_msg(is_license.group(1))\n\n\t# check if output is for list error\n\tis_list = re.search('(\\\\[.+?\\\\]) is not of type \\'string\\'', msg)\n\tif is_list:\n\t\treturn format_list_msg(is_list.group(1), ve)\n\n\t# otherwise return the message\n\treturn msg\n\ndef format_license_msg(substr):\n\t\"\"\"\n\tFormat an exception message for a license issue.\n\t\"\"\"\n\t# process to dict\n\td = json.loads(substr.replace('\\'', '\"'))\n\turl = d['url']\n\tlabel = d['label']\n\treturn '\\'{0}\\' <{1}> is not valid for \\'license\\''.format(label, url)\n\ndef format_list_msg(substr, ve):\n\t\"\"\"\n\tFormat an exception for an unexpected list.\n\t\"\"\"\n\tl = 
json.loads(substr.replace('\\'', '\"'))\n\t# use the full message to find the violating property\n\tprop_find = re.search('On instance\\\\[(\\'.+?\\')\\\\]', str(ve))\n\tif prop_find:\n\t\tprop = prop_find.group(1)\n\t\treturn '{0} expects one value, got {1}'.format(prop, len(l))\n\telse:\n\t\treturn substr\n\ndef load_schema():\n\t\"\"\"\n\tLoad the schema to validate against.\n\t\"\"\"\n\t# read the schema\n\twith open(schema_file) as f:\n\t\tschema = json.load(f)\n\treturn schema\n\ndef load_data():\n\t\"\"\"\n\tLoad the data to validate.\n\t\"\"\"\n\t# read the JSON-LD data\n\twith open(data_file) as f:\n\t\tdata = json.load(f)\n\treturn data\n\ndef write_errors(errors):\n\t\"\"\"\n\tWrite validation errors to a user-friendly report.\n\t\"\"\"\n\twith open(report_file, 'w+') as f:\n\t\tf.write(\"ID,ERROR\\n\")\n\t\tfor ont_id, msg in errors.items():\n\t\t\tf.write('\"' + ont_id + '\",\"' + msg + '\"\\n')\n\tprint(\n\t\t\"VALIDATION FAILED: {0} errors - see {1} for details\".format(\n\t\t\tlen(errors), report_file))\n\tsys.exit(1)\n\n# run the process!\nif __name__ == '__main__':\n\tvalidate()\n", "path": "util/validate-metadata.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport ast\nimport sys\nimport json\nimport jsonschema\nimport re\n\n# file paths\ndata_file = \"registry/ontologies.jsonld\"\nschema_file = \"util/metadata-schema.json\"\nschema_lite_file = \"util/metadata-schema-lite.json\"\nreport_file = \"reports/metadata-violations.csv\"\n\n# ultra-escaped regex strings\nemail_sub = 'does not match \\'\\\\^\\\\[\\\\^@\\\\]\\\\+\\\\$\\''\nfmt_sub = ('does not match \\'\\\\^\\\\[0\\\\-9A\\\\-Za\\\\-z\\\\-_\\\\\\\\\\\\\\\\/\\\\]\\\\+'\n '\\\\\\\\\\\\\\\\.\\\\(owl\\\\|obo\\\\|json\\\\|omn\\\\|ofn\\\\|owx\\\\|ttl\\\\|owl'\n '\\\\\\\\\\\\\\\\.gz\\\\)\\\\$\\'')\n\ndef validate():\n\t\"\"\"\n\tValidate registry metadata.\n\t\"\"\"\n\tprint(\"--- validating metadata against {0} ---\".format(schema_file))\n\tdata = load_data()\n\tschema = load_schema()\n\t# validate each object\n\terrors = {}\n\tfor item in data[\"ontologies\"]:\n\t\tif 'is_obsolete' in item and item[\"is_obsolete\"] is True:\n\t\t\tcontinue\n\t\t# skip any 'validate: false' ontologies\n\t\tif 'validate' in item and item[\"validate\"] is False:\n\t\t\tcontinue\n\t\tont_id = item[\"id\"]\n\t\ttry:\n\t\t\tjsonschema.validate(item, schema)\n\t\texcept jsonschema.exceptions.ValidationError as ve:\n\t\t\tprint(\"ERROR in {0}\".format(ont_id))\n\t\t\terrors[ont_id] = format_msg(ve)\n\tif errors:\n\t\twrite_errors(errors)\n\telse:\n\t\tprint(\"SUCCESS - no errors found in metadata\")\n\t\tsys.exit(0)\n\ndef format_msg(ve):\n\t\"\"\"\n\tFormat exception message from jsonchema.validate(...).\n\t\"\"\"\n\t# replace u characters\n\treplace_u = re.sub('u\\'', '\\'', ve.message)\n\t# replace scary regex strings\n\treplace_email = re.sub(\n\t\temail_sub, 'is not valid for \\'contact.label\\'', replace_u)\n\tmsg = re.sub(fmt_sub, 'is not valid for \\'products.id\\'', replace_email)\n\n\t# check if output is for license error\n\tis_license = re.search('({\\'url\\'.+?\\'label\\'.+?})', msg)\n\tif is_license:\n\t\treturn format_license_msg(is_license.group(1))\n\n\t# check if output is for list error\n\tis_list = re.search('(\\\\[.+?\\\\]) is not of type \\'string\\'', msg)\n\tif is_list:\n\t\treturn format_list_msg(is_list.group(1), ve)\n\n\t# otherwise return the message\n\treturn msg\n\ndef format_license_msg(substr):\n\t\"\"\"\n\tFormat an exception message for a license issue.\n\t\"\"\"\n\t# process to dict\n\td = 
json.loads(substr.replace('\\'', '\"'))\n\turl = d['url']\n\tlabel = d['label']\n\treturn '\\'{0}\\' <{1}> is not valid for \\'license\\''.format(label, url)\n\ndef format_list_msg(substr, ve):\n\t\"\"\"\n\tFormat an exception for an unexpected list.\n\t\"\"\"\n\tl = json.loads(substr.replace('\\'', '\"'))\n\t# use the full message to find the violating property\n\tprop_find = re.search('On instance\\\\[(\\'.+?\\')\\\\]', str(ve))\n\tif prop_find:\n\t\tprop = prop_find.group(1)\n\t\treturn '{0} expects one value, got {1}'.format(prop, len(l))\n\telse:\n\t\treturn substr\n\ndef load_schema():\n\t\"\"\"\n\tLoad the schema to validate against.\n\t\"\"\"\n\t# read the schema\n\twith open(schema_file) as f:\n\t\tschema = json.load(f)\n\treturn schema\n\ndef load_data():\n\t\"\"\"\n\tLoad the data to validate.\n\t\"\"\"\n\t# read the JSON-LD data\n\twith open(data_file) as f:\n\t\tdata = json.load(f)\n\treturn data\n\ndef write_errors(errors):\n\t\"\"\"\n\tWrite validation errors to a user-friendly report.\n\t\"\"\"\n\twith open(report_file, 'w+') as f:\n\t\tf.write(\"ID,ERROR\\n\")\n\t\tfor ont_id, msg in errors.items():\n\t\t\tf.write('\"' + ont_id + '\",\"' + msg + '\"\\n')\n\tprint(\n\t\t\"VALIDATION FAILED: {0} errors - see {1} for details\".format(\n\t\t\tlen(errors), report_file))\n\tsys.exit(1)\n\n# run the process!\nif __name__ == '__main__':\n\tvalidate()\n", "path": "util/validate-metadata.py"}]}
num_tokens: 1,610
num_tokens_diff: 133
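The validate-metadata.py fix in the record above re-points the data and schema paths at the repository root but still writes to a relative `reports/` folder. As a hedged alternative sketch (not the accepted patch), the paths could instead be resolved from the script's own location and the reports directory created on demand, so the "assumes a util/reports folder" part of the issue cannot recur regardless of the working directory.

```python
# Sketch only: resolve files relative to the repository root and create the
# reports directory before writing the violations report.
from pathlib import Path

# Assumes this sketch lives at util/validate-metadata.py, one level below root.
REPO_ROOT = Path(__file__).resolve().parent.parent

DATA_FILE = REPO_ROOT / 'registry' / 'ontologies.jsonld'
SCHEMA_FILE = REPO_ROOT / 'util' / 'metadata-schema.json'
REPORT_FILE = REPO_ROOT / 'reports' / 'metadata-violations.csv'


def write_errors(errors):
    """Write one CSV row per violation, creating reports/ if it is missing."""
    REPORT_FILE.parent.mkdir(parents=True, exist_ok=True)
    with REPORT_FILE.open('w') as f:
        f.write('ID,ERROR\n')
        for ont_id, msg in errors.items():
            f.write(f'"{ont_id}","{msg}"\n')
```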
problem_id: gh_patches_debug_437
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-2584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add mechanism for side-by-side comparison of setup.py and its equivalent setup.cfg We have many documentation examples that are purely declarative and are either documented as `setup.py` or `setup.cfg`. It would be really awesome if, for each of these, we had the option to have either both versions side-by-side or, even better, in a sort of "tabbed container", like the one in the [code sample at the bottom of this example](https://leetcode.com/articles/median-of-two-sorted-arrays/). Requirements for this: 1. Cannot *link to* any third-party javascript dependencies. Ideally we wouldn't use any at all, but if you do they must be vendored in the documentation. 2. If javascript is disabled, it has to fall back to something intelligible. Ideally it would be implemented in pure CSS / HTML if that's at all possible. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker'] 2 3 master_doc = "index" 4 5 link_files = { 6 '../CHANGES.rst': dict( 7 using=dict( 8 BB='https://bitbucket.org', 9 GH='https://github.com', 10 ), 11 replace=[ 12 dict( 13 pattern=r'(Issue )?#(?P<issue>\d+)', 14 url='{package_url}/issues/{issue}', 15 ), 16 dict( 17 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)', 18 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}', 19 ), 20 dict( 21 pattern=r'Distribute #(?P<distribute>\d+)', 22 url='{BB}/tarek/distribute/issue/{distribute}', 23 ), 24 dict( 25 pattern=r'Buildout #(?P<buildout>\d+)', 26 url='{GH}/buildout/buildout/issues/{buildout}', 27 ), 28 dict( 29 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)', 30 url='http://bugs.python.org/setuptools/issue{old_setuptools}', 31 ), 32 dict( 33 pattern=r'Jython #(?P<jython>\d+)', 34 url='http://bugs.jython.org/issue{jython}', 35 ), 36 dict( 37 pattern=r'(Python #|bpo-)(?P<python>\d+)', 38 url='http://bugs.python.org/issue{python}', 39 ), 40 dict( 41 pattern=r'Interop #(?P<interop>\d+)', 42 url='{GH}/pypa/interoperability-peps/issues/{interop}', 43 ), 44 dict( 45 pattern=r'Pip #(?P<pip>\d+)', 46 url='{GH}/pypa/pip/issues/{pip}', 47 ), 48 dict( 49 pattern=r'Packaging #(?P<packaging>\d+)', 50 url='{GH}/pypa/packaging/issues/{packaging}', 51 ), 52 dict( 53 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)', 54 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst', 55 ), 56 dict( 57 pattern=r'PEP[- ](?P<pep_number>\d+)', 58 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/', 59 ), 60 dict( 61 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)', 62 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}', 63 ), 64 dict( 65 pattern=r'pypa/distutils#(?P<distutils>\d+)', 66 url='{GH}/pypa/distutils/issues/{distutils}', 67 ), 68 dict( 69 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n', 70 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n', 71 ), 72 ], 73 ), 74 } 75 76 intersphinx_mapping = { 77 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None) 78 } 79 80 # Add support for linking usernames 81 github_url = 'https://github.com' 82 github_sponsors_url = f'{github_url}/sponsors' 83 extlinks = { 84 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323 85 } 86 extensions += ['sphinx.ext.extlinks', 'sphinx.ext.intersphinx'] 87 88 # Be strict about any broken references: 89 nitpicky = 
True 90 91 # Ref: https://github.com/python-attrs/attrs/pull/571/files\ 92 # #diff-85987f48f1258d9ee486e3191495582dR82 93 default_role = 'any' 94 95 # Custom sidebar templates, maps document names to template names. 96 html_theme = 'alabaster' 97 templates_path = ['_templates'] 98 html_sidebars = {'index': ['tidelift-sidebar.html']} 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -93,3 +93,6 @@ html_theme = 'alabaster' templates_path = ['_templates'] html_sidebars = {'index': ['tidelift-sidebar.html']} + +# Add support for inline tabs +extensions += ['sphinx_inline_tabs']
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -93,3 +93,6 @@\n html_theme = 'alabaster'\n templates_path = ['_templates']\n html_sidebars = {'index': ['tidelift-sidebar.html']}\n+\n+# Add support for inline tabs\n+extensions += ['sphinx_inline_tabs']\n", "issue": "Add mechanism for side-by-side comparison of setup.py and its equivalent setup.cfg\nWe have many documentation examples that are purely declarative and are either documented as `setup.py` or `setup.cfg`. It would be really awesome if, for each of these, we had the option to have either both versions side-by-side or, even better, in a sort of \"tabbed container\", like the one in the [code sample at the bottom of this example](https://leetcode.com/articles/median-of-two-sorted-arrays/).\r\n\r\nRequirements for this:\r\n\r\n1. Cannot *link to* any third-party javascript dependencies. Ideally we wouldn't use any at all, but if you do they must be vendored in the documentation.\r\n2. If javascript is disabled, it has to fall back to something intelligible.\r\n\r\nIdeally it would be implemented in pure CSS / HTML if that's at all possible.\n", "before_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\nintersphinx_mapping = {\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n}\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks', 'sphinx.ext.intersphinx']\n\n# Be strict about any broken 
references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n", "path": "docs/conf.py"}], "after_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n", "path": "docs/conf.py"}]}
num_tokens: 1,539
num_tokens_diff: 82
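The conf.py change in the record above only registers `sphinx_inline_tabs`; the side-by-side examples themselves would still have to be authored as paired setup.py/setup.cfg snippets. As a hedged toy illustration of the equivalence those tabs are meant to display (the package name, version, and dependency below are made up for the example):

```python
# Toy sketch: render the same declarative metadata both as the keyword
# arguments a setup() call would receive and as the setup.cfg text a tabbed
# documentation example would show next to it.
METADATA = {
    'name': 'mypkg',
    'version': '0.1.0',
    'install_requires': ['requests'],
}


def as_setup_cfg(meta):
    """Return a minimal setup.cfg equivalent of the metadata dict above."""
    lines = [
        '[metadata]',
        f"name = {meta['name']}",
        f"version = {meta['version']}",
        '',
        '[options]',
        'install_requires =',
    ]
    lines += [f'    {req}' for req in meta['install_requires']]
    return '\n'.join(lines)


if __name__ == '__main__':
    print('# setup.py keyword arguments:')
    print(METADATA)
    print()
    print('# setup.cfg equivalent:')
    print(as_setup_cfg(METADATA))
```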
problem_id: gh_patches_debug_22113
source: rasdani/github-patches
task_type: git_diff
in_source_id: rlworkgroup__garage-1879
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TF 2.3.0 incompatibility TF 2.3.0 was released yesterday, and seems to be incompatible with TFP <0.11.0 and breaks imports (https://travis-ci.com/github/rlworkgroup/garage/jobs/365922927#L3061). We pin TFP to <=0.10.0 in the first place to resolve cloudpickle version mismatch (https://github.com/rlworkgroup/garage/issues/1758). Since TFP 0.11.0 still pins cloudpickle to 1.3.0 while gym needs 1.2.x, unpinning TFP won't work. So for now, TF needs to be pinned to <2.3.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """setuptools based setup module.""" 2 import os 3 4 from setuptools import find_packages, setup 5 6 GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git' 7 GYM_VERSION = '0.15.4' 8 9 # Required dependencies 10 REQUIRED = [ 11 # Please keep alphabetized 12 'akro', 13 'click>=2.0', 14 'cloudpickle<1.5', 15 'cma==2.7.0', 16 'dowel==0.0.3', 17 f'gym[atari,box2d,classic_control]=={GYM_VERSION}', 18 'numpy>=1.14.5', 19 'psutil', 20 # Pyglet 1.4.0 introduces some api change which breaks some 21 # gym environments 22 # See: https://github.com/openai/gym/issues/1588 23 'pyglet<1.4.0,>=1.3.0', 24 'python-dateutil', 25 'ray', 26 'scikit-image', 27 'scipy', 28 'setproctitle>=1.0', 29 'tensorflow>=1.14,<2.3.0', 30 'tensorflow-probability<=0.10.0', 31 'torch>=1.0.0,!=1.5.0,<1.6.0', 32 'torchvision>=0.2.1,<0.7.0', 33 ] 34 35 # Dependencies for optional features 36 EXTRAS = {} 37 38 EXTRAS['mujoco'] = [ 39 'mujoco-py<2.1,>=2.0', 40 f'gym[all]=={GYM_VERSION}', 41 ] 42 43 EXTRAS['dm_control'] = [ 44 # dm_control throws an error during install about not being able to 45 # find a build dependency (absl-py). Later pip executes the `install` 46 # command again and the install succeeds because absl-py has been 47 # installed. This is stupid, but harmless. 
48 'dm_control', 49 ] 50 51 EXTRAS['bullet'] = ['mpi4py', 'pybullet'] 52 53 EXTRAS['all'] = list(set(sum(EXTRAS.values(), []))) 54 55 # Development dependencies (*not* included in 'all') 56 EXTRAS['dev'] = [ 57 # Please keep alphabetized 58 'flake8', 59 'flake8-docstrings>=1.5.0', 60 'flake8-import-order', 61 f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501 62 'isort>=4.3.21,<5.0.0', 63 'pep8-naming==0.7.0', 64 'pre-commit', 65 'pycodestyle>=2.5.0', 66 'pydocstyle>=4.0.0', 67 'pylint>=2.5.3', 68 'pytest>=4.5.0', # Required for strict-markers 69 'pytest-cov', 70 'pytest-timeout', 71 'pytest-xdist', 72 'recommonmark', 73 'sphinx', 74 'sphinx-autoapi>=1.4.0', 75 'sphinx_rtd_theme', 76 'sphinxcontrib-bibtex', 77 'yapf==0.30.0', 78 ] # yapf: disable 79 80 with open('README.md') as f: 81 README = f.read() 82 83 # Get the package version dynamically 84 with open('VERSION') as v: 85 VERSION = v.read().strip() 86 87 setup( 88 name='garage', 89 version=VERSION, 90 author='Reinforcement Learning Working Group', 91 description='A toolkit for reproducible reinforcement learning research', 92 url='https://github.com/rlworkgroup/garage', 93 packages=find_packages(where='src'), 94 package_dir={'': 'src'}, 95 scripts=['scripts/garage'], 96 python_requires='>=3.6', 97 install_requires=REQUIRED, 98 extras_require=EXTRAS, 99 license='MIT', 100 long_description=README, 101 long_description_content_type='text/markdown', 102 classifiers=[ 103 'Development Status :: 4 - Beta', 104 'Intended Audience :: Developers', 105 'Intended Audience :: Education', 106 'Intended Audience :: Science/Research', 107 'License :: OSI Approved :: MIT License', 108 'Programming Language :: Python :: 3.6', 109 'Programming Language :: Python :: 3.7', 110 'Programming Language :: Python :: 3 :: Only', 111 'Topic :: Scientific/Engineering :: Artificial Intelligence', 112 'Topic :: Scientific/Engineering :: Mathematics', 113 'Topic :: Software Development :: Libraries', 114 ], 115 ) 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,30 +4,26 @@ from setuptools import find_packages, setup GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git' -GYM_VERSION = '0.15.4' +GYM_VERSION = '0.17.2' # Required dependencies REQUIRED = [ # Please keep alphabetized 'akro', 'click>=2.0', - 'cloudpickle<1.5', + 'cloudpickle==1.3', 'cma==2.7.0', 'dowel==0.0.3', f'gym[atari,box2d,classic_control]=={GYM_VERSION}', 'numpy>=1.14.5', 'psutil', - # Pyglet 1.4.0 introduces some api change which breaks some - # gym environments - # See: https://github.com/openai/gym/issues/1588 - 'pyglet<1.4.0,>=1.3.0', 'python-dateutil', 'ray', 'scikit-image', 'scipy', 'setproctitle>=1.0', - 'tensorflow>=1.14,<2.3.0', - 'tensorflow-probability<=0.10.0', + 'tensorflow>=1.14', + 'tensorflow-probability>=0.11.0', 'torch>=1.0.0,!=1.5.0,<1.6.0', 'torchvision>=0.2.1,<0.7.0', ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,30 +4,26 @@\n from setuptools import find_packages, setup\n \n GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\n-GYM_VERSION = '0.15.4'\n+GYM_VERSION = '0.17.2'\n \n # Required dependencies\n REQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n- 'cloudpickle<1.5',\n+ 'cloudpickle==1.3',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n- # Pyglet 1.4.0 introduces some api change which breaks some\n- # gym environments\n- # See: https://github.com/openai/gym/issues/1588\n- 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n- 'tensorflow>=1.14,<2.3.0',\n- 'tensorflow-probability<=0.10.0',\n+ 'tensorflow>=1.14',\n+ 'tensorflow-probability>=0.11.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n ]\n", "issue": "TF 2.3.0 incompatibility\nTF 2.3.0 was released yesterday, and seems to be incompatible with TFP <0.11.0 and breaks imports (https://travis-ci.com/github/rlworkgroup/garage/jobs/365922927#L3061). We pin TFP to <=0.10.0 in the first place to resolve cloudpickle version mismatch (https://github.com/rlworkgroup/garage/issues/1758). Since TFP 0.11.0 still pins cloudpickle to 1.3.0 while gym needs 1.2.x, unpinning TFP won't work. So for now, TF needs to be pinned to <2.3.0\n", "before_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14,<2.3.0',\n 'tensorflow-probability<=0.10.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'sphinxcontrib-bibtex',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.17.2'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle==1.3',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability>=0.11.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'sphinxcontrib-bibtex',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
1,717
387