Column | Type | Range
---|---|---
problem_id | string | lengths 18–22
source | string | 1 distinct value
task_type | string | 1 distinct value
in_source_id | string | lengths 13–58
prompt | string | lengths 1.1k–25.4k
golden_diff | string | lengths 145–5.13k
verification_info | string | lengths 582–39.1k
num_tokens | int64 | 271–4.1k
num_tokens_diff | int64 | 47–1.02k
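
Each row dumped below follows this schema: the first four pipe-separated fields are `problem_id`, `source`, `task_type`, and `in_source_id`, followed by the full `prompt`, the `golden_diff`, the `verification_info` payload, and the two token counts. As a minimal sketch of how such a row could be loaded and inspected with the Hugging Face `datasets` library: the repository id is copied from the `source` column, and the split name and the JSON-string encoding of `verification_info` are assumptions, so adjust them to wherever and however this dump is actually hosted.

```python
import json

from datasets import load_dataset

# Assumptions: the repo id is taken from the `source` column above and the
# split is guessed as "train"; change both if this dump lives elsewhere.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])        # e.g. "gh_patches_debug_6066"
print(row["in_source_id"])      # upstream repo/issue reference
print(row["num_tokens"], row["num_tokens_diff"])

# The prompt holds the issue text plus the relevant file contents; the
# golden_diff is the reference patch in `git diff` format.
print(row["prompt"][:500])
print(row["golden_diff"])

# `verification_info` appears to be a JSON payload with the issue text and
# before/after file snapshots; parse it if it is stored as a string.
info = row["verification_info"]
if isinstance(info, str):
    info = json.loads(info)
print(sorted(info.keys()))
```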
gh_patches_debug_6066 | rasdani/github-patches | git_diff | lutris__lutris-1132 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DXVK versions not updating when dxvk directory is missing
When `~/.local/share/lutris/runtime/dxvk/` is not present (because a user never used dxvk before), Lutris fails to create `dxvk_versions.json` because the parent directory does not exist, which leads to Lutris showing the (outdated) fallback DXVK versions. A workaround is to create the missing directory manually and to restart Lutris.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/dxvk.py`
Content:
```
1 """DXVK helper module"""
2 import os
3 import json
4 import time
5 import shutil
6 import urllib.request
7
8 from lutris.settings import RUNTIME_DIR
9 from lutris.util.log import logger
10 from lutris.util.extract import extract_archive
11 from lutris.util.downloader import Downloader
12
13
14 CACHE_MAX_AGE = 86400 # Re-download DXVK versions every day
15 DXVK_TAGS_URL = "https://api.github.com/repos/doitsujin/dxvk/tags"
16
17
18 def get_dxvk_versions():
19 """Get DXVK versions from GitHub"""
20 versions_path = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')
21
22 # Download tags if the versions_path does not exist or is more than a day old
23 if (
24 not os.path.exists(versions_path) or
25 os.path.getmtime(versions_path) + CACHE_MAX_AGE < time.time()
26 ):
27 urllib.request.urlretrieve(DXVK_TAGS_URL, versions_path)
28
29 with open(versions_path, "r") as dxvk_tags:
30 dxvk_json = json.load(dxvk_tags)
31 dxvk_versions = [x['name'].replace('v', '') for x in dxvk_json]
32
33 return dxvk_versions
34
35
36 try:
37 DXVK_VERSIONS = get_dxvk_versions()
38 except Exception as ex: # pylint: disable= broad-except
39 logger.error(ex)
40 DXVK_VERSIONS = [
41 "0.71", "0.70", "0.65",
42 "0.64", "0.63", "0.62",
43 "0.54", "0.53", "0.52",
44 "0.42", "0.31", "0.21"
45 ]
46 DXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:]
47
48
49 class DXVKManager:
50 """Utility class to install DXVK dlls to a Wine prefix"""
51 base_url = "https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz"
52 base_dir = os.path.join(RUNTIME_DIR, 'dxvk')
53 dxvk_dlls = ('dxgi', 'd3d11', 'd3d10core', 'd3d10_1', 'd3d10')
54 latest_version = DXVK_LATEST
55
56 def __init__(self, prefix, arch='win64', version=None):
57 self.prefix = prefix
58 if not os.path.isdir(self.base_dir):
59 os.makedirs(self.base_dir)
60 self._version = version
61 self.wine_arch = arch
62
63 @property
64 def version(self):
65 """Return version of DXVK (latest known version if not provided)"""
66 if self._version:
67 return self._version
68 return self.latest_version
69
70 @property
71 def dxvk_path(self):
72 """Return path to DXVK local cache"""
73 return os.path.join(self.base_dir, self.version)
74
75 @staticmethod
76 def is_dxvk_dll(dll_path):
77 """Check if a given DLL path is provided by DXVK
78
79 Very basic check to see if a dll exists and is over 1MB. If this is the
80 case, then consider the DLL to be from DXVK
81 """
82 if os.path.exists(dll_path):
83 dll_stats = os.stat(dll_path)
84 dll_size = dll_stats.st_size
85 else:
86 dll_size = 0
87 return dll_size > 1024 * 1024
88
89 def is_available(self):
90 """Return whether DXVK is cached locally"""
91 return os.path.exists(self.dxvk_path)
92
93 def download(self):
94 """Download DXVK to the local cache"""
95 # There's a glitch in one of the archive's names
96 fixed_version = 'v0.40' if self.version == '0.40' else self.version
97 dxvk_url = self.base_url.format(self.version, fixed_version)
98 if self.is_available():
99 logger.warning("DXVK already available at %s", self.dxvk_path)
100
101 dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))
102 downloader = Downloader(dxvk_url, dxvk_archive_path)
103 downloader.start()
104 while downloader.check_progress() < 1:
105 time.sleep(1)
106 if not os.path.exists(dxvk_archive_path):
107 logger.error("DXVK %s not downloaded")
108 return
109 if os.stat(dxvk_archive_path).st_size:
110 extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)
111 else:
112 logger.error("%s is an empty file", self.dxvk_path)
113 os.remove(dxvk_archive_path)
114
115 def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):
116 """Copies DXVK dlls to the appropriate destination"""
117 wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)
118 logger.info("Replacing %s/%s with DXVK version", system_dir, dll)
119 if not self.is_dxvk_dll(wine_dll_path):
120 # Backing up original version (may not be needed)
121 if os.path.exists(wine_dll_path):
122 shutil.move(wine_dll_path, wine_dll_path + ".orig")
123 # Copying DXVK's version
124 dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, "%s.dll" % dll)
125 if os.path.exists(dxvk_dll_path):
126 if os.path.exists(wine_dll_path):
127 os.remove(wine_dll_path)
128 os.symlink(dxvk_dll_path, wine_dll_path)
129
130 def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):
131 """Remove DXVK DLL from Wine prefix"""
132 wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)
133 if self.is_dxvk_dll(wine_dll_path):
134 logger.info("Removing DXVK dll %s/%s", system_dir, dll)
135 os.remove(wine_dll_path)
136 # Restoring original version (may not be needed)
137 if os.path.exists(wine_dll_path + '.orig'):
138 shutil.move(wine_dll_path + '.orig', wine_dll_path)
139
140 def _iter_dxvk_dlls(self):
141 windows_path = os.path.join(self.prefix, 'drive_c/windows')
142 if self.wine_arch == 'win64':
143 system_dirs = {
144 'x64': os.path.join(windows_path, 'system32'),
145 'x32': os.path.join(windows_path, 'syswow64')
146 }
147 elif self.wine_arch == 'win32':
148 system_dirs = {
149 'x32': os.path.join(windows_path, 'system32'),
150 }
151
152 for dxvk_arch, system_dir in system_dirs.items():
153 for dll in self.dxvk_dlls:
154 yield system_dir, dxvk_arch, dll
155
156 def enable(self):
157 """Enable DXVK for the current prefix"""
158 if not os.path.exists(self.dxvk_path):
159 logger.error("DXVK %s is not available locally", self.version)
160 return
161 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():
162 self.enable_dxvk_dll(system_dir, dxvk_arch, dll)
163
164 def disable(self):
165 """Disable DXVK for the current prefix"""
166 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():
167 self.disable_dxvk_dll(system_dir, dxvk_arch, dll)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/dxvk.py b/lutris/util/dxvk.py
--- a/lutris/util/dxvk.py
+++ b/lutris/util/dxvk.py
@@ -17,7 +17,10 @@
def get_dxvk_versions():
"""Get DXVK versions from GitHub"""
- versions_path = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')
+ dxvk_path = os.path.join(RUNTIME_DIR, 'dxvk');
+ if not os.path.isdir(dxvk_path):
+ os.mkdir(dxvk_path)
+ versions_path = os.path.join(dxvk_path, 'dxvk_versions.json')
# Download tags if the versions_path does not exist or is more than a day old
if (
| {"golden_diff": "diff --git a/lutris/util/dxvk.py b/lutris/util/dxvk.py\n--- a/lutris/util/dxvk.py\n+++ b/lutris/util/dxvk.py\n@@ -17,7 +17,10 @@\n \n def get_dxvk_versions():\n \"\"\"Get DXVK versions from GitHub\"\"\"\n- versions_path = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')\n+ dxvk_path = os.path.join(RUNTIME_DIR, 'dxvk');\n+ if not os.path.isdir(dxvk_path):\n+ os.mkdir(dxvk_path)\n+ versions_path = os.path.join(dxvk_path, 'dxvk_versions.json')\n \n # Download tags if the versions_path does not exist or is more than a day old\n if (\n", "issue": "DXVK versions not updating when dxvk directory is missing\nWhen `~/.local/share/lutris/runtime/dxvk/` is not present (because a user never used dxvk before), Lutris fails to create `dxvk_versions.json` because the parent directory does not exist, which leads to Lutris showing the (outdated) fallback DXVK versions. A workaround is to create the missing directory manually and to restart Lutris.\r\n\r\n\n", "before_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport json\nimport time\nimport shutil\nimport urllib.request\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.downloader import Downloader\n\n\nCACHE_MAX_AGE = 86400 # Re-download DXVK versions every day\nDXVK_TAGS_URL = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n\n\ndef get_dxvk_versions():\n \"\"\"Get DXVK versions from GitHub\"\"\"\n versions_path = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')\n\n # Download tags if the versions_path does not exist or is more than a day old\n if (\n not os.path.exists(versions_path) or\n os.path.getmtime(versions_path) + CACHE_MAX_AGE < time.time()\n ):\n urllib.request.urlretrieve(DXVK_TAGS_URL, versions_path)\n\n with open(versions_path, \"r\") as dxvk_tags:\n dxvk_json = json.load(dxvk_tags)\n dxvk_versions = [x['name'].replace('v', '') for x in dxvk_json]\n\n return dxvk_versions\n\n\ntry:\n DXVK_VERSIONS = get_dxvk_versions()\nexcept Exception as ex: # pylint: disable= broad-except\n logger.error(ex)\n DXVK_VERSIONS = [\n \"0.71\", \"0.70\", \"0.65\",\n \"0.64\", \"0.63\", \"0.62\",\n \"0.54\", \"0.53\", \"0.52\",\n \"0.42\", \"0.31\", \"0.21\"\n ]\nDXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:]\n\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_dir = os.path.join(RUNTIME_DIR, 'dxvk')\n dxvk_dlls = ('dxgi', 'd3d11', 'd3d10core', 'd3d10_1', 'd3d10')\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch='win64', version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 1MB. 
If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if os.path.exists(dll_path):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 1024\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return os.path.exists(self.dxvk_path)\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n # There's a glitch in one of the archive's names\n fixed_version = 'v0.40' if self.version == '0.40' else self.version\n dxvk_url = self.base_url.format(self.version, fixed_version)\n if self.is_available():\n logger.warning(\"DXVK already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1:\n time.sleep(1)\n if not os.path.exists(dxvk_archive_path):\n logger.error(\"DXVK %s not downloaded\")\n return\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n else:\n logger.error(\"%s is an empty file\", self.dxvk_path)\n os.remove(dxvk_archive_path)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n logger.info(\"Replacing %s/%s with DXVK version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if os.path.exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n if os.path.exists(dxvk_dll_path):\n if os.path.exists(wine_dll_path):\n os.remove(wine_dll_path)\n os.symlink(dxvk_dll_path, wine_dll_path)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n if self.is_dxvk_dll(wine_dll_path):\n logger.info(\"Removing DXVK dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n # Restoring original version (may not be needed)\n if os.path.exists(wine_dll_path + '.orig'):\n shutil.move(wine_dll_path + '.orig', wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, 'drive_c/windows')\n if self.wine_arch == 'win64':\n system_dirs = {\n 'x64': os.path.join(windows_path, 'system32'),\n 'x32': os.path.join(windows_path, 'syswow64')\n }\n elif self.wine_arch == 'win32':\n system_dirs = {\n 'x32': os.path.join(windows_path, 'system32'),\n }\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not os.path.exists(self.dxvk_path):\n logger.error(\"DXVK %s is not available locally\", self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n", "path": "lutris/util/dxvk.py"}], "after_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport json\nimport time\nimport shutil\nimport urllib.request\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract 
import extract_archive\nfrom lutris.util.downloader import Downloader\n\n\nCACHE_MAX_AGE = 86400 # Re-download DXVK versions every day\nDXVK_TAGS_URL = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n\n\ndef get_dxvk_versions():\n \"\"\"Get DXVK versions from GitHub\"\"\"\n dxvk_path = os.path.join(RUNTIME_DIR, 'dxvk');\n if not os.path.isdir(dxvk_path):\n os.mkdir(dxvk_path)\n versions_path = os.path.join(dxvk_path, 'dxvk_versions.json')\n\n # Download tags if the versions_path does not exist or is more than a day old\n if (\n not os.path.exists(versions_path) or\n os.path.getmtime(versions_path) + CACHE_MAX_AGE < time.time()\n ):\n urllib.request.urlretrieve(DXVK_TAGS_URL, versions_path)\n\n with open(versions_path, \"r\") as dxvk_tags:\n dxvk_json = json.load(dxvk_tags)\n dxvk_versions = [x['name'].replace('v', '') for x in dxvk_json]\n\n return dxvk_versions\n\n\ntry:\n DXVK_VERSIONS = get_dxvk_versions()\nexcept Exception as ex: # pylint: disable= broad-except\n logger.error(ex)\n DXVK_VERSIONS = [\n \"0.71\", \"0.70\", \"0.65\",\n \"0.64\", \"0.63\", \"0.62\",\n \"0.54\", \"0.53\", \"0.52\",\n \"0.42\", \"0.31\", \"0.21\"\n ]\nDXVK_LATEST, DXVK_PAST_RELEASES = DXVK_VERSIONS[0], DXVK_VERSIONS[1:]\n\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_dir = os.path.join(RUNTIME_DIR, 'dxvk')\n dxvk_dlls = ('dxgi', 'd3d11', 'd3d10core', 'd3d10_1', 'd3d10')\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch='win64', version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 1MB. 
If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if os.path.exists(dll_path):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 1024\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return os.path.exists(self.dxvk_path)\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n # There's a glitch in one of the archive's names\n fixed_version = 'v0.40' if self.version == '0.40' else self.version\n dxvk_url = self.base_url.format(self.version, fixed_version)\n if self.is_available():\n logger.warning(\"DXVK already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1:\n time.sleep(1)\n if not os.path.exists(dxvk_archive_path):\n logger.error(\"DXVK %s not downloaded\")\n return\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n else:\n logger.error(\"%s is an empty file\", self.dxvk_path)\n os.remove(dxvk_archive_path)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n logger.info(\"Replacing %s/%s with DXVK version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if os.path.exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n if os.path.exists(dxvk_dll_path):\n if os.path.exists(wine_dll_path):\n os.remove(wine_dll_path)\n os.symlink(dxvk_dll_path, wine_dll_path)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n if self.is_dxvk_dll(wine_dll_path):\n logger.info(\"Removing DXVK dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n # Restoring original version (may not be needed)\n if os.path.exists(wine_dll_path + '.orig'):\n shutil.move(wine_dll_path + '.orig', wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, 'drive_c/windows')\n if self.wine_arch == 'win64':\n system_dirs = {\n 'x64': os.path.join(windows_path, 'system32'),\n 'x32': os.path.join(windows_path, 'syswow64')\n }\n elif self.wine_arch == 'win32':\n system_dirs = {\n 'x32': os.path.join(windows_path, 'system32'),\n }\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not os.path.exists(self.dxvk_path):\n logger.error(\"DXVK %s is not available locally\", self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n", "path": "lutris/util/dxvk.py"}]} | 2,457 | 174 |
gh_patches_debug_538 | rasdani/github-patches | git_diff | bokeh__bokeh-5378 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correct comment in Dimension example plot
The comment in [this example](http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#dimensions) says `# create a new plot with a title`. I expect this example was copied from the one below is demonstrating how to add a title. This comment should be changed to `# create a new plot with specific dimensions`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/source/docs/user_guide/source_examples/styling_dimensions.py`
Content:
```
1 from bokeh.plotting import figure, output_file, show
2
3 output_file("dimensions.html")
4
5 # create a new plot with a title
6 p = figure(plot_width=700)
7 p.plot_height = 300
8
9 p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
10
11 show(p)
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
--- a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
+++ b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py
@@ -2,7 +2,7 @@
output_file("dimensions.html")
-# create a new plot with a title
+# create a new plot with specific dimensions
p = figure(plot_width=700)
p.plot_height = 300
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n--- a/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n+++ b/sphinx/source/docs/user_guide/source_examples/styling_dimensions.py\n@@ -2,7 +2,7 @@\n \n output_file(\"dimensions.html\")\n \n-# create a new plot with a title\n+# create a new plot with specific dimensions\n p = figure(plot_width=700)\n p.plot_height = 300\n", "issue": "Correct comment in Dimension example plot\nThe comment in [this example](http://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#dimensions) says `# create a new plot with a title`. I expect this example was copied from the one below is demonstrating how to add a title. This comment should be changed to `# create a new plot with specific dimensions`.\n\n", "before_files": [{"content": "from bokeh.plotting import figure, output_file, show\n\noutput_file(\"dimensions.html\")\n\n# create a new plot with a title\np = figure(plot_width=700)\np.plot_height = 300\n\np.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/source_examples/styling_dimensions.py"}], "after_files": [{"content": "from bokeh.plotting import figure, output_file, show\n\noutput_file(\"dimensions.html\")\n\n# create a new plot with specific dimensions\np = figure(plot_width=700)\np.plot_height = 300\n\np.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)\n\nshow(p)\n", "path": "sphinx/source/docs/user_guide/source_examples/styling_dimensions.py"}]} | 452 | 116 |
gh_patches_debug_3795 | rasdani/github-patches | git_diff | magenta__magenta-541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
documentation: missing dependencies
I installed magenta from scratch on a clean Mac using the README and some tests from`bazel test //magenta/...` were failing because of the missing dependencies:
```
pip install IPython
pip install intervaltree
```
The other failures are related to https://github.com/tensorflow/magenta/issues/529 and apparently should be fixed with the upcoming version of Bazel
This issue might be related to the fact that I wasn't using conda, which could totally be a user error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/tools/pip/setup.py`
Content:
```
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """A setuptools based setup module for magenta."""
15
16 from setuptools import find_packages
17 from setuptools import setup
18
19 # Bit of a hack to parse the version string stored in version.py without
20 # executing __init__.py, which will end up requiring a bunch of dependencies to
21 # execute (e.g., tensorflow, pretty_midi, etc.).
22 # Makes the __version__ variable available.
23 execfile('magenta/version.py')
24
25
26 REQUIRED_PACKAGES = [
27 'intervaltree >= 2.1.0',
28 'mido >= 1.1.17',
29 'Pillow >= 3.4.2',
30 'pretty_midi >= 0.2.6',
31 'scipy >= 0.18.1',
32 'tensorflow >= 1.0.0',
33 'matplotlib >= 1.5.3',
34 'wheel',
35 ]
36
37 CONSOLE_SCRIPTS = [
38 'magenta.interfaces.midi.magenta_midi',
39 'magenta.models.drums_rnn.drums_rnn_create_dataset',
40 'magenta.models.drums_rnn.drums_rnn_generate',
41 'magenta.models.drums_rnn.drums_rnn_train',
42 'magenta.models.image_stylization.image_stylization_create_dataset',
43 'magenta.models.image_stylization.image_stylization_evaluate',
44 'magenta.models.image_stylization.image_stylization_finetune',
45 'magenta.models.image_stylization.image_stylization_train',
46 'magenta.models.image_stylization.image_stylization_transform',
47 'magenta.models.improv_rnn.improv_rnn_create_dataset',
48 'magenta.models.improv_rnn.improv_rnn_generate',
49 'magenta.models.improv_rnn.improv_rnn_train',
50 'magenta.models.melody_rnn.melody_rnn_create_dataset',
51 'magenta.models.melody_rnn.melody_rnn_generate',
52 'magenta.models.melody_rnn.melody_rnn_train',
53 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',
54 'magenta.models.polyphony_rnn.polyphony_rnn_generate',
55 'magenta.models.polyphony_rnn.polyphony_rnn_train',
56 'magenta.models.rl_tuner.rl_tuner_train',
57 'magenta.scripts.convert_dir_to_note_sequences',
58 ]
59
60 setup(
61 name='magenta',
62 version=__version__, # pylint: disable=undefined-variable
63 description='Use machine learning to create art and music',
64 long_description='',
65 url='https://magenta.tensorflow.org/',
66 author='Google Inc.',
67 author_email='[email protected]',
68 license='Apache 2',
69 # PyPI package information.
70 classifiers=[
71 'Development Status :: 4 - Beta',
72 'Intended Audience :: Developers',
73 'Intended Audience :: Education',
74 'Intended Audience :: Science/Research',
75 'License :: OSI Approved :: Apache Software License',
76 'Programming Language :: Python :: 2.7',
77 'Topic :: Scientific/Engineering :: Mathematics',
78 'Topic :: Software Development :: Libraries :: Python Modules',
79 'Topic :: Software Development :: Libraries',
80 ],
81 keywords='tensorflow machine learning magenta music art',
82
83 packages=find_packages(),
84 install_requires=REQUIRED_PACKAGES,
85 entry_points={
86 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in
87 ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],
88 },
89
90 include_package_data=True,
91 package_data={
92 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],
93 },
94 )
95
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py
--- a/magenta/tools/pip/setup.py
+++ b/magenta/tools/pip/setup.py
@@ -24,13 +24,14 @@
REQUIRED_PACKAGES = [
+ 'IPython',
+ 'Pillow >= 3.4.2',
'intervaltree >= 2.1.0',
+ 'matplotlib >= 1.5.3',
'mido >= 1.1.17',
- 'Pillow >= 3.4.2',
'pretty_midi >= 0.2.6',
'scipy >= 0.18.1',
'tensorflow >= 1.0.0',
- 'matplotlib >= 1.5.3',
'wheel',
]
| {"golden_diff": "diff --git a/magenta/tools/pip/setup.py b/magenta/tools/pip/setup.py\n--- a/magenta/tools/pip/setup.py\n+++ b/magenta/tools/pip/setup.py\n@@ -24,13 +24,14 @@\n \n \n REQUIRED_PACKAGES = [\n+ 'IPython',\n+ 'Pillow >= 3.4.2',\n 'intervaltree >= 2.1.0',\n+ 'matplotlib >= 1.5.3',\n 'mido >= 1.1.17',\n- 'Pillow >= 3.4.2',\n 'pretty_midi >= 0.2.6',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.0.0',\n- 'matplotlib >= 1.5.3',\n 'wheel',\n ]\n", "issue": "documentation: missing dependencies\nI installed magenta from scratch on a clean Mac using the README and some tests from`bazel test //magenta/...` were failing because of the missing dependencies:\r\n\r\n```\r\npip install IPython\r\npip install intervaltree\r\n```\r\n\r\nThe other failures are related to https://github.com/tensorflow/magenta/issues/529 and apparently should be fixed with the upcoming version of Bazel\r\n\r\nThis issue might be related to the fact that I wasn't using conda, which could totally be a user error.\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'intervaltree >= 2.1.0',\n 'mido >= 1.1.17',\n 'Pillow >= 3.4.2',\n 'pretty_midi >= 0.2.6',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.0.0',\n 'matplotlib >= 1.5.3',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n description='Use machine learning to create art and music',\n 
long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n\n", "path": "magenta/tools/pip/setup.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A setuptools based setup module for magenta.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Bit of a hack to parse the version string stored in version.py without\n# executing __init__.py, which will end up requiring a bunch of dependencies to\n# execute (e.g., tensorflow, pretty_midi, etc.).\n# Makes the __version__ variable available.\nexecfile('magenta/version.py')\n\n\nREQUIRED_PACKAGES = [\n 'IPython',\n 'Pillow >= 3.4.2',\n 'intervaltree >= 2.1.0',\n 'matplotlib >= 1.5.3',\n 'mido >= 1.1.17',\n 'pretty_midi >= 0.2.6',\n 'scipy >= 0.18.1',\n 'tensorflow >= 1.0.0',\n 'wheel',\n]\n\nCONSOLE_SCRIPTS = [\n 'magenta.interfaces.midi.magenta_midi',\n 'magenta.models.drums_rnn.drums_rnn_create_dataset',\n 'magenta.models.drums_rnn.drums_rnn_generate',\n 'magenta.models.drums_rnn.drums_rnn_train',\n 'magenta.models.image_stylization.image_stylization_create_dataset',\n 'magenta.models.image_stylization.image_stylization_evaluate',\n 'magenta.models.image_stylization.image_stylization_finetune',\n 'magenta.models.image_stylization.image_stylization_train',\n 'magenta.models.image_stylization.image_stylization_transform',\n 'magenta.models.improv_rnn.improv_rnn_create_dataset',\n 'magenta.models.improv_rnn.improv_rnn_generate',\n 'magenta.models.improv_rnn.improv_rnn_train',\n 'magenta.models.melody_rnn.melody_rnn_create_dataset',\n 'magenta.models.melody_rnn.melody_rnn_generate',\n 'magenta.models.melody_rnn.melody_rnn_train',\n 'magenta.models.polyphony_rnn.polyphony_rnn_create_dataset',\n 'magenta.models.polyphony_rnn.polyphony_rnn_generate',\n 'magenta.models.polyphony_rnn.polyphony_rnn_train',\n 'magenta.models.rl_tuner.rl_tuner_train',\n 'magenta.scripts.convert_dir_to_note_sequences',\n]\n\nsetup(\n name='magenta',\n version=__version__, # pylint: disable=undefined-variable\n 
description='Use machine learning to create art and music',\n long_description='',\n url='https://magenta.tensorflow.org/',\n author='Google Inc.',\n author_email='[email protected]',\n license='Apache 2',\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords='tensorflow machine learning magenta music art',\n\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n entry_points={\n 'console_scripts': ['%s = %s:console_entry_point' % (n, p) for n, p in\n ((s.split('.')[-1], s) for s in CONSOLE_SCRIPTS)],\n },\n\n include_package_data=True,\n package_data={\n 'magenta': ['models/image_stylization/evaluation_images/*.jpg'],\n },\n)\n\n", "path": "magenta/tools/pip/setup.py"}]} | 1,450 | 185 |
gh_patches_debug_19493 | rasdani/github-patches | git_diff | xorbitsai__inference-566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ENH: configurable XINFERENCE_HOME
### Is your feature request related to a problem? Please describe
XINFERENCE_HOME should be configurable.
### Describe the solution you'd like
We could starts with supporting the XINFERENCE_HOME env variable. This could be done by adding an util `get_xinference_home` and make sure this is the only way to get xinference home.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xinference/constants.py`
Content:
```
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from pathlib import Path
17
18 XINFERENCE_HOME = str(Path.home() / ".xinference")
19 XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache")
20 XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model")
21 XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs")
22 XINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, "image")
23
24 XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1"
25 XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0"
26 XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997
27
28 XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
29 XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xinference/constants.py b/xinference/constants.py
--- a/xinference/constants.py
+++ b/xinference/constants.py
@@ -15,7 +15,16 @@
import os
from pathlib import Path
-XINFERENCE_HOME = str(Path.home() / ".xinference")
+XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
+XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
+XINFERENCE_ENV_HOME_PATH = "XINFERENCE_HOME"
+
+
+def get_xinference_home():
+ return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference"))
+
+
+XINFERENCE_HOME = get_xinference_home()
XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache")
XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model")
XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs")
@@ -24,6 +33,3 @@
XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1"
XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0"
XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997
-
-XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
-XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
| {"golden_diff": "diff --git a/xinference/constants.py b/xinference/constants.py\n--- a/xinference/constants.py\n+++ b/xinference/constants.py\n@@ -15,7 +15,16 @@\n import os\n from pathlib import Path\n \n-XINFERENCE_HOME = str(Path.home() / \".xinference\")\n+XINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\n+XINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n+XINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\n+\n+\n+def get_xinference_home():\n+ return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n+\n+\n+XINFERENCE_HOME = get_xinference_home()\n XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\n XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\n XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\n@@ -24,6 +33,3 @@\n XINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\n XINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\n XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\n-\n-XINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\n-XINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n", "issue": "ENH: configurable XINFERENCE_HOME\n### Is your feature request related to a problem? Please describe\r\nXINFERENCE_HOME should be configurable.\r\n\r\n### Describe the solution you'd like\r\nWe could starts with supporting the XINFERENCE_HOME env variable. This could be done by adding an util `get_xinference_home` and make sure this is the only way to get xinference home.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_HOME = str(Path.home() / \".xinference\")\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\n", "path": "xinference/constants.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\n\n\ndef 
get_xinference_home():\n return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\n", "path": "xinference/constants.py"}]} | 697 | 294 |
gh_patches_debug_29802 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery tags include a raw dictionary that is difficult to use in APM
<!--
Thanks for taking the time for reporting an issue!
Before reporting an issue on dd-trace-py, please be sure to provide all
necessary information.
If you're hitting a bug, make sure that you're using the latest version of this
library.
-->
### Summary of problem
When using celery, context is automatically added to spans as tags from various argument calls. One of the tags for celery is `delivery_info`, which is a dict that provides valuable routing data. This data is difficult to use currently since we can't create facets or filters in APM with the raw dictionary as a string. (Image below)
I expect the dict values should be added as separate tag data, such that `celery.delivery_info.exchange`, `celery.delivery_info.priority`, etc are all separate tags.
<img width="727" alt="image" src="https://user-images.githubusercontent.com/1430167/206885304-9b20bdc5-097a-4e60-ad4e-8b29063092ef.png">
### Which version of dd-trace-py are you using?
1.4.0
### Which version of pip are you using?
22.3
### Which libraries and their versions are you using?
<details>
<summary>`pip freeze`</summary>
celery==5.2.2
</details>
### How can we reproduce your problem?
Run celery with ddtrace with celery integration enabled and observe the resulting tags on the `celery.run` span.
### What is the result that you get?
`celery.run` span has a tag:value pair roughly matching the following (dict is a string repr) `celery.delivery_info: "{'exchange':'', 'routing_key':'queue_name', ...}"`
### What is the result that you expected?
`celery.run` span should have tag:value pairs for each child key in the dictionary
`celery.delivery_info.exchange:''`
`celery.delivery_info.routing_key:'queue_name'`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/celery/utils.py`
Content:
```
1 from typing import Any
2 from typing import Dict
3 from weakref import WeakValueDictionary
4
5 from ddtrace.span import Span
6
7 from .constants import CTX_KEY
8
9
10 TAG_KEYS = frozenset(
11 [
12 ("compression", "celery.compression"),
13 ("correlation_id", "celery.correlation_id"),
14 ("countdown", "celery.countdown"),
15 ("delivery_info", "celery.delivery_info"),
16 ("eta", "celery.eta"),
17 ("exchange", "celery.exchange"),
18 ("expires", "celery.expires"),
19 ("hostname", "celery.hostname"),
20 ("id", "celery.id"),
21 ("priority", "celery.priority"),
22 ("queue", "celery.queue"),
23 ("reply_to", "celery.reply_to"),
24 ("retries", "celery.retries"),
25 ("routing_key", "celery.routing_key"),
26 ("serializer", "celery.serializer"),
27 ("timelimit", "celery.timelimit"),
28 # Celery 4.0 uses `origin` instead of `hostname`; this change preserves
29 # the same name for the tag despite Celery version
30 ("origin", "celery.hostname"),
31 ("state", "celery.state"),
32 ]
33 )
34
35
36 def set_tags_from_context(span, context):
37 # type: (Span, Dict[str, Any]) -> None
38 """Helper to extract meta values from a Celery Context"""
39
40 for key, tag_name in TAG_KEYS:
41 value = context.get(key)
42
43 # Skip this key if it is not set
44 if value is None or value == "":
45 continue
46
47 # Skip `timelimit` if it is not set (its default/unset value is a
48 # tuple or a list of `None` values
49 if key == "timelimit" and all(_ is None for _ in value):
50 continue
51
52 # Skip `retries` if its value is `0`
53 if key == "retries" and value == 0:
54 continue
55
56 span.set_tag(tag_name, value)
57
58
59 def attach_span(task, task_id, span, is_publish=False):
60 """Helper to propagate a `Span` for the given `Task` instance. This
61 function uses a `WeakValueDictionary` that stores a Datadog Span using
62 the `(task_id, is_publish)` as a key. This is useful when information must be
63 propagated from one Celery signal to another.
64
65 DEV: We use (task_id, is_publish) for the key to ensure that publishing a
66 task from within another task does not cause any conflicts.
67
68 This mostly happens when either a task fails and a retry policy is in place,
69 or when a task is manually retried (e.g. `task.retry()`), we end up trying
70 to publish a task with the same id as the task currently running.
71
72 Previously publishing the new task would overwrite the existing `celery.run` span
73 in the `weak_dict` causing that span to be forgotten and never finished.
74
75 NOTE: We cannot test for this well yet, because we do not run a celery worker,
76 and cannot run `task.apply_async()`
77 """
78 weak_dict = getattr(task, CTX_KEY, None)
79 if weak_dict is None:
80 weak_dict = WeakValueDictionary()
81 setattr(task, CTX_KEY, weak_dict)
82
83 weak_dict[(task_id, is_publish)] = span
84
85
86 def detach_span(task, task_id, is_publish=False):
87 """Helper to remove a `Span` in a Celery task when it's propagated.
88 This function handles tasks where the `Span` is not attached.
89 """
90 weak_dict = getattr(task, CTX_KEY, None)
91 if weak_dict is None:
92 return
93
94 # DEV: See note in `attach_span` for key info
95 try:
96 del weak_dict[(task_id, is_publish)]
97 except KeyError:
98 pass
99
100
101 def retrieve_span(task, task_id, is_publish=False):
102 """Helper to retrieve an active `Span` stored in a `Task`
103 instance
104 """
105 weak_dict = getattr(task, CTX_KEY, None)
106 if weak_dict is None:
107 return
108 else:
109 # DEV: See note in `attach_span` for key info
110 return weak_dict.get((task_id, is_publish))
111
112
113 def retrieve_task_id(context):
114 """Helper to retrieve the `Task` identifier from the message `body`.
115 This helper supports Protocol Version 1 and 2. The Protocol is well
116 detailed in the official documentation:
117 http://docs.celeryproject.org/en/latest/internals/protocol.html
118 """
119 headers = context.get("headers")
120 body = context.get("body")
121 if headers:
122 # Protocol Version 2 (default from Celery 4.0)
123 return headers.get("id")
124 else:
125 # Protocol Version 1
126 return body.get("id")
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/celery/utils.py b/ddtrace/contrib/celery/utils.py
--- a/ddtrace/contrib/celery/utils.py
+++ b/ddtrace/contrib/celery/utils.py
@@ -2,6 +2,7 @@
from typing import Dict
from weakref import WeakValueDictionary
+from ddtrace.contrib.trace_utils import set_flattened_tags
from ddtrace.span import Span
from .constants import CTX_KEY
@@ -33,27 +34,37 @@
)
+def should_skip_context_value(key, value):
+ # type: (str, Any) -> bool
+ # Skip this key if it is not set
+ if value is None or value == "":
+ return True
+
+ # Skip `timelimit` if it is not set (its default/unset value is a
+ # tuple or a list of `None` values
+ if key == "timelimit" and all(_ is None for _ in value):
+ return True
+
+ # Skip `retries` if its value is `0`
+ if key == "retries" and value == 0:
+ return True
+
+ return False
+
+
def set_tags_from_context(span, context):
# type: (Span, Dict[str, Any]) -> None
"""Helper to extract meta values from a Celery Context"""
+ context_tags = []
for key, tag_name in TAG_KEYS:
value = context.get(key)
-
- # Skip this key if it is not set
- if value is None or value == "":
- continue
-
- # Skip `timelimit` if it is not set (its default/unset value is a
- # tuple or a list of `None` values
- if key == "timelimit" and all(_ is None for _ in value):
+ if should_skip_context_value(key, value):
continue
- # Skip `retries` if its value is `0`
- if key == "retries" and value == 0:
- continue
+ context_tags.append((tag_name, value))
- span.set_tag(tag_name, value)
+ set_flattened_tags(span, context_tags)
def attach_span(task, task_id, span, is_publish=False):
| {"golden_diff": "diff --git a/ddtrace/contrib/celery/utils.py b/ddtrace/contrib/celery/utils.py\n--- a/ddtrace/contrib/celery/utils.py\n+++ b/ddtrace/contrib/celery/utils.py\n@@ -2,6 +2,7 @@\n from typing import Dict\n from weakref import WeakValueDictionary\n \n+from ddtrace.contrib.trace_utils import set_flattened_tags\n from ddtrace.span import Span\n \n from .constants import CTX_KEY\n@@ -33,27 +34,37 @@\n )\n \n \n+def should_skip_context_value(key, value):\n+ # type: (str, Any) -> bool\n+ # Skip this key if it is not set\n+ if value is None or value == \"\":\n+ return True\n+\n+ # Skip `timelimit` if it is not set (its default/unset value is a\n+ # tuple or a list of `None` values\n+ if key == \"timelimit\" and all(_ is None for _ in value):\n+ return True\n+\n+ # Skip `retries` if its value is `0`\n+ if key == \"retries\" and value == 0:\n+ return True\n+\n+ return False\n+\n+\n def set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n \n+ context_tags = []\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n-\n- # Skip this key if it is not set\n- if value is None or value == \"\":\n- continue\n-\n- # Skip `timelimit` if it is not set (its default/unset value is a\n- # tuple or a list of `None` values\n- if key == \"timelimit\" and all(_ is None for _ in value):\n+ if should_skip_context_value(key, value):\n continue\n \n- # Skip `retries` if its value is `0`\n- if key == \"retries\" and value == 0:\n- continue\n+ context_tags.append((tag_name, value))\n \n- span.set_tag(tag_name, value)\n+ set_flattened_tags(span, context_tags)\n \n \n def attach_span(task, task_id, span, is_publish=False):\n", "issue": "Celery tags include a raw dictionary that is difficult to use in APM\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\nWhen using celery, context is automatically added to spans as tags from various argument calls. One of the tags for celery is `delivery_info`, which is a dict that provides valuable routing data. This data is difficult to use currently since we can't create facets or filters in APM with the raw dictionary as a string. 
(Image below)\r\n\r\nI expect the dict values should be added as separate tag data, such that `celery.delivery_info.exchange`, `celery.delivery_info.priority`, etc are all separate tags.\r\n\r\n<img width=\"727\" alt=\"image\" src=\"https://user-images.githubusercontent.com/1430167/206885304-9b20bdc5-097a-4e60-ad4e-8b29063092ef.png\">\r\n\r\n\r\n### Which version of dd-trace-py are you using?\r\n1.4.0\r\n\r\n\r\n### Which version of pip are you using?\r\n22.3\r\n\r\n\r\n### Which libraries and their versions are you using?\r\n\r\n<details>\r\n <summary>`pip freeze`</summary>\r\n celery==5.2.2\r\n</details>\r\n\r\n### How can we reproduce your problem?\r\nRun celery with ddtrace with celery integration enabled and observe the resulting tags on the `celery.run` span.\r\n\r\n\r\n### What is the result that you get?\r\n`celery.run` span has a tag:value pair roughly matching the following (dict is a string repr) `celery.delivery_info: \"{'exchange':'', 'routing_key':'queue_name', ...}\"`\r\n\r\n### What is the result that you expected?\r\n`celery.run` span should have tag:value pairs for each child key in the dictionary\r\n`celery.delivery_info.exchange:''`\r\n`celery.delivery_info.routing_key:'queue_name'`\n", "before_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom weakref import WeakValueDictionary\n\nfrom ddtrace.span import Span\n\nfrom .constants import CTX_KEY\n\n\nTAG_KEYS = frozenset(\n [\n (\"compression\", \"celery.compression\"),\n (\"correlation_id\", \"celery.correlation_id\"),\n (\"countdown\", \"celery.countdown\"),\n (\"delivery_info\", \"celery.delivery_info\"),\n (\"eta\", \"celery.eta\"),\n (\"exchange\", \"celery.exchange\"),\n (\"expires\", \"celery.expires\"),\n (\"hostname\", \"celery.hostname\"),\n (\"id\", \"celery.id\"),\n (\"priority\", \"celery.priority\"),\n (\"queue\", \"celery.queue\"),\n (\"reply_to\", \"celery.reply_to\"),\n (\"retries\", \"celery.retries\"),\n (\"routing_key\", \"celery.routing_key\"),\n (\"serializer\", \"celery.serializer\"),\n (\"timelimit\", \"celery.timelimit\"),\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n (\"origin\", \"celery.hostname\"),\n (\"state\", \"celery.state\"),\n ]\n)\n\n\ndef set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n\n # Skip this key if it is not set\n if value is None or value == \"\":\n continue\n\n # Skip `timelimit` if it is not set (its default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and all(_ is None for _ in value):\n continue\n\n # Skip `retries` if its value is `0`\n if key == \"retries\" and value == 0:\n continue\n\n span.set_tag(tag_name, value)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `WeakValueDictionary` that stores a Datadog Span using\n the `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n DEV: We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retried (e.g. 
`task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `weak_dict` causing that span to be forgotten and never finished.\n\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n weak_dict = WeakValueDictionary()\n setattr(task, CTX_KEY, weak_dict)\n\n weak_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n\n # DEV: See note in `attach_span` for key info\n try:\n del weak_dict[(task_id, is_publish)]\n except KeyError:\n pass\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n else:\n # DEV: See note in `attach_span` for key info\n return weak_dict.get((task_id, is_publish))\n\n\ndef retrieve_task_id(context):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = context.get(\"headers\")\n body = context.get(\"body\")\n if headers:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n else:\n # Protocol Version 1\n return body.get(\"id\")\n", "path": "ddtrace/contrib/celery/utils.py"}], "after_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom weakref import WeakValueDictionary\n\nfrom ddtrace.contrib.trace_utils import set_flattened_tags\nfrom ddtrace.span import Span\n\nfrom .constants import CTX_KEY\n\n\nTAG_KEYS = frozenset(\n [\n (\"compression\", \"celery.compression\"),\n (\"correlation_id\", \"celery.correlation_id\"),\n (\"countdown\", \"celery.countdown\"),\n (\"delivery_info\", \"celery.delivery_info\"),\n (\"eta\", \"celery.eta\"),\n (\"exchange\", \"celery.exchange\"),\n (\"expires\", \"celery.expires\"),\n (\"hostname\", \"celery.hostname\"),\n (\"id\", \"celery.id\"),\n (\"priority\", \"celery.priority\"),\n (\"queue\", \"celery.queue\"),\n (\"reply_to\", \"celery.reply_to\"),\n (\"retries\", \"celery.retries\"),\n (\"routing_key\", \"celery.routing_key\"),\n (\"serializer\", \"celery.serializer\"),\n (\"timelimit\", \"celery.timelimit\"),\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n (\"origin\", \"celery.hostname\"),\n (\"state\", \"celery.state\"),\n ]\n)\n\n\ndef should_skip_context_value(key, value):\n # type: (str, Any) -> bool\n # Skip this key if it is not set\n if value is None or value == \"\":\n return True\n\n # Skip `timelimit` if it is not set (its default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and all(_ is None for _ in value):\n return True\n\n # Skip `retries` if its value is `0`\n if key == \"retries\" and value == 0:\n return True\n\n return False\n\n\ndef set_tags_from_context(span, context):\n # type: (Span, Dict[str, Any]) -> None\n \"\"\"Helper to extract meta values 
from a Celery Context\"\"\"\n\n context_tags = []\n for key, tag_name in TAG_KEYS:\n value = context.get(key)\n if should_skip_context_value(key, value):\n continue\n\n context_tags.append((tag_name, value))\n\n set_flattened_tags(span, context_tags)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `WeakValueDictionary` that stores a Datadog Span using\n the `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n DEV: We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retried (e.g. `task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `weak_dict` causing that span to be forgotten and never finished.\n\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n weak_dict = WeakValueDictionary()\n setattr(task, CTX_KEY, weak_dict)\n\n weak_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n\n # DEV: See note in `attach_span` for key info\n try:\n del weak_dict[(task_id, is_publish)]\n except KeyError:\n pass\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n weak_dict = getattr(task, CTX_KEY, None)\n if weak_dict is None:\n return\n else:\n # DEV: See note in `attach_span` for key info\n return weak_dict.get((task_id, is_publish))\n\n\ndef retrieve_task_id(context):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = context.get(\"headers\")\n body = context.get(\"body\")\n if headers:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n else:\n # Protocol Version 1\n return body.get(\"id\")\n", "path": "ddtrace/contrib/celery/utils.py"}]} | 2,056 | 514 |
gh_patches_debug_26280 | rasdani/github-patches | git_diff | getmoto__moto-2393 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
iot_data.update_thing_shadow should raise ConflictException, when specified shadow version is not same as backend
## summary
This document says:
> When you supply a version with an update request, the service rejects the request with an HTTP 409 conflict response code if the current version of the state document does not match the version supplied.

https://docs.aws.amazon.com/iot/latest/developerguide/using-device-shadows.html

But moto will accept an update request that has a different shadow version.
## moto and boto version
boto 2.49.0
boto3 1.9.215
moto 1.3.10
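## reproduction sketch (added for illustration)
A minimal moto-based sketch of the behaviour described above; the thing name, region and payload values are made up:

```python
import json

import boto3
from moto import mock_iot, mock_iotdata


@mock_iot
@mock_iotdata
def update_with_stale_version():
    # The iot-data backend looks the thing up in the iot backend, so create it first.
    iot = boto3.client("iot", region_name="us-east-1")
    iot.create_thing(thingName="my-thing")

    data = boto3.client("iot-data", region_name="us-east-1")
    # First update creates the shadow; its version becomes 1.
    data.update_thing_shadow(
        thingName="my-thing",
        payload=json.dumps({"state": {"desired": {"led": "on"}}}),
    )

    # A payload carrying a stale version should be rejected with HTTP 409
    # (ConflictException), as the real service does.
    try:
        data.update_thing_shadow(
            thingName="my-thing",
            payload=json.dumps({"state": {"desired": {"led": "off"}}, "version": 99}),
        )
        print("moto accepted the stale version (the reported bug)")
    except data.exceptions.ConflictException:
        print("got the expected 409 ConflictException")


update_with_stale_version()
```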
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/iotdata/exceptions.py`
Content:
```
1 from __future__ import unicode_literals
2 from moto.core.exceptions import JsonRESTError
3
4
5 class IoTDataPlaneClientError(JsonRESTError):
6 code = 400
7
8
9 class ResourceNotFoundException(IoTDataPlaneClientError):
10 def __init__(self):
11 self.code = 404
12 super(ResourceNotFoundException, self).__init__(
13 "ResourceNotFoundException",
14 "The specified resource does not exist"
15 )
16
17
18 class InvalidRequestException(IoTDataPlaneClientError):
19 def __init__(self, message):
20 self.code = 400
21 super(InvalidRequestException, self).__init__(
22 "InvalidRequestException", message
23 )
24
```
Path: `moto/iotdata/models.py`
Content:
```
1 from __future__ import unicode_literals
2 import json
3 import time
4 import boto3
5 import jsondiff
6 from moto.core import BaseBackend, BaseModel
7 from moto.iot import iot_backends
8 from .exceptions import (
9 ResourceNotFoundException,
10 InvalidRequestException
11 )
12
13
14 class FakeShadow(BaseModel):
15 """See the specification:
16 http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html
17 """
18 def __init__(self, desired, reported, requested_payload, version, deleted=False):
19 self.desired = desired
20 self.reported = reported
21 self.requested_payload = requested_payload
22 self.version = version
23 self.timestamp = int(time.time())
24 self.deleted = deleted
25
26 self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp)
27 self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp)
28
29 @classmethod
30 def create_from_previous_version(cls, previous_shadow, payload):
31 """
32 set None to payload when you want to delete shadow
33 """
34 version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {})
35
36 if payload is None:
37 # if given payload is None, delete existing payload
38 # this means the request was delete_thing_shadow
39 shadow = FakeShadow(None, None, None, version, deleted=True)
40 return shadow
41
42 # we can make sure that payload has 'state' key
43 desired = payload['state'].get(
44 'desired',
45 previous_payload.get('state', {}).get('desired', None)
46 )
47 reported = payload['state'].get(
48 'reported',
49 previous_payload.get('state', {}).get('reported', None)
50 )
51 shadow = FakeShadow(desired, reported, payload, version)
52 return shadow
53
54 @classmethod
55 def parse_payload(cls, desired, reported):
56 if desired is None:
57 delta = reported
58 elif reported is None:
59 delta = desired
60 else:
61 delta = jsondiff.diff(desired, reported)
62 return delta
63
64 def _create_metadata_from_state(self, state, ts):
65 """
66 state must be disired or reported stype dict object
67 replces primitive type with {"timestamp": ts} in dict
68 """
69 if state is None:
70 return None
71
72 def _f(elem, ts):
73 if isinstance(elem, dict):
74 return {_: _f(elem[_], ts) for _ in elem.keys()}
75 if isinstance(elem, list):
76 return [_f(_, ts) for _ in elem]
77 return {"timestamp": ts}
78 return _f(state, ts)
79
80 def to_response_dict(self):
81 desired = self.requested_payload['state'].get('desired', None)
82 reported = self.requested_payload['state'].get('reported', None)
83
84 payload = {}
85 if desired is not None:
86 payload['desired'] = desired
87 if reported is not None:
88 payload['reported'] = reported
89
90 metadata = {}
91 if desired is not None:
92 metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp)
93 if reported is not None:
94 metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp)
95 return {
96 'state': payload,
97 'metadata': metadata,
98 'timestamp': self.timestamp,
99 'version': self.version
100 }
101
102 def to_dict(self, include_delta=True):
103 """returning nothing except for just top-level keys for now.
104 """
105 if self.deleted:
106 return {
107 'timestamp': self.timestamp,
108 'version': self.version
109 }
110 delta = self.parse_payload(self.desired, self.reported)
111 payload = {}
112 if self.desired is not None:
113 payload['desired'] = self.desired
114 if self.reported is not None:
115 payload['reported'] = self.reported
116 if include_delta and (delta is not None and len(delta.keys()) != 0):
117 payload['delta'] = delta
118
119 metadata = {}
120 if self.metadata_desired is not None:
121 metadata['desired'] = self.metadata_desired
122 if self.metadata_reported is not None:
123 metadata['reported'] = self.metadata_reported
124
125 return {
126 'state': payload,
127 'metadata': metadata,
128 'timestamp': self.timestamp,
129 'version': self.version
130 }
131
132
133 class IoTDataPlaneBackend(BaseBackend):
134 def __init__(self, region_name=None):
135 super(IoTDataPlaneBackend, self).__init__()
136 self.region_name = region_name
137
138 def reset(self):
139 region_name = self.region_name
140 self.__dict__ = {}
141 self.__init__(region_name)
142
143 def update_thing_shadow(self, thing_name, payload):
144 """
145 spec of payload:
146 - need node `state`
147 - state node must be an Object
148 - State contains an invalid node: 'foo'
149 """
150 thing = iot_backends[self.region_name].describe_thing(thing_name)
151
152 # validate
153 try:
154 payload = json.loads(payload)
155 except ValueError:
156 raise InvalidRequestException('invalid json')
157 if 'state' not in payload:
158 raise InvalidRequestException('need node `state`')
159 if not isinstance(payload['state'], dict):
160 raise InvalidRequestException('state node must be an Object')
161 if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):
162 raise InvalidRequestException('State contains an invalid node')
163
164 new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
165 thing.thing_shadow = new_shadow
166 return thing.thing_shadow
167
168 def get_thing_shadow(self, thing_name):
169 thing = iot_backends[self.region_name].describe_thing(thing_name)
170
171 if thing.thing_shadow is None or thing.thing_shadow.deleted:
172 raise ResourceNotFoundException()
173 return thing.thing_shadow
174
175 def delete_thing_shadow(self, thing_name):
176 """after deleting, get_thing_shadow will raise ResourceNotFound.
177 But version of the shadow keep increasing...
178 """
179 thing = iot_backends[self.region_name].describe_thing(thing_name)
180 if thing.thing_shadow is None:
181 raise ResourceNotFoundException()
182 payload = None
183 new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
184 thing.thing_shadow = new_shadow
185 return thing.thing_shadow
186
187 def publish(self, topic, qos, payload):
188 # do nothing because client won't know about the result
189 return None
190
191
192 available_regions = boto3.session.Session().get_available_regions("iot-data")
193 iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions}
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/moto/iotdata/exceptions.py b/moto/iotdata/exceptions.py
--- a/moto/iotdata/exceptions.py
+++ b/moto/iotdata/exceptions.py
@@ -21,3 +21,11 @@
super(InvalidRequestException, self).__init__(
"InvalidRequestException", message
)
+
+
+class ConflictException(IoTDataPlaneClientError):
+ def __init__(self, message):
+ self.code = 409
+ super(ConflictException, self).__init__(
+ "ConflictException", message
+ )
diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py
--- a/moto/iotdata/models.py
+++ b/moto/iotdata/models.py
@@ -6,6 +6,7 @@
from moto.core import BaseBackend, BaseModel
from moto.iot import iot_backends
from .exceptions import (
+ ConflictException,
ResourceNotFoundException,
InvalidRequestException
)
@@ -161,6 +162,8 @@
if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):
raise InvalidRequestException('State contains an invalid node')
+ if 'version' in payload and thing.thing_shadow.version != payload['version']:
+ raise ConflictException('Version conflict')
new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)
thing.thing_shadow = new_shadow
return thing.thing_shadow
| {"golden_diff": "diff --git a/moto/iotdata/exceptions.py b/moto/iotdata/exceptions.py\n--- a/moto/iotdata/exceptions.py\n+++ b/moto/iotdata/exceptions.py\n@@ -21,3 +21,11 @@\n super(InvalidRequestException, self).__init__(\n \"InvalidRequestException\", message\n )\n+\n+\n+class ConflictException(IoTDataPlaneClientError):\n+ def __init__(self, message):\n+ self.code = 409\n+ super(ConflictException, self).__init__(\n+ \"ConflictException\", message\n+ )\ndiff --git a/moto/iotdata/models.py b/moto/iotdata/models.py\n--- a/moto/iotdata/models.py\n+++ b/moto/iotdata/models.py\n@@ -6,6 +6,7 @@\n from moto.core import BaseBackend, BaseModel\n from moto.iot import iot_backends\n from .exceptions import (\n+ ConflictException,\n ResourceNotFoundException,\n InvalidRequestException\n )\n@@ -161,6 +162,8 @@\n if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):\n raise InvalidRequestException('State contains an invalid node')\n \n+ if 'version' in payload and thing.thing_shadow.version != payload['version']:\n+ raise ConflictException('Version conflict')\n new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)\n thing.thing_shadow = new_shadow\n return thing.thing_shadow\n", "issue": "iot_data.update_thing_shadow should raise ConflictException, when specified shadow version is not same as backend\n## summary\r\nTthis document says,\r\n>When you supply a version with an update request, the service rejects the request with an HTTP 409 conflict response code if the current version of the state document does not match the version supplied.\r\n\r\nhttps://docs.aws.amazon.com/iot/latest/developerguide/using-device-shadows.html \t\r\n\r\nBut, moto will accept update request ,that has different shadow version\r\n\r\n## moto and boto version\r\nboto 2.49.0\r\nboto3 1.9.215\r\nmoto 1.3.10\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom moto.core.exceptions import JsonRESTError\n\n\nclass IoTDataPlaneClientError(JsonRESTError):\n code = 400\n\n\nclass ResourceNotFoundException(IoTDataPlaneClientError):\n def __init__(self):\n self.code = 404\n super(ResourceNotFoundException, self).__init__(\n \"ResourceNotFoundException\",\n \"The specified resource does not exist\"\n )\n\n\nclass InvalidRequestException(IoTDataPlaneClientError):\n def __init__(self, message):\n self.code = 400\n super(InvalidRequestException, self).__init__(\n \"InvalidRequestException\", message\n )\n", "path": "moto/iotdata/exceptions.py"}, {"content": "from __future__ import unicode_literals\nimport json\nimport time\nimport boto3\nimport jsondiff\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.iot import iot_backends\nfrom .exceptions import (\n ResourceNotFoundException,\n InvalidRequestException\n)\n\n\nclass FakeShadow(BaseModel):\n \"\"\"See the specification:\n http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html\n \"\"\"\n def __init__(self, desired, reported, requested_payload, version, deleted=False):\n self.desired = desired\n self.reported = reported\n self.requested_payload = requested_payload\n self.version = version\n self.timestamp = int(time.time())\n self.deleted = deleted\n\n self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp)\n self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp)\n\n @classmethod\n def create_from_previous_version(cls, previous_shadow, payload):\n \"\"\"\n set None to payload when you want to delete 
shadow\n \"\"\"\n version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {})\n\n if payload is None:\n # if given payload is None, delete existing payload\n # this means the request was delete_thing_shadow\n shadow = FakeShadow(None, None, None, version, deleted=True)\n return shadow\n\n # we can make sure that payload has 'state' key\n desired = payload['state'].get(\n 'desired',\n previous_payload.get('state', {}).get('desired', None)\n )\n reported = payload['state'].get(\n 'reported',\n previous_payload.get('state', {}).get('reported', None)\n )\n shadow = FakeShadow(desired, reported, payload, version)\n return shadow\n\n @classmethod\n def parse_payload(cls, desired, reported):\n if desired is None:\n delta = reported\n elif reported is None:\n delta = desired\n else:\n delta = jsondiff.diff(desired, reported)\n return delta\n\n def _create_metadata_from_state(self, state, ts):\n \"\"\"\n state must be disired or reported stype dict object\n replces primitive type with {\"timestamp\": ts} in dict\n \"\"\"\n if state is None:\n return None\n\n def _f(elem, ts):\n if isinstance(elem, dict):\n return {_: _f(elem[_], ts) for _ in elem.keys()}\n if isinstance(elem, list):\n return [_f(_, ts) for _ in elem]\n return {\"timestamp\": ts}\n return _f(state, ts)\n\n def to_response_dict(self):\n desired = self.requested_payload['state'].get('desired', None)\n reported = self.requested_payload['state'].get('reported', None)\n\n payload = {}\n if desired is not None:\n payload['desired'] = desired\n if reported is not None:\n payload['reported'] = reported\n\n metadata = {}\n if desired is not None:\n metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp)\n if reported is not None:\n metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp)\n return {\n 'state': payload,\n 'metadata': metadata,\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n\n def to_dict(self, include_delta=True):\n \"\"\"returning nothing except for just top-level keys for now.\n \"\"\"\n if self.deleted:\n return {\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n delta = self.parse_payload(self.desired, self.reported)\n payload = {}\n if self.desired is not None:\n payload['desired'] = self.desired\n if self.reported is not None:\n payload['reported'] = self.reported\n if include_delta and (delta is not None and len(delta.keys()) != 0):\n payload['delta'] = delta\n\n metadata = {}\n if self.metadata_desired is not None:\n metadata['desired'] = self.metadata_desired\n if self.metadata_reported is not None:\n metadata['reported'] = self.metadata_reported\n\n return {\n 'state': payload,\n 'metadata': metadata,\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n\n\nclass IoTDataPlaneBackend(BaseBackend):\n def __init__(self, region_name=None):\n super(IoTDataPlaneBackend, self).__init__()\n self.region_name = region_name\n\n def reset(self):\n region_name = self.region_name\n self.__dict__ = {}\n self.__init__(region_name)\n\n def update_thing_shadow(self, thing_name, payload):\n \"\"\"\n spec of payload:\n - need node `state`\n - state node must be an Object\n - State contains an invalid node: 'foo'\n \"\"\"\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n\n # validate\n try:\n payload = json.loads(payload)\n except ValueError:\n raise InvalidRequestException('invalid json')\n if 'state' not in payload:\n raise InvalidRequestException('need 
node `state`')\n if not isinstance(payload['state'], dict):\n raise InvalidRequestException('state node must be an Object')\n if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']):\n raise InvalidRequestException('State contains an invalid node')\n\n new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)\n thing.thing_shadow = new_shadow\n return thing.thing_shadow\n\n def get_thing_shadow(self, thing_name):\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n\n if thing.thing_shadow is None or thing.thing_shadow.deleted:\n raise ResourceNotFoundException()\n return thing.thing_shadow\n\n def delete_thing_shadow(self, thing_name):\n \"\"\"after deleting, get_thing_shadow will raise ResourceNotFound.\n But version of the shadow keep increasing...\n \"\"\"\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n if thing.thing_shadow is None:\n raise ResourceNotFoundException()\n payload = None\n new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)\n thing.thing_shadow = new_shadow\n return thing.thing_shadow\n\n def publish(self, topic, qos, payload):\n # do nothing because client won't know about the result\n return None\n\n\navailable_regions = boto3.session.Session().get_available_regions(\"iot-data\")\niotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions}\n", "path": "moto/iotdata/models.py"}], "after_files": [{"content": "from __future__ import unicode_literals\nfrom moto.core.exceptions import JsonRESTError\n\n\nclass IoTDataPlaneClientError(JsonRESTError):\n code = 400\n\n\nclass ResourceNotFoundException(IoTDataPlaneClientError):\n def __init__(self):\n self.code = 404\n super(ResourceNotFoundException, self).__init__(\n \"ResourceNotFoundException\",\n \"The specified resource does not exist\"\n )\n\n\nclass InvalidRequestException(IoTDataPlaneClientError):\n def __init__(self, message):\n self.code = 400\n super(InvalidRequestException, self).__init__(\n \"InvalidRequestException\", message\n )\n\n\nclass ConflictException(IoTDataPlaneClientError):\n def __init__(self, message):\n self.code = 409\n super(ConflictException, self).__init__(\n \"ConflictException\", message\n )\n", "path": "moto/iotdata/exceptions.py"}, {"content": "from __future__ import unicode_literals\nimport json\nimport time\nimport boto3\nimport jsondiff\nfrom moto.core import BaseBackend, BaseModel\nfrom moto.iot import iot_backends\nfrom .exceptions import (\n ConflictException,\n ResourceNotFoundException,\n InvalidRequestException\n)\n\n\nclass FakeShadow(BaseModel):\n \"\"\"See the specification:\n http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html\n \"\"\"\n def __init__(self, desired, reported, requested_payload, version, deleted=False):\n self.desired = desired\n self.reported = reported\n self.requested_payload = requested_payload\n self.version = version\n self.timestamp = int(time.time())\n self.deleted = deleted\n\n self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp)\n self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp)\n\n @classmethod\n def create_from_previous_version(cls, previous_shadow, payload):\n \"\"\"\n set None to payload when you want to delete shadow\n \"\"\"\n version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {})\n\n if payload is None:\n # if given 
payload is None, delete existing payload\n # this means the request was delete_thing_shadow\n shadow = FakeShadow(None, None, None, version, deleted=True)\n return shadow\n\n # we can make sure that payload has 'state' key\n desired = payload['state'].get(\n 'desired',\n previous_payload.get('state', {}).get('desired', None)\n )\n reported = payload['state'].get(\n 'reported',\n previous_payload.get('state', {}).get('reported', None)\n )\n shadow = FakeShadow(desired, reported, payload, version)\n return shadow\n\n @classmethod\n def parse_payload(cls, desired, reported):\n if desired is None:\n delta = reported\n elif reported is None:\n delta = desired\n else:\n delta = jsondiff.diff(desired, reported)\n return delta\n\n def _create_metadata_from_state(self, state, ts):\n \"\"\"\n state must be disired or reported stype dict object\n replces primitive type with {\"timestamp\": ts} in dict\n \"\"\"\n if state is None:\n return None\n\n def _f(elem, ts):\n if isinstance(elem, dict):\n return {_: _f(elem[_], ts) for _ in elem.keys()}\n if isinstance(elem, list):\n return [_f(_, ts) for _ in elem]\n return {\"timestamp\": ts}\n return _f(state, ts)\n\n def to_response_dict(self):\n desired = self.requested_payload['state'].get('desired', None)\n reported = self.requested_payload['state'].get('reported', None)\n\n payload = {}\n if desired is not None:\n payload['desired'] = desired\n if reported is not None:\n payload['reported'] = reported\n\n metadata = {}\n if desired is not None:\n metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp)\n if reported is not None:\n metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp)\n return {\n 'state': payload,\n 'metadata': metadata,\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n\n def to_dict(self, include_delta=True):\n \"\"\"returning nothing except for just top-level keys for now.\n \"\"\"\n if self.deleted:\n return {\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n delta = self.parse_payload(self.desired, self.reported)\n payload = {}\n if self.desired is not None:\n payload['desired'] = self.desired\n if self.reported is not None:\n payload['reported'] = self.reported\n if include_delta and (delta is not None and len(delta.keys()) != 0):\n payload['delta'] = delta\n\n metadata = {}\n if self.metadata_desired is not None:\n metadata['desired'] = self.metadata_desired\n if self.metadata_reported is not None:\n metadata['reported'] = self.metadata_reported\n\n return {\n 'state': payload,\n 'metadata': metadata,\n 'timestamp': self.timestamp,\n 'version': self.version\n }\n\n\nclass IoTDataPlaneBackend(BaseBackend):\n def __init__(self, region_name=None):\n super(IoTDataPlaneBackend, self).__init__()\n self.region_name = region_name\n\n def reset(self):\n region_name = self.region_name\n self.__dict__ = {}\n self.__init__(region_name)\n\n def update_thing_shadow(self, thing_name, payload):\n \"\"\"\n spec of payload:\n - need node `state`\n - state node must be an Object\n - State contains an invalid node: 'foo'\n \"\"\"\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n\n # validate\n try:\n payload = json.loads(payload)\n except ValueError:\n raise InvalidRequestException('invalid json')\n if 'state' not in payload:\n raise InvalidRequestException('need node `state`')\n if not isinstance(payload['state'], dict):\n raise InvalidRequestException('state node must be an Object')\n if any(_ for _ in payload['state'].keys() if _ not in ['desired', 
'reported']):\n raise InvalidRequestException('State contains an invalid node')\n\n if 'version' in payload and thing.thing_shadow.version != payload['version']:\n raise ConflictException('Version conflict')\n new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)\n thing.thing_shadow = new_shadow\n return thing.thing_shadow\n\n def get_thing_shadow(self, thing_name):\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n\n if thing.thing_shadow is None or thing.thing_shadow.deleted:\n raise ResourceNotFoundException()\n return thing.thing_shadow\n\n def delete_thing_shadow(self, thing_name):\n \"\"\"after deleting, get_thing_shadow will raise ResourceNotFound.\n But version of the shadow keep increasing...\n \"\"\"\n thing = iot_backends[self.region_name].describe_thing(thing_name)\n if thing.thing_shadow is None:\n raise ResourceNotFoundException()\n payload = None\n new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload)\n thing.thing_shadow = new_shadow\n return thing.thing_shadow\n\n def publish(self, topic, qos, payload):\n # do nothing because client won't know about the result\n return None\n\n\navailable_regions = boto3.session.Session().get_available_regions(\"iot-data\")\niotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions}\n", "path": "moto/iotdata/models.py"}]} | 2,544 | 334 |
gh_patches_debug_60837 | rasdani/github-patches | git_diff | openedx__ecommerce-348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running migrations for Travis builds
We run migrations on Travis builds to ensure that no migrations are missing and that they work on fresh installs.
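A sketch of the kind of check such a build step amounts to (hypothetical, not taken from the repository's CI config; it assumes `DJANGO_SETTINGS_MODULE` points at the project settings):

```python
import django
from django.core.management import call_command

# Assumes DJANGO_SETTINGS_MODULE is already set for the project.
django.setup()

# Applying every migration against a fresh, empty database surfaces both
# missing migrations and data migrations that only work on populated schemas.
call_command("migrate", interactive=False, verbosity=1)
```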
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 def create_shipping_event(apps, schema_editor):
8 """
9
10 Create a single new shipping event type that can be applied to an order. This will allow us to initiate order
11 shipment.
12
13 """
14 # Create all our Product Types.
15 ShippingEventType = apps.get_model("order", "ShippingEventType")
16 shipped_event = ShippingEventType(code="shipped", name="Shipped")
17 shipped_event.save()
18
19
20 class Migration(migrations.Migration):
21
22 dependencies = [
23 ('order', '0002_auto_20141007_2032'),
24 ]
25
26 operations = [
27 migrations.RunPython(create_shipping_event),
28 ]
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py
@@ -13,8 +13,7 @@
"""
# Create all our Product Types.
ShippingEventType = apps.get_model("order", "ShippingEventType")
- shipped_event = ShippingEventType(code="shipped", name="Shipped")
- shipped_event.save()
+ ShippingEventType.objects.create(code="shipped", name="Shipped")
class Migration(migrations.Migration):
| {"golden_diff": "diff --git a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n--- a/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n+++ b/ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py\n@@ -13,8 +13,7 @@\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n- shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n- shipped_event.save()\n+ ShippingEventType.objects.create(code=\"shipped\", name=\"Shipped\")\n \n \n class Migration(migrations.Migration):\n", "issue": "Running migrations for Travis builds\nWe run migrations to ensure no migrations are missing, and they work on fresh installs.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_shipping_event(apps, schema_editor):\n \"\"\"\n\n Create a single new shipping event type that can be applied to an order. This will allow us to initiate order\n shipment.\n\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n shipped_event = ShippingEventType(code=\"shipped\", name=\"Shipped\")\n shipped_event.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.RunPython(create_shipping_event),\n ]\n", "path": "ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef create_shipping_event(apps, schema_editor):\n \"\"\"\n\n Create a single new shipping event type that can be applied to an order. This will allow us to initiate order\n shipment.\n\n \"\"\"\n # Create all our Product Types.\n ShippingEventType = apps.get_model(\"order\", \"ShippingEventType\")\n ShippingEventType.objects.create(code=\"shipped\", name=\"Shipped\")\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('order', '0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.RunPython(create_shipping_event),\n ]\n", "path": "ecommerce/extensions/order/migrations/0003_auto_20150224_1520.py"}]} | 522 | 207 |
gh_patches_debug_31561 | rasdani/github-patches | git_diff | nilearn__nilearn-1217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
np.corrcoef or ConnectivityMeasure
Two connectivity examples use `np.corrcoef` to compute the correlation matrix:
- `plot_signal_extraction`
- `plot_probabilistic_atlas_extraction`

Shall we rely on `ConnectivityMeasure(cov_estimator=EmpiricalCovariance(), kind='correlation')` instead?
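For reference, a minimal sketch of the `ConnectivityMeasure`-based computation (the random array below is only a stand-in for the masker output used in those examples):

```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance
from nilearn.connectome import ConnectivityMeasure

# Stand-in for the (n_timepoints, n_regions) time series returned by the masker.
time_series = np.random.RandomState(0).randn(176, 39)

correlation_measure = ConnectivityMeasure(
    cov_estimator=EmpiricalCovariance(), kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series])[0]

# Matches np.corrcoef(time_series.T) up to numerical precision.
```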
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/03_connectivity/plot_probabilistic_atlas_extraction.py`
Content:
```
1 """
2 Extracting signals of a probabilistic atlas of rest functional regions
3 ========================================================================
4
5 This example extracts the signal on regions defined via a probabilistic
6 atlas, to construct a functional connectome.
7
8 We use the `MSDL atlas
9 <https://team.inria.fr/parietal/research/spatial_patterns/spatial-patterns-in-resting-state/>`_
10 of functional regions in rest.
11
12 The key to extract signals is to use the
13 :class:`nilearn.input_data.NiftiMapsMasker` that can transform nifti
14 objects to time series using a probabilistic atlas.
15
16 As the MSDL atlas comes with (x, y, z) MNI coordinates for the different
17 regions, we can visualize the matrix as a graph of interaction in a
18 brain. To avoid having too dense a graph, we represent only the 20% edges
19 with the highest values.
20
21 """
22 ############################################################################
23 # Retrieve the atlas and the data
24 from nilearn import datasets
25 atlas = datasets.fetch_atlas_msdl()
26 # Loading atlas image stored in 'maps'
27 atlas_filename = atlas['maps']
28 # Loading atlas data stored in 'labels'
29 labels = atlas['labels']
30
31 # Load the functional datasets
32 data = datasets.fetch_adhd(n_subjects=1)
33
34 print('First subject resting-state nifti image (4D) is located at: %s' %
35 data.func[0])
36
37 ############################################################################
38 # Extract the time series
39 from nilearn.input_data import NiftiMapsMasker
40 masker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True,
41 memory='nilearn_cache', verbose=5)
42
43 time_series = masker.fit_transform(data.func[0],
44 confounds=data.confounds)
45
46 ############################################################################
47 # `time_series` is now a 2D matrix, of shape (number of time points x
48 # number of regions)
49 print(time_series.shape)
50
51 ############################################################################
52 # Build and display a correlation matrix
53 import numpy as np
54 correlation_matrix = np.corrcoef(time_series.T)
55
56 # Display the correlation matrix
57 from matplotlib import pyplot as plt
58 plt.figure(figsize=(10, 10))
59 # Mask out the major diagonal
60 np.fill_diagonal(correlation_matrix, 0)
61 plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r",
62 vmax=0.8, vmin=-0.8)
63 plt.colorbar()
64 # And display the labels
65 x_ticks = plt.xticks(range(len(labels)), labels, rotation=90)
66 y_ticks = plt.yticks(range(len(labels)), labels)
67
68 ############################################################################
69 # And now display the corresponding graph
70 from nilearn import plotting
71 coords = atlas.region_coords
72
73 # We threshold to keep only the 20% of edges with the highest value
74 # because the graph is very dense
75 plotting.plot_connectome(correlation_matrix, coords,
76 edge_threshold="80%", colorbar=True)
77
78 plotting.show()
79
```
Path: `examples/03_connectivity/plot_signal_extraction.py`
Content:
```
1 """
2 Extracting signals from a brain parcellation
3 ============================================
4
5 Here we show how to extract signals from a brain parcellation and compute
6 a correlation matrix.
7
8 We also show the importance of defining good confounds signals: the
9 first correlation matrix is computed after regressing out simple
10 confounds signals: movement regressors, white matter and CSF signals, ...
11 The second one is without any confounds: all regions are connected to
12 each other.
13
14
15 One reference that discusses the importance of confounds is `Varoquaux and
16 Craddock, Learning and comparing functional connectomes across subjects,
17 NeuroImage 2013
18 <http://www.sciencedirect.com/science/article/pii/S1053811913003340>`_.
19
20 This is just a code example, see the :ref:`corresponding section in the
21 documentation <parcellation_time_series>` for more.
22 """
23
24 ##############################################################################
25 # Retrieve the atlas and the data
26 from nilearn import datasets
27
28 dataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')
29 atlas_filename, labels = dataset.maps, dataset.labels
30
31 print('Atlas ROIs are located in nifti image (4D) at: %s' %
32 atlas_filename) # 4D data
33
34 # One subject of resting-state data
35 data = datasets.fetch_adhd(n_subjects=1)
36 fmri_filenames = data.func[0]
37
38 ##############################################################################
39 # Extract signals on a parcellation defined by labels using the
40 # NiftiLabelsMasker
41 from nilearn.input_data import NiftiLabelsMasker
42 masker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True,
43 memory='nilearn_cache', verbose=5)
44
45 # Here we go from nifti files to the signal time series in a numpy
46 # array. Note how we give confounds to be regressed out during signal
47 # extraction
48 time_series = masker.fit_transform(fmri_filenames, confounds=data.confounds)
49
50
51 ##############################################################################
52 # Compute and display a correlation matrix
53 import numpy as np
54 correlation_matrix = np.corrcoef(time_series.T)
55
56 # Plot the correlation matrix
57 from matplotlib import pyplot as plt
58 plt.figure(figsize=(10, 10))
59 # Mask the main diagonal for visualization:
60 np.fill_diagonal(correlation_matrix, 0)
61
62 plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r",
63 vmax=0.8, vmin=-0.8)
64
65 # Add labels and adjust margins
66 x_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)
67 y_ticks = plt.yticks(range(len(labels) - 1), labels[1:])
68 plt.gca().yaxis.tick_right()
69 plt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)
70
71
72 ###############################################################################
73 # Same thing without confounds, to stress the importance of confounds
74
75 time_series = masker.fit_transform(fmri_filenames)
76 # Note how we did not specify confounds above. This is bad!
77
78 correlation_matrix = np.corrcoef(time_series.T)
79
80 # Mask the main diagonal for visualization:
81 np.fill_diagonal(correlation_matrix, 0)
82
83 plt.figure(figsize=(10, 10))
84 plt.imshow(correlation_matrix, interpolation="nearest", cmap="RdBu_r",
85 vmax=0.8, vmin=-0.8)
86
87 x_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)
88 y_ticks = plt.yticks(range(len(labels) - 1), labels[1:])
89 plt.gca().yaxis.tick_right()
90 plt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)
91 plt.suptitle('No confounds', size=27)
92
93 plt.show()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py
--- a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py
+++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py
@@ -50,10 +50,12 @@
############################################################################
# Build and display a correlation matrix
-import numpy as np
-correlation_matrix = np.corrcoef(time_series.T)
+from nilearn.connectome import ConnectivityMeasure
+correlation_measure = ConnectivityMeasure(kind='correlation')
+correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# Display the correlation matrix
+import numpy as np
from matplotlib import pyplot as plt
plt.figure(figsize=(10, 10))
# Mask out the major diagonal
diff --git a/examples/03_connectivity/plot_signal_extraction.py b/examples/03_connectivity/plot_signal_extraction.py
--- a/examples/03_connectivity/plot_signal_extraction.py
+++ b/examples/03_connectivity/plot_signal_extraction.py
@@ -50,10 +50,12 @@
##############################################################################
# Compute and display a correlation matrix
-import numpy as np
-correlation_matrix = np.corrcoef(time_series.T)
+from nilearn.connectome import ConnectivityMeasure
+correlation_measure = ConnectivityMeasure(kind='correlation')
+correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# Plot the correlation matrix
+import numpy as np
from matplotlib import pyplot as plt
plt.figure(figsize=(10, 10))
# Mask the main diagonal for visualization:
@@ -75,7 +77,7 @@
time_series = masker.fit_transform(fmri_filenames)
# Note how we did not specify confounds above. This is bad!
-correlation_matrix = np.corrcoef(time_series.T)
+correlation_matrix = correlation_measure.fit_transform([time_series])[0]
# Mask the main diagonal for visualization:
np.fill_diagonal(correlation_matrix, 0)
| {"golden_diff": "diff --git a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py\n--- a/examples/03_connectivity/plot_probabilistic_atlas_extraction.py\n+++ b/examples/03_connectivity/plot_probabilistic_atlas_extraction.py\n@@ -50,10 +50,12 @@\n \n ############################################################################\n # Build and display a correlation matrix\n-import numpy as np\n-correlation_matrix = np.corrcoef(time_series.T)\n+from nilearn.connectome import ConnectivityMeasure\n+correlation_measure = ConnectivityMeasure(kind='correlation')\n+correlation_matrix = correlation_measure.fit_transform([time_series])[0]\n \n # Display the correlation matrix\n+import numpy as np\n from matplotlib import pyplot as plt\n plt.figure(figsize=(10, 10))\n # Mask out the major diagonal\ndiff --git a/examples/03_connectivity/plot_signal_extraction.py b/examples/03_connectivity/plot_signal_extraction.py\n--- a/examples/03_connectivity/plot_signal_extraction.py\n+++ b/examples/03_connectivity/plot_signal_extraction.py\n@@ -50,10 +50,12 @@\n \n ##############################################################################\n # Compute and display a correlation matrix\n-import numpy as np\n-correlation_matrix = np.corrcoef(time_series.T)\n+from nilearn.connectome import ConnectivityMeasure\n+correlation_measure = ConnectivityMeasure(kind='correlation')\n+correlation_matrix = correlation_measure.fit_transform([time_series])[0]\n \n # Plot the correlation matrix\n+import numpy as np\n from matplotlib import pyplot as plt\n plt.figure(figsize=(10, 10))\n # Mask the main diagonal for visualization:\n@@ -75,7 +77,7 @@\n time_series = masker.fit_transform(fmri_filenames)\n # Note how we did not specify confounds above. This is bad!\n \n-correlation_matrix = np.corrcoef(time_series.T)\n+correlation_matrix = correlation_measure.fit_transform([time_series])[0]\n \n # Mask the main diagonal for visualization:\n np.fill_diagonal(correlation_matrix, 0)\n", "issue": "np.corrcoef or ConnectivityMeasure\nTwo connectivity examples use `np.corrcoef`to compute correlation matrix\n- `plot_signal_extraction`\n- `plot_probabilistic_atlas_extraction`\n\nShall we rely on `ConnectivityMeasure((cov_estimator=EmpiricalCovariance(), kind='correlation')` instead ?\n\n", "before_files": [{"content": "\"\"\"\nExtracting signals of a probabilistic atlas of rest functional regions\n========================================================================\n\nThis example extracts the signal on regions defined via a probabilistic\natlas, to construct a functional connectome.\n\nWe use the `MSDL atlas\n<https://team.inria.fr/parietal/research/spatial_patterns/spatial-patterns-in-resting-state/>`_\nof functional regions in rest.\n\nThe key to extract signals is to use the\n:class:`nilearn.input_data.NiftiMapsMasker` that can transform nifti\nobjects to time series using a probabilistic atlas.\n\nAs the MSDL atlas comes with (x, y, z) MNI coordinates for the different\nregions, we can visualize the matrix as a graph of interaction in a\nbrain. 
To avoid having too dense a graph, we represent only the 20% edges\nwith the highest values.\n\n\"\"\"\n############################################################################\n# Retrieve the atlas and the data\nfrom nilearn import datasets\natlas = datasets.fetch_atlas_msdl()\n# Loading atlas image stored in 'maps'\natlas_filename = atlas['maps']\n# Loading atlas data stored in 'labels'\nlabels = atlas['labels']\n\n# Load the functional datasets\ndata = datasets.fetch_adhd(n_subjects=1)\n\nprint('First subject resting-state nifti image (4D) is located at: %s' %\n data.func[0])\n\n############################################################################\n# Extract the time series\nfrom nilearn.input_data import NiftiMapsMasker\nmasker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True,\n memory='nilearn_cache', verbose=5)\n\ntime_series = masker.fit_transform(data.func[0],\n confounds=data.confounds)\n\n############################################################################\n# `time_series` is now a 2D matrix, of shape (number of time points x\n# number of regions)\nprint(time_series.shape)\n\n############################################################################\n# Build and display a correlation matrix\nimport numpy as np\ncorrelation_matrix = np.corrcoef(time_series.T)\n\n# Display the correlation matrix\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10, 10))\n# Mask out the major diagonal\nnp.fill_diagonal(correlation_matrix, 0)\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\nplt.colorbar()\n# And display the labels\nx_ticks = plt.xticks(range(len(labels)), labels, rotation=90)\ny_ticks = plt.yticks(range(len(labels)), labels)\n\n############################################################################\n# And now display the corresponding graph\nfrom nilearn import plotting\ncoords = atlas.region_coords\n\n# We threshold to keep only the 20% of edges with the highest value\n# because the graph is very dense\nplotting.plot_connectome(correlation_matrix, coords,\n edge_threshold=\"80%\", colorbar=True)\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_probabilistic_atlas_extraction.py"}, {"content": "\"\"\"\nExtracting signals from a brain parcellation\n============================================\n\nHere we show how to extract signals from a brain parcellation and compute\na correlation matrix.\n\nWe also show the importance of defining good confounds signals: the\nfirst correlation matrix is computed after regressing out simple\nconfounds signals: movement regressors, white matter and CSF signals, ...\nThe second one is without any confounds: all regions are connected to\neach other.\n\n\nOne reference that discusses the importance of confounds is `Varoquaux and\nCraddock, Learning and comparing functional connectomes across subjects,\nNeuroImage 2013\n<http://www.sciencedirect.com/science/article/pii/S1053811913003340>`_.\n\nThis is just a code example, see the :ref:`corresponding section in the\ndocumentation <parcellation_time_series>` for more.\n\"\"\"\n\n##############################################################################\n# Retrieve the atlas and the data\nfrom nilearn import datasets\n\ndataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')\natlas_filename, labels = dataset.maps, dataset.labels\n\nprint('Atlas ROIs are located in nifti image (4D) at: %s' %\n atlas_filename) # 4D data\n\n# One subject of resting-state data\ndata = 
datasets.fetch_adhd(n_subjects=1)\nfmri_filenames = data.func[0]\n\n##############################################################################\n# Extract signals on a parcellation defined by labels using the\n# NiftiLabelsMasker\nfrom nilearn.input_data import NiftiLabelsMasker\nmasker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True,\n memory='nilearn_cache', verbose=5)\n\n# Here we go from nifti files to the signal time series in a numpy\n# array. Note how we give confounds to be regressed out during signal\n# extraction\ntime_series = masker.fit_transform(fmri_filenames, confounds=data.confounds)\n\n\n##############################################################################\n# Compute and display a correlation matrix\nimport numpy as np\ncorrelation_matrix = np.corrcoef(time_series.T)\n\n# Plot the correlation matrix\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10, 10))\n# Mask the main diagonal for visualization:\nnp.fill_diagonal(correlation_matrix, 0)\n\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\n\n# Add labels and adjust margins\nx_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)\ny_ticks = plt.yticks(range(len(labels) - 1), labels[1:])\nplt.gca().yaxis.tick_right()\nplt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)\n\n\n###############################################################################\n# Same thing without confounds, to stress the importance of confounds\n\ntime_series = masker.fit_transform(fmri_filenames)\n# Note how we did not specify confounds above. This is bad!\n\ncorrelation_matrix = np.corrcoef(time_series.T)\n\n# Mask the main diagonal for visualization:\nnp.fill_diagonal(correlation_matrix, 0)\n\nplt.figure(figsize=(10, 10))\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\n\nx_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)\ny_ticks = plt.yticks(range(len(labels) - 1), labels[1:])\nplt.gca().yaxis.tick_right()\nplt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)\nplt.suptitle('No confounds', size=27)\n\nplt.show()\n", "path": "examples/03_connectivity/plot_signal_extraction.py"}], "after_files": [{"content": "\"\"\"\nExtracting signals of a probabilistic atlas of rest functional regions\n========================================================================\n\nThis example extracts the signal on regions defined via a probabilistic\natlas, to construct a functional connectome.\n\nWe use the `MSDL atlas\n<https://team.inria.fr/parietal/research/spatial_patterns/spatial-patterns-in-resting-state/>`_\nof functional regions in rest.\n\nThe key to extract signals is to use the\n:class:`nilearn.input_data.NiftiMapsMasker` that can transform nifti\nobjects to time series using a probabilistic atlas.\n\nAs the MSDL atlas comes with (x, y, z) MNI coordinates for the different\nregions, we can visualize the matrix as a graph of interaction in a\nbrain. 
To avoid having too dense a graph, we represent only the 20% edges\nwith the highest values.\n\n\"\"\"\n############################################################################\n# Retrieve the atlas and the data\nfrom nilearn import datasets\natlas = datasets.fetch_atlas_msdl()\n# Loading atlas image stored in 'maps'\natlas_filename = atlas['maps']\n# Loading atlas data stored in 'labels'\nlabels = atlas['labels']\n\n# Load the functional datasets\ndata = datasets.fetch_adhd(n_subjects=1)\n\nprint('First subject resting-state nifti image (4D) is located at: %s' %\n data.func[0])\n\n############################################################################\n# Extract the time series\nfrom nilearn.input_data import NiftiMapsMasker\nmasker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True,\n memory='nilearn_cache', verbose=5)\n\ntime_series = masker.fit_transform(data.func[0],\n confounds=data.confounds)\n\n############################################################################\n# `time_series` is now a 2D matrix, of shape (number of time points x\n# number of regions)\nprint(time_series.shape)\n\n############################################################################\n# Build and display a correlation matrix\nfrom nilearn.connectome import ConnectivityMeasure\ncorrelation_measure = ConnectivityMeasure(kind='correlation')\ncorrelation_matrix = correlation_measure.fit_transform([time_series])[0]\n\n# Display the correlation matrix\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10, 10))\n# Mask out the major diagonal\nnp.fill_diagonal(correlation_matrix, 0)\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\nplt.colorbar()\n# And display the labels\nx_ticks = plt.xticks(range(len(labels)), labels, rotation=90)\ny_ticks = plt.yticks(range(len(labels)), labels)\n\n############################################################################\n# And now display the corresponding graph\nfrom nilearn import plotting\ncoords = atlas.region_coords\n\n# We threshold to keep only the 20% of edges with the highest value\n# because the graph is very dense\nplotting.plot_connectome(correlation_matrix, coords,\n edge_threshold=\"80%\", colorbar=True)\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_probabilistic_atlas_extraction.py"}, {"content": "\"\"\"\nExtracting signals from a brain parcellation\n============================================\n\nHere we show how to extract signals from a brain parcellation and compute\na correlation matrix.\n\nWe also show the importance of defining good confounds signals: the\nfirst correlation matrix is computed after regressing out simple\nconfounds signals: movement regressors, white matter and CSF signals, ...\nThe second one is without any confounds: all regions are connected to\neach other.\n\n\nOne reference that discusses the importance of confounds is `Varoquaux and\nCraddock, Learning and comparing functional connectomes across subjects,\nNeuroImage 2013\n<http://www.sciencedirect.com/science/article/pii/S1053811913003340>`_.\n\nThis is just a code example, see the :ref:`corresponding section in the\ndocumentation <parcellation_time_series>` for more.\n\"\"\"\n\n##############################################################################\n# Retrieve the atlas and the data\nfrom nilearn import datasets\n\ndataset = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')\natlas_filename, labels = dataset.maps, dataset.labels\n\nprint('Atlas 
ROIs are located in nifti image (4D) at: %s' %\n atlas_filename) # 4D data\n\n# One subject of resting-state data\ndata = datasets.fetch_adhd(n_subjects=1)\nfmri_filenames = data.func[0]\n\n##############################################################################\n# Extract signals on a parcellation defined by labels using the\n# NiftiLabelsMasker\nfrom nilearn.input_data import NiftiLabelsMasker\nmasker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True,\n memory='nilearn_cache', verbose=5)\n\n# Here we go from nifti files to the signal time series in a numpy\n# array. Note how we give confounds to be regressed out during signal\n# extraction\ntime_series = masker.fit_transform(fmri_filenames, confounds=data.confounds)\n\n\n##############################################################################\n# Compute and display a correlation matrix\nfrom nilearn.connectome import ConnectivityMeasure\ncorrelation_measure = ConnectivityMeasure(kind='correlation')\ncorrelation_matrix = correlation_measure.fit_transform([time_series])[0]\n\n# Plot the correlation matrix\nimport numpy as np\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10, 10))\n# Mask the main diagonal for visualization:\nnp.fill_diagonal(correlation_matrix, 0)\n\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\n\n# Add labels and adjust margins\nx_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)\ny_ticks = plt.yticks(range(len(labels) - 1), labels[1:])\nplt.gca().yaxis.tick_right()\nplt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)\n\n\n###############################################################################\n# Same thing without confounds, to stress the importance of confounds\n\ntime_series = masker.fit_transform(fmri_filenames)\n# Note how we did not specify confounds above. This is bad!\n\ncorrelation_matrix = correlation_measure.fit_transform([time_series])[0]\n\n# Mask the main diagonal for visualization:\nnp.fill_diagonal(correlation_matrix, 0)\n\nplt.figure(figsize=(10, 10))\nplt.imshow(correlation_matrix, interpolation=\"nearest\", cmap=\"RdBu_r\",\n vmax=0.8, vmin=-0.8)\n\nx_ticks = plt.xticks(range(len(labels) - 1), labels[1:], rotation=90)\ny_ticks = plt.yticks(range(len(labels) - 1), labels[1:])\nplt.gca().yaxis.tick_right()\nplt.subplots_adjust(left=.01, bottom=.3, top=.99, right=.62)\nplt.suptitle('No confounds', size=27)\n\nplt.show()\n", "path": "examples/03_connectivity/plot_signal_extraction.py"}]} | 2,138 | 451 |
gh_patches_debug_6915 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_DOCKER_1 rule not failed when using EXPOSE 22/TCP
**Issue**
If it is related to an existing check, CKV_DOCKER_1.
Dockerfile EXPOSE can accept a port using the tcp protocol by default if not specified, or you can also specify the protocol explicitly with /tcp or /udp.
When using "EXPOSE 22/tcp" the rule does not detect the SSH port. Something like '22/tcp'.split('/')[0] will return the port without the protocol, or we can test for both '22' and '22/tcp' in the rule.
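
A minimal sketch of the normalization the issue suggests (the helper name and sample values are hypothetical, not taken from Checkov itself):

```python
def exposes_port_22(expose_value: str) -> bool:
    """Return True if an EXPOSE value exposes port 22 over any protocol."""
    for port in expose_value.split(" "):
        # "22/tcp".split("/")[0] -> "22"; a bare "22" is unchanged
        if port.split("/")[0] == "22":
            return True
    return False

assert exposes_port_22("22/tcp")
assert exposes_port_22("80 22")
assert not exposes_port_22("8022")
```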
**Examples**
The issue can be replicated with a Dockerfile which uses "EXPOSE 22/tcp".
**Version**
- Checkov Version 2.1.87
**Specification**
EXPOSE specification https://docs.docker.com/engine/reference/builder/#expose
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/ExposePort22.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
7
8 if TYPE_CHECKING:
9 from dockerfile_parse.parser import _Instruction
10
11
12 class ExposePort22(BaseDockerfileCheck):
13 def __init__(self) -> None:
14 name = "Ensure port 22 is not exposed"
15 id = "CKV_DOCKER_1"
16 supported_instructions = ("EXPOSE",)
17 categories = (CheckCategories.NETWORKING,)
18 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
19
20 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
21 for expose in conf:
22 if "22" in expose["value"].split(" "):
23 return CheckResult.FAILED, [expose]
24
25 return CheckResult.PASSED, None
26
27
28 check = ExposePort22()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/dockerfile/checks/ExposePort22.py b/checkov/dockerfile/checks/ExposePort22.py
--- a/checkov/dockerfile/checks/ExposePort22.py
+++ b/checkov/dockerfile/checks/ExposePort22.py
@@ -19,7 +19,7 @@
def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:
for expose in conf:
- if "22" in expose["value"].split(" "):
+ if any(port in expose["value"].split(" ") for port in ("22", "22/tcp")):
return CheckResult.FAILED, [expose]
return CheckResult.PASSED, None
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/ExposePort22.py b/checkov/dockerfile/checks/ExposePort22.py\n--- a/checkov/dockerfile/checks/ExposePort22.py\n+++ b/checkov/dockerfile/checks/ExposePort22.py\n@@ -19,7 +19,7 @@\n \n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n for expose in conf:\n- if \"22\" in expose[\"value\"].split(\" \"):\n+ if any(port in expose[\"value\"].split(\" \") for port in (\"22\", \"22/tcp\")):\n return CheckResult.FAILED, [expose]\n \n return CheckResult.PASSED, None\n", "issue": "CKV_DOCKER_1 rule not failed when using EXPOSE 22/TCP\n**Issue**\r\nIf it is related to an existing check, CKV_DOCKER_1.\r\nDockerfile EXPOSE can accept port using tcp proto by defaut if not specified or you can also specify the protocol with /tcp or /udp.\r\n\r\nWhen using \"EXPOSE 22/tcp\" the rule does not detect the SSH port. something like '22/tcp'.split('/')[0] will return port without protocol or we can test both '22' or '22/tcp' in the rule.\r\n\r\n**Examples**\r\nIssue can be replicated with a Dockerfile which uses \"EXPOSE 22/tcp\".\r\n\r\n**Version**\r\n - Checkov Version 2.1.87\r\n\r\n**Specification**\r\nEXPOSE specification https://docs.docker.com/engine/reference/builder/#expose\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass ExposePort22(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure port 22 is not exposed\"\n id = \"CKV_DOCKER_1\"\n supported_instructions = (\"EXPOSE\",)\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n for expose in conf:\n if \"22\" in expose[\"value\"].split(\" \"):\n return CheckResult.FAILED, [expose]\n\n return CheckResult.PASSED, None\n\n\ncheck = ExposePort22()\n", "path": "checkov/dockerfile/checks/ExposePort22.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass ExposePort22(BaseDockerfileCheck):\n def __init__(self) -> None:\n name = \"Ensure port 22 is not exposed\"\n id = \"CKV_DOCKER_1\"\n supported_instructions = (\"EXPOSE\",)\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n for expose in conf:\n if any(port in expose[\"value\"].split(\" \") for port in (\"22\", \"22/tcp\")):\n return CheckResult.FAILED, [expose]\n\n return CheckResult.PASSED, None\n\n\ncheck = ExposePort22()\n", "path": "checkov/dockerfile/checks/ExposePort22.py"}]} | 729 | 170 |
gh_patches_debug_51284 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RuntimeError: Requested component 'jaeger' not found in entry points for 'opentelemetry_exporter'
From the exporters selection of [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection)
>Known values for OTEL_TRACES_EXPORTER are:
>
>- "otlp": OTLP
>- "jaeger": Jaeger gRPC
>- "zipkin": Zipkin (Defaults to protobuf format)
>- "none": No automatically configured exporter for traces.
We have split up the exporters based on protocol + serialisation to avoid taking an unnecessary dependency, so there is no entry point for `jaeger`. If someone reads the spec and follows that recommendation, they will run into this error. We should either add a `jaeger` entry point or an alias that solves this problem.
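
A minimal way to reproduce the failing lookup outside the SDK, assuming only the `opentelemetry_exporter` entry-point group named in the error message:

```python
from pkg_resources import iter_entry_points

names = {ep.name for ep in iter_entry_points("opentelemetry_exporter")}
print(sorted(names))       # whatever exporter entry points are installed; no plain "jaeger" among them
print("jaeger" in names)   # False, which is what triggers the RuntimeError
```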
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15
16 """
17 OpenTelemetry SDK Configurator for Easy Instrumentation with Distros
18 """
19
20 from os import environ
21 from typing import Sequence, Tuple
22
23 from pkg_resources import iter_entry_points
24
25 from opentelemetry import trace
26 from opentelemetry.environment_variables import (
27 OTEL_PYTHON_ID_GENERATOR,
28 OTEL_TRACES_EXPORTER,
29 )
30 from opentelemetry.instrumentation.configurator import BaseConfigurator
31 from opentelemetry.sdk.trace import TracerProvider
32 from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
33 from opentelemetry.sdk.trace.id_generator import IdGenerator
34
35 _EXPORTER_OTLP = "otlp"
36 _EXPORTER_OTLP_SPAN = "otlp_proto_grpc_span"
37
38 _RANDOM_ID_GENERATOR = "random"
39 _DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR
40
41
42 def _get_id_generator() -> str:
43 return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)
44
45
46 def _get_exporter_names() -> Sequence[str]:
47 trace_exporters = environ.get(OTEL_TRACES_EXPORTER)
48
49 exporters = set()
50
51 if trace_exporters and trace_exporters.lower().strip() != "none":
52 exporters.update(
53 {
54 trace_exporter.strip()
55 for trace_exporter in trace_exporters.split(",")
56 }
57 )
58
59 if _EXPORTER_OTLP in exporters:
60 exporters.remove(_EXPORTER_OTLP)
61 exporters.add(_EXPORTER_OTLP_SPAN)
62
63 return list(exporters)
64
65
66 def _init_tracing(
67 exporters: Sequence[SpanExporter], id_generator: IdGenerator
68 ):
69 # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name
70 # from the env variable else defaults to "unknown_service"
71 provider = TracerProvider(
72 id_generator=id_generator(),
73 )
74 trace.set_tracer_provider(provider)
75
76 for _, exporter_class in exporters.items():
77 exporter_args = {}
78 provider.add_span_processor(
79 BatchSpanProcessor(exporter_class(**exporter_args))
80 )
81
82
83 def _import_tracer_provider_config_components(
84 selected_components, entry_point_name
85 ) -> Sequence[Tuple[str, object]]:
86 component_entry_points = {
87 ep.name: ep for ep in iter_entry_points(entry_point_name)
88 }
89 component_impls = []
90 for selected_component in selected_components:
91 entry_point = component_entry_points.get(selected_component, None)
92 if not entry_point:
93 raise RuntimeError(
94 f"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'"
95 )
96
97 component_impl = entry_point.load()
98 component_impls.append((selected_component, component_impl))
99
100 return component_impls
101
102
103 def _import_exporters(
104 exporter_names: Sequence[str],
105 ) -> Sequence[SpanExporter]:
106 trace_exporters = {}
107
108 for (
109 exporter_name,
110 exporter_impl,
111 ) in _import_tracer_provider_config_components(
112 exporter_names, "opentelemetry_exporter"
113 ):
114 if issubclass(exporter_impl, SpanExporter):
115 trace_exporters[exporter_name] = exporter_impl
116 else:
117 raise RuntimeError(f"{exporter_name} is not a trace exporter")
118 return trace_exporters
119
120
121 def _import_id_generator(id_generator_name: str) -> IdGenerator:
122 # pylint: disable=unbalanced-tuple-unpacking
123 [
124 (id_generator_name, id_generator_impl)
125 ] = _import_tracer_provider_config_components(
126 [id_generator_name.strip()], "opentelemetry_id_generator"
127 )
128
129 if issubclass(id_generator_impl, IdGenerator):
130 return id_generator_impl
131
132 raise RuntimeError(f"{id_generator_name} is not an IdGenerator")
133
134
135 def _initialize_components():
136 exporter_names = _get_exporter_names()
137 trace_exporters = _import_exporters(exporter_names)
138 id_generator_name = _get_id_generator()
139 id_generator = _import_id_generator(id_generator_name)
140 _init_tracing(trace_exporters, id_generator)
141
142
143 class _OTelSDKConfigurator(BaseConfigurator):
144 """A basic Configurator by OTel Python for initalizing OTel SDK components
145
146 Initializes several crucial OTel SDK components (i.e. TracerProvider,
147 MeterProvider, Processors...) according to a default implementation. Other
148 Configurators can subclass and slightly alter this initialization.
149
150 NOTE: This class should not be instantiated nor should it become an entry
151 point on the `opentelemetry-sdk` package. Instead, distros should subclass
152 this Configurator and enchance it as needed.
153 """
154
155 def _configure(self, **kwargs):
156 _initialize_components()
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py
@@ -109,7 +109,7 @@
exporter_name,
exporter_impl,
) in _import_tracer_provider_config_components(
- exporter_names, "opentelemetry_exporter"
+ exporter_names, "opentelemetry_traces_exporter"
):
if issubclass(exporter_impl, SpanExporter):
trace_exporters[exporter_name] = exporter_impl
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py\n@@ -109,7 +109,7 @@\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n- exporter_names, \"opentelemetry_exporter\"\n+ exporter_names, \"opentelemetry_traces_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n", "issue": "RuntimeError: Requested component 'jaeger' not found in entry points for 'opentelemetry_exporter'\nFrom the exporters selection of [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection)\r\n\r\n>Known values for OTEL_TRACES_EXPORTER are:\r\n>\r\n>- \"otlp\": OTLP\r\n>- \"jaeger\": Jaeger gRPC\r\n>- \"zipkin\": Zipkin (Defaults to protobuf format)\r\n>- \"none\": No automatically configured exporter for traces.\r\n\r\nWe have split up the exporters based on protocol + serialisation to avoid taking unnecessary dependency so there is no entry point for `jaeger`. If someone reads the spec and follows that recommendation they will run into this error. We should either add an entry point `jaeger` or an alias that solves this problem.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nOpenTelemetry SDK Configurator for Easy Instrumentation with Distros\n\"\"\"\n\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\n_EXPORTER_OTLP = \"otlp\"\n_EXPORTER_OTLP_SPAN = \"otlp_proto_grpc_span\"\n\n_RANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = set()\n\n if trace_exporters and trace_exporters.lower().strip() != \"none\":\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if _EXPORTER_OTLP in exporters:\n exporters.remove(_EXPORTER_OTLP)\n exporters.add(_EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will 
read the service_name\n # from the env variable else defaults to \"unknown_service\"\n provider = TracerProvider(\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for _, exporter_class in exporters.items():\n exporter_args = {}\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n f\"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'\"\n )\n\n component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, \"opentelemetry_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(f\"{exporter_name} is not a trace exporter\")\n return trace_exporters\n\n\ndef _import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(f\"{id_generator_name} is not an IdGenerator\")\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass _OTelSDKConfigurator(BaseConfigurator):\n \"\"\"A basic Configurator by OTel Python for initalizing OTel SDK components\n\n Initializes several crucial OTel SDK components (i.e. TracerProvider,\n MeterProvider, Processors...) according to a default implementation. Other\n Configurators can subclass and slightly alter this initialization.\n\n NOTE: This class should not be instantiated nor should it become an entry\n point on the `opentelemetry-sdk` package. 
Instead, distros should subclass\n this Configurator and enchance it as needed.\n \"\"\"\n\n def _configure(self, **kwargs):\n _initialize_components()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nOpenTelemetry SDK Configurator for Easy Instrumentation with Distros\n\"\"\"\n\nfrom os import environ\nfrom typing import Sequence, Tuple\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry import trace\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_ID_GENERATOR,\n OTEL_TRACES_EXPORTER,\n)\nfrom opentelemetry.instrumentation.configurator import BaseConfigurator\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter\nfrom opentelemetry.sdk.trace.id_generator import IdGenerator\n\n_EXPORTER_OTLP = \"otlp\"\n_EXPORTER_OTLP_SPAN = \"otlp_proto_grpc_span\"\n\n_RANDOM_ID_GENERATOR = \"random\"\n_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR\n\n\ndef _get_id_generator() -> str:\n return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)\n\n\ndef _get_exporter_names() -> Sequence[str]:\n trace_exporters = environ.get(OTEL_TRACES_EXPORTER)\n\n exporters = set()\n\n if trace_exporters and trace_exporters.lower().strip() != \"none\":\n exporters.update(\n {\n trace_exporter.strip()\n for trace_exporter in trace_exporters.split(\",\")\n }\n )\n\n if _EXPORTER_OTLP in exporters:\n exporters.remove(_EXPORTER_OTLP)\n exporters.add(_EXPORTER_OTLP_SPAN)\n\n return list(exporters)\n\n\ndef _init_tracing(\n exporters: Sequence[SpanExporter], id_generator: IdGenerator\n):\n # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name\n # from the env variable else defaults to \"unknown_service\"\n provider = TracerProvider(\n id_generator=id_generator(),\n )\n trace.set_tracer_provider(provider)\n\n for _, exporter_class in exporters.items():\n exporter_args = {}\n provider.add_span_processor(\n BatchSpanProcessor(exporter_class(**exporter_args))\n )\n\n\ndef _import_tracer_provider_config_components(\n selected_components, entry_point_name\n) -> Sequence[Tuple[str, object]]:\n component_entry_points = {\n ep.name: ep for ep in iter_entry_points(entry_point_name)\n }\n component_impls = []\n for selected_component in selected_components:\n entry_point = component_entry_points.get(selected_component, None)\n if not entry_point:\n raise RuntimeError(\n f\"Requested component '{selected_component}' not found in entry points for '{entry_point_name}'\"\n )\n\n component_impl = entry_point.load()\n component_impls.append((selected_component, component_impl))\n\n return component_impls\n\n\ndef _import_exporters(\n exporter_names: Sequence[str],\n) -> Sequence[SpanExporter]:\n trace_exporters = {}\n\n for (\n exporter_name,\n exporter_impl,\n ) in _import_tracer_provider_config_components(\n exporter_names, 
\"opentelemetry_traces_exporter\"\n ):\n if issubclass(exporter_impl, SpanExporter):\n trace_exporters[exporter_name] = exporter_impl\n else:\n raise RuntimeError(f\"{exporter_name} is not a trace exporter\")\n return trace_exporters\n\n\ndef _import_id_generator(id_generator_name: str) -> IdGenerator:\n # pylint: disable=unbalanced-tuple-unpacking\n [\n (id_generator_name, id_generator_impl)\n ] = _import_tracer_provider_config_components(\n [id_generator_name.strip()], \"opentelemetry_id_generator\"\n )\n\n if issubclass(id_generator_impl, IdGenerator):\n return id_generator_impl\n\n raise RuntimeError(f\"{id_generator_name} is not an IdGenerator\")\n\n\ndef _initialize_components():\n exporter_names = _get_exporter_names()\n trace_exporters = _import_exporters(exporter_names)\n id_generator_name = _get_id_generator()\n id_generator = _import_id_generator(id_generator_name)\n _init_tracing(trace_exporters, id_generator)\n\n\nclass _OTelSDKConfigurator(BaseConfigurator):\n \"\"\"A basic Configurator by OTel Python for initalizing OTel SDK components\n\n Initializes several crucial OTel SDK components (i.e. TracerProvider,\n MeterProvider, Processors...) according to a default implementation. Other\n Configurators can subclass and slightly alter this initialization.\n\n NOTE: This class should not be instantiated nor should it become an entry\n point on the `opentelemetry-sdk` package. Instead, distros should subclass\n this Configurator and enchance it as needed.\n \"\"\"\n\n def _configure(self, **kwargs):\n _initialize_components()\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_configuration/__init__.py"}]} | 1,932 | 158 |
gh_patches_debug_30494 | rasdani/github-patches | git_diff | ARM-DOE__ACT-220 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
geodisplay.py projection not working with any other options but the default
While using the GeographicPlotDisplay function with cartopy projections other than the default, the map extent changes or no data appears. 
The default is PlateCarree and works fine, but when trying to use a Mercator projection the data shows up while the map extent is changed. 

When using a LambertConformal projection the extent appears to be off, since no features appear that were supposed to be plotted.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/plotting/GeoDisplay.py`
Content:
```
1 """
2 act.plotting.GeoDisplay
3 -----------------------
4
5 Stores the class for GeographicPlotDisplay.
6
7 """
8
9 import matplotlib.pyplot as plt
10 import numpy as np
11 import pandas as pd
12
13 from .plot import Display
14 try:
15 import cartopy.crs as ccrs
16 from cartopy.io.img_tiles import Stamen
17 import cartopy.feature as cfeature
18 CARTOPY_AVAILABLE = True
19 except ImportError:
20 CARTOPY_AVAILABLE = False
21
22
23 class GeographicPlotDisplay(Display):
24 """
25 A class for making geographic tracer plot of aircraft, ship or other moving
26 platform plot..
27
28 This is inherited from the :func:`act.plotting.Display`
29 class and has therefore has the same attributes as that class.
30 See :func:`act.plotting.Display`
31 for more information. There are no additional attributes or parameters
32 to this class.
33
34 In order to create geographic plots, ACT needs the Cartopy package to be
35 installed on your system. More information about
36 Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .
37
38 """
39 def __init__(self, obj, ds_name=None, **kwargs):
40 if not CARTOPY_AVAILABLE:
41 raise ImportError("Cartopy needs to be installed on your "
42 "system to make geographic display plots.")
43 super().__init__(obj, ds_name, **kwargs)
44 if self.fig is None:
45 self.fig = plt.figure(**kwargs)
46
47 def geoplot(self, data_field=None, lat_field='lat',
48 lon_field='lon', dsname=None, cbar_label=None, title=None,
49 projection=ccrs.PlateCarree(), plot_buffer=0.08,
50 stamen='terrain-background', tile=8, cartopy_feature=None,
51 cmap='rainbow', text=None, gridlines=True, **kwargs):
52 """
53 Creates a latttude and longitude plot of a time series data set with
54 data values indicated by color and described with a colorbar.
55 Latitude values must be in degree north (-90 to 90) and
56 longitude must be in degree east (-180 to 180).
57
58 Parameters
59 ----------
60 data_field : str
61 Name of data filed in object to plot.
62 lat_field : str
63 Name of latitude field in object to use.
64 lon_field : str
65 Name of longitude field in object to use.
66 dsname : str or None
67 The name of the datastream to plot. Set to None to make ACT
68 attempt to automatically determine this.
69 cbar_label : str
70 Label to use with colorbar. If set to None will attempt
71 to create label from long_name and units.
72 title : str
73 Plot title.
74 projection : str
75 Project to use on plot.
76 plot_buffer : float
77 Buffer to add around data on plot in lat and lon dimension.
78 stamen : str
79 Dataset to use for background image. Set to None to not use
80 background image.
81 tile : int
82 Tile zoom to use with background image. Higer number indicates
83 more resolution. A value of 8 is typical for a normal sonde plot.
84 cartopy_feature : list of str or str
85 Cartopy feature to add to plot.
86 cmap : str
87 Color map to use for colorbar.
88 text : dictionary
89 Dictionary of {text:[lon,lat]} to add to plot. Can have more
90 than one set of text to add.
91 gridlines : boolean
92 Use latitude and longitude gridlines.
93 **kwargs : keyword arguments
94 Any other keyword arguments that will be passed
95 into :func:`matplotlib.pyplot.scatter` when the figure
96 is made. See the matplotlib documentation for further details
97 on what keyword arguments are available.
98
99 """
100 # Get current plotting figure
101 # del self.axes
102 # if self.fig is None:
103 # self.fig = plt.figure()
104
105 if dsname is None and len(self._arm.keys()) > 1:
106 raise ValueError(("You must choose a datastream when there are 2 "
107 "or more datasets in the GeographicPlotDisplay "
108 "object."))
109 elif dsname is None:
110 dsname = list(self._arm.keys())[0]
111
112 if data_field is None:
113 raise ValueError(("You must enter the name of the data "
114 "to be plotted."))
115
116 # Extract data from object
117 try:
118 lat = self._arm[dsname][lat_field].values
119 except KeyError:
120 raise ValueError(("You will need to provide the name of the "
121 "field if not '{}' to use for latitued "
122 "data.").format(lat_field))
123 try:
124 lon = self._arm[dsname][lon_field].values
125 except KeyError:
126 raise ValueError(("You will need to provide the name of the "
127 "field if not '{}' to use for longitude "
128 "data.").format(lon_field))
129
130 # Set up metadata information for display on plot
131 if cbar_label is None:
132 try:
133 cbar_label = (
134 self._arm[dsname][data_field].attrs['long_name'] +
135 ' (' + self._arm[dsname][data_field].attrs['units'] + ')')
136 except KeyError:
137 cbar_label = data_field
138
139 lat_limits = [np.nanmin(lat), np.nanmax(lat)]
140 lon_limits = [np.nanmin(lon), np.nanmax(lon)]
141 box_size = np.max([np.abs(np.diff(lat_limits)),
142 np.abs(np.diff(lon_limits))])
143 bx_buf = box_size * plot_buffer
144
145 lat_center = np.sum(lat_limits) / 2.
146 lon_center = np.sum(lon_limits) / 2.
147
148 lat_limits = [lat_center - box_size / 2. - bx_buf,
149 lat_center + box_size / 2. + bx_buf]
150 lon_limits = [lon_center - box_size / 2. - bx_buf,
151 lon_center + box_size / 2. + bx_buf]
152
153 data = self._arm[dsname][data_field].values
154
155 # Create base plot projection
156 ax = plt.axes(projection=projection)
157 plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)
158 ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0],
159 lat_limits[1]], crs=projection)
160
161 if title is None:
162 try:
163 dim = list(self._arm[dsname][data_field].dims)
164 ts = pd.to_datetime(str(self._arm[dsname][dim[0]].values[0]))
165 date = ts.strftime('%Y-%m-%d')
166 time_str = ts.strftime('%H:%M:%S')
167 plt.title(' '.join([dsname, 'at', date, time_str]))
168 except NameError:
169 plt.title(dsname)
170 else:
171 plt.title(title)
172
173 if stamen:
174 tiler = Stamen(stamen)
175 ax.add_image(tiler, tile)
176
177 colorbar_map = None
178 if cmap is not None:
179 colorbar_map = plt.cm.get_cmap(cmap)
180 sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)
181 cbar = plt.colorbar(sc)
182 cbar.ax.set_ylabel(cbar_label)
183 if cartopy_feature is not None:
184 if isinstance(cartopy_feature, str):
185 cartopy_feature = [cartopy_feature]
186 cartopy_feature = [ii.upper() for ii in cartopy_feature]
187 if 'STATES' in cartopy_feature:
188 ax.add_feature(cfeature.STATES.with_scale('10m'))
189 if 'LAND' in cartopy_feature:
190 ax.add_feature(cfeature.LAND)
191 if 'OCEAN' in cartopy_feature:
192 ax.add_feature(cfeature.OCEAN)
193 if 'COASTLINE' in cartopy_feature:
194 ax.add_feature(cfeature.COASTLINE)
195 if 'BORDERS' in cartopy_feature:
196 ax.add_feature(cfeature.BORDERS, linestyle=':')
197 if 'LAKES' in cartopy_feature:
198 ax.add_feature(cfeature.LAKES, alpha=0.5)
199 if 'RIVERS' in cartopy_feature:
200 ax.add_feature(cfeature.RIVERS)
201 if text is not None:
202 for label, location in text.items():
203 ax.plot(location[0], location[1], marker='*', color='black')
204 ax.text(location[0], location[1], label, color='black')
205
206 if gridlines:
207 gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
208 linewidth=1, color='gray', alpha=0.5,
209 linestyle='--')
210 gl.xlabels_top = False
211 gl.ylabels_left = True
212 gl.xlabels_bottom = True
213 gl.ylabels_right = False
214 gl.xlabel_style = {'size': 6, 'color': 'gray'}
215 gl.ylabel_style = {'size': 6, 'color': 'gray'}
216
217 return ax
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/plotting/GeoDisplay.py b/act/plotting/GeoDisplay.py
--- a/act/plotting/GeoDisplay.py
+++ b/act/plotting/GeoDisplay.py
@@ -71,8 +71,9 @@
to create label from long_name and units.
title : str
Plot title.
- projection : str
- Project to use on plot.
+ projection : cartopy.crs object
+ Project to use on plot. See
+ https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
plot_buffer : float
Buffer to add around data on plot in lat and lon dimension.
stamen : str
@@ -204,14 +205,19 @@
ax.text(location[0], location[1], label, color='black')
if gridlines:
- gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
- linewidth=1, color='gray', alpha=0.5,
- linestyle='--')
- gl.xlabels_top = False
- gl.ylabels_left = True
- gl.xlabels_bottom = True
- gl.ylabels_right = False
- gl.xlabel_style = {'size': 6, 'color': 'gray'}
- gl.ylabel_style = {'size': 6, 'color': 'gray'}
+ if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:
+ gl = ax.gridlines(crs=projection, draw_labels=True,
+ linewidth=1, color='gray', alpha=0.5,
+ linestyle='--')
+ gl.xlabels_top = False
+ gl.ylabels_left = True
+ gl.xlabels_bottom = True
+ gl.ylabels_right = False
+ gl.xlabel_style = {'size': 6, 'color': 'gray'}
+ gl.ylabel_style = {'size': 6, 'color': 'gray'}
+ else:
+ # Labels are only currently supported for PlateCarree and Mercator
+ gl = ax.gridlines(draw_labels=False, linewidth=1, color='gray',
+ alpha=0.5, linestyle='--')
return ax
| {"golden_diff": "diff --git a/act/plotting/GeoDisplay.py b/act/plotting/GeoDisplay.py\n--- a/act/plotting/GeoDisplay.py\n+++ b/act/plotting/GeoDisplay.py\n@@ -71,8 +71,9 @@\n to create label from long_name and units.\n title : str\n Plot title.\n- projection : str\n- Project to use on plot.\n+ projection : cartopy.crs object\n+ Project to use on plot. See\n+ https://scitools.org.uk/cartopy/docs/latest/crs/projections.html\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n stamen : str\n@@ -204,14 +205,19 @@\n ax.text(location[0], location[1], label, color='black')\n \n if gridlines:\n- gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n- linewidth=1, color='gray', alpha=0.5,\n- linestyle='--')\n- gl.xlabels_top = False\n- gl.ylabels_left = True\n- gl.xlabels_bottom = True\n- gl.ylabels_right = False\n- gl.xlabel_style = {'size': 6, 'color': 'gray'}\n- gl.ylabel_style = {'size': 6, 'color': 'gray'}\n+ if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n+ gl = ax.gridlines(crs=projection, draw_labels=True,\n+ linewidth=1, color='gray', alpha=0.5,\n+ linestyle='--')\n+ gl.xlabels_top = False\n+ gl.ylabels_left = True\n+ gl.xlabels_bottom = True\n+ gl.ylabels_right = False\n+ gl.xlabel_style = {'size': 6, 'color': 'gray'}\n+ gl.ylabel_style = {'size': 6, 'color': 'gray'}\n+ else:\n+ # Labels are only currently supported for PlateCarree and Mercator\n+ gl = ax.gridlines(draw_labels=False, linewidth=1, color='gray',\n+ alpha=0.5, linestyle='--')\n \n return ax\n", "issue": "geodisplay.py projection not working with any other options but the default\nWhile using the GeographicPlotDisplay function when trying to use other cartopy projections the map extent changes or no data appears. \r\n\r\nThe default is PlateCarree and works fine but when trying to use a Mercator projection the data shows up but the map extent is changed. \r\n\r\n\r\nWhen using a LambertConformal projection the extent appears to be off since no features appear that were suppose to be plotted.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nact.plotting.GeoDisplay\n-----------------------\n\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\ntry:\n import cartopy.crs as ccrs\n from cartopy.io.img_tiles import Stamen\n import cartopy.feature as cfeature\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot..\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. 
More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n def __init__(self, obj, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\"Cartopy needs to be installed on your \"\n \"system to make geographic display plots.\")\n super().__init__(obj, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(self, data_field=None, lat_field='lat',\n lon_field='lon', dsname=None, cbar_label=None, title=None,\n projection=ccrs.PlateCarree(), plot_buffer=0.08,\n stamen='terrain-background', tile=8, cartopy_feature=None,\n cmap='rainbow', text=None, gridlines=True, **kwargs):\n \"\"\"\n Creates a latttude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data filed in object to plot.\n lat_field : str\n Name of latitude field in object to use.\n lon_field : str\n Name of longitude field in object to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : str\n Project to use on plot.\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n stamen : str\n Dataset to use for background image. Set to None to not use\n background image.\n tile : int\n Tile zoom to use with background image. Higer number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. 
See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n # Get current plotting figure\n # del self.axes\n # if self.fig is None:\n # self.fig = plt.figure()\n\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \"\n \"or more datasets in the GeographicPlotDisplay \"\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n if data_field is None:\n raise ValueError((\"You must enter the name of the data \"\n \"to be plotted.\"))\n\n # Extract data from object\n try:\n lat = self._arm[dsname][lat_field].values\n except KeyError:\n raise ValueError((\"You will need to provide the name of the \"\n \"field if not '{}' to use for latitued \"\n \"data.\").format(lat_field))\n try:\n lon = self._arm[dsname][lon_field].values\n except KeyError:\n raise ValueError((\"You will need to provide the name of the \"\n \"field if not '{}' to use for longitude \"\n \"data.\").format(lon_field))\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._arm[dsname][data_field].attrs['long_name'] +\n ' (' + self._arm[dsname][data_field].attrs['units'] + ')')\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)),\n np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.\n lon_center = np.sum(lon_limits) / 2.\n\n lat_limits = [lat_center - box_size / 2. - bx_buf,\n lat_center + box_size / 2. + bx_buf]\n lon_limits = [lon_center - box_size / 2. - bx_buf,\n lon_center + box_size / 2. + bx_buf]\n\n data = self._arm[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0],\n lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._arm[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._arm[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if stamen:\n tiler = Stamen(stamen)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = plt.cm.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if 
gridlines:\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=1, color='gray', alpha=0.5,\n linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = True\n gl.xlabels_bottom = True\n gl.ylabels_right = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n\n return ax\n", "path": "act/plotting/GeoDisplay.py"}], "after_files": [{"content": "\"\"\"\nact.plotting.GeoDisplay\n-----------------------\n\nStores the class for GeographicPlotDisplay.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom .plot import Display\ntry:\n import cartopy.crs as ccrs\n from cartopy.io.img_tiles import Stamen\n import cartopy.feature as cfeature\n CARTOPY_AVAILABLE = True\nexcept ImportError:\n CARTOPY_AVAILABLE = False\n\n\nclass GeographicPlotDisplay(Display):\n \"\"\"\n A class for making geographic tracer plot of aircraft, ship or other moving\n platform plot..\n\n This is inherited from the :func:`act.plotting.Display`\n class and has therefore has the same attributes as that class.\n See :func:`act.plotting.Display`\n for more information. There are no additional attributes or parameters\n to this class.\n\n In order to create geographic plots, ACT needs the Cartopy package to be\n installed on your system. More information about\n Cartopy go here:https://scitools.org.uk/cartopy/docs/latest/ .\n\n \"\"\"\n def __init__(self, obj, ds_name=None, **kwargs):\n if not CARTOPY_AVAILABLE:\n raise ImportError(\"Cartopy needs to be installed on your \"\n \"system to make geographic display plots.\")\n super().__init__(obj, ds_name, **kwargs)\n if self.fig is None:\n self.fig = plt.figure(**kwargs)\n\n def geoplot(self, data_field=None, lat_field='lat',\n lon_field='lon', dsname=None, cbar_label=None, title=None,\n projection=ccrs.PlateCarree(), plot_buffer=0.08,\n stamen='terrain-background', tile=8, cartopy_feature=None,\n cmap='rainbow', text=None, gridlines=True, **kwargs):\n \"\"\"\n Creates a latttude and longitude plot of a time series data set with\n data values indicated by color and described with a colorbar.\n Latitude values must be in degree north (-90 to 90) and\n longitude must be in degree east (-180 to 180).\n\n Parameters\n ----------\n data_field : str\n Name of data filed in object to plot.\n lat_field : str\n Name of latitude field in object to use.\n lon_field : str\n Name of longitude field in object to use.\n dsname : str or None\n The name of the datastream to plot. Set to None to make ACT\n attempt to automatically determine this.\n cbar_label : str\n Label to use with colorbar. If set to None will attempt\n to create label from long_name and units.\n title : str\n Plot title.\n projection : cartopy.crs object\n Project to use on plot. See\n https://scitools.org.uk/cartopy/docs/latest/crs/projections.html\n plot_buffer : float\n Buffer to add around data on plot in lat and lon dimension.\n stamen : str\n Dataset to use for background image. Set to None to not use\n background image.\n tile : int\n Tile zoom to use with background image. Higer number indicates\n more resolution. A value of 8 is typical for a normal sonde plot.\n cartopy_feature : list of str or str\n Cartopy feature to add to plot.\n cmap : str\n Color map to use for colorbar.\n text : dictionary\n Dictionary of {text:[lon,lat]} to add to plot. 
Can have more\n than one set of text to add.\n gridlines : boolean\n Use latitude and longitude gridlines.\n **kwargs : keyword arguments\n Any other keyword arguments that will be passed\n into :func:`matplotlib.pyplot.scatter` when the figure\n is made. See the matplotlib documentation for further details\n on what keyword arguments are available.\n\n \"\"\"\n # Get current plotting figure\n # del self.axes\n # if self.fig is None:\n # self.fig = plt.figure()\n\n if dsname is None and len(self._arm.keys()) > 1:\n raise ValueError((\"You must choose a datastream when there are 2 \"\n \"or more datasets in the GeographicPlotDisplay \"\n \"object.\"))\n elif dsname is None:\n dsname = list(self._arm.keys())[0]\n\n if data_field is None:\n raise ValueError((\"You must enter the name of the data \"\n \"to be plotted.\"))\n\n # Extract data from object\n try:\n lat = self._arm[dsname][lat_field].values\n except KeyError:\n raise ValueError((\"You will need to provide the name of the \"\n \"field if not '{}' to use for latitued \"\n \"data.\").format(lat_field))\n try:\n lon = self._arm[dsname][lon_field].values\n except KeyError:\n raise ValueError((\"You will need to provide the name of the \"\n \"field if not '{}' to use for longitude \"\n \"data.\").format(lon_field))\n\n # Set up metadata information for display on plot\n if cbar_label is None:\n try:\n cbar_label = (\n self._arm[dsname][data_field].attrs['long_name'] +\n ' (' + self._arm[dsname][data_field].attrs['units'] + ')')\n except KeyError:\n cbar_label = data_field\n\n lat_limits = [np.nanmin(lat), np.nanmax(lat)]\n lon_limits = [np.nanmin(lon), np.nanmax(lon)]\n box_size = np.max([np.abs(np.diff(lat_limits)),\n np.abs(np.diff(lon_limits))])\n bx_buf = box_size * plot_buffer\n\n lat_center = np.sum(lat_limits) / 2.\n lon_center = np.sum(lon_limits) / 2.\n\n lat_limits = [lat_center - box_size / 2. - bx_buf,\n lat_center + box_size / 2. + bx_buf]\n lon_limits = [lon_center - box_size / 2. - bx_buf,\n lon_center + box_size / 2. 
+ bx_buf]\n\n data = self._arm[dsname][data_field].values\n\n # Create base plot projection\n ax = plt.axes(projection=projection)\n plt.subplots_adjust(left=0.01, right=0.99, bottom=0.05, top=0.93)\n ax.set_extent([lon_limits[0], lon_limits[1], lat_limits[0],\n lat_limits[1]], crs=projection)\n\n if title is None:\n try:\n dim = list(self._arm[dsname][data_field].dims)\n ts = pd.to_datetime(str(self._arm[dsname][dim[0]].values[0]))\n date = ts.strftime('%Y-%m-%d')\n time_str = ts.strftime('%H:%M:%S')\n plt.title(' '.join([dsname, 'at', date, time_str]))\n except NameError:\n plt.title(dsname)\n else:\n plt.title(title)\n\n if stamen:\n tiler = Stamen(stamen)\n ax.add_image(tiler, tile)\n\n colorbar_map = None\n if cmap is not None:\n colorbar_map = plt.cm.get_cmap(cmap)\n sc = ax.scatter(lon, lat, c=data, cmap=colorbar_map, **kwargs)\n cbar = plt.colorbar(sc)\n cbar.ax.set_ylabel(cbar_label)\n if cartopy_feature is not None:\n if isinstance(cartopy_feature, str):\n cartopy_feature = [cartopy_feature]\n cartopy_feature = [ii.upper() for ii in cartopy_feature]\n if 'STATES' in cartopy_feature:\n ax.add_feature(cfeature.STATES.with_scale('10m'))\n if 'LAND' in cartopy_feature:\n ax.add_feature(cfeature.LAND)\n if 'OCEAN' in cartopy_feature:\n ax.add_feature(cfeature.OCEAN)\n if 'COASTLINE' in cartopy_feature:\n ax.add_feature(cfeature.COASTLINE)\n if 'BORDERS' in cartopy_feature:\n ax.add_feature(cfeature.BORDERS, linestyle=':')\n if 'LAKES' in cartopy_feature:\n ax.add_feature(cfeature.LAKES, alpha=0.5)\n if 'RIVERS' in cartopy_feature:\n ax.add_feature(cfeature.RIVERS)\n if text is not None:\n for label, location in text.items():\n ax.plot(location[0], location[1], marker='*', color='black')\n ax.text(location[0], location[1], label, color='black')\n\n if gridlines:\n if projection == ccrs.PlateCarree() or projection == ccrs.Mercator:\n gl = ax.gridlines(crs=projection, draw_labels=True,\n linewidth=1, color='gray', alpha=0.5,\n linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = True\n gl.xlabels_bottom = True\n gl.ylabels_right = False\n gl.xlabel_style = {'size': 6, 'color': 'gray'}\n gl.ylabel_style = {'size': 6, 'color': 'gray'}\n else:\n # Labels are only currently supported for PlateCarree and Mercator\n gl = ax.gridlines(draw_labels=False, linewidth=1, color='gray',\n alpha=0.5, linestyle='--')\n\n return ax\n", "path": "act/plotting/GeoDisplay.py"}]} | 2,995 | 501 |
gh_patches_debug_22205 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4523 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The Source Interface's /journalist-key endpoint returns a 404 instead of the Submission Key
## Description
The submission key should be available via the Source Interface at the URL http://[Onion address]/journalist-key, but said URL returns a 404 instead.
## Steps to Reproduce
- set up a production SecureDrop instance on HW or VMs
- visit the source interface, click through to the submission page, and click the ``public key`` link.
## Expected Behavior
The Source Interface responds with the Submission Key
## Actual Behavior
The Source Interface responds with a 404, and the following errors are recorded if logging is enabled:
```
[Thu Jun 13 17:22:12.285962 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup
[Thu Jun 13 17:22:12.286129 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup
[Thu Jun 13 17:22:12.415949 2019] [wsgi:error] [pid 7028:tid 3882000488192] [remote 127.0.0.1:20954] mod_wsgi (pid=7028): Exception occurred processing WSGI script '/var/www/source.wsgi'.
[Thu Jun 13 17:22:12.416036 2019] [wsgi:error] [pid 7028:tid 3882000488192] [remote 127.0.0.1:20954] TypeError: file like object yielded non string type
[Thu Jun 13 17:22:12.416281 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup
[Thu Jun 13 17:22:12.416360 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup
[Thu Jun 13 17:22:13.812032 2019] [authz_core:debug] [pid 7031:tid 3882000144128] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key
[Thu Jun 13 17:22:14.587228 2019] [authz_core:debug] [pid 7031:tid 3881991599872] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key
[Thu Jun 13 17:22:14.587409 2019] [authz_core:debug] [pid 7031:tid 3881991599872] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key
[Thu Jun 13 17:22:15.296269 2019] [authz_core:debug] [pid 7031:tid 3881911961344] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key
```
Please provide screenshots where appropriate.
## Comments
Suggestions to fix, any other relevant information.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/info.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from io import StringIO
4 from flask import Blueprint, render_template, send_file, current_app
5
6
7 def make_blueprint(config):
8 view = Blueprint('info', __name__)
9
10 @view.route('/tor2web-warning')
11 def tor2web_warning():
12 return render_template("tor2web-warning.html")
13
14 @view.route('/use-tor')
15 def recommend_tor_browser():
16 return render_template("use-tor-browser.html")
17
18 @view.route('/journalist-key')
19 def download_journalist_pubkey():
20 journalist_pubkey = current_app.crypto_util.gpg.export_keys(
21 config.JOURNALIST_KEY)
22 return send_file(StringIO(journalist_pubkey),
23 mimetype="application/pgp-keys",
24 attachment_filename=config.JOURNALIST_KEY + ".asc",
25 as_attachment=True)
26
27 @view.route('/why-journalist-key')
28 def why_download_journalist_pubkey():
29 return render_template("why-journalist-key.html")
30
31 return view
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py
--- a/securedrop/source_app/info.py
+++ b/securedrop/source_app/info.py
@@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
-
-from io import StringIO
+import six
from flask import Blueprint, render_template, send_file, current_app
+if six.PY2:
+ from cStringIO import StringIO # noqa
+else:
+ from io import BytesIO # noqa
+
def make_blueprint(config):
view = Blueprint('info', __name__)
@@ -19,7 +23,11 @@
def download_journalist_pubkey():
journalist_pubkey = current_app.crypto_util.gpg.export_keys(
config.JOURNALIST_KEY)
- return send_file(StringIO(journalist_pubkey),
+ if six.PY2:
+ data = StringIO(journalist_pubkey)
+ else:
+ data = BytesIO(journalist_pubkey.encode('utf-8'))
+ return send_file(data,
mimetype="application/pgp-keys",
attachment_filename=config.JOURNALIST_KEY + ".asc",
as_attachment=True)
| {"golden_diff": "diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py\n--- a/securedrop/source_app/info.py\n+++ b/securedrop/source_app/info.py\n@@ -1,8 +1,12 @@\n # -*- coding: utf-8 -*-\n-\n-from io import StringIO\n+import six\n from flask import Blueprint, render_template, send_file, current_app\n \n+if six.PY2:\n+ from cStringIO import StringIO # noqa\n+else:\n+ from io import BytesIO # noqa\n+\n \n def make_blueprint(config):\n view = Blueprint('info', __name__)\n@@ -19,7 +23,11 @@\n def download_journalist_pubkey():\n journalist_pubkey = current_app.crypto_util.gpg.export_keys(\n config.JOURNALIST_KEY)\n- return send_file(StringIO(journalist_pubkey),\n+ if six.PY2:\n+ data = StringIO(journalist_pubkey)\n+ else:\n+ data = BytesIO(journalist_pubkey.encode('utf-8'))\n+ return send_file(data,\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n", "issue": "The Source Interface's /journalist-key endpoint returns a 404 instead of the Submission Key\n## Description\r\n\r\nThe submission key should be available via the Source Interface at the URL http://[Onion address]/journalist-key, but said URL returns a 404 instead.\r\n\r\n## Steps to Reproduce\r\n\r\n- set up a production SecureDrop instance on HW or VMs\r\n- visit the source interface, click through to the submission page, and click the ``public key`` link.\r\n\r\n## Expected Behavior\r\nThe Source Interface responds with the Submission Key\r\n\r\n## Actual Behavior\r\nThe Source Interface responds with a 404, and the following errors are recorded if logging is enabled:\r\n```\r\n[Thu Jun 13 17:22:12.285962 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup\r\n[Thu Jun 13 17:22:12.286129 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup\r\n[Thu Jun 13 17:22:12.415949 2019] [wsgi:error] [pid 7028:tid 3882000488192] [remote 127.0.0.1:20954] mod_wsgi (pid=7028): Exception occurred processing WSGI script '/var/www/source.wsgi'.\r\n[Thu Jun 13 17:22:12.416036 2019] [wsgi:error] [pid 7028:tid 3882000488192] [remote 127.0.0.1:20954] TypeError: file like object yielded non string type\r\n[Thu Jun 13 17:22:12.416281 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup\r\n[Thu Jun 13 17:22:12.416360 2019] [authz_core:debug] [pid 7030:tid 3881911650048] mod_authz_core.c(835): [client 127.0.0.1:45900] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/lookup\r\n[Thu Jun 13 17:22:13.812032 2019] [authz_core:debug] [pid 7031:tid 3882000144128] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key\r\n[Thu Jun 13 17:22:14.587228 2019] [authz_core:debug] [pid 7031:tid 3881991599872] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key\r\n[Thu Jun 13 17:22:14.587409 2019] [authz_core:debug] [pid 7031:tid 3881991599872] mod_authz_core.c(835): [client 127.0.0.1:45902] 
AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key\r\n[Thu Jun 13 17:22:15.296269 2019] [authz_core:debug] [pid 7031:tid 3881911961344] mod_authz_core.c(835): [client 127.0.0.1:45902] AH01628: authorization result: granted (no directives), referer: http://fehpa6pmg6ke4i3u.onion/journalist-key\r\n```\r\n\r\n\r\nPlease provide screenshots where appropriate.\r\n\r\n## Comments\r\n\r\nSuggestions to fix, any other relevant information.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom io import StringIO\nfrom flask import Blueprint, render_template, send_file, current_app\n\n\ndef make_blueprint(config):\n view = Blueprint('info', __name__)\n\n @view.route('/tor2web-warning')\n def tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n @view.route('/use-tor')\n def recommend_tor_browser():\n return render_template(\"use-tor-browser.html\")\n\n @view.route('/journalist-key')\n def download_journalist_pubkey():\n journalist_pubkey = current_app.crypto_util.gpg.export_keys(\n config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n @view.route('/why-journalist-key')\n def why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n return view\n", "path": "securedrop/source_app/info.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport six\nfrom flask import Blueprint, render_template, send_file, current_app\n\nif six.PY2:\n from cStringIO import StringIO # noqa\nelse:\n from io import BytesIO # noqa\n\n\ndef make_blueprint(config):\n view = Blueprint('info', __name__)\n\n @view.route('/tor2web-warning')\n def tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n @view.route('/use-tor')\n def recommend_tor_browser():\n return render_template(\"use-tor-browser.html\")\n\n @view.route('/journalist-key')\n def download_journalist_pubkey():\n journalist_pubkey = current_app.crypto_util.gpg.export_keys(\n config.JOURNALIST_KEY)\n if six.PY2:\n data = StringIO(journalist_pubkey)\n else:\n data = BytesIO(journalist_pubkey.encode('utf-8'))\n return send_file(data,\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n @view.route('/why-journalist-key')\n def why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n return view\n", "path": "securedrop/source_app/info.py"}]} | 1,891 | 260 |
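A minimal standalone sketch (not part of the dataset record above) of the failure the SecureDrop patch addresses: under Python 3, `gpg.export_keys()` returns `str`, but the WSGI layer only accepts byte chunks, which produces the "file like object yielded non string type" error in the logs. Wrapping the encoded key in a `BytesIO` before calling `flask.send_file` avoids it. The route, filename and key text below are placeholders, and the `attachment_filename` keyword assumes Flask 1.x as used in the record (Flask 2.x renamed it to `download_name`).

```python
from io import BytesIO

from flask import Flask, send_file

app = Flask(__name__)

# Placeholder ASCII-armored key; a real app would export it from GPG.
PUBLIC_KEY = (
    "-----BEGIN PGP PUBLIC KEY BLOCK-----\n"
    "...\n"
    "-----END PGP PUBLIC KEY BLOCK-----\n"
)


@app.route("/journalist-key")
def download_journalist_pubkey():
    # Encode the str to bytes before handing it to send_file; passing a
    # StringIO makes the WSGI server yield text chunks and fail on Python 3.
    data = BytesIO(PUBLIC_KEY.encode("utf-8"))
    return send_file(
        data,
        mimetype="application/pgp-keys",
        attachment_filename="journalist.asc",  # download_name in Flask >= 2.0
        as_attachment=True,
    )
```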
gh_patches_debug_18749 | rasdani/github-patches | git_diff | Parsl__parsl-149 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add missing `scaling_enabled` method for `TurbineExecutor`
Currently `tests/test_swift.py` fails:
```
======================================================================
ERROR: test_swift.test_except
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/annawoodard/.conda/envs/parsl_py36/lib/python3.6/site-packages/nose/case.py", line 198, in runTest
self.test(*self.arg)
File "/home/annawoodard/parsl/parsl/validation/test_swift.py", line 38, in test_except
tex = TurbineExecutor()
TypeError: Can't instantiate abstract class TurbineExecutor with abstract methods scaling_enabled
-------------------- >> begin captured stdout << ---------------------
Start
--------------------- >> end captured stdout << ----------------------
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/executors/swift_t.py`
Content:
```
1 ''' Sample Executor for integration with SwiftT.
2
3 This follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_
4 to some extent.
5
6 '''
7 from concurrent.futures import Future
8 import logging
9 import uuid
10 import threading
11 import queue
12 import multiprocessing as mp
13
14 from ipyparallel.serialize import pack_apply_message, unpack_apply_message
15 from ipyparallel.serialize import serialize_object, deserialize_object
16
17 from parsl.executors.base import ParslExecutor
18
19 logger = logging.getLogger(__name__)
20
21
22 BUFFER_THRESHOLD = 1024 * 1024
23 ITEM_THRESHOLD = 1024
24
25
26 def runner(incoming_q, outgoing_q):
27 ''' This is a function that mocks the Swift-T side. It listens on the the
28 incoming_q for tasks and posts returns on the outgoing_q
29
30 Args:
31 - incoming_q (Queue object) : The queue to listen on
32 - outgoing_q (Queue object) : Queue to post results on
33
34 The messages posted on the incoming_q will be of the form :
35
36 .. code:: python
37
38 {
39 "task_id" : <uuid.uuid4 string>,
40 "buffer" : serialized buffer containing the fn, args and kwargs
41 }
42
43 If ``None`` is received, the runner will exit.
44
45 Response messages should be of the form:
46
47 .. code:: python
48
49 {
50 "task_id" : <uuid.uuid4 string>,
51 "result" : serialized buffer containing result
52 "exception" : serialized exception object
53 }
54
55 On exiting the runner will post ``None`` to the outgoing_q
56
57 '''
58 logger.debug("[RUNNER] Starting")
59
60 def execute_task(bufs):
61 ''' Deserialize the buf, and execute the task.
62 Returns the serialized result/exception
63 '''
64 all_names = dir(__builtins__)
65 user_ns = locals()
66 user_ns.update({'__builtins__': {k: getattr(__builtins__, k) for k in all_names}})
67
68 f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
69
70 fname = getattr(f, '__name__', 'f')
71 prefix = "parsl_"
72 fname = prefix + "f"
73 argname = prefix + "args"
74 kwargname = prefix + "kwargs"
75 resultname = prefix + "result"
76
77 user_ns.update({fname: f,
78 argname: args,
79 kwargname: kwargs,
80 resultname: resultname})
81
82 code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
83 argname, kwargname)
84
85 try:
86
87 print("[RUNNER] Executing : {0}".format(code))
88 exec(code, user_ns, user_ns)
89
90 except Exception as e:
91 logger.warning("Caught errors but will not handled %s", e)
92 raise e
93
94 else:
95 # print("Done : {0}".format(locals()))
96 print("[RUNNER] Result : {0}".format(user_ns.get(resultname)))
97 return user_ns.get(resultname)
98
99 while True:
100 try:
101 # Blocking wait on the queue
102 msg = incoming_q.get(block=True, timeout=10)
103 # logger.debug("[RUNNER] Got message : %s", msg)
104
105 except queue.Empty:
106 # Handle case where no items were on queue
107 logger.debug("[RUNNER] got nothing")
108
109 except IOError as ioerror:
110 logger.debug("[RUNNER] broken pipe, error: %s", ioerror)
111 try:
112 # Attempt to send a stop notification to the management thread
113 outgoing_q.put(None)
114
115 except Exception:
116 pass
117
118 break
119
120 except Exception as e:
121 logger.debug("[RUNNER] caught unknown exception : %s", e)
122
123 else:
124 # Handle received message
125 if not msg:
126 # Empty message is a die request
127 logger.debug("[RUNNER] Received exit request")
128 outgoing_q.put(None)
129 break
130 else:
131 # Received a valid message, handle it
132 logger.debug("[RUNNER] Got a valid task : %s", msg["task_id"])
133 try:
134 response_obj = execute_task(msg['buffer'])
135 response = {"task_id": msg["task_id"],
136 "result": serialize_object(response_obj)}
137
138 logger.warning("[RUNNER] Returing result : %s",
139 deserialize_object(response["result"]))
140
141 except Exception as e:
142 logger.debug("[RUNNER] Caught task exception")
143 response = {"task_id": msg["task_id"],
144 "exception": serialize_object(e)}
145
146 outgoing_q.put(response)
147
148 logger.debug("[RUNNER] Terminating")
149
150
151 class TurbineExecutor(ParslExecutor):
152 ''' The Turbine executor. Bypass the Swift/T language and run on top off the Turbine engines
153 in an MPI environment.
154
155 Here's a simple diagram
156
157 .. code:: python
158
159 | Data | Executor | IPC | External Process(es)
160 | Flow | | |
161 Task | Kernel | | |
162 +----->|-------->|------------>|outgoing_q -|-> Worker_Process
163 | | | | | | |
164 Parsl<---Fut-| | | | result exception
165 ^ | | | | | |
166 | | | Q_mngmnt | | V V
167 | | | Thread<--|incoming_q<-|--- +---------+
168 | | | | | |
169 | | | | | |
170 +----update_fut-----+
171
172 '''
173
174 def _queue_management_worker(self):
175 ''' The queue management worker is responsible for listening to the incoming_q
176 for task status messages and updating tasks with results/exceptions/updates
177
178 It expects the following messages:
179
180 .. code:: python
181
182 {
183 "task_id" : <task_id>
184 "result" : serialized result object, if task succeeded
185 ... more tags could be added later
186 }
187
188 {
189 "task_id" : <task_id>
190 "exception" : serialized exception object, on failure
191 }
192
193 We don't support these yet, but they could be added easily as heartbeat.
194
195 .. code:: python
196
197 {
198 "task_id" : <task_id>
199 "cpu_stat" : <>
200 "mem_stat" : <>
201 "io_stat" : <>
202 "started" : tstamp
203 }
204
205 The None message is a die request.
206 None
207
208 '''
209
210 while True:
211 logger.debug("[MTHREAD] Management thread active")
212 try:
213 msg = self.incoming_q.get(block=True, timeout=1)
214
215 except queue.Empty as e:
216 # timed out.
217 pass
218
219 except IOError as e:
220 logger.debug("[MTHREAD] caught broken queue : %s : errno:%s", e, e.errno)
221 return
222
223 except Exception as e:
224 logger.debug("[MTHREAD] caught unknown exception : %s", e)
225
226 else:
227
228 if msg is None:
229 logger.debug("[MTHREAD] Got None")
230 return
231
232 else:
233 logger.debug("[MTHREAD] Got message : %s", msg)
234 task_fut = self.tasks[msg['task_id']]
235 if 'result' in msg:
236 result, _ = deserialize_object(msg['result'])
237 task_fut.set_result(result)
238
239 elif 'exception' in msg:
240 exception, _ = deserialize_object(msg['exception'])
241 task_fut.set_exception(exception)
242
243 if not self.isAlive:
244 break
245
246 # When the executor gets lost, the weakref callback will wake up
247 # the queue management thread.
248 def weakref_cb(self, q=None):
249 ''' We do not use this yet
250 '''
251
252 q.put(None)
253
254 def _start_queue_management_thread(self):
255 ''' Method to start the management thread as a daemon.
256 Checks if a thread already exists, then starts it.
257 Could be used later as a restart if the management thread dies.
258 '''
259
260 logging.debug("In _start %s", "*" * 40)
261 if self._queue_management_thread is None:
262 logging.debug("Starting management thread ")
263 self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
264 self._queue_management_thread.daemon = True
265 self._queue_management_thread.start()
266
267 else:
268 logging.debug("Management thread already exists, returning")
269
270 def shutdown(self):
271 ''' Shutdown method, to kill the threads and workers.
272 '''
273
274 self.isAlive = False
275 logging.debug("Waking management thread")
276 self.incoming_q.put(None) # Wake up the thread
277 self._queue_management_thread.join() # Force join
278 logging.debug("Exiting thread")
279 self.worker.join()
280 return True
281
282 def __init__(self, swift_attribs=None, config=None, **kwargs):
283 ''' Initialize the thread pool
284 Trying to implement the emews model.
285
286 Kwargs:
287 - swift_attribs : Takes a dict of swift attribs. Fot future.
288
289 '''
290 self.config = config
291 logger.debug("In __init__")
292 self.mp_manager = mp.Manager()
293 self.outgoing_q = self.mp_manager.Queue()
294 self.incoming_q = self.mp_manager.Queue()
295 self.isAlive = True
296
297 self._queue_management_thread = None
298 self._start_queue_management_thread()
299 logger.debug("Created management thread : %s", self._queue_management_thread)
300
301 self.worker = mp.Process(target=runner, args=(self.outgoing_q, self.incoming_q))
302 self.worker.start()
303 logger.debug("Created worker : %s", self.worker)
304 self.tasks = {}
305
306 def submit(self, func, *args, **kwargs):
307 ''' Submits work to the the outgoing_q, an external process listens on this
308 queue for new work. This method is simply pass through and behaves like a
309 submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
310
311 Args:
312 - func (callable) : Callable function
313 - *args (list) : List of arbitrary positional arguments.
314
315 Kwargs:
316 - **kwargs (dict) : A dictionary of arbitrary keyword args for func.
317
318 Returns:
319 Future
320 '''
321 task_id = uuid.uuid4()
322
323 logger.debug("Before pushing to queue : func:%s func_args:%s", func, args)
324
325 self.tasks[task_id] = Future()
326
327 fn_buf = pack_apply_message(func, args, kwargs,
328 buffer_threshold=1024 * 1024,
329 item_threshold=1024)
330
331 msg = {"task_id": task_id,
332 "buffer": fn_buf}
333
334 # Post task to the the outgoing queue
335 self.outgoing_q.put(msg)
336
337 # Return the future
338 return self.tasks[task_id]
339
340 def scale_out(self, workers=1):
341 ''' Scales out the number of active workers by 1
342 This method is notImplemented for threads and will raise the error if called.
343 This would be nice to have, and can be done
344
345 Raises:
346 NotImplemented exception
347 '''
348
349 raise NotImplementedError
350
351 def scale_in(self, workers=1):
352 ''' Scale in the number of active workers by 1
353 This method is notImplemented for threads and will raise the error if called.
354
355 Raises:
356 NotImplemented exception
357 '''
358
359 raise NotImplementedError
360
361
362 if __name__ == "__main__":
363
364 print("Start")
365 turb_x = TurbineExecutor()
366 print("Done")
367
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/executors/swift_t.py b/parsl/executors/swift_t.py
--- a/parsl/executors/swift_t.py
+++ b/parsl/executors/swift_t.py
@@ -302,6 +302,7 @@
self.worker.start()
logger.debug("Created worker : %s", self.worker)
self.tasks = {}
+ self._scaling_enabled = False
def submit(self, func, *args, **kwargs):
''' Submits work to the the outgoing_q, an external process listens on this
@@ -337,6 +338,10 @@
# Return the future
return self.tasks[task_id]
+ @property
+ def scaling_enabled(self):
+ return self._scaling_enabled
+
def scale_out(self, workers=1):
''' Scales out the number of active workers by 1
This method is notImplemented for threads and will raise the error if called.
| {"golden_diff": "diff --git a/parsl/executors/swift_t.py b/parsl/executors/swift_t.py\n--- a/parsl/executors/swift_t.py\n+++ b/parsl/executors/swift_t.py\n@@ -302,6 +302,7 @@\n self.worker.start()\n logger.debug(\"Created worker : %s\", self.worker)\n self.tasks = {}\n+ self._scaling_enabled = False\n \n def submit(self, func, *args, **kwargs):\n ''' Submits work to the the outgoing_q, an external process listens on this\n@@ -337,6 +338,10 @@\n # Return the future\n return self.tasks[task_id]\n \n+ @property\n+ def scaling_enabled(self):\n+ return self._scaling_enabled\n+\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n", "issue": "Add missing `scaling_enabled` method for `TurbineExecutor`\nCurrently `tests/test_swift.py` fails:\r\n```\r\n======================================================================\r\nERROR: test_swift.test_except\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/annawoodard/.conda/envs/parsl_py36/lib/python3.6/site-packages/nose/case.py\", line 198, in runTest\r\n self.test(*self.arg)\r\n File \"/home/annawoodard/parsl/parsl/validation/test_swift.py\", line 38, in test_except\r\n tex = TurbineExecutor()\r\nTypeError: Can't instantiate abstract class TurbineExecutor with abstract methods scaling_enabled\r\n-------------------- >> begin captured stdout << ---------------------\r\nStart\r\n\r\n--------------------- >> end captured stdout << ----------------------\r\n```\r\n\n", "before_files": [{"content": "''' Sample Executor for integration with SwiftT.\n\nThis follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_\nto some extent.\n\n'''\nfrom concurrent.futures import Future\nimport logging\nimport uuid\nimport threading\nimport queue\nimport multiprocessing as mp\n\nfrom ipyparallel.serialize import pack_apply_message, unpack_apply_message\nfrom ipyparallel.serialize import serialize_object, deserialize_object\n\nfrom parsl.executors.base import ParslExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nBUFFER_THRESHOLD = 1024 * 1024\nITEM_THRESHOLD = 1024\n\n\ndef runner(incoming_q, outgoing_q):\n ''' This is a function that mocks the Swift-T side. It listens on the the\n incoming_q for tasks and posts returns on the outgoing_q\n\n Args:\n - incoming_q (Queue object) : The queue to listen on\n - outgoing_q (Queue object) : Queue to post results on\n\n The messages posted on the incoming_q will be of the form :\n\n .. code:: python\n\n {\n \"task_id\" : <uuid.uuid4 string>,\n \"buffer\" : serialized buffer containing the fn, args and kwargs\n }\n\n If ``None`` is received, the runner will exit.\n\n Response messages should be of the form:\n\n .. 
code:: python\n\n {\n \"task_id\" : <uuid.uuid4 string>,\n \"result\" : serialized buffer containing result\n \"exception\" : serialized exception object\n }\n\n On exiting the runner will post ``None`` to the outgoing_q\n\n '''\n logger.debug(\"[RUNNER] Starting\")\n\n def execute_task(bufs):\n ''' Deserialize the buf, and execute the task.\n Returns the serialized result/exception\n '''\n all_names = dir(__builtins__)\n user_ns = locals()\n user_ns.update({'__builtins__': {k: getattr(__builtins__, k) for k in all_names}})\n\n f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)\n\n fname = getattr(f, '__name__', 'f')\n prefix = \"parsl_\"\n fname = prefix + \"f\"\n argname = prefix + \"args\"\n kwargname = prefix + \"kwargs\"\n resultname = prefix + \"result\"\n\n user_ns.update({fname: f,\n argname: args,\n kwargname: kwargs,\n resultname: resultname})\n\n code = \"{0} = {1}(*{2}, **{3})\".format(resultname, fname,\n argname, kwargname)\n\n try:\n\n print(\"[RUNNER] Executing : {0}\".format(code))\n exec(code, user_ns, user_ns)\n\n except Exception as e:\n logger.warning(\"Caught errors but will not handled %s\", e)\n raise e\n\n else:\n # print(\"Done : {0}\".format(locals()))\n print(\"[RUNNER] Result : {0}\".format(user_ns.get(resultname)))\n return user_ns.get(resultname)\n\n while True:\n try:\n # Blocking wait on the queue\n msg = incoming_q.get(block=True, timeout=10)\n # logger.debug(\"[RUNNER] Got message : %s\", msg)\n\n except queue.Empty:\n # Handle case where no items were on queue\n logger.debug(\"[RUNNER] got nothing\")\n\n except IOError as ioerror:\n logger.debug(\"[RUNNER] broken pipe, error: %s\", ioerror)\n try:\n # Attempt to send a stop notification to the management thread\n outgoing_q.put(None)\n\n except Exception:\n pass\n\n break\n\n except Exception as e:\n logger.debug(\"[RUNNER] caught unknown exception : %s\", e)\n\n else:\n # Handle received message\n if not msg:\n # Empty message is a die request\n logger.debug(\"[RUNNER] Received exit request\")\n outgoing_q.put(None)\n break\n else:\n # Received a valid message, handle it\n logger.debug(\"[RUNNER] Got a valid task : %s\", msg[\"task_id\"])\n try:\n response_obj = execute_task(msg['buffer'])\n response = {\"task_id\": msg[\"task_id\"],\n \"result\": serialize_object(response_obj)}\n\n logger.warning(\"[RUNNER] Returing result : %s\",\n deserialize_object(response[\"result\"]))\n\n except Exception as e:\n logger.debug(\"[RUNNER] Caught task exception\")\n response = {\"task_id\": msg[\"task_id\"],\n \"exception\": serialize_object(e)}\n\n outgoing_q.put(response)\n\n logger.debug(\"[RUNNER] Terminating\")\n\n\nclass TurbineExecutor(ParslExecutor):\n ''' The Turbine executor. Bypass the Swift/T language and run on top off the Turbine engines\n in an MPI environment.\n\n Here's a simple diagram\n\n .. code:: python\n\n | Data | Executor | IPC | External Process(es)\n | Flow | | |\n Task | Kernel | | |\n +----->|-------->|------------>|outgoing_q -|-> Worker_Process\n | | | | | | |\n Parsl<---Fut-| | | | result exception\n ^ | | | | | |\n | | | Q_mngmnt | | V V\n | | | Thread<--|incoming_q<-|--- +---------+\n | | | | | |\n | | | | | |\n +----update_fut-----+\n\n '''\n\n def _queue_management_worker(self):\n ''' The queue management worker is responsible for listening to the incoming_q\n for task status messages and updating tasks with results/exceptions/updates\n\n It expects the following messages:\n\n .. 
code:: python\n\n {\n \"task_id\" : <task_id>\n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : <task_id>\n \"exception\" : serialized exception object, on failure\n }\n\n We don't support these yet, but they could be added easily as heartbeat.\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The None message is a die request.\n None\n\n '''\n\n while True:\n logger.debug(\"[MTHREAD] Management thread active\")\n try:\n msg = self.incoming_q.get(block=True, timeout=1)\n\n except queue.Empty as e:\n # timed out.\n pass\n\n except IOError as e:\n logger.debug(\"[MTHREAD] caught broken queue : %s : errno:%s\", e, e.errno)\n return\n\n except Exception as e:\n logger.debug(\"[MTHREAD] caught unknown exception : %s\", e)\n\n else:\n\n if msg is None:\n logger.debug(\"[MTHREAD] Got None\")\n return\n\n else:\n logger.debug(\"[MTHREAD] Got message : %s\", msg)\n task_fut = self.tasks[msg['task_id']]\n if 'result' in msg:\n result, _ = deserialize_object(msg['result'])\n task_fut.set_result(result)\n\n elif 'exception' in msg:\n exception, _ = deserialize_object(msg['exception'])\n task_fut.set_exception(exception)\n\n if not self.isAlive:\n break\n\n # When the executor gets lost, the weakref callback will wake up\n # the queue management thread.\n def weakref_cb(self, q=None):\n ''' We do not use this yet\n '''\n\n q.put(None)\n\n def _start_queue_management_thread(self):\n ''' Method to start the management thread as a daemon.\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.\n '''\n\n logging.debug(\"In _start %s\", \"*\" * 40)\n if self._queue_management_thread is None:\n logging.debug(\"Starting management thread \")\n self._queue_management_thread = threading.Thread(target=self._queue_management_worker)\n self._queue_management_thread.daemon = True\n self._queue_management_thread.start()\n\n else:\n logging.debug(\"Management thread already exists, returning\")\n\n def shutdown(self):\n ''' Shutdown method, to kill the threads and workers.\n '''\n\n self.isAlive = False\n logging.debug(\"Waking management thread\")\n self.incoming_q.put(None) # Wake up the thread\n self._queue_management_thread.join() # Force join\n logging.debug(\"Exiting thread\")\n self.worker.join()\n return True\n\n def __init__(self, swift_attribs=None, config=None, **kwargs):\n ''' Initialize the thread pool\n Trying to implement the emews model.\n\n Kwargs:\n - swift_attribs : Takes a dict of swift attribs. Fot future.\n\n '''\n self.config = config\n logger.debug(\"In __init__\")\n self.mp_manager = mp.Manager()\n self.outgoing_q = self.mp_manager.Queue()\n self.incoming_q = self.mp_manager.Queue()\n self.isAlive = True\n\n self._queue_management_thread = None\n self._start_queue_management_thread()\n logger.debug(\"Created management thread : %s\", self._queue_management_thread)\n\n self.worker = mp.Process(target=runner, args=(self.outgoing_q, self.incoming_q))\n self.worker.start()\n logger.debug(\"Created worker : %s\", self.worker)\n self.tasks = {}\n\n def submit(self, func, *args, **kwargs):\n ''' Submits work to the the outgoing_q, an external process listens on this\n queue for new work. 
This method is simply pass through and behaves like a\n submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future\n '''\n task_id = uuid.uuid4()\n\n logger.debug(\"Before pushing to queue : func:%s func_args:%s\", func, args)\n\n self.tasks[task_id] = Future()\n\n fn_buf = pack_apply_message(func, args, kwargs,\n buffer_threshold=1024 * 1024,\n item_threshold=1024)\n\n msg = {\"task_id\": task_id,\n \"buffer\": fn_buf}\n\n # Post task to the the outgoing queue\n self.outgoing_q.put(msg)\n\n # Return the future\n return self.tasks[task_id]\n\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n This would be nice to have, and can be done\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n\nif __name__ == \"__main__\":\n\n print(\"Start\")\n turb_x = TurbineExecutor()\n print(\"Done\")\n", "path": "parsl/executors/swift_t.py"}], "after_files": [{"content": "''' Sample Executor for integration with SwiftT.\n\nThis follows the model used by `EMEWS <http://www.mcs.anl.gov/~wozniak/papers/Cancer2_2016.pdf>`_\nto some extent.\n\n'''\nfrom concurrent.futures import Future\nimport logging\nimport uuid\nimport threading\nimport queue\nimport multiprocessing as mp\n\nfrom ipyparallel.serialize import pack_apply_message, unpack_apply_message\nfrom ipyparallel.serialize import serialize_object, deserialize_object\n\nfrom parsl.executors.base import ParslExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nBUFFER_THRESHOLD = 1024 * 1024\nITEM_THRESHOLD = 1024\n\n\ndef runner(incoming_q, outgoing_q):\n ''' This is a function that mocks the Swift-T side. It listens on the the\n incoming_q for tasks and posts returns on the outgoing_q\n\n Args:\n - incoming_q (Queue object) : The queue to listen on\n - outgoing_q (Queue object) : Queue to post results on\n\n The messages posted on the incoming_q will be of the form :\n\n .. code:: python\n\n {\n \"task_id\" : <uuid.uuid4 string>,\n \"buffer\" : serialized buffer containing the fn, args and kwargs\n }\n\n If ``None`` is received, the runner will exit.\n\n Response messages should be of the form:\n\n .. 
code:: python\n\n {\n \"task_id\" : <uuid.uuid4 string>,\n \"result\" : serialized buffer containing result\n \"exception\" : serialized exception object\n }\n\n On exiting the runner will post ``None`` to the outgoing_q\n\n '''\n logger.debug(\"[RUNNER] Starting\")\n\n def execute_task(bufs):\n ''' Deserialize the buf, and execute the task.\n Returns the serialized result/exception\n '''\n all_names = dir(__builtins__)\n user_ns = locals()\n user_ns.update({'__builtins__': {k: getattr(__builtins__, k) for k in all_names}})\n\n f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)\n\n fname = getattr(f, '__name__', 'f')\n prefix = \"parsl_\"\n fname = prefix + \"f\"\n argname = prefix + \"args\"\n kwargname = prefix + \"kwargs\"\n resultname = prefix + \"result\"\n\n user_ns.update({fname: f,\n argname: args,\n kwargname: kwargs,\n resultname: resultname})\n\n code = \"{0} = {1}(*{2}, **{3})\".format(resultname, fname,\n argname, kwargname)\n\n try:\n\n print(\"[RUNNER] Executing : {0}\".format(code))\n exec(code, user_ns, user_ns)\n\n except Exception as e:\n logger.warning(\"Caught errors but will not handled %s\", e)\n raise e\n\n else:\n # print(\"Done : {0}\".format(locals()))\n print(\"[RUNNER] Result : {0}\".format(user_ns.get(resultname)))\n return user_ns.get(resultname)\n\n while True:\n try:\n # Blocking wait on the queue\n msg = incoming_q.get(block=True, timeout=10)\n # logger.debug(\"[RUNNER] Got message : %s\", msg)\n\n except queue.Empty:\n # Handle case where no items were on queue\n logger.debug(\"[RUNNER] got nothing\")\n\n except IOError as ioerror:\n logger.debug(\"[RUNNER] broken pipe, error: %s\", ioerror)\n try:\n # Attempt to send a stop notification to the management thread\n outgoing_q.put(None)\n\n except Exception:\n pass\n\n break\n\n except Exception as e:\n logger.debug(\"[RUNNER] caught unknown exception : %s\", e)\n\n else:\n # Handle received message\n if not msg:\n # Empty message is a die request\n logger.debug(\"[RUNNER] Received exit request\")\n outgoing_q.put(None)\n break\n else:\n # Received a valid message, handle it\n logger.debug(\"[RUNNER] Got a valid task : %s\", msg[\"task_id\"])\n try:\n response_obj = execute_task(msg['buffer'])\n response = {\"task_id\": msg[\"task_id\"],\n \"result\": serialize_object(response_obj)}\n\n logger.warning(\"[RUNNER] Returing result : %s\",\n deserialize_object(response[\"result\"]))\n\n except Exception as e:\n logger.debug(\"[RUNNER] Caught task exception\")\n response = {\"task_id\": msg[\"task_id\"],\n \"exception\": serialize_object(e)}\n\n outgoing_q.put(response)\n\n logger.debug(\"[RUNNER] Terminating\")\n\n\nclass TurbineExecutor(ParslExecutor):\n ''' The Turbine executor. Bypass the Swift/T language and run on top off the Turbine engines\n in an MPI environment.\n\n Here's a simple diagram\n\n .. code:: python\n\n | Data | Executor | IPC | External Process(es)\n | Flow | | |\n Task | Kernel | | |\n +----->|-------->|------------>|outgoing_q -|-> Worker_Process\n | | | | | | |\n Parsl<---Fut-| | | | result exception\n ^ | | | | | |\n | | | Q_mngmnt | | V V\n | | | Thread<--|incoming_q<-|--- +---------+\n | | | | | |\n | | | | | |\n +----update_fut-----+\n\n '''\n\n def _queue_management_worker(self):\n ''' The queue management worker is responsible for listening to the incoming_q\n for task status messages and updating tasks with results/exceptions/updates\n\n It expects the following messages:\n\n .. 
code:: python\n\n {\n \"task_id\" : <task_id>\n \"result\" : serialized result object, if task succeeded\n ... more tags could be added later\n }\n\n {\n \"task_id\" : <task_id>\n \"exception\" : serialized exception object, on failure\n }\n\n We don't support these yet, but they could be added easily as heartbeat.\n\n .. code:: python\n\n {\n \"task_id\" : <task_id>\n \"cpu_stat\" : <>\n \"mem_stat\" : <>\n \"io_stat\" : <>\n \"started\" : tstamp\n }\n\n The None message is a die request.\n None\n\n '''\n\n while True:\n logger.debug(\"[MTHREAD] Management thread active\")\n try:\n msg = self.incoming_q.get(block=True, timeout=1)\n\n except queue.Empty as e:\n # timed out.\n pass\n\n except IOError as e:\n logger.debug(\"[MTHREAD] caught broken queue : %s : errno:%s\", e, e.errno)\n return\n\n except Exception as e:\n logger.debug(\"[MTHREAD] caught unknown exception : %s\", e)\n\n else:\n\n if msg is None:\n logger.debug(\"[MTHREAD] Got None\")\n return\n\n else:\n logger.debug(\"[MTHREAD] Got message : %s\", msg)\n task_fut = self.tasks[msg['task_id']]\n if 'result' in msg:\n result, _ = deserialize_object(msg['result'])\n task_fut.set_result(result)\n\n elif 'exception' in msg:\n exception, _ = deserialize_object(msg['exception'])\n task_fut.set_exception(exception)\n\n if not self.isAlive:\n break\n\n # When the executor gets lost, the weakref callback will wake up\n # the queue management thread.\n def weakref_cb(self, q=None):\n ''' We do not use this yet\n '''\n\n q.put(None)\n\n def _start_queue_management_thread(self):\n ''' Method to start the management thread as a daemon.\n Checks if a thread already exists, then starts it.\n Could be used later as a restart if the management thread dies.\n '''\n\n logging.debug(\"In _start %s\", \"*\" * 40)\n if self._queue_management_thread is None:\n logging.debug(\"Starting management thread \")\n self._queue_management_thread = threading.Thread(target=self._queue_management_worker)\n self._queue_management_thread.daemon = True\n self._queue_management_thread.start()\n\n else:\n logging.debug(\"Management thread already exists, returning\")\n\n def shutdown(self):\n ''' Shutdown method, to kill the threads and workers.\n '''\n\n self.isAlive = False\n logging.debug(\"Waking management thread\")\n self.incoming_q.put(None) # Wake up the thread\n self._queue_management_thread.join() # Force join\n logging.debug(\"Exiting thread\")\n self.worker.join()\n return True\n\n def __init__(self, swift_attribs=None, config=None, **kwargs):\n ''' Initialize the thread pool\n Trying to implement the emews model.\n\n Kwargs:\n - swift_attribs : Takes a dict of swift attribs. Fot future.\n\n '''\n self.config = config\n logger.debug(\"In __init__\")\n self.mp_manager = mp.Manager()\n self.outgoing_q = self.mp_manager.Queue()\n self.incoming_q = self.mp_manager.Queue()\n self.isAlive = True\n\n self._queue_management_thread = None\n self._start_queue_management_thread()\n logger.debug(\"Created management thread : %s\", self._queue_management_thread)\n\n self.worker = mp.Process(target=runner, args=(self.outgoing_q, self.incoming_q))\n self.worker.start()\n logger.debug(\"Created worker : %s\", self.worker)\n self.tasks = {}\n self._scaling_enabled = False\n\n def submit(self, func, *args, **kwargs):\n ''' Submits work to the the outgoing_q, an external process listens on this\n queue for new work. 
This method is simply pass through and behaves like a\n submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Args:\n - func (callable) : Callable function\n - *args (list) : List of arbitrary positional arguments.\n\n Kwargs:\n - **kwargs (dict) : A dictionary of arbitrary keyword args for func.\n\n Returns:\n Future\n '''\n task_id = uuid.uuid4()\n\n logger.debug(\"Before pushing to queue : func:%s func_args:%s\", func, args)\n\n self.tasks[task_id] = Future()\n\n fn_buf = pack_apply_message(func, args, kwargs,\n buffer_threshold=1024 * 1024,\n item_threshold=1024)\n\n msg = {\"task_id\": task_id,\n \"buffer\": fn_buf}\n\n # Post task to the the outgoing queue\n self.outgoing_q.put(msg)\n\n # Return the future\n return self.tasks[task_id]\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n This would be nice to have, and can be done\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n\nif __name__ == \"__main__\":\n\n print(\"Start\")\n turb_x = TurbineExecutor()\n print(\"Done\")\n", "path": "parsl/executors/swift_t.py"}]} | 4,018 | 216 |
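A small self-contained sketch (not parsl code) of why the record above fails and what the patch supplies: a subclass of an ABC that declares an abstract `scaling_enabled` property cannot be instantiated until the property is implemented. The class names here are invented for illustration.

```python
import abc


class ExecutorBase(abc.ABC):
    """Stand-in for an executor interface that requires a scaling flag."""

    @property
    @abc.abstractmethod
    def scaling_enabled(self):
        """Subclasses must report whether they support scaling."""


class FixedSizeExecutor(ExecutorBase):
    def __init__(self):
        self._scaling_enabled = False

    @property
    def scaling_enabled(self):
        # Providing this property is what makes the subclass instantiable.
        return self._scaling_enabled


FixedSizeExecutor()  # works once the abstract property is implemented

# Without the override, instantiation raises:
# TypeError: Can't instantiate abstract class ... with abstract methods scaling_enabled
```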
gh_patches_debug_32547 | rasdani/github-patches | git_diff | buildbot__buildbot-6996 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
There's a small window where events will be missed by reporters during reconfig
The code in `ReporterBase.reconfigService()` stops consuming events for all event keys and then starts consuming events for all event keys again. This is not necessary for any wanted event keys which are wanted in both old and new configuration. The code should stop consuming events for no longer wanted event keys, start consuming events for newly wanted event keys and leave the rest untouched.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/reporters/base.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import abc
17
18 from twisted.internet import defer
19 from twisted.python import log
20
21 from buildbot import config
22 from buildbot.reporters import utils
23 from buildbot.util import service
24 from buildbot.util import tuplematch
25
26 ENCODING = 'utf-8'
27
28
29 class ReporterBase(service.BuildbotService):
30 name = None
31 __meta__ = abc.ABCMeta
32
33 compare_attrs = ['generators']
34
35 def __init__(self, *args, **kwargs):
36 super().__init__(*args, **kwargs)
37 self.generators = None
38 self._event_consumers = []
39 self._pending_got_event_calls = {}
40
41 def checkConfig(self, generators):
42 if not isinstance(generators, list):
43 config.error('{}: generators argument must be a list')
44
45 for g in generators:
46 g.check()
47
48 if self.name is None:
49 self.name = self.__class__.__name__
50 for g in generators:
51 self.name += "_" + g.generate_name()
52
53 @defer.inlineCallbacks
54 def reconfigService(self, generators):
55
56 for consumer in self._event_consumers:
57 yield consumer.stopConsuming()
58 self._event_consumers = []
59
60 self.generators = generators
61
62 wanted_event_keys = set()
63 for g in self.generators:
64 wanted_event_keys.update(g.wanted_event_keys)
65
66 for key in sorted(list(wanted_event_keys)):
67 consumer = yield self.master.mq.startConsuming(self._got_event, key)
68 self._event_consumers.append(consumer)
69
70 @defer.inlineCallbacks
71 def stopService(self):
72 for consumer in self._event_consumers:
73 yield consumer.stopConsuming()
74 self._event_consumers = []
75
76 for pending_call in list(self._pending_got_event_calls.values()):
77 yield pending_call
78 self._pending_got_event_calls = {}
79
80 yield super().stopService()
81
82 def _does_generator_want_key(self, generator, key):
83 for filter in generator.wanted_event_keys:
84 if tuplematch.matchTuple(key, filter):
85 return True
86 return False
87
88 def _get_chain_key_for_event(self, key, msg):
89 if key[0] in ["builds", "buildrequests"]:
90 return ("buildrequestid", msg["buildrequestid"])
91 return None
92
93 @defer.inlineCallbacks
94 def _got_event(self, key, msg):
95 chain_key = self._get_chain_key_for_event(key, msg)
96 if chain_key is not None:
97 d = defer.Deferred()
98 pending_call = self._pending_got_event_calls.get(chain_key)
99 self._pending_got_event_calls[chain_key] = d
100 # Wait for previously pending call, if any, to ensure
101 # reports are sent out in the order events were queued.
102 if pending_call is not None:
103 yield pending_call
104
105 try:
106 reports = []
107 for g in self.generators:
108 if self._does_generator_want_key(g, key):
109 report = yield g.generate(self.master, self, key, msg)
110 if report is not None:
111 reports.append(report)
112
113 if reports:
114 yield self.sendMessage(reports)
115 except Exception as e:
116 log.err(e, 'Got exception when handling reporter events')
117
118 if chain_key is not None:
119 if self._pending_got_event_calls.get(chain_key) == d:
120 del self._pending_got_event_calls[chain_key]
121 d.callback(None) # This event is now fully handled
122
123 def getResponsibleUsersForBuild(self, master, buildid):
124 # Use library method but subclassers may want to override that
125 return utils.getResponsibleUsersForBuild(master, buildid)
126
127 @abc.abstractmethod
128 def sendMessage(self, reports):
129 pass
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py
--- a/master/buildbot/reporters/base.py
+++ b/master/buildbot/reporters/base.py
@@ -35,7 +35,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generators = None
- self._event_consumers = []
+ self._event_consumers = {}
self._pending_got_event_calls = {}
def checkConfig(self, generators):
@@ -52,26 +52,29 @@
@defer.inlineCallbacks
def reconfigService(self, generators):
-
- for consumer in self._event_consumers:
- yield consumer.stopConsuming()
- self._event_consumers = []
-
self.generators = generators
wanted_event_keys = set()
for g in self.generators:
wanted_event_keys.update(g.wanted_event_keys)
+ # Remove consumers for keys that are no longer wanted
+ for key in list(self._event_consumers.keys()):
+ if key not in wanted_event_keys:
+ yield self._event_consumers[key].stopConsuming()
+ del self._event_consumers[key]
+
+ # Add consumers for new keys
for key in sorted(list(wanted_event_keys)):
- consumer = yield self.master.mq.startConsuming(self._got_event, key)
- self._event_consumers.append(consumer)
+ if key not in self._event_consumers:
+ self._event_consumers[key] = \
+ yield self.master.mq.startConsuming(self._got_event, key)
@defer.inlineCallbacks
def stopService(self):
- for consumer in self._event_consumers:
+ for consumer in self._event_consumers.values():
yield consumer.stopConsuming()
- self._event_consumers = []
+ self._event_consumers = {}
for pending_call in list(self._pending_got_event_calls.values()):
yield pending_call
| {"golden_diff": "diff --git a/master/buildbot/reporters/base.py b/master/buildbot/reporters/base.py\n--- a/master/buildbot/reporters/base.py\n+++ b/master/buildbot/reporters/base.py\n@@ -35,7 +35,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n- self._event_consumers = []\n+ self._event_consumers = {}\n self._pending_got_event_calls = {}\n \n def checkConfig(self, generators):\n@@ -52,26 +52,29 @@\n \n @defer.inlineCallbacks\n def reconfigService(self, generators):\n-\n- for consumer in self._event_consumers:\n- yield consumer.stopConsuming()\n- self._event_consumers = []\n-\n self.generators = generators\n \n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n \n+ # Remove consumers for keys that are no longer wanted\n+ for key in list(self._event_consumers.keys()):\n+ if key not in wanted_event_keys:\n+ yield self._event_consumers[key].stopConsuming()\n+ del self._event_consumers[key]\n+\n+ # Add consumers for new keys\n for key in sorted(list(wanted_event_keys)):\n- consumer = yield self.master.mq.startConsuming(self._got_event, key)\n- self._event_consumers.append(consumer)\n+ if key not in self._event_consumers:\n+ self._event_consumers[key] = \\\n+ yield self.master.mq.startConsuming(self._got_event, key)\n \n @defer.inlineCallbacks\n def stopService(self):\n- for consumer in self._event_consumers:\n+ for consumer in self._event_consumers.values():\n yield consumer.stopConsuming()\n- self._event_consumers = []\n+ self._event_consumers = {}\n \n for pending_call in list(self._pending_got_event_calls.values()):\n yield pending_call\n", "issue": "There's a small window where events will be missed by reporters during reconfig\nThe code in `ReporterBase.reconfigService()` stops consuming events for all event keys and then starts consuming events for all event keys again. This is not necessary for any wanted event keys which are wanted in both old and new configuration. The code should stop consuming events for no longer wanted event keys, start consuming events for newly wanted event keys and leave the rest untouched.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\nfrom buildbot.util import tuplematch\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = []\n self._pending_got_event_calls = {}\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n for key in sorted(list(wanted_event_keys)):\n consumer = yield self.master.mq.startConsuming(self._got_event, key)\n self._event_consumers.append(consumer)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers:\n yield consumer.stopConsuming()\n self._event_consumers = []\n\n for pending_call in list(self._pending_got_event_calls.values()):\n yield pending_call\n self._pending_got_event_calls = {}\n\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n if tuplematch.matchTuple(key, filter):\n return True\n return False\n\n def _get_chain_key_for_event(self, key, msg):\n if key[0] in [\"builds\", \"buildrequests\"]:\n return (\"buildrequestid\", msg[\"buildrequestid\"])\n return None\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n chain_key = self._get_chain_key_for_event(key, msg)\n if chain_key is not None:\n d = defer.Deferred()\n pending_call = self._pending_got_event_calls.get(chain_key)\n self._pending_got_event_calls[chain_key] = d\n # Wait for previously pending call, if any, to ensure\n # reports are sent out in the order events were queued.\n if pending_call is not None:\n yield pending_call\n\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception when handling reporter events')\n\n if chain_key is not None:\n if self._pending_got_event_calls.get(chain_key) == d:\n del self._pending_got_event_calls[chain_key]\n d.callback(None) # This event is now fully handled\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}], 
"after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport abc\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot import config\nfrom buildbot.reporters import utils\nfrom buildbot.util import service\nfrom buildbot.util import tuplematch\n\nENCODING = 'utf-8'\n\n\nclass ReporterBase(service.BuildbotService):\n name = None\n __meta__ = abc.ABCMeta\n\n compare_attrs = ['generators']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.generators = None\n self._event_consumers = {}\n self._pending_got_event_calls = {}\n\n def checkConfig(self, generators):\n if not isinstance(generators, list):\n config.error('{}: generators argument must be a list')\n\n for g in generators:\n g.check()\n\n if self.name is None:\n self.name = self.__class__.__name__\n for g in generators:\n self.name += \"_\" + g.generate_name()\n\n @defer.inlineCallbacks\n def reconfigService(self, generators):\n self.generators = generators\n\n wanted_event_keys = set()\n for g in self.generators:\n wanted_event_keys.update(g.wanted_event_keys)\n\n # Remove consumers for keys that are no longer wanted\n for key in list(self._event_consumers.keys()):\n if key not in wanted_event_keys:\n yield self._event_consumers[key].stopConsuming()\n del self._event_consumers[key]\n\n # Add consumers for new keys\n for key in sorted(list(wanted_event_keys)):\n if key not in self._event_consumers:\n self._event_consumers[key] = \\\n yield self.master.mq.startConsuming(self._got_event, key)\n\n @defer.inlineCallbacks\n def stopService(self):\n for consumer in self._event_consumers.values():\n yield consumer.stopConsuming()\n self._event_consumers = {}\n\n for pending_call in list(self._pending_got_event_calls.values()):\n yield pending_call\n self._pending_got_event_calls = {}\n\n yield super().stopService()\n\n def _does_generator_want_key(self, generator, key):\n for filter in generator.wanted_event_keys:\n if tuplematch.matchTuple(key, filter):\n return True\n return False\n\n def _get_chain_key_for_event(self, key, msg):\n if key[0] in [\"builds\", \"buildrequests\"]:\n return (\"buildrequestid\", msg[\"buildrequestid\"])\n return None\n\n @defer.inlineCallbacks\n def _got_event(self, key, msg):\n chain_key = self._get_chain_key_for_event(key, msg)\n if chain_key is not None:\n d = defer.Deferred()\n pending_call = self._pending_got_event_calls.get(chain_key)\n self._pending_got_event_calls[chain_key] = d\n # Wait for previously pending call, if any, to ensure\n # reports are sent out in the order events were queued.\n if pending_call is not None:\n yield pending_call\n\n try:\n reports = []\n for g in self.generators:\n if self._does_generator_want_key(g, key):\n report = yield g.generate(self.master, self, key, msg)\n if report is not None:\n reports.append(report)\n\n if reports:\n 
yield self.sendMessage(reports)\n except Exception as e:\n log.err(e, 'Got exception when handling reporter events')\n\n if chain_key is not None:\n if self._pending_got_event_calls.get(chain_key) == d:\n del self._pending_got_event_calls[chain_key]\n d.callback(None) # This event is now fully handled\n\n def getResponsibleUsersForBuild(self, master, buildid):\n # Use library method but subclassers may want to override that\n return utils.getResponsibleUsersForBuild(master, buildid)\n\n @abc.abstractmethod\n def sendMessage(self, reports):\n pass\n", "path": "master/buildbot/reporters/base.py"}]} | 1,621 | 446 |
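A minimal, framework-free sketch of the consumer-reconciliation pattern applied by the golden diff above: keep active consumers in a dict keyed by event key, stop only consumers whose keys are no longer wanted, and start consumers only for newly wanted keys, so keys present in both the old and the new configuration are never interrupted. `Consumer` and `reconcile_consumers` are illustrative names for this sketch, not buildbot's real classes.

```
from typing import Callable, Dict, Set


class Consumer:
    """Illustrative stand-in for a message-queue consumer handle."""

    def __init__(self, key: str) -> None:
        self.key = key
        self.active = True

    def stop_consuming(self) -> None:
        self.active = False


def reconcile_consumers(
    consumers: Dict[str, Consumer],
    wanted_keys: Set[str],
    start: Callable[[str], Consumer],
) -> None:
    # Stop consumers whose keys are no longer wanted.
    for key in list(consumers):
        if key not in wanted_keys:
            consumers.pop(key).stop_consuming()
    # Start consumers only for newly wanted keys; keys wanted before and after
    # keep their existing consumer, so no events are missed during reconfig.
    for key in sorted(wanted_keys):
        if key not in consumers:
            consumers[key] = start(key)


registry: Dict[str, Consumer] = {}
reconcile_consumers(registry, {"builds", "buildrequests"}, Consumer)
reconcile_consumers(registry, {"builds", "workers"}, Consumer)
print(sorted(registry))  # ['builds', 'workers']
```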
gh_patches_debug_17794 | rasdani/github-patches | git_diff | wagtail__wagtail-7701 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stray checkbox on locked pages report
### Issue Summary
The Locked Pages report (and possibly others inheriting from PageReportView) has a stray additional column with a checkbox, presumably from the bulk actions feature, which causes the column headings to get out of step:

Thanks to @Tijani-Dia for reporting.
### Steps to Reproduce
1. Start a new project with `wagtail start myproject` / `./manage.py migrate` / `./manage.py createsuperuser`
2. Log in, edit the homepage, select 'Lock' from the action menu
3. Go to Reports -> Locked pages
* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
* Python version: 3.8.0
* Django version: 3.2.8
* Wagtail version: 2.15
* Browser version: Chrome 95.0.4638.54
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/views/pages/listing.py`
Content:
```
1 from django.conf import settings
2 from django.core.paginator import Paginator
3 from django.db.models import Count
4 from django.shortcuts import get_object_or_404, redirect
5 from django.template.response import TemplateResponse
6 from django.urls import reverse
7
8 from wagtail.admin.auth import user_has_any_page_permission, user_passes_test
9 from wagtail.admin.navigation import get_explorable_root_page
10 from wagtail.core import hooks
11 from wagtail.core.models import Page, UserPagePermissionsProxy
12
13
14 @user_passes_test(user_has_any_page_permission)
15 def index(request, parent_page_id=None):
16 if parent_page_id:
17 parent_page = get_object_or_404(Page, id=parent_page_id)
18 else:
19 parent_page = Page.get_first_root_node()
20
21 # This will always succeed because of the @user_passes_test above.
22 root_page = get_explorable_root_page(request.user)
23
24 # If this page isn't a descendant of the user's explorable root page,
25 # then redirect to that explorable root page instead.
26 if not (
27 parent_page.pk == root_page.pk
28 or parent_page.is_descendant_of(root_page)
29 ):
30 return redirect('wagtailadmin_explore', root_page.pk)
31
32 parent_page = parent_page.specific
33
34 user_perms = UserPagePermissionsProxy(request.user)
35 pages = (
36 parent_page.get_children().prefetch_related(
37 "content_type", "sites_rooted_here"
38 )
39 & user_perms.explorable_pages()
40 )
41
42 # Get page ordering
43 ordering = request.GET.get('ordering', '-latest_revision_created_at')
44 if ordering not in [
45 'title',
46 '-title',
47 'content_type',
48 '-content_type',
49 'live', '-live',
50 'latest_revision_created_at',
51 '-latest_revision_created_at',
52 'ord'
53 ]:
54 ordering = '-latest_revision_created_at'
55
56 if ordering == 'ord':
57 # preserve the native ordering from get_children()
58 pass
59 elif ordering == 'latest_revision_created_at':
60 # order by oldest revision first.
61 # Special case NULL entries - these should go at the top of the list.
62 # Do this by annotating with Count('latest_revision_created_at'),
63 # which returns 0 for these
64 pages = pages.annotate(
65 null_position=Count('latest_revision_created_at')
66 ).order_by('null_position', 'latest_revision_created_at')
67 elif ordering == '-latest_revision_created_at':
68 # order by oldest revision first.
69 # Special case NULL entries - these should go at the end of the list.
70 pages = pages.annotate(
71 null_position=Count('latest_revision_created_at')
72 ).order_by('-null_position', '-latest_revision_created_at')
73 else:
74 pages = pages.order_by(ordering)
75
76 # Don't paginate if sorting by page order - all pages must be shown to
77 # allow drag-and-drop reordering
78 do_paginate = ordering != 'ord'
79
80 # We want specific page instances, but do not need streamfield values here
81 pages = pages.defer_streamfields().specific()
82
83 # allow hooks defer_streamfieldsyset
84 for hook in hooks.get_hooks('construct_explorer_page_queryset'):
85 pages = hook(parent_page, pages, request)
86
87 # Annotate queryset with various states to be used later for performance optimisations
88 if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):
89 pages = pages.prefetch_workflow_states()
90
91 pages = pages.annotate_site_root_state().annotate_approved_schedule()
92
93 # Pagination
94 if do_paginate:
95 paginator = Paginator(pages, per_page=50)
96 pages = paginator.get_page(request.GET.get('p'))
97
98 context = {
99 'parent_page': parent_page.specific,
100 'ordering': ordering,
101 'pagination_query_params': "ordering=%s" % ordering,
102 'pages': pages,
103 'do_paginate': do_paginate,
104 'locale': None,
105 'translations': [],
106 'show_ordering_column': request.GET.get('ordering') == 'ord'
107 }
108
109 if getattr(settings, 'WAGTAIL_I18N_ENABLED', False) and not parent_page.is_root():
110 context.update({
111 'locale': parent_page.locale,
112 'translations': [
113 {
114 'locale': translation.locale,
115 'url': reverse('wagtailadmin_explore', args=[translation.id]),
116 }
117 for translation in parent_page.get_translations().only('id', 'locale').select_related('locale')
118 ],
119 })
120
121 return TemplateResponse(request, 'wagtailadmin/pages/index.html', context)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/views/pages/listing.py b/wagtail/admin/views/pages/listing.py
--- a/wagtail/admin/views/pages/listing.py
+++ b/wagtail/admin/views/pages/listing.py
@@ -95,6 +95,8 @@
paginator = Paginator(pages, per_page=50)
pages = paginator.get_page(request.GET.get('p'))
+ show_ordering_column = request.GET.get('ordering') == 'ord'
+
context = {
'parent_page': parent_page.specific,
'ordering': ordering,
@@ -103,7 +105,8 @@
'do_paginate': do_paginate,
'locale': None,
'translations': [],
- 'show_ordering_column': request.GET.get('ordering') == 'ord'
+ 'show_ordering_column': show_ordering_column,
+ 'show_bulk_actions': not show_ordering_column,
}
if getattr(settings, 'WAGTAIL_I18N_ENABLED', False) and not parent_page.is_root():
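Condensed to its essence, the patch above computes the ordering flag once and derives the bulk-actions flag as its negation, so the listing template can hide the checkbox column whenever manual ordering is active (the template change itself is not part of this record). A minimal standalone sketch of just that context-building step, with the surrounding Wagtail view code omitted:

```
def build_listing_flags(ordering_param):
    # True only when the user explicitly sorts by page order ('ord').
    show_ordering_column = ordering_param == 'ord'
    return {
        'show_ordering_column': show_ordering_column,
        # The drag-handle column and bulk-action checkboxes never appear together.
        'show_bulk_actions': not show_ordering_column,
    }


print(build_listing_flags('ord'))     # {'show_ordering_column': True, 'show_bulk_actions': False}
print(build_listing_flags('-title'))  # {'show_ordering_column': False, 'show_bulk_actions': True}
```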
| {"golden_diff": "diff --git a/wagtail/admin/views/pages/listing.py b/wagtail/admin/views/pages/listing.py\n--- a/wagtail/admin/views/pages/listing.py\n+++ b/wagtail/admin/views/pages/listing.py\n@@ -95,6 +95,8 @@\n paginator = Paginator(pages, per_page=50)\n pages = paginator.get_page(request.GET.get('p'))\n \n+ show_ordering_column = request.GET.get('ordering') == 'ord'\n+\n context = {\n 'parent_page': parent_page.specific,\n 'ordering': ordering,\n@@ -103,7 +105,8 @@\n 'do_paginate': do_paginate,\n 'locale': None,\n 'translations': [],\n- 'show_ordering_column': request.GET.get('ordering') == 'ord'\n+ 'show_ordering_column': show_ordering_column,\n+ 'show_bulk_actions': not show_ordering_column,\n }\n \n if getattr(settings, 'WAGTAIL_I18N_ENABLED', False) and not parent_page.is_root():\n", "issue": "Stray checkbox on locked pages report\n### Issue Summary\r\n\r\nThe Locked Pages report (and possibly others inheriting from PageReportView) has a stray additional column with a checkbox, presumably from the bulk actions feature, which causes the column headings to get out of step:\r\n\r\n\r\n\r\nThanks to @Tijani-Dia for reporting.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject` / `./manage.py migrate` / `./manage.py createsuperuser`\r\n2. Log in, edit the homepage, select 'Lock' from the action menu\r\n3. Go to Reports -> Locked pages\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n\r\n### Technical details\r\n\r\n* Python version: 3.8.0\r\n* Django version: 3.2.8\r\n* Wagtail version: 2.15\r\n* Browser version: Chrome 95.0.4638.54\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom wagtail.admin.auth import user_has_any_page_permission, user_passes_test\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.core import hooks\nfrom wagtail.core.models import Page, UserPagePermissionsProxy\n\n\n@user_passes_test(user_has_any_page_permission)\ndef index(request, parent_page_id=None):\n if parent_page_id:\n parent_page = get_object_or_404(Page, id=parent_page_id)\n else:\n parent_page = Page.get_first_root_node()\n\n # This will always succeed because of the @user_passes_test above.\n root_page = get_explorable_root_page(request.user)\n\n # If this page isn't a descendant of the user's explorable root page,\n # then redirect to that explorable root page instead.\n if not (\n parent_page.pk == root_page.pk\n or parent_page.is_descendant_of(root_page)\n ):\n return redirect('wagtailadmin_explore', root_page.pk)\n\n parent_page = parent_page.specific\n\n user_perms = UserPagePermissionsProxy(request.user)\n pages = (\n parent_page.get_children().prefetch_related(\n \"content_type\", \"sites_rooted_here\"\n )\n & user_perms.explorable_pages()\n )\n\n # Get page ordering\n ordering = request.GET.get('ordering', '-latest_revision_created_at')\n if ordering not in [\n 'title',\n '-title',\n 'content_type',\n '-content_type',\n 'live', '-live',\n 'latest_revision_created_at',\n '-latest_revision_created_at',\n 'ord'\n ]:\n ordering = '-latest_revision_created_at'\n\n if ordering == 'ord':\n # preserve the native ordering from get_children()\n pass\n elif ordering == 'latest_revision_created_at':\n # order by oldest 
revision first.\n # Special case NULL entries - these should go at the top of the list.\n # Do this by annotating with Count('latest_revision_created_at'),\n # which returns 0 for these\n pages = pages.annotate(\n null_position=Count('latest_revision_created_at')\n ).order_by('null_position', 'latest_revision_created_at')\n elif ordering == '-latest_revision_created_at':\n # order by oldest revision first.\n # Special case NULL entries - these should go at the end of the list.\n pages = pages.annotate(\n null_position=Count('latest_revision_created_at')\n ).order_by('-null_position', '-latest_revision_created_at')\n else:\n pages = pages.order_by(ordering)\n\n # Don't paginate if sorting by page order - all pages must be shown to\n # allow drag-and-drop reordering\n do_paginate = ordering != 'ord'\n\n # We want specific page instances, but do not need streamfield values here\n pages = pages.defer_streamfields().specific()\n\n # allow hooks defer_streamfieldsyset\n for hook in hooks.get_hooks('construct_explorer_page_queryset'):\n pages = hook(parent_page, pages, request)\n\n # Annotate queryset with various states to be used later for performance optimisations\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n pages = pages.prefetch_workflow_states()\n\n pages = pages.annotate_site_root_state().annotate_approved_schedule()\n\n # Pagination\n if do_paginate:\n paginator = Paginator(pages, per_page=50)\n pages = paginator.get_page(request.GET.get('p'))\n\n context = {\n 'parent_page': parent_page.specific,\n 'ordering': ordering,\n 'pagination_query_params': \"ordering=%s\" % ordering,\n 'pages': pages,\n 'do_paginate': do_paginate,\n 'locale': None,\n 'translations': [],\n 'show_ordering_column': request.GET.get('ordering') == 'ord'\n }\n\n if getattr(settings, 'WAGTAIL_I18N_ENABLED', False) and not parent_page.is_root():\n context.update({\n 'locale': parent_page.locale,\n 'translations': [\n {\n 'locale': translation.locale,\n 'url': reverse('wagtailadmin_explore', args=[translation.id]),\n }\n for translation in parent_page.get_translations().only('id', 'locale').select_related('locale')\n ],\n })\n\n return TemplateResponse(request, 'wagtailadmin/pages/index.html', context)\n", "path": "wagtail/admin/views/pages/listing.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom wagtail.admin.auth import user_has_any_page_permission, user_passes_test\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.core import hooks\nfrom wagtail.core.models import Page, UserPagePermissionsProxy\n\n\n@user_passes_test(user_has_any_page_permission)\ndef index(request, parent_page_id=None):\n if parent_page_id:\n parent_page = get_object_or_404(Page, id=parent_page_id)\n else:\n parent_page = Page.get_first_root_node()\n\n # This will always succeed because of the @user_passes_test above.\n root_page = get_explorable_root_page(request.user)\n\n # If this page isn't a descendant of the user's explorable root page,\n # then redirect to that explorable root page instead.\n if not (\n parent_page.pk == root_page.pk\n or parent_page.is_descendant_of(root_page)\n ):\n return redirect('wagtailadmin_explore', root_page.pk)\n\n parent_page = parent_page.specific\n\n user_perms = UserPagePermissionsProxy(request.user)\n pages = (\n 
parent_page.get_children().prefetch_related(\n \"content_type\", \"sites_rooted_here\"\n )\n & user_perms.explorable_pages()\n )\n\n # Get page ordering\n ordering = request.GET.get('ordering', '-latest_revision_created_at')\n if ordering not in [\n 'title',\n '-title',\n 'content_type',\n '-content_type',\n 'live', '-live',\n 'latest_revision_created_at',\n '-latest_revision_created_at',\n 'ord'\n ]:\n ordering = '-latest_revision_created_at'\n\n if ordering == 'ord':\n # preserve the native ordering from get_children()\n pass\n elif ordering == 'latest_revision_created_at':\n # order by oldest revision first.\n # Special case NULL entries - these should go at the top of the list.\n # Do this by annotating with Count('latest_revision_created_at'),\n # which returns 0 for these\n pages = pages.annotate(\n null_position=Count('latest_revision_created_at')\n ).order_by('null_position', 'latest_revision_created_at')\n elif ordering == '-latest_revision_created_at':\n # order by oldest revision first.\n # Special case NULL entries - these should go at the end of the list.\n pages = pages.annotate(\n null_position=Count('latest_revision_created_at')\n ).order_by('-null_position', '-latest_revision_created_at')\n else:\n pages = pages.order_by(ordering)\n\n # Don't paginate if sorting by page order - all pages must be shown to\n # allow drag-and-drop reordering\n do_paginate = ordering != 'ord'\n\n # We want specific page instances, but do not need streamfield values here\n pages = pages.defer_streamfields().specific()\n\n # allow hooks defer_streamfieldsyset\n for hook in hooks.get_hooks('construct_explorer_page_queryset'):\n pages = hook(parent_page, pages, request)\n\n # Annotate queryset with various states to be used later for performance optimisations\n if getattr(settings, 'WAGTAIL_WORKFLOW_ENABLED', True):\n pages = pages.prefetch_workflow_states()\n\n pages = pages.annotate_site_root_state().annotate_approved_schedule()\n\n # Pagination\n if do_paginate:\n paginator = Paginator(pages, per_page=50)\n pages = paginator.get_page(request.GET.get('p'))\n\n show_ordering_column = request.GET.get('ordering') == 'ord'\n\n context = {\n 'parent_page': parent_page.specific,\n 'ordering': ordering,\n 'pagination_query_params': \"ordering=%s\" % ordering,\n 'pages': pages,\n 'do_paginate': do_paginate,\n 'locale': None,\n 'translations': [],\n 'show_ordering_column': show_ordering_column,\n 'show_bulk_actions': not show_ordering_column,\n }\n\n if getattr(settings, 'WAGTAIL_I18N_ENABLED', False) and not parent_page.is_root():\n context.update({\n 'locale': parent_page.locale,\n 'translations': [\n {\n 'locale': translation.locale,\n 'url': reverse('wagtailadmin_explore', args=[translation.id]),\n }\n for translation in parent_page.get_translations().only('id', 'locale').select_related('locale')\n ],\n })\n\n return TemplateResponse(request, 'wagtailadmin/pages/index.html', context)\n", "path": "wagtail/admin/views/pages/listing.py"}]} | 1,793 | 229 |
gh_patches_debug_30521 | rasdani/github-patches | git_diff | sublimelsp__LSP-942 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the LspTrimCompletionCommand
Remove the LspTrimCompletionCommand as it is no longer used.
Forced-break in hover popup can break syntax highlighting
Using: OSX / typescript-language-server
The line-break that LSP forces in the popup can cause syntax highlighting to break, for example when it splits a plain string in JS syntax.
<img width="512" alt="line-break" src="https://user-images.githubusercontent.com/153197/72525594-cfa7ff00-3864-11ea-9e8a-c183e07995a1.png">
Notice that in the screenshot the whole string should have a yellow color. Syntax highlighting breaks because a line break within a string is not valid syntax.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/completion.py`
Content:
```
1 import sublime
2 import sublime_plugin
3
4 from .core.protocol import Request, Range, InsertTextFormat
5 from .core.settings import settings, client_configs
6 from .core.logging import debug
7 from .core.completion import parse_completion_response, format_completion
8 from .core.registry import session_for_view, client_from_session, LSPViewEventListener
9 from .core.configurations import is_supported_syntax
10 from .core.sessions import Session
11 from .core.edit import parse_text_edit
12 from .core.views import range_to_region
13 from .core.typing import Any, List, Dict, Tuple, Optional, Union
14 from .core.views import text_document_position_params
15 from .core.restore_lines import RestoreLines
16
17
18 class LspSelectCompletionItemCommand(sublime_plugin.TextCommand):
19 def run(self, edit: Any, item: Any, restore_lines_dict: dict) -> None:
20 insert_text_format = item.get("insertTextFormat")
21
22 text_edit = item.get('textEdit')
23 if text_edit:
24 # restore the lines
25 # so we don't have to calculate the offset for the textEdit range
26 restore_lines = RestoreLines.from_dict(restore_lines_dict)
27 restore_lines.restore_lines(edit, self.view)
28
29 new_text = text_edit.get('newText')
30
31 range = Range.from_lsp(text_edit['range'])
32 edit_region = range_to_region(range, self.view)
33
34 # calculate offset by comparing cursor position with edit_region.begin.
35 # by applying the offset to all selections
36 # the TextEdit becomes valid for all selections
37 cursor = self.view.sel()[0].begin() # type: int
38
39 offset_start = cursor - edit_region.begin()
40 offset_length = edit_region.end() - edit_region.begin()
41
42 # erease regions from bottom to top
43 for sel in reversed(self.view.sel()):
44 begin = sel.begin() - offset_start
45 end = begin + offset_length
46 r = sublime.Region(begin, end)
47 self.view.erase(edit, r)
48
49 if insert_text_format == InsertTextFormat.Snippet:
50 self.view.run_command("insert_snippet", {"contents": new_text})
51 else:
52 # insert text from bottom to top
53 for sel in reversed(self.view.sel()):
54 self.view.insert(edit, sel.begin(), new_text)
55 else:
56 completion = item.get('insertText') or item.get('label') or ""
57 if insert_text_format == InsertTextFormat.Snippet:
58 self.view.run_command("insert_snippet", {"contents": completion})
59 else:
60 for sel in self.view.sel():
61 self.view.insert(edit, sel.begin(), completion)
62
63 # import statements, etc. some servers only return these after a resolve.
64 additional_edits = item.get('additionalTextEdits')
65 if additional_edits:
66 self.apply_additional_edits(additional_edits)
67 else:
68 self.do_resolve(item)
69
70 def do_resolve(self, item: dict) -> None:
71 session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())
72 if not session:
73 return
74
75 client = client_from_session(session)
76 if not client:
77 return
78
79 completion_provider = session.get_capability('completionProvider')
80 has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)
81 if has_resolve_provider:
82 client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)
83
84 def handle_resolve_response(self, response: Optional[dict]) -> None:
85 if response:
86 additional_edits = response.get('additionalTextEdits')
87 if additional_edits:
88 self.apply_additional_edits(additional_edits)
89
90 def apply_additional_edits(self, additional_edits: List[dict]) -> None:
91 edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)
92 debug('applying additional edits:', edits)
93 self.view.run_command("lsp_apply_document_edit", {'changes': edits})
94 sublime.status_message('Applied additional edits for completion')
95
96
97 class LspTrimCompletionCommand(sublime_plugin.TextCommand):
98 def run(self, edit: sublime.Edit, range: Optional[Tuple[int, int]] = None) -> None:
99 if range:
100 start, end = range
101 region = sublime.Region(start, end)
102 self.view.erase(edit, region)
103
104
105 class CompletionHandler(LSPViewEventListener):
106 def __init__(self, view: sublime.View) -> None:
107 super().__init__(view)
108 self.initialized = False
109 self.enabled = False
110
111 @classmethod
112 def is_applicable(cls, view_settings: dict) -> bool:
113 if 'completion' in settings.disabled_capabilities:
114 return False
115
116 syntax = view_settings.get('syntax')
117 return is_supported_syntax(syntax, client_configs.all) if syntax else False
118
119 def initialize(self) -> None:
120 self.initialized = True
121 session = session_for_view(self.view, 'completionProvider')
122 if session:
123 completionProvider = session.get_capability('completionProvider') or dict() # type: dict
124 # A language server may have an empty dict as CompletionOptions. In that case,
125 # no trigger characters will be registered but we'll still respond to Sublime's
126 # usual query for completions. So the explicit check for None is necessary.
127 self.enabled = True
128
129 trigger_chars = completionProvider.get(
130 'triggerCharacters') or []
131 if trigger_chars:
132 self.register_trigger_chars(session, trigger_chars)
133
134 def _view_language(self, config_name: str) -> Optional[str]:
135 languages = self.view.settings().get('lsp_language')
136 return languages.get(config_name) if languages else None
137
138 def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:
139 completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]
140 view_language = self._view_language(session.config.name)
141 if view_language:
142 for language in session.config.languages:
143 if language.id == view_language:
144 for scope in language.scopes:
145 # debug("registering", trigger_chars, "for", scope)
146 scope_trigger = next(
147 (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),
148 None
149 )
150 if not scope_trigger: # do not override user's trigger settings.
151 completion_triggers.append({
152 'characters': "".join(trigger_chars),
153 'selector': scope
154 })
155
156 self.view.settings().set('auto_complete_triggers', completion_triggers)
157
158 def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:
159 if not self.initialized:
160 self.initialize()
161
162 if not self.enabled:
163 return None
164
165 completion_list = sublime.CompletionList()
166
167 self.do_request(completion_list, locations)
168
169 return completion_list
170
171 def do_request(self, completion_list: sublime.CompletionList, locations: List[int]) -> None:
172 # don't store client so we can handle restarts
173 client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))
174 if not client:
175 return
176
177 # save lines to restore them later (only when selecting a completion item with a TextEdit)
178 restore_lines = RestoreLines()
179 restore_lines.save_lines(locations, self.view)
180
181 self.manager.documents.purge_changes(self.view)
182 document_position = text_document_position_params(self.view, locations[0])
183 client.send_request(
184 Request.complete(document_position),
185 lambda res: self.handle_response(res, completion_list, restore_lines),
186 lambda res: self.handle_error(res, completion_list))
187
188 def handle_response(self, response: Optional[Union[dict, List]],
189 completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:
190 response_items, response_incomplete = parse_completion_response(response)
191 items = list(format_completion(item, restore_lines) for item in response_items)
192
193 flags = 0
194 if settings.only_show_lsp_completions:
195 flags |= sublime.INHIBIT_WORD_COMPLETIONS
196 flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS
197
198 if response_incomplete:
199 flags |= sublime.DYNAMIC_COMPLETIONS
200 completion_list.set_completions(items, flags)
201
202 def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:
203 completion_list.set_completions([])
204 sublime.status_message('Completion error: ' + str(error.get('message')))
205
```
Path: `boot.py`
Content:
```
1 # Please keep this list sorted (Edit -> Sort Lines)
2 from .plugin.code_actions import LspCodeActionBulbListener
3 from .plugin.code_actions import LspCodeActionsCommand
4 from .plugin.color import LspColorListener
5 from .plugin.completion import CompletionHandler
6 from .plugin.completion import LspSelectCompletionItemCommand
7 from .plugin.completion import LspTrimCompletionCommand
8 from .plugin.configuration import LspDisableLanguageServerGloballyCommand
9 from .plugin.configuration import LspDisableLanguageServerInProjectCommand
10 from .plugin.configuration import LspEnableLanguageServerGloballyCommand
11 from .plugin.configuration import LspEnableLanguageServerInProjectCommand
12 from .plugin.core.documents import DocumentSyncListener
13 from .plugin.core.main import shutdown as plugin_unloaded
14 from .plugin.core.main import startup as plugin_loaded
15 from .plugin.core.panels import LspClearPanelCommand
16 from .plugin.core.panels import LspUpdatePanelCommand
17 from .plugin.core.panels import LspUpdateServerPanelCommand
18 from .plugin.core.registry import LspRestartClientCommand
19 from .plugin.diagnostics import DiagnosticsCursorListener
20 from .plugin.diagnostics import LspClearDiagnosticsCommand
21 from .plugin.diagnostics import LspHideDiagnosticCommand
22 from .plugin.diagnostics import LspNextDiagnosticCommand
23 from .plugin.diagnostics import LspPreviousDiagnosticCommand
24 from .plugin.edit import LspApplyDocumentEditCommand
25 from .plugin.edit import LspApplyWorkspaceEditCommand
26 from .plugin.execute_command import LspExecuteCommand
27 from .plugin.formatting import FormatOnSaveListener
28 from .plugin.formatting import LspFormatDocumentCommand
29 from .plugin.formatting import LspFormatDocumentRangeCommand
30 from .plugin.goto import LspSymbolDeclarationCommand
31 from .plugin.goto import LspSymbolDefinitionCommand
32 from .plugin.goto import LspSymbolImplementationCommand
33 from .plugin.goto import LspSymbolTypeDefinitionCommand
34 from .plugin.highlights import DocumentHighlightListener
35 from .plugin.hover import HoverHandler
36 from .plugin.hover import LspHoverCommand
37 from .plugin.panels import LspShowDiagnosticsPanelCommand
38 from .plugin.panels import LspToggleServerPanelCommand
39 from .plugin.references import LspSymbolReferencesCommand
40 from .plugin.rename import LspSymbolRenameCommand
41 from .plugin.signature_help import SignatureHelpListener
42 from .plugin.symbols import LspDocumentSymbolsCommand
43 from .plugin.workspace_symbol import LspWorkspaceSymbolsCommand
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boot.py b/boot.py
--- a/boot.py
+++ b/boot.py
@@ -4,7 +4,6 @@
from .plugin.color import LspColorListener
from .plugin.completion import CompletionHandler
from .plugin.completion import LspSelectCompletionItemCommand
-from .plugin.completion import LspTrimCompletionCommand
from .plugin.configuration import LspDisableLanguageServerGloballyCommand
from .plugin.configuration import LspDisableLanguageServerInProjectCommand
from .plugin.configuration import LspEnableLanguageServerGloballyCommand
diff --git a/plugin/completion.py b/plugin/completion.py
--- a/plugin/completion.py
+++ b/plugin/completion.py
@@ -10,7 +10,7 @@
from .core.sessions import Session
from .core.edit import parse_text_edit
from .core.views import range_to_region
-from .core.typing import Any, List, Dict, Tuple, Optional, Union
+from .core.typing import Any, List, Dict, Optional, Union
from .core.views import text_document_position_params
from .core.restore_lines import RestoreLines
@@ -94,14 +94,6 @@
sublime.status_message('Applied additional edits for completion')
-class LspTrimCompletionCommand(sublime_plugin.TextCommand):
- def run(self, edit: sublime.Edit, range: Optional[Tuple[int, int]] = None) -> None:
- if range:
- start, end = range
- region = sublime.Region(start, end)
- self.view.erase(edit, region)
-
-
class CompletionHandler(LSPViewEventListener):
def __init__(self, view: sublime.View) -> None:
super().__init__(view)
| {"golden_diff": "diff --git a/boot.py b/boot.py\n--- a/boot.py\n+++ b/boot.py\n@@ -4,7 +4,6 @@\n from .plugin.color import LspColorListener\n from .plugin.completion import CompletionHandler\n from .plugin.completion import LspSelectCompletionItemCommand\n-from .plugin.completion import LspTrimCompletionCommand\n from .plugin.configuration import LspDisableLanguageServerGloballyCommand\n from .plugin.configuration import LspDisableLanguageServerInProjectCommand\n from .plugin.configuration import LspEnableLanguageServerGloballyCommand\ndiff --git a/plugin/completion.py b/plugin/completion.py\n--- a/plugin/completion.py\n+++ b/plugin/completion.py\n@@ -10,7 +10,7 @@\n from .core.sessions import Session\n from .core.edit import parse_text_edit\n from .core.views import range_to_region\n-from .core.typing import Any, List, Dict, Tuple, Optional, Union\n+from .core.typing import Any, List, Dict, Optional, Union\n from .core.views import text_document_position_params\n from .core.restore_lines import RestoreLines\n \n@@ -94,14 +94,6 @@\n sublime.status_message('Applied additional edits for completion')\n \n \n-class LspTrimCompletionCommand(sublime_plugin.TextCommand):\n- def run(self, edit: sublime.Edit, range: Optional[Tuple[int, int]] = None) -> None:\n- if range:\n- start, end = range\n- region = sublime.Region(start, end)\n- self.view.erase(edit, region)\n-\n-\n class CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n", "issue": "Remove the LspTrimCompletionCommand\nRemove the LspTrimCompletionCommand as it is no longer used.\nForced-break in hover popup can break syntax highlighting\nUsing: OSX / typescript-language-server\r\n\r\nLine-break, that LSP forces in the popup, can cause syntax highlighting to break. For example, if breaking a plain string in JS syntax.\r\n\r\n<img width=\"512\" alt=\"line-break\" src=\"https://user-images.githubusercontent.com/153197/72525594-cfa7ff00-3864-11ea-9e8a-c183e07995a1.png\">\r\n\r\nNotice that in the screenshot the whole string should have a yellow color. 
Syntax highlighting breaks because line break within a string is not a valid syntax.\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\nfrom .core.protocol import Request, Range, InsertTextFormat\nfrom .core.settings import settings, client_configs\nfrom .core.logging import debug\nfrom .core.completion import parse_completion_response, format_completion\nfrom .core.registry import session_for_view, client_from_session, LSPViewEventListener\nfrom .core.configurations import is_supported_syntax\nfrom .core.sessions import Session\nfrom .core.edit import parse_text_edit\nfrom .core.views import range_to_region\nfrom .core.typing import Any, List, Dict, Tuple, Optional, Union\nfrom .core.views import text_document_position_params\nfrom .core.restore_lines import RestoreLines\n\n\nclass LspSelectCompletionItemCommand(sublime_plugin.TextCommand):\n def run(self, edit: Any, item: Any, restore_lines_dict: dict) -> None:\n insert_text_format = item.get(\"insertTextFormat\")\n\n text_edit = item.get('textEdit')\n if text_edit:\n # restore the lines\n # so we don't have to calculate the offset for the textEdit range\n restore_lines = RestoreLines.from_dict(restore_lines_dict)\n restore_lines.restore_lines(edit, self.view)\n\n new_text = text_edit.get('newText')\n\n range = Range.from_lsp(text_edit['range'])\n edit_region = range_to_region(range, self.view)\n\n # calculate offset by comparing cursor position with edit_region.begin.\n # by applying the offset to all selections\n # the TextEdit becomes valid for all selections\n cursor = self.view.sel()[0].begin() # type: int\n\n offset_start = cursor - edit_region.begin()\n offset_length = edit_region.end() - edit_region.begin()\n\n # erease regions from bottom to top\n for sel in reversed(self.view.sel()):\n begin = sel.begin() - offset_start\n end = begin + offset_length\n r = sublime.Region(begin, end)\n self.view.erase(edit, r)\n\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n # insert text from bottom to top\n for sel in reversed(self.view.sel()):\n self.view.insert(edit, sel.begin(), new_text)\n else:\n completion = item.get('insertText') or item.get('label') or \"\"\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": completion})\n else:\n for sel in self.view.sel():\n self.view.insert(edit, sel.begin(), completion)\n\n # import statements, etc. 
some servers only return these after a resolve.\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n else:\n self.do_resolve(item)\n\n def do_resolve(self, item: dict) -> None:\n session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())\n if not session:\n return\n\n client = client_from_session(session)\n if not client:\n return\n\n completion_provider = session.get_capability('completionProvider')\n has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)\n if has_resolve_provider:\n client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n\n def handle_resolve_response(self, response: Optional[dict]) -> None:\n if response:\n additional_edits = response.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n\n def apply_additional_edits(self, additional_edits: List[dict]) -> None:\n edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)\n debug('applying additional edits:', edits)\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n sublime.status_message('Applied additional edits for completion')\n\n\nclass LspTrimCompletionCommand(sublime_plugin.TextCommand):\n def run(self, edit: sublime.Edit, range: Optional[Tuple[int, int]] = None) -> None:\n if range:\n start, end = range\n region = sublime.Region(start, end)\n self.view.erase(edit, region)\n\n\nclass CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'completion' in settings.disabled_capabilities:\n return False\n\n syntax = view_settings.get('syntax')\n return is_supported_syntax(syntax, client_configs.all) if syntax else False\n\n def initialize(self) -> None:\n self.initialized = True\n session = session_for_view(self.view, 'completionProvider')\n if session:\n completionProvider = session.get_capability('completionProvider') or dict() # type: dict\n # A language server may have an empty dict as CompletionOptions. In that case,\n # no trigger characters will be registered but we'll still respond to Sublime's\n # usual query for completions. 
So the explicit check for None is necessary.\n self.enabled = True\n\n trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n\n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if not scope_trigger: # do not override user's trigger settings.\n completion_triggers.append({\n 'characters': \"\".join(trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n self.initialize()\n\n if not self.enabled:\n return None\n\n completion_list = sublime.CompletionList()\n\n self.do_request(completion_list, locations)\n\n return completion_list\n\n def do_request(self, completion_list: sublime.CompletionList, locations: List[int]) -> None:\n # don't store client so we can handle restarts\n client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))\n if not client:\n return\n\n # save lines to restore them later (only when selecting a completion item with a TextEdit)\n restore_lines = RestoreLines()\n restore_lines.save_lines(locations, self.view)\n\n self.manager.documents.purge_changes(self.view)\n document_position = text_document_position_params(self.view, locations[0])\n client.send_request(\n Request.complete(document_position),\n lambda res: self.handle_response(res, completion_list, restore_lines),\n lambda res: self.handle_error(res, completion_list))\n\n def handle_response(self, response: Optional[Union[dict, List]],\n completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:\n response_items, response_incomplete = parse_completion_response(response)\n items = list(format_completion(item, restore_lines) for item in response_items)\n\n flags = 0\n if settings.only_show_lsp_completions:\n flags |= sublime.INHIBIT_WORD_COMPLETIONS\n flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n if response_incomplete:\n flags |= sublime.DYNAMIC_COMPLETIONS\n completion_list.set_completions(items, flags)\n\n def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:\n completion_list.set_completions([])\n sublime.status_message('Completion error: ' + str(error.get('message')))\n", "path": "plugin/completion.py"}, {"content": "# Please keep this list sorted (Edit -> Sort Lines)\nfrom .plugin.code_actions import LspCodeActionBulbListener\nfrom .plugin.code_actions import LspCodeActionsCommand\nfrom .plugin.color import LspColorListener\nfrom .plugin.completion import CompletionHandler\nfrom .plugin.completion import LspSelectCompletionItemCommand\nfrom .plugin.completion import LspTrimCompletionCommand\nfrom .plugin.configuration import 
LspDisableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspDisableLanguageServerInProjectCommand\nfrom .plugin.configuration import LspEnableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspEnableLanguageServerInProjectCommand\nfrom .plugin.core.documents import DocumentSyncListener\nfrom .plugin.core.main import shutdown as plugin_unloaded\nfrom .plugin.core.main import startup as plugin_loaded\nfrom .plugin.core.panels import LspClearPanelCommand\nfrom .plugin.core.panels import LspUpdatePanelCommand\nfrom .plugin.core.panels import LspUpdateServerPanelCommand\nfrom .plugin.core.registry import LspRestartClientCommand\nfrom .plugin.diagnostics import DiagnosticsCursorListener\nfrom .plugin.diagnostics import LspClearDiagnosticsCommand\nfrom .plugin.diagnostics import LspHideDiagnosticCommand\nfrom .plugin.diagnostics import LspNextDiagnosticCommand\nfrom .plugin.diagnostics import LspPreviousDiagnosticCommand\nfrom .plugin.edit import LspApplyDocumentEditCommand\nfrom .plugin.edit import LspApplyWorkspaceEditCommand\nfrom .plugin.execute_command import LspExecuteCommand\nfrom .plugin.formatting import FormatOnSaveListener\nfrom .plugin.formatting import LspFormatDocumentCommand\nfrom .plugin.formatting import LspFormatDocumentRangeCommand\nfrom .plugin.goto import LspSymbolDeclarationCommand\nfrom .plugin.goto import LspSymbolDefinitionCommand\nfrom .plugin.goto import LspSymbolImplementationCommand\nfrom .plugin.goto import LspSymbolTypeDefinitionCommand\nfrom .plugin.highlights import DocumentHighlightListener\nfrom .plugin.hover import HoverHandler\nfrom .plugin.hover import LspHoverCommand\nfrom .plugin.panels import LspShowDiagnosticsPanelCommand\nfrom .plugin.panels import LspToggleServerPanelCommand\nfrom .plugin.references import LspSymbolReferencesCommand\nfrom .plugin.rename import LspSymbolRenameCommand\nfrom .plugin.signature_help import SignatureHelpListener\nfrom .plugin.symbols import LspDocumentSymbolsCommand\nfrom .plugin.workspace_symbol import LspWorkspaceSymbolsCommand\n", "path": "boot.py"}], "after_files": [{"content": "import sublime\nimport sublime_plugin\n\nfrom .core.protocol import Request, Range, InsertTextFormat\nfrom .core.settings import settings, client_configs\nfrom .core.logging import debug\nfrom .core.completion import parse_completion_response, format_completion\nfrom .core.registry import session_for_view, client_from_session, LSPViewEventListener\nfrom .core.configurations import is_supported_syntax\nfrom .core.sessions import Session\nfrom .core.edit import parse_text_edit\nfrom .core.views import range_to_region\nfrom .core.typing import Any, List, Dict, Optional, Union\nfrom .core.views import text_document_position_params\nfrom .core.restore_lines import RestoreLines\n\n\nclass LspSelectCompletionItemCommand(sublime_plugin.TextCommand):\n def run(self, edit: Any, item: Any, restore_lines_dict: dict) -> None:\n insert_text_format = item.get(\"insertTextFormat\")\n\n text_edit = item.get('textEdit')\n if text_edit:\n # restore the lines\n # so we don't have to calculate the offset for the textEdit range\n restore_lines = RestoreLines.from_dict(restore_lines_dict)\n restore_lines.restore_lines(edit, self.view)\n\n new_text = text_edit.get('newText')\n\n range = Range.from_lsp(text_edit['range'])\n edit_region = range_to_region(range, self.view)\n\n # calculate offset by comparing cursor position with edit_region.begin.\n # by applying the offset to all selections\n # the TextEdit becomes valid for all 
selections\n cursor = self.view.sel()[0].begin() # type: int\n\n offset_start = cursor - edit_region.begin()\n offset_length = edit_region.end() - edit_region.begin()\n\n # erease regions from bottom to top\n for sel in reversed(self.view.sel()):\n begin = sel.begin() - offset_start\n end = begin + offset_length\n r = sublime.Region(begin, end)\n self.view.erase(edit, r)\n\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": new_text})\n else:\n # insert text from bottom to top\n for sel in reversed(self.view.sel()):\n self.view.insert(edit, sel.begin(), new_text)\n else:\n completion = item.get('insertText') or item.get('label') or \"\"\n if insert_text_format == InsertTextFormat.Snippet:\n self.view.run_command(\"insert_snippet\", {\"contents\": completion})\n else:\n for sel in self.view.sel():\n self.view.insert(edit, sel.begin(), completion)\n\n # import statements, etc. some servers only return these after a resolve.\n additional_edits = item.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n else:\n self.do_resolve(item)\n\n def do_resolve(self, item: dict) -> None:\n session = session_for_view(self.view, 'completionProvider', self.view.sel()[0].begin())\n if not session:\n return\n\n client = client_from_session(session)\n if not client:\n return\n\n completion_provider = session.get_capability('completionProvider')\n has_resolve_provider = completion_provider and completion_provider.get('resolveProvider', False)\n if has_resolve_provider:\n client.send_request(Request.resolveCompletionItem(item), self.handle_resolve_response)\n\n def handle_resolve_response(self, response: Optional[dict]) -> None:\n if response:\n additional_edits = response.get('additionalTextEdits')\n if additional_edits:\n self.apply_additional_edits(additional_edits)\n\n def apply_additional_edits(self, additional_edits: List[dict]) -> None:\n edits = list(parse_text_edit(additional_edit) for additional_edit in additional_edits)\n debug('applying additional edits:', edits)\n self.view.run_command(\"lsp_apply_document_edit\", {'changes': edits})\n sublime.status_message('Applied additional edits for completion')\n\n\nclass CompletionHandler(LSPViewEventListener):\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self.initialized = False\n self.enabled = False\n\n @classmethod\n def is_applicable(cls, view_settings: dict) -> bool:\n if 'completion' in settings.disabled_capabilities:\n return False\n\n syntax = view_settings.get('syntax')\n return is_supported_syntax(syntax, client_configs.all) if syntax else False\n\n def initialize(self) -> None:\n self.initialized = True\n session = session_for_view(self.view, 'completionProvider')\n if session:\n completionProvider = session.get_capability('completionProvider') or dict() # type: dict\n # A language server may have an empty dict as CompletionOptions. In that case,\n # no trigger characters will be registered but we'll still respond to Sublime's\n # usual query for completions. 
So the explicit check for None is necessary.\n self.enabled = True\n\n trigger_chars = completionProvider.get(\n 'triggerCharacters') or []\n if trigger_chars:\n self.register_trigger_chars(session, trigger_chars)\n\n def _view_language(self, config_name: str) -> Optional[str]:\n languages = self.view.settings().get('lsp_language')\n return languages.get(config_name) if languages else None\n\n def register_trigger_chars(self, session: Session, trigger_chars: List[str]) -> None:\n completion_triggers = self.view.settings().get('auto_complete_triggers', []) or [] # type: List[Dict[str, str]]\n view_language = self._view_language(session.config.name)\n if view_language:\n for language in session.config.languages:\n if language.id == view_language:\n for scope in language.scopes:\n # debug(\"registering\", trigger_chars, \"for\", scope)\n scope_trigger = next(\n (trigger for trigger in completion_triggers if trigger.get('selector', None) == scope),\n None\n )\n if not scope_trigger: # do not override user's trigger settings.\n completion_triggers.append({\n 'characters': \"\".join(trigger_chars),\n 'selector': scope\n })\n\n self.view.settings().set('auto_complete_triggers', completion_triggers)\n\n def on_query_completions(self, prefix: str, locations: List[int]) -> Optional[sublime.CompletionList]:\n if not self.initialized:\n self.initialize()\n\n if not self.enabled:\n return None\n\n completion_list = sublime.CompletionList()\n\n self.do_request(completion_list, locations)\n\n return completion_list\n\n def do_request(self, completion_list: sublime.CompletionList, locations: List[int]) -> None:\n # don't store client so we can handle restarts\n client = client_from_session(session_for_view(self.view, 'completionProvider', locations[0]))\n if not client:\n return\n\n # save lines to restore them later (only when selecting a completion item with a TextEdit)\n restore_lines = RestoreLines()\n restore_lines.save_lines(locations, self.view)\n\n self.manager.documents.purge_changes(self.view)\n document_position = text_document_position_params(self.view, locations[0])\n client.send_request(\n Request.complete(document_position),\n lambda res: self.handle_response(res, completion_list, restore_lines),\n lambda res: self.handle_error(res, completion_list))\n\n def handle_response(self, response: Optional[Union[dict, List]],\n completion_list: sublime.CompletionList, restore_lines: RestoreLines) -> None:\n response_items, response_incomplete = parse_completion_response(response)\n items = list(format_completion(item, restore_lines) for item in response_items)\n\n flags = 0\n if settings.only_show_lsp_completions:\n flags |= sublime.INHIBIT_WORD_COMPLETIONS\n flags |= sublime.INHIBIT_EXPLICIT_COMPLETIONS\n\n if response_incomplete:\n flags |= sublime.DYNAMIC_COMPLETIONS\n completion_list.set_completions(items, flags)\n\n def handle_error(self, error: dict, completion_list: sublime.CompletionList) -> None:\n completion_list.set_completions([])\n sublime.status_message('Completion error: ' + str(error.get('message')))\n", "path": "plugin/completion.py"}, {"content": "# Please keep this list sorted (Edit -> Sort Lines)\nfrom .plugin.code_actions import LspCodeActionBulbListener\nfrom .plugin.code_actions import LspCodeActionsCommand\nfrom .plugin.color import LspColorListener\nfrom .plugin.completion import CompletionHandler\nfrom .plugin.completion import LspSelectCompletionItemCommand\nfrom .plugin.configuration import LspDisableLanguageServerGloballyCommand\nfrom .plugin.configuration import 
LspDisableLanguageServerInProjectCommand\nfrom .plugin.configuration import LspEnableLanguageServerGloballyCommand\nfrom .plugin.configuration import LspEnableLanguageServerInProjectCommand\nfrom .plugin.core.documents import DocumentSyncListener\nfrom .plugin.core.main import shutdown as plugin_unloaded\nfrom .plugin.core.main import startup as plugin_loaded\nfrom .plugin.core.panels import LspClearPanelCommand\nfrom .plugin.core.panels import LspUpdatePanelCommand\nfrom .plugin.core.panels import LspUpdateServerPanelCommand\nfrom .plugin.core.registry import LspRestartClientCommand\nfrom .plugin.diagnostics import DiagnosticsCursorListener\nfrom .plugin.diagnostics import LspClearDiagnosticsCommand\nfrom .plugin.diagnostics import LspHideDiagnosticCommand\nfrom .plugin.diagnostics import LspNextDiagnosticCommand\nfrom .plugin.diagnostics import LspPreviousDiagnosticCommand\nfrom .plugin.edit import LspApplyDocumentEditCommand\nfrom .plugin.edit import LspApplyWorkspaceEditCommand\nfrom .plugin.execute_command import LspExecuteCommand\nfrom .plugin.formatting import FormatOnSaveListener\nfrom .plugin.formatting import LspFormatDocumentCommand\nfrom .plugin.formatting import LspFormatDocumentRangeCommand\nfrom .plugin.goto import LspSymbolDeclarationCommand\nfrom .plugin.goto import LspSymbolDefinitionCommand\nfrom .plugin.goto import LspSymbolImplementationCommand\nfrom .plugin.goto import LspSymbolTypeDefinitionCommand\nfrom .plugin.highlights import DocumentHighlightListener\nfrom .plugin.hover import HoverHandler\nfrom .plugin.hover import LspHoverCommand\nfrom .plugin.panels import LspShowDiagnosticsPanelCommand\nfrom .plugin.panels import LspToggleServerPanelCommand\nfrom .plugin.references import LspSymbolReferencesCommand\nfrom .plugin.rename import LspSymbolRenameCommand\nfrom .plugin.signature_help import SignatureHelpListener\nfrom .plugin.symbols import LspDocumentSymbolsCommand\nfrom .plugin.workspace_symbol import LspWorkspaceSymbolsCommand\n", "path": "boot.py"}]} | 3,366 | 364 |
gh_patches_debug_13512 | rasdani/github-patches | git_diff | larq__larq-596 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
__version__
### Feature motivation
Is there a way to dynamically poll the version of larq (or lce or larq-zoo for that matter)?
If not, could it be done using `__version__` as usual for standard library modules?
### Feature description
```
import larq
print(larq.__version__)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="larq",
11 version="0.10.1",
12 python_requires=">=3.6",
13 author="Plumerai",
14 author_email="[email protected]",
15 description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
16 long_description=readme(),
17 long_description_content_type="text/markdown",
18 url="https://larq.dev/",
19 packages=find_packages(exclude=["larq.snapshots"]),
20 license="Apache 2.0",
21 install_requires=[
22 "numpy >= 1.15.4, < 2.0",
23 "terminaltables>=3.1.0",
24 "dataclasses ; python_version<'3.7'",
25 ],
26 extras_require={
27 "tensorflow": ["tensorflow>=1.14.0"],
28 "tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
29 "test": [
30 "black==20.8b1",
31 "flake8>=3.7.9,<3.9.0",
32 "isort==5.6.4",
33 "packaging>=19.2,<21.0",
34 "pytest>=5.2.4,<6.2.0",
35 "pytest-cov>=2.8.1,<2.11.0",
36 "pytest-xdist>=1.30,<2.2",
37 "pytest-mock>=2.0,<3.4",
38 "pytype==2020.10.8",
39 "snapshottest>=0.5.1,<0.7.0",
40 ],
41 },
42 classifiers=[
43 "Development Status :: 4 - Beta",
44 "Intended Audience :: Developers",
45 "Intended Audience :: Education",
46 "Intended Audience :: Science/Research",
47 "License :: OSI Approved :: Apache Software License",
48 "Programming Language :: Python :: 3",
49 "Programming Language :: Python :: 3 :: Only",
50 "Programming Language :: Python :: 3.6",
51 "Programming Language :: Python :: 3.7",
52 "Programming Language :: Python :: 3.8",
53 "Topic :: Scientific/Engineering",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "Topic :: Scientific/Engineering :: Artificial Intelligence",
56 "Topic :: Software Development",
57 "Topic :: Software Development :: Libraries",
58 "Topic :: Software Development :: Libraries :: Python Modules",
59 ],
60 )
61
```
Path: `larq/__init__.py`
Content:
```
1 from larq import (
2 activations,
3 callbacks,
4 constraints,
5 context,
6 layers,
7 math,
8 metrics,
9 models,
10 optimizers,
11 quantizers,
12 utils,
13 )
14
15 __all__ = [
16 "layers",
17 "activations",
18 "callbacks",
19 "constraints",
20 "context",
21 "math",
22 "metrics",
23 "models",
24 "quantizers",
25 "optimizers",
26 "utils",
27 ]
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/larq/__init__.py b/larq/__init__.py
--- a/larq/__init__.py
+++ b/larq/__init__.py
@@ -12,6 +12,14 @@
utils,
)
+try:
+ from importlib import metadata # type: ignore
+except ImportError:
+ # Running on pre-3.8 Python; use importlib-metadata package
+ import importlib_metadata as metadata # type: ignore
+
+__version__ = metadata.version("larq")
+
__all__ = [
"layers",
"activations",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@
"numpy >= 1.15.4, < 2.0",
"terminaltables>=3.1.0",
"dataclasses ; python_version<'3.7'",
+ "importlib-metadata ~= 2.0 ; python_version<'3.8'",
],
extras_require={
"tensorflow": ["tensorflow>=1.14.0"],
| {"golden_diff": "diff --git a/larq/__init__.py b/larq/__init__.py\n--- a/larq/__init__.py\n+++ b/larq/__init__.py\n@@ -12,6 +12,14 @@\n utils,\n )\n \n+try:\n+ from importlib import metadata # type: ignore\n+except ImportError:\n+ # Running on pre-3.8 Python; use importlib-metadata package\n+ import importlib_metadata as metadata # type: ignore\n+\n+__version__ = metadata.version(\"larq\")\n+\n __all__ = [\n \"layers\",\n \"activations\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,6 +22,7 @@\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n+ \"importlib-metadata ~= 2.0 ; python_version<'3.8'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n", "issue": "__version__\n### Feature motivation\r\nIs there a way to dynamically poll the version of larq (or lce or larq-zoo for that matter)?\r\nIf not, could it be done using `__version__` as usual for standard library modules?\r\n\r\n### Feature description\r\n```\r\nimport larq\r\nprint(larq.__version__)\r\n```\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.10.1\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"black==20.8b1\",\n \"flake8>=3.7.9,<3.9.0\",\n \"isort==5.6.4\",\n \"packaging>=19.2,<21.0\",\n \"pytest>=5.2.4,<6.2.0\",\n \"pytest-cov>=2.8.1,<2.11.0\",\n \"pytest-xdist>=1.30,<2.2\",\n \"pytest-mock>=2.0,<3.4\",\n \"pytype==2020.10.8\",\n \"snapshottest>=0.5.1,<0.7.0\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "from larq import (\n activations,\n callbacks,\n constraints,\n context,\n layers,\n math,\n metrics,\n models,\n optimizers,\n quantizers,\n utils,\n)\n\n__all__ = [\n \"layers\",\n \"activations\",\n \"callbacks\",\n \"constraints\",\n \"context\",\n \"math\",\n \"metrics\",\n \"models\",\n \"quantizers\",\n \"optimizers\",\n \"utils\",\n]\n", "path": "larq/__init__.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return 
f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.10.1\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n \"importlib-metadata ~= 2.0 ; python_version<'3.8'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"black==20.8b1\",\n \"flake8>=3.7.9,<3.9.0\",\n \"isort==5.6.4\",\n \"packaging>=19.2,<21.0\",\n \"pytest>=5.2.4,<6.2.0\",\n \"pytest-cov>=2.8.1,<2.11.0\",\n \"pytest-xdist>=1.30,<2.2\",\n \"pytest-mock>=2.0,<3.4\",\n \"pytype==2020.10.8\",\n \"snapshottest>=0.5.1,<0.7.0\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "from larq import (\n activations,\n callbacks,\n constraints,\n context,\n layers,\n math,\n metrics,\n models,\n optimizers,\n quantizers,\n utils,\n)\n\ntry:\n from importlib import metadata # type: ignore\nexcept ImportError:\n # Running on pre-3.8 Python; use importlib-metadata package\n import importlib_metadata as metadata # type: ignore\n\n__version__ = metadata.version(\"larq\")\n\n__all__ = [\n \"layers\",\n \"activations\",\n \"callbacks\",\n \"constraints\",\n \"context\",\n \"math\",\n \"metrics\",\n \"models\",\n \"quantizers\",\n \"optimizers\",\n \"utils\",\n]\n", "path": "larq/__init__.py"}]} | 1,154 | 249 |
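As a quick companion to the larq record above: the golden diff resolves the package version through importlib metadata, with a backport fallback for Python < 3.8. A minimal standalone sketch of that pattern (it assumes the `larq` distribution is installed; everything else is taken directly from the diff):

```
try:
    from importlib import metadata  # available on Python 3.8+
except ImportError:
    # Pre-3.8 interpreters rely on the importlib-metadata backport,
    # which the patched setup.py adds as a conditional dependency.
    import importlib_metadata as metadata

# Ask the installed distribution for its version instead of hard-coding it.
__version__ = metadata.version("larq")
print(__version__)
```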
gh_patches_debug_36493 | rasdani/github-patches | git_diff | pwndbg__pwndbg-2087 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Port checksec command to native command and add colors
This is annoying IMHO. I want colors and no pwntools update checks here!

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/wrappers/checksec.py`
Content:
```
1 from __future__ import annotations
2
3 from subprocess import CalledProcessError
4
5 import pwndbg.commands
6 import pwndbg.lib.cache
7 import pwndbg.wrappers
8
9 cmd_name = "checksec"
10 cmd_pwntools = ["pwn", "checksec"]
11
12
13 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
14 @pwndbg.lib.cache.cache_until("objfile")
15 def get_raw_out(local_path: str) -> str:
16 try:
17 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file=" + local_path])
18 except CalledProcessError:
19 pass
20 try:
21 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file", local_path])
22 except CalledProcessError:
23 pass
24 return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])
25
26
27 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
28 def relro_status(local_path: str) -> str:
29 relro = "No RELRO"
30 out = get_raw_out(local_path)
31
32 if "Full RELRO" in out:
33 relro = "Full RELRO"
34 elif "Partial RELRO" in out:
35 relro = "Partial RELRO"
36
37 return relro
38
39
40 @pwndbg.wrappers.OnlyWithCommand(cmd_name, cmd_pwntools)
41 def pie_status(local_path: str) -> str:
42 pie = "No PIE"
43 out = get_raw_out(local_path)
44
45 if "PIE enabled" in out:
46 pie = "PIE enabled"
47
48 return pie
49
```
Path: `pwndbg/commands/checksec.py`
Content:
```
1 from __future__ import annotations
2
3 import pwndbg.color
4 import pwndbg.commands
5 import pwndbg.gdblib.file
6 import pwndbg.wrappers.checksec
7
8
9 def color_line(line: str) -> str:
10 return pwndbg.color.normal(
11 line.replace("*", pwndbg.color.green("*"))
12 .replace(":", f":{pwndbg.color.GREEN}")
13 .replace("No", f"{pwndbg.color.RED}No")
14 )
15
16
17 def color_lines(output: str) -> str:
18 return "\n".join(map(color_line, output.split("\n")))
19
20
21 @pwndbg.commands.ArgparsedCommand("Prints out the binary security settings using `checksec`.")
22 @pwndbg.commands.OnlyWithFile
23 def checksec() -> None:
24 output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())
25 print(color_lines(output))
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/checksec.py b/pwndbg/commands/checksec.py
--- a/pwndbg/commands/checksec.py
+++ b/pwndbg/commands/checksec.py
@@ -1,10 +1,18 @@
from __future__ import annotations
+import argparse
+
import pwndbg.color
import pwndbg.commands
import pwndbg.gdblib.file
import pwndbg.wrappers.checksec
+parser = argparse.ArgumentParser(
+ description="Prints out the binary security settings using `checksec`."
+)
+
+parser.add_argument("-f", "--file", type=str, help="Specify the file to run `checksec` on.")
+
def color_line(line: str) -> str:
return pwndbg.color.normal(
@@ -18,8 +26,9 @@
return "\n".join(map(color_line, output.split("\n")))
[email protected]("Prints out the binary security settings using `checksec`.")
[email protected](parser, command_name="checksec")
@pwndbg.commands.OnlyWithFile
-def checksec() -> None:
- output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())
+def checksec(file: str) -> None:
+ local_path = file or pwndbg.gdblib.file.get_proc_exe_file()
+ output = pwndbg.wrappers.checksec.get_raw_out(local_path)
print(color_lines(output))
diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py
--- a/pwndbg/wrappers/checksec.py
+++ b/pwndbg/wrappers/checksec.py
@@ -1,30 +1,16 @@
from __future__ import annotations
-from subprocess import CalledProcessError
+from pwnlib.elf import ELF
-import pwndbg.commands
-import pwndbg.lib.cache
-import pwndbg.wrappers
-cmd_name = "checksec"
-cmd_pwntools = ["pwn", "checksec"]
+def get_raw_out(local_path: str) -> str:
+ elf = ELF(local_path)
+ output = f"File: {elf.path}\n"
+ output += f"Arch: {elf.arch}\n"
+ output += elf.checksec()
+ return output
[email protected](cmd_name, cmd_pwntools)
[email protected]_until("objfile")
-def get_raw_out(local_path: str) -> str:
- try:
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file=" + local_path])
- except CalledProcessError:
- pass
- try:
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + ["--file", local_path])
- except CalledProcessError:
- pass
- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])
-
-
[email protected](cmd_name, cmd_pwntools)
def relro_status(local_path: str) -> str:
relro = "No RELRO"
out = get_raw_out(local_path)
@@ -37,7 +23,6 @@
return relro
[email protected](cmd_name, cmd_pwntools)
def pie_status(local_path: str) -> str:
pie = "No PIE"
out = get_raw_out(local_path)
| {"golden_diff": "diff --git a/pwndbg/commands/checksec.py b/pwndbg/commands/checksec.py\n--- a/pwndbg/commands/checksec.py\n+++ b/pwndbg/commands/checksec.py\n@@ -1,10 +1,18 @@\n from __future__ import annotations\n \n+import argparse\n+\n import pwndbg.color\n import pwndbg.commands\n import pwndbg.gdblib.file\n import pwndbg.wrappers.checksec\n \n+parser = argparse.ArgumentParser(\n+ description=\"Prints out the binary security settings using `checksec`.\"\n+)\n+\n+parser.add_argument(\"-f\", \"--file\", type=str, help=\"Specify the file to run `checksec` on.\")\n+\n \n def color_line(line: str) -> str:\n return pwndbg.color.normal(\n@@ -18,8 +26,9 @@\n return \"\\n\".join(map(color_line, output.split(\"\\n\")))\n \n \[email protected](\"Prints out the binary security settings using `checksec`.\")\[email protected](parser, command_name=\"checksec\")\n @pwndbg.commands.OnlyWithFile\n-def checksec() -> None:\n- output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())\n+def checksec(file: str) -> None:\n+ local_path = file or pwndbg.gdblib.file.get_proc_exe_file()\n+ output = pwndbg.wrappers.checksec.get_raw_out(local_path)\n print(color_lines(output))\ndiff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py\n--- a/pwndbg/wrappers/checksec.py\n+++ b/pwndbg/wrappers/checksec.py\n@@ -1,30 +1,16 @@\n from __future__ import annotations\n \n-from subprocess import CalledProcessError\n+from pwnlib.elf import ELF\n \n-import pwndbg.commands\n-import pwndbg.lib.cache\n-import pwndbg.wrappers\n \n-cmd_name = \"checksec\"\n-cmd_pwntools = [\"pwn\", \"checksec\"]\n+def get_raw_out(local_path: str) -> str:\n+ elf = ELF(local_path)\n+ output = f\"File: {elf.path}\\n\"\n+ output += f\"Arch: {elf.arch}\\n\"\n+ output += elf.checksec()\n+ return output\n \n \[email protected](cmd_name, cmd_pwntools)\[email protected]_until(\"objfile\")\n-def get_raw_out(local_path: str) -> str:\n- try:\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file=\" + local_path])\n- except CalledProcessError:\n- pass\n- try:\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file\", local_path])\n- except CalledProcessError:\n- pass\n- return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])\n-\n-\[email protected](cmd_name, cmd_pwntools)\n def relro_status(local_path: str) -> str:\n relro = \"No RELRO\"\n out = get_raw_out(local_path)\n@@ -37,7 +23,6 @@\n return relro\n \n \[email protected](cmd_name, cmd_pwntools)\n def pie_status(local_path: str) -> str:\n pie = \"No PIE\"\n out = get_raw_out(local_path)\n", "issue": "Port checksec command to native command and add colors\nThis is annoying IMHO. 
I want colors and no pwntools update checks here!\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom subprocess import CalledProcessError\n\nimport pwndbg.commands\nimport pwndbg.lib.cache\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\ncmd_pwntools = [\"pwn\", \"checksec\"]\n\n\[email protected](cmd_name, cmd_pwntools)\[email protected]_until(\"objfile\")\ndef get_raw_out(local_path: str) -> str:\n try:\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file=\" + local_path])\n except CalledProcessError:\n pass\n try:\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [\"--file\", local_path])\n except CalledProcessError:\n pass\n return pwndbg.wrappers.call_cmd(get_raw_out.cmd + [local_path])\n\n\[email protected](cmd_name, cmd_pwntools)\ndef relro_status(local_path: str) -> str:\n relro = \"No RELRO\"\n out = get_raw_out(local_path)\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\n\[email protected](cmd_name, cmd_pwntools)\ndef pie_status(local_path: str) -> str:\n pie = \"No PIE\"\n out = get_raw_out(local_path)\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}, {"content": "from __future__ import annotations\n\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.gdblib.file\nimport pwndbg.wrappers.checksec\n\n\ndef color_line(line: str) -> str:\n return pwndbg.color.normal(\n line.replace(\"*\", pwndbg.color.green(\"*\"))\n .replace(\":\", f\":{pwndbg.color.GREEN}\")\n .replace(\"No\", f\"{pwndbg.color.RED}No\")\n )\n\n\ndef color_lines(output: str) -> str:\n return \"\\n\".join(map(color_line, output.split(\"\\n\")))\n\n\[email protected](\"Prints out the binary security settings using `checksec`.\")\[email protected]\ndef checksec() -> None:\n output = pwndbg.wrappers.checksec.get_raw_out(pwndbg.gdblib.file.get_proc_exe_file())\n print(color_lines(output))\n", "path": "pwndbg/commands/checksec.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom pwnlib.elf import ELF\n\n\ndef get_raw_out(local_path: str) -> str:\n elf = ELF(local_path)\n output = f\"File: {elf.path}\\n\"\n output += f\"Arch: {elf.arch}\\n\"\n output += elf.checksec()\n return output\n\n\ndef relro_status(local_path: str) -> str:\n relro = \"No RELRO\"\n out = get_raw_out(local_path)\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\n\ndef pie_status(local_path: str) -> str:\n pie = \"No PIE\"\n out = get_raw_out(local_path)\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}, {"content": "from __future__ import annotations\n\nimport argparse\n\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.gdblib.file\nimport pwndbg.wrappers.checksec\n\nparser = argparse.ArgumentParser(\n description=\"Prints out the binary security settings using `checksec`.\"\n)\n\nparser.add_argument(\"-f\", \"--file\", type=str, help=\"Specify the file to run `checksec` on.\")\n\n\ndef color_line(line: str) -> str:\n return pwndbg.color.normal(\n line.replace(\"*\", pwndbg.color.green(\"*\"))\n .replace(\":\", f\":{pwndbg.color.GREEN}\")\n .replace(\"No\", f\"{pwndbg.color.RED}No\")\n )\n\n\ndef color_lines(output: str) -> str:\n return \"\\n\".join(map(color_line, output.split(\"\\n\")))\n\n\[email protected](parser, command_name=\"checksec\")\[email 
protected]\ndef checksec(file: str) -> None:\n local_path = file or pwndbg.gdblib.file.get_proc_exe_file()\n output = pwndbg.wrappers.checksec.get_raw_out(local_path)\n print(color_lines(output))\n", "path": "pwndbg/commands/checksec.py"}]} | 1,054 | 776 |
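For the pwndbg record above, the patched wrapper drops the external `checksec`/`pwn checksec` subprocess in favour of pwntools' `ELF` object. A hedged sketch of that approach, lifted from the after-file (it assumes pwntools is installed; the sample binary path in the usage comment is purely illustrative):

```
from pwnlib.elf import ELF


def get_raw_out(local_path: str) -> str:
    # Parse the binary once; pwntools renders the RELRO/canary/NX/PIE summary itself.
    elf = ELF(local_path)
    output = f"File: {elf.path}\n"
    output += f"Arch: {elf.arch}\n"
    output += elf.checksec()
    return output


# Hypothetical usage:
# print(get_raw_out("/bin/true"))
```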
gh_patches_debug_19369 | rasdani/github-patches | git_diff | pymedusa__Medusa-7497 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str'
### INFO
**Python Version**: `3.7.5 (default, Oct 17 2019, 12:25:15) [GCC 8.3.0]`
**Operating System**: `Linux-4.9.0-11-amd64-x86_64-with`
**Locale**: `UTF-8`
**Branch**: [master](../tree/master)
**Database**: `44.14`
**Commit**: pymedusa/Medusa@918cfe7ce01808499585ffe6c1356fd5ed369f76
**Link to Log**: https://gist.github.com/8f765e724a442e6d91ed29584352638d
### ERROR
<pre>
2019-11-12 10:48:11 ERROR TORNADO :: [918cfe7] API :: TypeError("a bytes-like object is required, not 'str'")
Traceback (most recent call last):
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L165">medusa/server/api/v1/core.py</a>", line 165, in get
out_dict = _call_dispatcher(args, kwargs)
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L235">medusa/server/api/v1/core.py</a>", line 235, in call_dispatcher
cur_out_dict = func(cur_args, cur_kwargs).run() # call function and get response
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L1336">medusa/server/api/v1/core.py</a>", line 1336, in run
proc_type=self.type
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L65">medusa/process_tv.py</a>", line 65, in run
return ProcessResult(path, process_method).process(force=force, **kwargs)
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L188">medusa/process_tv.py</a>", line 188, in process
ignore_subs=ignore_subs)
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L378">medusa/process_tv.py</a>", line 378, in process_files
self.process_media(path, self.video_files, force, is_priority, ignore_subs)
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L599">medusa/process_tv.py</a>", line 599, in process_media
self.result = processor.process()
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/post_processor.py#L1281">medusa/post_processor.py</a>", line 1281, in process
notifiers.emby_notifier.update_library(ep_obj.series)
File "/app/medusa/<a href="../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/notifiers/emby.py#L131">medusa/notifiers/emby.py</a>", line 131, in update_library
log.debug('EMBY: HTTP response: {0}', resp.content.replace('\n', ''))
TypeError: a bytes-like object is required, not 'str'
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/notifiers/emby.py`
Content:
```
1 # coding=utf-8
2
3 """Emby notifier module."""
4 from __future__ import unicode_literals
5
6 import json
7 import logging
8
9 from medusa import app
10 from medusa.helper.exceptions import ex
11 from medusa.indexers.indexer_config import INDEXER_TVDBV2, INDEXER_TVRAGE
12 from medusa.indexers.utils import indexer_id_to_name, mappings
13 from medusa.logger.adapters.style import BraceAdapter
14 from medusa.session.core import MedusaSession
15
16 from requests.exceptions import HTTPError, RequestException
17
18 from six import text_type
19
20 log = BraceAdapter(logging.getLogger(__name__))
21 log.logger.addHandler(logging.NullHandler())
22
23
24 class Notifier(object):
25 """Emby notifier class."""
26
27 def __init__(self):
28 self.session = MedusaSession()
29
30 def _notify_emby(self, message, host=None, emby_apikey=None):
31 """
32 Notify Emby host via HTTP API.
33
34 :return: True for no issue or False if there was an error
35 """
36 # fill in omitted parameters
37 if not host:
38 host = app.EMBY_HOST
39 if not emby_apikey:
40 emby_apikey = app.EMBY_APIKEY
41
42 url = 'http://{host}/emby/Notifications/Admin'.format(host=host)
43 data = json.dumps({
44 'Name': 'Medusa',
45 'Description': message,
46 'ImageUrl': app.LOGO_URL
47 })
48 try:
49 resp = self.session.post(
50 url=url,
51 data=data,
52 headers={
53 'X-MediaBrowser-Token': emby_apikey,
54 'Content-Type': 'application/json'
55 }
56 )
57 resp.raise_for_status()
58
59 if resp.content:
60 log.debug('EMBY: HTTP response: {0}', resp.content.replace('\n', ''))
61
62 log.info('EMBY: Successfully sent a test notification.')
63 return True
64
65 except (HTTPError, RequestException) as error:
66 log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',
67 {'url': url, 'error': ex(error)})
68 return False
69
70 ##############################################################################
71 # Public functions
72 ##############################################################################
73
74 def test_notify(self, host, emby_apikey):
75 """
76 Sends a test notification.
77
78 :return: True for no issue or False if there was an error
79 """
80 return self._notify_emby('This is a test notification from Medusa', host, emby_apikey)
81
82 def update_library(self, show=None):
83 """
84 Update the Emby Media Server host via HTTP API.
85
86 :return: True for no issue or False if there was an error
87 """
88 if app.USE_EMBY:
89 if not app.EMBY_HOST:
90 log.debug('EMBY: No host specified, check your settings')
91 return False
92
93 if show:
94 # EMBY only supports TVDB ids
95 provider = 'tvdbid'
96 if show.indexer == INDEXER_TVDBV2:
97 tvdb_id = show.indexerid
98 else:
99 # Try using external ids to get a TVDB id
100 tvdb_id = show.externals.get(mappings[INDEXER_TVDBV2], None)
101
102 if tvdb_id is None:
103 if show.indexer == INDEXER_TVRAGE:
104 log.warning('EMBY: TVRage indexer no longer valid')
105 else:
106 log.warning(
107 'EMBY: Unable to find a TVDB ID for {series},'
108 ' and {indexer} indexer is unsupported',
109 {'series': show.name, 'indexer': indexer_id_to_name(show.indexer)}
110 )
111 return False
112
113 params = {
114 provider: text_type(tvdb_id)
115 }
116 else:
117 params = {}
118
119 url = 'http://{host}/emby/Library/Series/Updated'.format(host=app.EMBY_HOST)
120 try:
121 resp = self.session.post(
122 url=url,
123 params=params,
124 headers={
125 'X-MediaBrowser-Token': app.EMBY_APIKEY
126 }
127 )
128 resp.raise_for_status()
129
130 if resp.content:
131 log.debug('EMBY: HTTP response: {0}', resp.content.replace('\n', ''))
132
133 log.info('EMBY: Successfully sent a "Series Library Updated" command.')
134 return True
135
136 except (HTTPError, RequestException) as error:
137 log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',
138 {'url': url, 'error': ex(error)})
139 return False
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/notifiers/emby.py b/medusa/notifiers/emby.py
--- a/medusa/notifiers/emby.py
+++ b/medusa/notifiers/emby.py
@@ -56,8 +56,8 @@
)
resp.raise_for_status()
- if resp.content:
- log.debug('EMBY: HTTP response: {0}', resp.content.replace('\n', ''))
+ if resp.text:
+ log.debug('EMBY: HTTP response: {0}', resp.text.replace('\n', ''))
log.info('EMBY: Successfully sent a test notification.')
return True
@@ -127,8 +127,8 @@
)
resp.raise_for_status()
- if resp.content:
- log.debug('EMBY: HTTP response: {0}', resp.content.replace('\n', ''))
+ if resp.text:
+ log.debug('EMBY: HTTP response: {0}', resp.text.replace('\n', ''))
log.info('EMBY: Successfully sent a "Series Library Updated" command.')
return True
| {"golden_diff": "diff --git a/medusa/notifiers/emby.py b/medusa/notifiers/emby.py\n--- a/medusa/notifiers/emby.py\n+++ b/medusa/notifiers/emby.py\n@@ -56,8 +56,8 @@\n )\n resp.raise_for_status()\n \n- if resp.content:\n- log.debug('EMBY: HTTP response: {0}', resp.content.replace('\\n', ''))\n+ if resp.text:\n+ log.debug('EMBY: HTTP response: {0}', resp.text.replace('\\n', ''))\n \n log.info('EMBY: Successfully sent a test notification.')\n return True\n@@ -127,8 +127,8 @@\n )\n resp.raise_for_status()\n \n- if resp.content:\n- log.debug('EMBY: HTTP response: {0}', resp.content.replace('\\n', ''))\n+ if resp.text:\n+ log.debug('EMBY: HTTP response: {0}', resp.text.replace('\\n', ''))\n \n log.info('EMBY: Successfully sent a \"Series Library Updated\" command.')\n return True\n", "issue": "[APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str'\n\n### INFO\n**Python Version**: `3.7.5 (default, Oct 17 2019, 12:25:15) [GCC 8.3.0]`\n**Operating System**: `Linux-4.9.0-11-amd64-x86_64-with`\n**Locale**: `UTF-8`\n**Branch**: [master](../tree/master)\n**Database**: `44.14`\n**Commit**: pymedusa/Medusa@918cfe7ce01808499585ffe6c1356fd5ed369f76\n**Link to Log**: https://gist.github.com/8f765e724a442e6d91ed29584352638d\n### ERROR\n<pre>\n2019-11-12 10:48:11 ERROR TORNADO :: [918cfe7] API :: TypeError(\"a bytes-like object is required, not 'str'\")\nTraceback (most recent call last):\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L165\">medusa/server/api/v1/core.py</a>\", line 165, in get\n out_dict = _call_dispatcher(args, kwargs)\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L235\">medusa/server/api/v1/core.py</a>\", line 235, in call_dispatcher\n cur_out_dict = func(cur_args, cur_kwargs).run() # call function and get response\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/server/api/v1/core.py#L1336\">medusa/server/api/v1/core.py</a>\", line 1336, in run\n proc_type=self.type\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L65\">medusa/process_tv.py</a>\", line 65, in run\n return ProcessResult(path, process_method).process(force=force, **kwargs)\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L188\">medusa/process_tv.py</a>\", line 188, in process\n ignore_subs=ignore_subs)\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L378\">medusa/process_tv.py</a>\", line 378, in process_files\n self.process_media(path, self.video_files, force, is_priority, ignore_subs)\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/process_tv.py#L599\">medusa/process_tv.py</a>\", line 599, in process_media\n self.result = processor.process()\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/post_processor.py#L1281\">medusa/post_processor.py</a>\", line 1281, in process\n notifiers.emby_notifier.update_library(ep_obj.series)\n File \"/app/medusa/<a href=\"../blob/918cfe7ce01808499585ffe6c1356fd5ed369f76/medusa/notifiers/emby.py#L131\">medusa/notifiers/emby.py</a>\", line 131, in update_library\n log.debug('EMBY: HTTP response: {0}', resp.content.replace('\\n', ''))\nTypeError: a bytes-like object is required, not 'str'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": 
[{"content": "# coding=utf-8\n\n\"\"\"Emby notifier module.\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\n\nfrom medusa import app\nfrom medusa.helper.exceptions import ex\nfrom medusa.indexers.indexer_config import INDEXER_TVDBV2, INDEXER_TVRAGE\nfrom medusa.indexers.utils import indexer_id_to_name, mappings\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.session.core import MedusaSession\n\nfrom requests.exceptions import HTTPError, RequestException\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n \"\"\"Emby notifier class.\"\"\"\n\n def __init__(self):\n self.session = MedusaSession()\n\n def _notify_emby(self, message, host=None, emby_apikey=None):\n \"\"\"\n Notify Emby host via HTTP API.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n # fill in omitted parameters\n if not host:\n host = app.EMBY_HOST\n if not emby_apikey:\n emby_apikey = app.EMBY_APIKEY\n\n url = 'http://{host}/emby/Notifications/Admin'.format(host=host)\n data = json.dumps({\n 'Name': 'Medusa',\n 'Description': message,\n 'ImageUrl': app.LOGO_URL\n })\n try:\n resp = self.session.post(\n url=url,\n data=data,\n headers={\n 'X-MediaBrowser-Token': emby_apikey,\n 'Content-Type': 'application/json'\n }\n )\n resp.raise_for_status()\n\n if resp.content:\n log.debug('EMBY: HTTP response: {0}', resp.content.replace('\\n', ''))\n\n log.info('EMBY: Successfully sent a test notification.')\n return True\n\n except (HTTPError, RequestException) as error:\n log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',\n {'url': url, 'error': ex(error)})\n return False\n\n##############################################################################\n# Public functions\n##############################################################################\n\n def test_notify(self, host, emby_apikey):\n \"\"\"\n Sends a test notification.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n return self._notify_emby('This is a test notification from Medusa', host, emby_apikey)\n\n def update_library(self, show=None):\n \"\"\"\n Update the Emby Media Server host via HTTP API.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n if app.USE_EMBY:\n if not app.EMBY_HOST:\n log.debug('EMBY: No host specified, check your settings')\n return False\n\n if show:\n # EMBY only supports TVDB ids\n provider = 'tvdbid'\n if show.indexer == INDEXER_TVDBV2:\n tvdb_id = show.indexerid\n else:\n # Try using external ids to get a TVDB id\n tvdb_id = show.externals.get(mappings[INDEXER_TVDBV2], None)\n\n if tvdb_id is None:\n if show.indexer == INDEXER_TVRAGE:\n log.warning('EMBY: TVRage indexer no longer valid')\n else:\n log.warning(\n 'EMBY: Unable to find a TVDB ID for {series},'\n ' and {indexer} indexer is unsupported',\n {'series': show.name, 'indexer': indexer_id_to_name(show.indexer)}\n )\n return False\n\n params = {\n provider: text_type(tvdb_id)\n }\n else:\n params = {}\n\n url = 'http://{host}/emby/Library/Series/Updated'.format(host=app.EMBY_HOST)\n try:\n resp = self.session.post(\n url=url,\n params=params,\n headers={\n 'X-MediaBrowser-Token': app.EMBY_APIKEY\n }\n )\n resp.raise_for_status()\n\n if resp.content:\n log.debug('EMBY: HTTP response: {0}', resp.content.replace('\\n', ''))\n\n log.info('EMBY: Successfully sent a \"Series Library Updated\" command.')\n return True\n\n except (HTTPError, 
RequestException) as error:\n log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',\n {'url': url, 'error': ex(error)})\n return False\n", "path": "medusa/notifiers/emby.py"}], "after_files": [{"content": "# coding=utf-8\n\n\"\"\"Emby notifier module.\"\"\"\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\n\nfrom medusa import app\nfrom medusa.helper.exceptions import ex\nfrom medusa.indexers.indexer_config import INDEXER_TVDBV2, INDEXER_TVRAGE\nfrom medusa.indexers.utils import indexer_id_to_name, mappings\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.session.core import MedusaSession\n\nfrom requests.exceptions import HTTPError, RequestException\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n \"\"\"Emby notifier class.\"\"\"\n\n def __init__(self):\n self.session = MedusaSession()\n\n def _notify_emby(self, message, host=None, emby_apikey=None):\n \"\"\"\n Notify Emby host via HTTP API.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n # fill in omitted parameters\n if not host:\n host = app.EMBY_HOST\n if not emby_apikey:\n emby_apikey = app.EMBY_APIKEY\n\n url = 'http://{host}/emby/Notifications/Admin'.format(host=host)\n data = json.dumps({\n 'Name': 'Medusa',\n 'Description': message,\n 'ImageUrl': app.LOGO_URL\n })\n try:\n resp = self.session.post(\n url=url,\n data=data,\n headers={\n 'X-MediaBrowser-Token': emby_apikey,\n 'Content-Type': 'application/json'\n }\n )\n resp.raise_for_status()\n\n if resp.text:\n log.debug('EMBY: HTTP response: {0}', resp.text.replace('\\n', ''))\n\n log.info('EMBY: Successfully sent a test notification.')\n return True\n\n except (HTTPError, RequestException) as error:\n log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',\n {'url': url, 'error': ex(error)})\n return False\n\n##############################################################################\n# Public functions\n##############################################################################\n\n def test_notify(self, host, emby_apikey):\n \"\"\"\n Sends a test notification.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n return self._notify_emby('This is a test notification from Medusa', host, emby_apikey)\n\n def update_library(self, show=None):\n \"\"\"\n Update the Emby Media Server host via HTTP API.\n\n :return: True for no issue or False if there was an error\n \"\"\"\n if app.USE_EMBY:\n if not app.EMBY_HOST:\n log.debug('EMBY: No host specified, check your settings')\n return False\n\n if show:\n # EMBY only supports TVDB ids\n provider = 'tvdbid'\n if show.indexer == INDEXER_TVDBV2:\n tvdb_id = show.indexerid\n else:\n # Try using external ids to get a TVDB id\n tvdb_id = show.externals.get(mappings[INDEXER_TVDBV2], None)\n\n if tvdb_id is None:\n if show.indexer == INDEXER_TVRAGE:\n log.warning('EMBY: TVRage indexer no longer valid')\n else:\n log.warning(\n 'EMBY: Unable to find a TVDB ID for {series},'\n ' and {indexer} indexer is unsupported',\n {'series': show.name, 'indexer': indexer_id_to_name(show.indexer)}\n )\n return False\n\n params = {\n provider: text_type(tvdb_id)\n }\n else:\n params = {}\n\n url = 'http://{host}/emby/Library/Series/Updated'.format(host=app.EMBY_HOST)\n try:\n resp = self.session.post(\n url=url,\n params=params,\n headers={\n 'X-MediaBrowser-Token': app.EMBY_APIKEY\n }\n )\n resp.raise_for_status()\n\n if 
resp.text:\n log.debug('EMBY: HTTP response: {0}', resp.text.replace('\\n', ''))\n\n log.info('EMBY: Successfully sent a \"Series Library Updated\" command.')\n return True\n\n except (HTTPError, RequestException) as error:\n log.warning('EMBY: Warning: Unable to contact Emby at {url}: {error}',\n {'url': url, 'error': ex(error)})\n return False\n", "path": "medusa/notifiers/emby.py"}]} | 2,694 | 240 |
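The Medusa record above boils down to a bytes/str mismatch on `requests` responses: `Response.content` is `bytes`, while `Response.text` is the decoded `str`, so only the latter accepts str arguments to `.replace()`. A small illustrative snippet (the URL is hypothetical):

```
import requests

resp = requests.get("https://example.com/emby/ping")  # placeholder endpoint

assert isinstance(resp.content, bytes)  # raw payload
assert isinstance(resp.text, str)       # decoded payload

# Works: str.replace with str arguments, as the patched notifier does.
print(resp.text.replace("\n", ""))

# Raises TypeError, as in the traceback: bytes.replace needs bytes arguments.
# resp.content.replace("\n", "")
```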
gh_patches_debug_38547 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-584 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linear kernel's variance prior is broken
Repro:
[linear_kernel_prior_issue.ipynb.txt](https://github.com/cornellius-gp/gpytorch/files/2979717/linear_kernel_prior_issue.ipynb.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/kernels/linear_kernel.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import torch
4 from .kernel import Kernel
5 from ..lazy import MatmulLazyTensor, RootLazyTensor
6
7
8 class LinearKernel(Kernel):
9 r"""
10 Computes a covariance matrix based on the Linear kernel
11 between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
12
13 .. math::
14 \begin{equation*}
15 k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = (\mathbf{x_1} - \mathbf{o})^\top
16 (\mathbf{x_2} - \mathbf{o}) + v.
17 \end{equation*}
18
19 where
20
21 * :math:`\mathbf o` is an :attr:`offset` parameter.
22 * :math:`v` is a :attr:`variance` parameter.
23
24
25 .. note::
26
27 To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a
28 :class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form
29 :math:`K = XX^{\top}` and :math:`K = XZ^{\top}`. This makes inference
30 efficient because a matrix-vector product :math:`Kv` can be computed as
31 :math:`Kv=X(X^{\top}v)`, where the base multiply :math:`Xv` takes only
32 :math:`O(nd)` time and space.
33
34 Args:
35 :attr:`num_dimensions` (int):
36 Number of data dimensions to expect. This
37 is necessary to create the offset parameter.
38 :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):
39 Prior over the variance parameter (default `None`).
40 :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):
41 Prior over the offset parameter (default `None`).
42 :attr:`active_dims` (list):
43 List of data dimensions to operate on.
44 `len(active_dims)` should equal `num_dimensions`.
45 """
46
47 def __init__(self, num_dimensions, variance_prior=None, active_dims=None):
48 super(LinearKernel, self).__init__(active_dims=active_dims)
49 self.register_parameter(name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(1)))
50 self.register_parameter(name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))
51 if variance_prior is not None:
52 self.register_prior("variance_prior", variance_prior, "variance")
53
54 @property
55 def variance(self):
56 return self._param_transform(self.raw_variance)
57
58 @variance.setter
59 def variance(self, value):
60 self._set_variance(value)
61
62 def _set_variance(self, value):
63 if not torch.is_tensor(value):
64 value = torch.tensor(value)
65 self.initialize(raw_variance=self._inv_param_transform(value))
66
67 def forward(self, x1, x2, diag=False, batch_dims=None, **params):
68 x1_ = x1 * self.variance.sqrt()
69 if batch_dims == (0, 2):
70 x1_ = x1_.view(x1_.size(0), x1_.size(1), -1, 1)
71 x1_ = x1_.permute(0, 2, 1, 3).contiguous()
72 x1_ = x1_.view(-1, x1_.size(-2), x1_.size(-1))
73
74 if x1.size() == x2.size() and torch.equal(x1, x2):
75 # Use RootLazyTensor when x1 == x2 for efficiency when composing
76 # with other kernels
77 prod = RootLazyTensor(x1_)
78
79 else:
80 x2_ = x2 * self.variance.sqrt()
81 if batch_dims == (0, 2):
82 x2_ = x2_.view(x2_.size(0), x2_.size(1), -1, 1)
83 x2_ = x2_.permute(0, 2, 1, 3).contiguous()
84 x2_ = x2_.view(-1, x2_.size(-2), x2_.size(-1))
85
86 prod = MatmulLazyTensor(x1_, x2_.transpose(2, 1))
87
88 return prod
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py
--- a/gpytorch/kernels/linear_kernel.py
+++ b/gpytorch/kernels/linear_kernel.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
import torch
+import warnings
from .kernel import Kernel
from ..lazy import MatmulLazyTensor, RootLazyTensor
@@ -12,13 +13,12 @@
.. math::
\begin{equation*}
- k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = (\mathbf{x_1} - \mathbf{o})^\top
- (\mathbf{x_2} - \mathbf{o}) + v.
+ k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = v\mathbf{x_1}^\top
+ \mathbf{x_2}.
\end{equation*}
where
- * :math:`\mathbf o` is an :attr:`offset` parameter.
* :math:`v` is a :attr:`variance` parameter.
@@ -32,24 +32,37 @@
:math:`O(nd)` time and space.
Args:
- :attr:`num_dimensions` (int):
- Number of data dimensions to expect. This
- is necessary to create the offset parameter.
:attr:`variance_prior` (:class:`gpytorch.priors.Prior`):
Prior over the variance parameter (default `None`).
- :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):
- Prior over the offset parameter (default `None`).
:attr:`active_dims` (list):
List of data dimensions to operate on.
`len(active_dims)` should equal `num_dimensions`.
"""
- def __init__(self, num_dimensions, variance_prior=None, active_dims=None):
+ def __init__(self, num_dimensions=None, offset_prior=None, variance_prior=None, active_dims=None):
super(LinearKernel, self).__init__(active_dims=active_dims)
+ if num_dimensions is not None:
+ warnings.warn(
+ "The `num_dimensions` argument is deprecated and no longer used.",
+ DeprecationWarning
+ )
+ self.register_parameter(
+ name="offset",
+ parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))
+ )
+ if offset_prior is not None:
+ warnings.warn(
+ "The `offset_prior` argument is deprecated and no longer used.",
+ DeprecationWarning
+ )
self.register_parameter(name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(1)))
- self.register_parameter(name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))
if variance_prior is not None:
- self.register_prior("variance_prior", variance_prior, "variance")
+ self.register_prior(
+ "variance_prior",
+ variance_prior,
+ lambda: self.variance,
+ lambda v: self._set_variance(v)
+ )
@property
def variance(self):
| {"golden_diff": "diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py\n--- a/gpytorch/kernels/linear_kernel.py\n+++ b/gpytorch/kernels/linear_kernel.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python3\n \n import torch\n+import warnings\n from .kernel import Kernel\n from ..lazy import MatmulLazyTensor, RootLazyTensor\n \n@@ -12,13 +13,12 @@\n \n .. math::\n \\begin{equation*}\n- k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = (\\mathbf{x_1} - \\mathbf{o})^\\top\n- (\\mathbf{x_2} - \\mathbf{o}) + v.\n+ k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = v\\mathbf{x_1}^\\top\n+ \\mathbf{x_2}.\n \\end{equation*}\n \n where\n \n- * :math:`\\mathbf o` is an :attr:`offset` parameter.\n * :math:`v` is a :attr:`variance` parameter.\n \n \n@@ -32,24 +32,37 @@\n :math:`O(nd)` time and space.\n \n Args:\n- :attr:`num_dimensions` (int):\n- Number of data dimensions to expect. This\n- is necessary to create the offset parameter.\n :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the variance parameter (default `None`).\n- :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):\n- Prior over the offset parameter (default `None`).\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n \n- def __init__(self, num_dimensions, variance_prior=None, active_dims=None):\n+ def __init__(self, num_dimensions=None, offset_prior=None, variance_prior=None, active_dims=None):\n super(LinearKernel, self).__init__(active_dims=active_dims)\n+ if num_dimensions is not None:\n+ warnings.warn(\n+ \"The `num_dimensions` argument is deprecated and no longer used.\",\n+ DeprecationWarning\n+ )\n+ self.register_parameter(\n+ name=\"offset\",\n+ parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))\n+ )\n+ if offset_prior is not None:\n+ warnings.warn(\n+ \"The `offset_prior` argument is deprecated and no longer used.\",\n+ DeprecationWarning\n+ )\n self.register_parameter(name=\"raw_variance\", parameter=torch.nn.Parameter(torch.zeros(1)))\n- self.register_parameter(name=\"offset\", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))\n if variance_prior is not None:\n- self.register_prior(\"variance_prior\", variance_prior, \"variance\")\n+ self.register_prior(\n+ \"variance_prior\",\n+ variance_prior,\n+ lambda: self.variance,\n+ lambda v: self._set_variance(v)\n+ )\n \n @property\n def variance(self):\n", "issue": "Linear kernel's variance prior is broken\nRepro: \r\n[linear_kernel_prior_issue.ipynb.txt](https://github.com/cornellius-gp/gpytorch/files/2979717/linear_kernel_prior_issue.ipynb.txt)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\nfrom .kernel import Kernel\nfrom ..lazy import MatmulLazyTensor, RootLazyTensor\n\n\nclass LinearKernel(Kernel):\n r\"\"\"\n Computes a covariance matrix based on the Linear kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n \\begin{equation*}\n k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = (\\mathbf{x_1} - \\mathbf{o})^\\top\n (\\mathbf{x_2} - \\mathbf{o}) + v.\n \\end{equation*}\n\n where\n\n * :math:`\\mathbf o` is an :attr:`offset` parameter.\n * :math:`v` is a :attr:`variance` parameter.\n\n\n .. note::\n\n To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a\n :class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form\n :math:`K = XX^{\\top}` and :math:`K = XZ^{\\top}`. 
This makes inference\n efficient because a matrix-vector product :math:`Kv` can be computed as\n :math:`Kv=X(X^{\\top}v)`, where the base multiply :math:`Xv` takes only\n :math:`O(nd)` time and space.\n\n Args:\n :attr:`num_dimensions` (int):\n Number of data dimensions to expect. This\n is necessary to create the offset parameter.\n :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the variance parameter (default `None`).\n :attr:`offset_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the offset parameter (default `None`).\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n\n def __init__(self, num_dimensions, variance_prior=None, active_dims=None):\n super(LinearKernel, self).__init__(active_dims=active_dims)\n self.register_parameter(name=\"raw_variance\", parameter=torch.nn.Parameter(torch.zeros(1)))\n self.register_parameter(name=\"offset\", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)))\n if variance_prior is not None:\n self.register_prior(\"variance_prior\", variance_prior, \"variance\")\n\n @property\n def variance(self):\n return self._param_transform(self.raw_variance)\n\n @variance.setter\n def variance(self, value):\n self._set_variance(value)\n\n def _set_variance(self, value):\n if not torch.is_tensor(value):\n value = torch.tensor(value)\n self.initialize(raw_variance=self._inv_param_transform(value))\n\n def forward(self, x1, x2, diag=False, batch_dims=None, **params):\n x1_ = x1 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x1_ = x1_.view(x1_.size(0), x1_.size(1), -1, 1)\n x1_ = x1_.permute(0, 2, 1, 3).contiguous()\n x1_ = x1_.view(-1, x1_.size(-2), x1_.size(-1))\n\n if x1.size() == x2.size() and torch.equal(x1, x2):\n # Use RootLazyTensor when x1 == x2 for efficiency when composing\n # with other kernels\n prod = RootLazyTensor(x1_)\n\n else:\n x2_ = x2 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x2_ = x2_.view(x2_.size(0), x2_.size(1), -1, 1)\n x2_ = x2_.permute(0, 2, 1, 3).contiguous()\n x2_ = x2_.view(-1, x2_.size(-2), x2_.size(-1))\n\n prod = MatmulLazyTensor(x1_, x2_.transpose(2, 1))\n\n return prod\n", "path": "gpytorch/kernels/linear_kernel.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\nimport warnings\nfrom .kernel import Kernel\nfrom ..lazy import MatmulLazyTensor, RootLazyTensor\n\n\nclass LinearKernel(Kernel):\n r\"\"\"\n Computes a covariance matrix based on the Linear kernel\n between inputs :math:`\\mathbf{x_1}` and :math:`\\mathbf{x_2}`:\n\n .. math::\n \\begin{equation*}\n k_\\text{Linear}(\\mathbf{x_1}, \\mathbf{x_2}) = v\\mathbf{x_1}^\\top\n \\mathbf{x_2}.\n \\end{equation*}\n\n where\n\n * :math:`v` is a :attr:`variance` parameter.\n\n\n .. note::\n\n To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a\n :class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form\n :math:`K = XX^{\\top}` and :math:`K = XZ^{\\top}`. 
This makes inference\n efficient because a matrix-vector product :math:`Kv` can be computed as\n :math:`Kv=X(X^{\\top}v)`, where the base multiply :math:`Xv` takes only\n :math:`O(nd)` time and space.\n\n Args:\n :attr:`variance_prior` (:class:`gpytorch.priors.Prior`):\n Prior over the variance parameter (default `None`).\n :attr:`active_dims` (list):\n List of data dimensions to operate on.\n `len(active_dims)` should equal `num_dimensions`.\n \"\"\"\n\n def __init__(self, num_dimensions=None, offset_prior=None, variance_prior=None, active_dims=None):\n super(LinearKernel, self).__init__(active_dims=active_dims)\n if num_dimensions is not None:\n warnings.warn(\n \"The `num_dimensions` argument is deprecated and no longer used.\",\n DeprecationWarning\n )\n self.register_parameter(\n name=\"offset\",\n parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))\n )\n if offset_prior is not None:\n warnings.warn(\n \"The `offset_prior` argument is deprecated and no longer used.\",\n DeprecationWarning\n )\n self.register_parameter(name=\"raw_variance\", parameter=torch.nn.Parameter(torch.zeros(1)))\n if variance_prior is not None:\n self.register_prior(\n \"variance_prior\",\n variance_prior,\n lambda: self.variance,\n lambda v: self._set_variance(v)\n )\n\n @property\n def variance(self):\n return self._param_transform(self.raw_variance)\n\n @variance.setter\n def variance(self, value):\n self._set_variance(value)\n\n def _set_variance(self, value):\n if not torch.is_tensor(value):\n value = torch.tensor(value)\n self.initialize(raw_variance=self._inv_param_transform(value))\n\n def forward(self, x1, x2, diag=False, batch_dims=None, **params):\n x1_ = x1 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x1_ = x1_.view(x1_.size(0), x1_.size(1), -1, 1)\n x1_ = x1_.permute(0, 2, 1, 3).contiguous()\n x1_ = x1_.view(-1, x1_.size(-2), x1_.size(-1))\n\n if x1.size() == x2.size() and torch.equal(x1, x2):\n # Use RootLazyTensor when x1 == x2 for efficiency when composing\n # with other kernels\n prod = RootLazyTensor(x1_)\n\n else:\n x2_ = x2 * self.variance.sqrt()\n if batch_dims == (0, 2):\n x2_ = x2_.view(x2_.size(0), x2_.size(1), -1, 1)\n x2_ = x2_.permute(0, 2, 1, 3).contiguous()\n x2_ = x2_.view(-1, x2_.size(-2), x2_.size(-1))\n\n prod = MatmulLazyTensor(x1_, x2_.transpose(2, 1))\n\n return prod\n", "path": "gpytorch/kernels/linear_kernel.py"}]} | 1,446 | 717 |
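In the gpytorch record above, the fix registers the prior against the transformed `variance` property via closures rather than the string name of a non-existent attribute. A rough usage sketch assuming the post-fix API (the Gamma prior and the random data are arbitrary choices, not from the repository):

```
import torch
import gpytorch

# The patched constructor accepts variance_prior directly; num_dimensions is no longer needed.
kernel = gpytorch.kernels.LinearKernel(
    variance_prior=gpytorch.priors.GammaPrior(2.0, 2.0),
)

x = torch.randn(5, 3)
covar = kernel(x, x)      # lazy covariance over the 5 inputs
print(kernel.variance)    # positive-transformed variance parameter the prior now sees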
gh_patches_debug_16953 | rasdani/github-patches | git_diff | saleor__saleor-1503 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide 'Clear filters' when list view has page in GET
### What I'm trying to achieve
Clear filters button should appear only when some filters are applied.
### Steps to reproduce the problem
1. Go to dashboard or store front page with filters and pagination and move to next page (/dashboard/products/?page=2)
2. Clear filters button is present in filters card
### What I expected to happen
Clear filters button should be only present if some filters are applied.
### What happened instead/how it failed
Clear filter button is present.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/filters.py`
Content:
```
1 from django_filters import FilterSet
2
3
4 class SortedFilterSet(FilterSet):
5 '''
6 Base class for filtersets used in dashboard views. Adds flag
7 is_bound_unsorted to indicate if FilterSet has data from filters other
8 than sort_by.
9 '''
10 def __init__(self, data, *args, **kwargs):
11 data_copy = data.copy() if data else None
12 self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)
13 super().__init__(data, *args, **kwargs)
14
15 def set_is_bound_unsorted(self, data_copy):
16 if data_copy and data_copy.get('sort_by', None):
17 del data_copy['sort_by']
18 if data_copy:
19 return True
20 return False
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/core/filters.py b/saleor/core/filters.py
--- a/saleor/core/filters.py
+++ b/saleor/core/filters.py
@@ -5,16 +5,11 @@
'''
Base class for filtersets used in dashboard views. Adds flag
is_bound_unsorted to indicate if FilterSet has data from filters other
- than sort_by.
+ than sort_by or page.
'''
def __init__(self, data, *args, **kwargs):
- data_copy = data.copy() if data else None
- self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)
- super().__init__(data, *args, **kwargs)
+ self.is_bound_unsorted = self.set_is_bound_unsorted(data)
+ super(SortedFilterSet, self).__init__(data, *args, **kwargs)
- def set_is_bound_unsorted(self, data_copy):
- if data_copy and data_copy.get('sort_by', None):
- del data_copy['sort_by']
- if data_copy:
- return True
- return False
+ def set_is_bound_unsorted(self, data):
+ return any([key not in {'sort_by', 'page'} for key in data.keys()])
| {"golden_diff": "diff --git a/saleor/core/filters.py b/saleor/core/filters.py\n--- a/saleor/core/filters.py\n+++ b/saleor/core/filters.py\n@@ -5,16 +5,11 @@\n '''\n Base class for filtersets used in dashboard views. Adds flag\n is_bound_unsorted to indicate if FilterSet has data from filters other\n- than sort_by.\n+ than sort_by or page.\n '''\n def __init__(self, data, *args, **kwargs):\n- data_copy = data.copy() if data else None\n- self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)\n- super().__init__(data, *args, **kwargs)\n+ self.is_bound_unsorted = self.set_is_bound_unsorted(data)\n+ super(SortedFilterSet, self).__init__(data, *args, **kwargs)\n \n- def set_is_bound_unsorted(self, data_copy):\n- if data_copy and data_copy.get('sort_by', None):\n- del data_copy['sort_by']\n- if data_copy:\n- return True\n- return False\n+ def set_is_bound_unsorted(self, data):\n+ return any([key not in {'sort_by', 'page'} for key in data.keys()])\n", "issue": "Hide 'Clear filters' when list view has page in GET\n### What I'm trying to achieve\r\n\r\nClear filters button should appear only when some filters are applied.\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Go to dashboard or store front page with filters and pagination and move to next page (/dashboard/products/?page=2)\r\n2. Clear filters button is present in filters card\r\n\r\n### What I expected to happen\r\n\r\nClear filters button should be only present if some filters are applied.\r\n\r\n### What happened instead/how it failed\r\n\r\nClear filter button is present.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django_filters import FilterSet\n\n\nclass SortedFilterSet(FilterSet):\n '''\n Base class for filtersets used in dashboard views. Adds flag\n is_bound_unsorted to indicate if FilterSet has data from filters other\n than sort_by.\n '''\n def __init__(self, data, *args, **kwargs):\n data_copy = data.copy() if data else None\n self.is_bound_unsorted = self.set_is_bound_unsorted(data_copy)\n super().__init__(data, *args, **kwargs)\n\n def set_is_bound_unsorted(self, data_copy):\n if data_copy and data_copy.get('sort_by', None):\n del data_copy['sort_by']\n if data_copy:\n return True\n return False\n", "path": "saleor/core/filters.py"}], "after_files": [{"content": "from django_filters import FilterSet\n\n\nclass SortedFilterSet(FilterSet):\n '''\n Base class for filtersets used in dashboard views. Adds flag\n is_bound_unsorted to indicate if FilterSet has data from filters other\n than sort_by or page.\n '''\n def __init__(self, data, *args, **kwargs):\n self.is_bound_unsorted = self.set_is_bound_unsorted(data)\n super(SortedFilterSet, self).__init__(data, *args, **kwargs)\n\n def set_is_bound_unsorted(self, data):\n return any([key not in {'sort_by', 'page'} for key in data.keys()])\n", "path": "saleor/core/filters.py"}]} | 649 | 282 |
gh_patches_debug_54080 | rasdani/github-patches | git_diff | e-valuation__EvaP-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning in courses with small number of participants
In courses with 5 or less participants a warning should be shown above the course's questionnaire:
_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/student/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.auth import participant_required
8 from evap.evaluation.models import Course, Semester
9 from evap.evaluation.tools import STUDENT_STATES_ORDERED
10
11 from evap.student.forms import QuestionsForm
12 from evap.student.tools import make_form_identifier
13
14 from collections import OrderedDict
15
16 @participant_required
17 def index(request):
18 # retrieve all courses, where the user is a participant and that are not new
19 courses = list(set(Course.objects.filter(participants=request.user).exclude(state="new")))
20 voted_courses = list(set(Course.objects.filter(voters=request.user)))
21 due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))
22
23 sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)
24 courses.sort(key=sorter)
25
26 semesters = Semester.objects.all()
27 semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]
28
29 template_data = dict(
30 semester_list=semester_list,
31 voted_courses=voted_courses,
32 due_courses=due_courses,
33 can_download_grades=request.user.can_download_grades,
34 )
35 return render(request, "student_index.html", template_data)
36
37
38 def vote_preview(request, course):
39 """
40 Renders a preview of the voting page for the given course.
41 Not used by the student app itself, but by staff and contributor.
42 """
43 form_groups = helper_create_voting_form_groups(request, course.contributions.all())
44 course_form_group = form_groups.pop(course.general_contribution)
45 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())
46
47 template_data = dict(
48 errors_exist=False,
49 course_form_group=course_form_group,
50 contributor_form_groups=contributor_form_groups,
51 course=course,
52 preview=True)
53 return render(request, "student_vote.html", template_data)
54
55
56 @participant_required
57 def vote(request, course_id):
58 # retrieve course and make sure that the user is allowed to vote
59 course = get_object_or_404(Course, id=course_id)
60 if not course.can_user_vote(request.user):
61 raise PermissionDenied
62
63 # prevent a user from voting on themselves.
64 contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()
65 form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)
66
67 if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):
68 errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())
69
70 course_form_group = form_groups.pop(course.general_contribution)
71
72 contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())
73
74 template_data = dict(
75 errors_exist=errors_exist,
76 course_form_group=course_form_group,
77 contributor_form_groups=contributor_form_groups,
78 course=course,
79 preview=False)
80 return render(request, "student_vote.html", template_data)
81
82 # all forms are valid, begin vote operation
83 with transaction.atomic():
84 for contribution, form_group in form_groups.items():
85 for questionnaire_form in form_group:
86 questionnaire = questionnaire_form.questionnaire
87 for question in questionnaire.question_set.all():
88 identifier = make_form_identifier(contribution, questionnaire, question)
89 value = questionnaire_form.cleaned_data.get(identifier)
90
91 if question.is_text_question:
92 if value:
93 question.answer_class.objects.create(
94 contribution=contribution,
95 question=question,
96 answer=value)
97 else:
98 if value != 6:
99 answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)
100 answer_counter.add_vote()
101 answer_counter.save()
102
103 # remember that the user voted already
104 course.voters.add(request.user)
105
106 course.was_evaluated(request)
107
108 messages.success(request, _("Your vote was recorded."))
109 return redirect('student:index')
110
111
112 def helper_create_form_group(request, contribution):
113 return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())
114
115 def helper_create_voting_form_groups(request, contributions):
116 form_groups = OrderedDict()
117 for contribution in contributions:
118 form_groups[contribution] = helper_create_form_group(request, contribution)
119 return form_groups
120
121 def helper_has_errors(form_group):
122 return any(form.errors for form in form_group)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -76,6 +76,7 @@
course_form_group=course_form_group,
contributor_form_groups=contributor_form_groups,
course=course,
+ participants_warning=course.num_participants <= 5,
preview=False)
return render(request, "student_vote.html", template_data)
| {"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -76,6 +76,7 @@\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n+ participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n", "issue": "Warning in courses with small number of participants\nIn courses with 5 or less participants a warning should be shown above the course's questionnaire:\n\n_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = 
course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n", "path": "evap/student/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = 
Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in 
contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n", "path": "evap/student/views.py"}]} | 1,650 | 102 |
gh_patches_debug_18628 | rasdani/github-patches | git_diff | frappe__frappe-23912 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feat: allow setting a custom http status code for redirects
Requires user to set `redirect_http_status` in the hook or redirect doc
Resolves #23762
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/website/path_resolver.py`
Content:
```
1 import re
2
3 import click
4 from werkzeug.routing import Rule
5
6 import frappe
7 from frappe.website.page_renderers.document_page import DocumentPage
8 from frappe.website.page_renderers.list_page import ListPage
9 from frappe.website.page_renderers.not_found_page import NotFoundPage
10 from frappe.website.page_renderers.print_page import PrintPage
11 from frappe.website.page_renderers.redirect_page import RedirectPage
12 from frappe.website.page_renderers.static_page import StaticPage
13 from frappe.website.page_renderers.template_page import TemplatePage
14 from frappe.website.page_renderers.web_form import WebFormPage
15 from frappe.website.router import evaluate_dynamic_routes
16 from frappe.website.utils import can_cache, get_home_page
17
18
19 class PathResolver:
20 __slots__ = ("path", "http_status_code")
21
22 def __init__(self, path, http_status_code=None):
23 self.path = path.strip("/ ")
24 self.http_status_code = http_status_code
25
26 def resolve(self):
27 """Return endpoint and a renderer instance that can render the endpoint."""
28 request = frappe._dict()
29 if hasattr(frappe.local, "request"):
30 request = frappe.local.request or request
31
32 # check if the request url is in 404 list
33 if request.url and can_cache() and frappe.cache.hget("website_404", request.url):
34 return self.path, NotFoundPage(self.path)
35
36 try:
37 resolve_redirect(self.path, request.query_string)
38 except frappe.Redirect as e:
39 return frappe.flags.redirect_location, RedirectPage(self.path, e.http_status_code)
40
41 endpoint = resolve_path(self.path)
42
43 # WARN: Hardcoded for better performance
44 if endpoint == "app":
45 return endpoint, TemplatePage(endpoint, self.http_status_code)
46
47 custom_renderers = self.get_custom_page_renderers()
48 renderers = custom_renderers + [
49 StaticPage,
50 WebFormPage,
51 DocumentPage,
52 TemplatePage,
53 ListPage,
54 PrintPage,
55 ]
56
57 for renderer in renderers:
58 renderer_instance = renderer(endpoint, self.http_status_code)
59 if renderer_instance.can_render():
60 return endpoint, renderer_instance
61
62 return endpoint, NotFoundPage(endpoint)
63
64 def is_valid_path(self):
65 _endpoint, renderer_instance = self.resolve()
66 return not isinstance(renderer_instance, NotFoundPage)
67
68 @staticmethod
69 def get_custom_page_renderers():
70 custom_renderers = []
71 for renderer_path in frappe.get_hooks("page_renderer") or []:
72 try:
73 renderer = frappe.get_attr(renderer_path)
74 if not hasattr(renderer, "can_render"):
75 click.echo(f"{renderer.__name__} does not have can_render method")
76 continue
77 if not hasattr(renderer, "render"):
78 click.echo(f"{renderer.__name__} does not have render method")
79 continue
80
81 custom_renderers.append(renderer)
82
83 except Exception:
84 click.echo(f"Failed to load page renderer. Import path: {renderer_path}")
85
86 return custom_renderers
87
88
89 def resolve_redirect(path, query_string=None):
90 """
91 Resolve redirects from hooks
92
93 Example:
94
95 website_redirect = [
96 # absolute location
97 {"source": "/from", "target": "https://mysite/from"},
98
99 # relative location
100 {"source": "/from", "target": "/main"},
101
102 # use regex
103 {"source": r"/from/(.*)", "target": r"/main/\1"}
104 # use r as a string prefix if you use regex groups or want to escape any string literal
105 ]
106 """
107 redirects = frappe.get_hooks("website_redirects")
108 redirects += frappe.get_all(
109 "Website Route Redirect", ["source", "target", "redirect_http_status"], order_by=None
110 )
111
112 if not redirects:
113 return
114
115 redirect_to = frappe.cache.hget("website_redirects", path)
116
117 if redirect_to:
118 if isinstance(redirect_to, dict):
119 frappe.flags.redirect_location = redirect_to["path"]
120 raise frappe.Redirect(redirect_to["status_code"])
121 frappe.flags.redirect_location = redirect_to
122 raise frappe.Redirect
123
124 for rule in redirects:
125 pattern = rule["source"].strip("/ ") + "$"
126 path_to_match = path
127 if rule.get("match_with_query_string"):
128 path_to_match = path + "?" + frappe.safe_decode(query_string)
129
130 try:
131 match = re.match(pattern, path_to_match)
132 except re.error:
133 frappe.log_error("Broken Redirect: " + pattern)
134
135 if match:
136 redirect_to = re.sub(pattern, rule["target"], path_to_match)
137 frappe.flags.redirect_location = redirect_to
138 status_code = rule.get("redirect_http_status", 301)
139 frappe.cache.hset(
140 "website_redirects", path_to_match, {"path": redirect_to, "status_code": status_code}
141 )
142 raise frappe.Redirect(status_code)
143
144
145 def resolve_path(path):
146 if not path:
147 path = "index"
148
149 if path.endswith(".html"):
150 path = path[:-5]
151
152 if path == "index":
153 path = get_home_page()
154
155 frappe.local.path = path
156
157 if path != "index":
158 path = resolve_from_map(path)
159
160 return path
161
162
163 def resolve_from_map(path):
164 """transform dynamic route to a static one from hooks and route defined in doctype"""
165 rules = [
166 Rule(r["from_route"], endpoint=r["to_route"], defaults=r.get("defaults"))
167 for r in get_website_rules()
168 ]
169
170 return evaluate_dynamic_routes(rules, path) or path
171
172
173 def get_website_rules():
174 """Get website route rules from hooks and DocType route"""
175
176 def _get():
177 rules = frappe.get_hooks("website_route_rules")
178 for d in frappe.get_all("DocType", "name, route", dict(has_web_view=1)):
179 if d.route:
180 rules.append(dict(from_route="/" + d.route.strip("/"), to_route=d.name))
181
182 return rules
183
184 if frappe.local.dev_server:
185 # dont cache in development
186 return _get()
187
188 return frappe.cache.get_value("website_route_rules", _get)
189
```
Path: `frappe/website/doctype/website_route_redirect/website_route_redirect.py`
Content:
```
1 # Copyright (c) 2019, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 # import frappe
5 from frappe.model.document import Document
6
7
8 class WebsiteRouteRedirect(Document):
9 # begin: auto-generated types
10 # This code is auto-generated. Do not modify anything in this block.
11
12 from typing import TYPE_CHECKING
13
14 if TYPE_CHECKING:
15 from frappe.types import DF
16
17 parent: DF.Data
18 parentfield: DF.Data
19 parenttype: DF.Data
20 redirect_http_status: DF.Int
21 source: DF.SmallText
22 target: DF.SmallText
23 # end: auto-generated types
24
25 pass
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/website/doctype/website_route_redirect/website_route_redirect.py b/frappe/website/doctype/website_route_redirect/website_route_redirect.py
--- a/frappe/website/doctype/website_route_redirect/website_route_redirect.py
+++ b/frappe/website/doctype/website_route_redirect/website_route_redirect.py
@@ -17,7 +17,7 @@
parent: DF.Data
parentfield: DF.Data
parenttype: DF.Data
- redirect_http_status: DF.Int
+ redirect_http_status: DF.Literal["301", "302", "307", "308"]
source: DF.SmallText
target: DF.SmallText
# end: auto-generated types
diff --git a/frappe/website/path_resolver.py b/frappe/website/path_resolver.py
--- a/frappe/website/path_resolver.py
+++ b/frappe/website/path_resolver.py
@@ -135,7 +135,7 @@
if match:
redirect_to = re.sub(pattern, rule["target"], path_to_match)
frappe.flags.redirect_location = redirect_to
- status_code = rule.get("redirect_http_status", 301)
+ status_code = rule.get("redirect_http_status") or 301
frappe.cache.hset(
"website_redirects", path_to_match, {"path": redirect_to, "status_code": status_code}
)
| {"golden_diff": "diff --git a/frappe/website/doctype/website_route_redirect/website_route_redirect.py b/frappe/website/doctype/website_route_redirect/website_route_redirect.py\n--- a/frappe/website/doctype/website_route_redirect/website_route_redirect.py\n+++ b/frappe/website/doctype/website_route_redirect/website_route_redirect.py\n@@ -17,7 +17,7 @@\n \t\tparent: DF.Data\n \t\tparentfield: DF.Data\n \t\tparenttype: DF.Data\n-\t\tredirect_http_status: DF.Int\n+\t\tredirect_http_status: DF.Literal[\"301\", \"302\", \"307\", \"308\"]\n \t\tsource: DF.SmallText\n \t\ttarget: DF.SmallText\n \t# end: auto-generated types\ndiff --git a/frappe/website/path_resolver.py b/frappe/website/path_resolver.py\n--- a/frappe/website/path_resolver.py\n+++ b/frappe/website/path_resolver.py\n@@ -135,7 +135,7 @@\n \t\tif match:\n \t\t\tredirect_to = re.sub(pattern, rule[\"target\"], path_to_match)\n \t\t\tfrappe.flags.redirect_location = redirect_to\n-\t\t\tstatus_code = rule.get(\"redirect_http_status\", 301)\n+\t\t\tstatus_code = rule.get(\"redirect_http_status\") or 301\n \t\t\tfrappe.cache.hset(\n \t\t\t\t\"website_redirects\", path_to_match, {\"path\": redirect_to, \"status_code\": status_code}\n \t\t\t)\n", "issue": "feat: allow setting a custom http status code for redirects\nRequires user to set `redirect_http_status` in the hook or redirect doc\n\nResolves #23762\n\n", "before_files": [{"content": "import re\n\nimport click\nfrom werkzeug.routing import Rule\n\nimport frappe\nfrom frappe.website.page_renderers.document_page import DocumentPage\nfrom frappe.website.page_renderers.list_page import ListPage\nfrom frappe.website.page_renderers.not_found_page import NotFoundPage\nfrom frappe.website.page_renderers.print_page import PrintPage\nfrom frappe.website.page_renderers.redirect_page import RedirectPage\nfrom frappe.website.page_renderers.static_page import StaticPage\nfrom frappe.website.page_renderers.template_page import TemplatePage\nfrom frappe.website.page_renderers.web_form import WebFormPage\nfrom frappe.website.router import evaluate_dynamic_routes\nfrom frappe.website.utils import can_cache, get_home_page\n\n\nclass PathResolver:\n\t__slots__ = (\"path\", \"http_status_code\")\n\n\tdef __init__(self, path, http_status_code=None):\n\t\tself.path = path.strip(\"/ \")\n\t\tself.http_status_code = http_status_code\n\n\tdef resolve(self):\n\t\t\"\"\"Return endpoint and a renderer instance that can render the endpoint.\"\"\"\n\t\trequest = frappe._dict()\n\t\tif hasattr(frappe.local, \"request\"):\n\t\t\trequest = frappe.local.request or request\n\n\t\t# check if the request url is in 404 list\n\t\tif request.url and can_cache() and frappe.cache.hget(\"website_404\", request.url):\n\t\t\treturn self.path, NotFoundPage(self.path)\n\n\t\ttry:\n\t\t\tresolve_redirect(self.path, request.query_string)\n\t\texcept frappe.Redirect as e:\n\t\t\treturn frappe.flags.redirect_location, RedirectPage(self.path, e.http_status_code)\n\n\t\tendpoint = resolve_path(self.path)\n\n\t\t# WARN: Hardcoded for better performance\n\t\tif endpoint == \"app\":\n\t\t\treturn endpoint, TemplatePage(endpoint, self.http_status_code)\n\n\t\tcustom_renderers = self.get_custom_page_renderers()\n\t\trenderers = custom_renderers + [\n\t\t\tStaticPage,\n\t\t\tWebFormPage,\n\t\t\tDocumentPage,\n\t\t\tTemplatePage,\n\t\t\tListPage,\n\t\t\tPrintPage,\n\t\t]\n\n\t\tfor renderer in renderers:\n\t\t\trenderer_instance = renderer(endpoint, self.http_status_code)\n\t\t\tif renderer_instance.can_render():\n\t\t\t\treturn endpoint, 
renderer_instance\n\n\t\treturn endpoint, NotFoundPage(endpoint)\n\n\tdef is_valid_path(self):\n\t\t_endpoint, renderer_instance = self.resolve()\n\t\treturn not isinstance(renderer_instance, NotFoundPage)\n\n\t@staticmethod\n\tdef get_custom_page_renderers():\n\t\tcustom_renderers = []\n\t\tfor renderer_path in frappe.get_hooks(\"page_renderer\") or []:\n\t\t\ttry:\n\t\t\t\trenderer = frappe.get_attr(renderer_path)\n\t\t\t\tif not hasattr(renderer, \"can_render\"):\n\t\t\t\t\tclick.echo(f\"{renderer.__name__} does not have can_render method\")\n\t\t\t\t\tcontinue\n\t\t\t\tif not hasattr(renderer, \"render\"):\n\t\t\t\t\tclick.echo(f\"{renderer.__name__} does not have render method\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tcustom_renderers.append(renderer)\n\n\t\t\texcept Exception:\n\t\t\t\tclick.echo(f\"Failed to load page renderer. Import path: {renderer_path}\")\n\n\t\treturn custom_renderers\n\n\ndef resolve_redirect(path, query_string=None):\n\t\"\"\"\n\tResolve redirects from hooks\n\n\tExample:\n\n\t website_redirect = [\n\t # absolute location\n\t {\"source\": \"/from\", \"target\": \"https://mysite/from\"},\n\n\t # relative location\n\t {\"source\": \"/from\", \"target\": \"/main\"},\n\n\t # use regex\n\t {\"source\": r\"/from/(.*)\", \"target\": r\"/main/\\1\"}\n\t # use r as a string prefix if you use regex groups or want to escape any string literal\n\t ]\n\t\"\"\"\n\tredirects = frappe.get_hooks(\"website_redirects\")\n\tredirects += frappe.get_all(\n\t\t\"Website Route Redirect\", [\"source\", \"target\", \"redirect_http_status\"], order_by=None\n\t)\n\n\tif not redirects:\n\t\treturn\n\n\tredirect_to = frappe.cache.hget(\"website_redirects\", path)\n\n\tif redirect_to:\n\t\tif isinstance(redirect_to, dict):\n\t\t\tfrappe.flags.redirect_location = redirect_to[\"path\"]\n\t\t\traise frappe.Redirect(redirect_to[\"status_code\"])\n\t\tfrappe.flags.redirect_location = redirect_to\n\t\traise frappe.Redirect\n\n\tfor rule in redirects:\n\t\tpattern = rule[\"source\"].strip(\"/ \") + \"$\"\n\t\tpath_to_match = path\n\t\tif rule.get(\"match_with_query_string\"):\n\t\t\tpath_to_match = path + \"?\" + frappe.safe_decode(query_string)\n\n\t\ttry:\n\t\t\tmatch = re.match(pattern, path_to_match)\n\t\texcept re.error:\n\t\t\tfrappe.log_error(\"Broken Redirect: \" + pattern)\n\n\t\tif match:\n\t\t\tredirect_to = re.sub(pattern, rule[\"target\"], path_to_match)\n\t\t\tfrappe.flags.redirect_location = redirect_to\n\t\t\tstatus_code = rule.get(\"redirect_http_status\", 301)\n\t\t\tfrappe.cache.hset(\n\t\t\t\t\"website_redirects\", path_to_match, {\"path\": redirect_to, \"status_code\": status_code}\n\t\t\t)\n\t\t\traise frappe.Redirect(status_code)\n\n\ndef resolve_path(path):\n\tif not path:\n\t\tpath = \"index\"\n\n\tif path.endswith(\".html\"):\n\t\tpath = path[:-5]\n\n\tif path == \"index\":\n\t\tpath = get_home_page()\n\n\tfrappe.local.path = path\n\n\tif path != \"index\":\n\t\tpath = resolve_from_map(path)\n\n\treturn path\n\n\ndef resolve_from_map(path):\n\t\"\"\"transform dynamic route to a static one from hooks and route defined in doctype\"\"\"\n\trules = [\n\t\tRule(r[\"from_route\"], endpoint=r[\"to_route\"], defaults=r.get(\"defaults\"))\n\t\tfor r in get_website_rules()\n\t]\n\n\treturn evaluate_dynamic_routes(rules, path) or path\n\n\ndef get_website_rules():\n\t\"\"\"Get website route rules from hooks and DocType route\"\"\"\n\n\tdef _get():\n\t\trules = frappe.get_hooks(\"website_route_rules\")\n\t\tfor d in frappe.get_all(\"DocType\", \"name, route\", 
dict(has_web_view=1)):\n\t\t\tif d.route:\n\t\t\t\trules.append(dict(from_route=\"/\" + d.route.strip(\"/\"), to_route=d.name))\n\n\t\treturn rules\n\n\tif frappe.local.dev_server:\n\t\t# dont cache in development\n\t\treturn _get()\n\n\treturn frappe.cache.get_value(\"website_route_rules\", _get)\n", "path": "frappe/website/path_resolver.py"}, {"content": "# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\n# import frappe\nfrom frappe.model.document import Document\n\n\nclass WebsiteRouteRedirect(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tparent: DF.Data\n\t\tparentfield: DF.Data\n\t\tparenttype: DF.Data\n\t\tredirect_http_status: DF.Int\n\t\tsource: DF.SmallText\n\t\ttarget: DF.SmallText\n\t# end: auto-generated types\n\n\tpass\n", "path": "frappe/website/doctype/website_route_redirect/website_route_redirect.py"}], "after_files": [{"content": "import re\n\nimport click\nfrom werkzeug.routing import Rule\n\nimport frappe\nfrom frappe.website.page_renderers.document_page import DocumentPage\nfrom frappe.website.page_renderers.list_page import ListPage\nfrom frappe.website.page_renderers.not_found_page import NotFoundPage\nfrom frappe.website.page_renderers.print_page import PrintPage\nfrom frappe.website.page_renderers.redirect_page import RedirectPage\nfrom frappe.website.page_renderers.static_page import StaticPage\nfrom frappe.website.page_renderers.template_page import TemplatePage\nfrom frappe.website.page_renderers.web_form import WebFormPage\nfrom frappe.website.router import evaluate_dynamic_routes\nfrom frappe.website.utils import can_cache, get_home_page\n\n\nclass PathResolver:\n\t__slots__ = (\"path\", \"http_status_code\")\n\n\tdef __init__(self, path, http_status_code=None):\n\t\tself.path = path.strip(\"/ \")\n\t\tself.http_status_code = http_status_code\n\n\tdef resolve(self):\n\t\t\"\"\"Return endpoint and a renderer instance that can render the endpoint.\"\"\"\n\t\trequest = frappe._dict()\n\t\tif hasattr(frappe.local, \"request\"):\n\t\t\trequest = frappe.local.request or request\n\n\t\t# check if the request url is in 404 list\n\t\tif request.url and can_cache() and frappe.cache.hget(\"website_404\", request.url):\n\t\t\treturn self.path, NotFoundPage(self.path)\n\n\t\ttry:\n\t\t\tresolve_redirect(self.path, request.query_string)\n\t\texcept frappe.Redirect as e:\n\t\t\treturn frappe.flags.redirect_location, RedirectPage(self.path, e.http_status_code)\n\n\t\tendpoint = resolve_path(self.path)\n\n\t\t# WARN: Hardcoded for better performance\n\t\tif endpoint == \"app\":\n\t\t\treturn endpoint, TemplatePage(endpoint, self.http_status_code)\n\n\t\tcustom_renderers = self.get_custom_page_renderers()\n\t\trenderers = custom_renderers + [\n\t\t\tStaticPage,\n\t\t\tWebFormPage,\n\t\t\tDocumentPage,\n\t\t\tTemplatePage,\n\t\t\tListPage,\n\t\t\tPrintPage,\n\t\t]\n\n\t\tfor renderer in renderers:\n\t\t\trenderer_instance = renderer(endpoint, self.http_status_code)\n\t\t\tif renderer_instance.can_render():\n\t\t\t\treturn endpoint, renderer_instance\n\n\t\treturn endpoint, NotFoundPage(endpoint)\n\n\tdef is_valid_path(self):\n\t\t_endpoint, renderer_instance = self.resolve()\n\t\treturn not isinstance(renderer_instance, NotFoundPage)\n\n\t@staticmethod\n\tdef get_custom_page_renderers():\n\t\tcustom_renderers = []\n\t\tfor renderer_path in 
frappe.get_hooks(\"page_renderer\") or []:\n\t\t\ttry:\n\t\t\t\trenderer = frappe.get_attr(renderer_path)\n\t\t\t\tif not hasattr(renderer, \"can_render\"):\n\t\t\t\t\tclick.echo(f\"{renderer.__name__} does not have can_render method\")\n\t\t\t\t\tcontinue\n\t\t\t\tif not hasattr(renderer, \"render\"):\n\t\t\t\t\tclick.echo(f\"{renderer.__name__} does not have render method\")\n\t\t\t\t\tcontinue\n\n\t\t\t\tcustom_renderers.append(renderer)\n\n\t\t\texcept Exception:\n\t\t\t\tclick.echo(f\"Failed to load page renderer. Import path: {renderer_path}\")\n\n\t\treturn custom_renderers\n\n\ndef resolve_redirect(path, query_string=None):\n\t\"\"\"\n\tResolve redirects from hooks\n\n\tExample:\n\n\t website_redirect = [\n\t # absolute location\n\t {\"source\": \"/from\", \"target\": \"https://mysite/from\"},\n\n\t # relative location\n\t {\"source\": \"/from\", \"target\": \"/main\"},\n\n\t # use regex\n\t {\"source\": r\"/from/(.*)\", \"target\": r\"/main/\\1\"}\n\t # use r as a string prefix if you use regex groups or want to escape any string literal\n\t ]\n\t\"\"\"\n\tredirects = frappe.get_hooks(\"website_redirects\")\n\tredirects += frappe.get_all(\n\t\t\"Website Route Redirect\", [\"source\", \"target\", \"redirect_http_status\"], order_by=None\n\t)\n\n\tif not redirects:\n\t\treturn\n\n\tredirect_to = frappe.cache.hget(\"website_redirects\", path)\n\n\tif redirect_to:\n\t\tif isinstance(redirect_to, dict):\n\t\t\tfrappe.flags.redirect_location = redirect_to[\"path\"]\n\t\t\traise frappe.Redirect(redirect_to[\"status_code\"])\n\t\tfrappe.flags.redirect_location = redirect_to\n\t\traise frappe.Redirect\n\n\tfor rule in redirects:\n\t\tpattern = rule[\"source\"].strip(\"/ \") + \"$\"\n\t\tpath_to_match = path\n\t\tif rule.get(\"match_with_query_string\"):\n\t\t\tpath_to_match = path + \"?\" + frappe.safe_decode(query_string)\n\n\t\ttry:\n\t\t\tmatch = re.match(pattern, path_to_match)\n\t\texcept re.error:\n\t\t\tfrappe.log_error(\"Broken Redirect: \" + pattern)\n\n\t\tif match:\n\t\t\tredirect_to = re.sub(pattern, rule[\"target\"], path_to_match)\n\t\t\tfrappe.flags.redirect_location = redirect_to\n\t\t\tstatus_code = rule.get(\"redirect_http_status\") or 301\n\t\t\tfrappe.cache.hset(\n\t\t\t\t\"website_redirects\", path_to_match, {\"path\": redirect_to, \"status_code\": status_code}\n\t\t\t)\n\t\t\traise frappe.Redirect(status_code)\n\n\ndef resolve_path(path):\n\tif not path:\n\t\tpath = \"index\"\n\n\tif path.endswith(\".html\"):\n\t\tpath = path[:-5]\n\n\tif path == \"index\":\n\t\tpath = get_home_page()\n\n\tfrappe.local.path = path\n\n\tif path != \"index\":\n\t\tpath = resolve_from_map(path)\n\n\treturn path\n\n\ndef resolve_from_map(path):\n\t\"\"\"transform dynamic route to a static one from hooks and route defined in doctype\"\"\"\n\trules = [\n\t\tRule(r[\"from_route\"], endpoint=r[\"to_route\"], defaults=r.get(\"defaults\"))\n\t\tfor r in get_website_rules()\n\t]\n\n\treturn evaluate_dynamic_routes(rules, path) or path\n\n\ndef get_website_rules():\n\t\"\"\"Get website route rules from hooks and DocType route\"\"\"\n\n\tdef _get():\n\t\trules = frappe.get_hooks(\"website_route_rules\")\n\t\tfor d in frappe.get_all(\"DocType\", \"name, route\", dict(has_web_view=1)):\n\t\t\tif d.route:\n\t\t\t\trules.append(dict(from_route=\"/\" + d.route.strip(\"/\"), to_route=d.name))\n\n\t\treturn rules\n\n\tif frappe.local.dev_server:\n\t\t# dont cache in development\n\t\treturn _get()\n\n\treturn frappe.cache.get_value(\"website_route_rules\", _get)\n", "path": 
"frappe/website/path_resolver.py"}, {"content": "# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\n# import frappe\nfrom frappe.model.document import Document\n\n\nclass WebsiteRouteRedirect(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tparent: DF.Data\n\t\tparentfield: DF.Data\n\t\tparenttype: DF.Data\n\t\tredirect_http_status: DF.Literal[\"301\", \"302\", \"307\", \"308\"]\n\t\tsource: DF.SmallText\n\t\ttarget: DF.SmallText\n\t# end: auto-generated types\n\n\tpass\n", "path": "frappe/website/doctype/website_route_redirect/website_route_redirect.py"}]} | 2,362 | 320 |
gh_patches_debug_20674 | rasdani/github-patches | git_diff | interlegis__sapl-2146 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The composition period of a committee must fall within a legislature (unless the committee is permanent)
The period entered must be contained within the period of a legislature.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/comissoes/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.contenttypes.models import ContentType
3 from django.core.exceptions import ValidationError
4 from django.db import transaction
5 from django.db.models import Q
6 from django.forms import ModelForm
7 from django.utils.translation import ugettext_lazy as _
8
9 from sapl.base.models import Autor, TipoAutor
10 from sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,
11 Participacao, Reuniao, Periodo)
12 from sapl.parlamentares.models import Legislatura, Mandato, Parlamentar
13
14 class ComposicaoForm(forms.ModelForm):
15
16 comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())
17
18 class Meta:
19 model = Composicao
20 exclude = []
21
22 def __init__(self, user=None, **kwargs):
23 super(ComposicaoForm, self).__init__(**kwargs)
24 self.fields['comissao'].widget.attrs['disabled'] = 'disabled'
25
26 def clean(self):
27 cleaned_data = super(ComposicaoForm, self).clean()
28
29 if not self.is_valid():
30 return cleaned_data
31
32 periodo = cleaned_data['periodo']
33 comissao_pk = self.initial['comissao'].id
34 intersecao_periodo = Composicao.objects.filter(
35 Q(periodo__data_inicio__lte=periodo.data_fim,
36 periodo__data_fim__gte=periodo.data_fim) |
37 Q(periodo__data_inicio__gte=periodo.data_inicio,
38 periodo__data_fim__lte=periodo.data_inicio),
39 comissao_id=comissao_pk)
40
41 if intersecao_periodo:
42 raise ValidationError('O período informado '
43 'choca com períodos já '
44 'cadastrados para esta comissão')
45
46 return cleaned_data
47
48 class PeriodoForm(forms.ModelForm):
49
50 class Meta:
51 model = Periodo
52 exclude = []
53
54 def clean(self):
55 cleaned_data = super(PeriodoForm, self).clean()
56
57 if not self.is_valid():
58 return cleaned_data
59
60 data_inicio = cleaned_data['data_inicio']
61 data_fim = cleaned_data['data_fim']
62
63 if data_fim and data_fim < data_inicio:
64 raise ValidationError('A Data Final não pode ser menor que '
65 'a Data Inicial')
66 return cleaned_data
67
68
69 class ParticipacaoCreateForm(forms.ModelForm):
70
71 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
72
73 class Meta:
74 model = Participacao
75 fields = '__all__'
76 exclude = ['composicao']
77
78 def __init__(self, user=None, **kwargs):
79 super(ParticipacaoCreateForm, self).__init__(**kwargs)
80
81 if self.instance:
82 comissao = kwargs['initial']
83 comissao_pk = int(comissao['parent_pk'])
84 composicao = Composicao.objects.get(id=comissao_pk)
85 participantes = composicao.participacao_set.all()
86 id_part = [p.parlamentar.id for p in participantes]
87 else:
88 id_part = []
89
90 qs = self.create_participacao()
91
92 parlamentares = Mandato.objects.filter(qs,
93 parlamentar__ativo=True
94 ).prefetch_related('parlamentar').\
95 values_list('parlamentar',
96 flat=True
97 ).distinct()
98
99 qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\
100 exclude(id__in=id_part)
101 eligible = self.verifica()
102 result = list(set(qs) & set(eligible))
103 if result == eligible:
104 self.fields['parlamentar'].queryset = qs
105 else:
106 ids = [e.id for e in eligible]
107 qs = Parlamentar.objects.filter(id__in=ids)
108 self.fields['parlamentar'].queryset = qs
109
110
111 def clean(self):
112 cleaned_data = super(ParticipacaoCreateForm, self).clean()
113
114 if not self.is_valid():
115 return cleaned_data
116
117 data_designacao = cleaned_data['data_designacao']
118 data_desligamento = cleaned_data['data_desligamento']
119
120 if data_desligamento and \
121 data_designacao > data_desligamento:
122 raise ValidationError(_('Data de designação não pode ser superior '
123 'à data de desligamento'))
124
125 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
126 cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
127
128 if cleaned_data['cargo'].nome in cargos_unicos:
129 msg = _('Este cargo é único para esta Comissão.')
130 raise ValidationError(msg)
131 return cleaned_data
132
133
134 def create_participacao(self):
135 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
136 data_inicio_comissao = composicao.periodo.data_inicio
137 data_fim_comissao = composicao.periodo.data_fim
138 q1 = Q(data_fim_mandato__isnull=False,
139 data_fim_mandato__gte=data_inicio_comissao)
140 q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \
141 & Q(data_inicio_mandato__lte=data_fim_comissao)
142 q3 = Q(data_fim_mandato__isnull=True,
143 data_inicio_mandato__lte=data_inicio_comissao)
144 qs = q1 | q2 | q3
145 return qs
146
147 def verifica(self):
148 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
149 participantes = composicao.participacao_set.all()
150 participantes_id = [p.parlamentar.id for p in participantes]
151 parlamentares = Parlamentar.objects.all().exclude(
152 id__in=participantes_id).order_by('nome_completo')
153 parlamentares = [p for p in parlamentares if p.ativo]
154
155 lista = []
156
157 for p in parlamentares:
158 mandatos = p.mandato_set.all()
159 for m in mandatos:
160 data_inicio = m.data_inicio_mandato
161 data_fim = m.data_fim_mandato
162 comp_data_inicio = composicao.periodo.data_inicio
163 comp_data_fim = composicao.periodo.data_fim
164 if (data_fim and data_fim >= comp_data_inicio)\
165 or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\
166 or (data_fim is None and data_inicio <= comp_data_inicio):
167 lista.append(p)
168
169 lista = list(set(lista))
170
171 return lista
172
173
174 class ParticipacaoEditForm(forms.ModelForm):
175
176 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
177 nome_parlamentar = forms.CharField(required=False, label='Parlamentar')
178
179 class Meta:
180 model = Participacao
181 fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',
182 'data_designacao', 'data_desligamento',
183 'motivo_desligamento', 'observacao']
184 widgets = {
185 'parlamentar': forms.HiddenInput(),
186 }
187
188 def __init__(self, user=None, **kwargs):
189 super(ParticipacaoEditForm, self).__init__(**kwargs)
190 self.initial['nome_parlamentar'] = Parlamentar.objects.get(
191 id=self.initial['parlamentar']).nome_parlamentar
192 self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'
193
194 def clean(self):
195 cleaned_data = super(ParticipacaoEditForm, self).clean()
196
197 if not self.is_valid():
198 return cleaned_data
199
200 data_designacao = cleaned_data['data_designacao']
201 data_desligamento = cleaned_data['data_desligamento']
202
203 if data_desligamento and \
204 data_designacao > data_desligamento:
205 raise ValidationError(_('Data de designação não pode ser superior '
206 'à data de desligamento'))
207
208 composicao_id = self.instance.composicao_id
209
210 composicao = Composicao.objects.get(id=composicao_id)
211 cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
212
213 if cleaned_data['cargo'].nome in cargos_unicos:
214 msg = _('Este cargo é único para esta Comissão.')
215 raise ValidationError(msg)
216
217 return cleaned_data
218
219
220 class ComissaoForm(forms.ModelForm):
221
222 class Meta:
223 model = Comissao
224 fields = '__all__'
225
226 def __init__(self, user=None, **kwargs):
227 super(ComissaoForm, self).__init__(**kwargs)
228 inst = self.instance
229 if inst.pk:
230 if inst.tipo.natureza == 'P':
231 self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'
232 self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'
233 self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'
234 self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'
235 self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'
236
237
238
239 def clean(self):
240 super(ComissaoForm, self).clean()
241
242 if not self.is_valid():
243 return self.cleaned_data
244
245 if len(self.cleaned_data['nome']) > 100:
246 msg = _('Nome da Comissão deve ter no máximo 50 caracteres.')
247 raise ValidationError(msg)
248 if self.cleaned_data['data_extincao']:
249 if (self.cleaned_data['data_extincao'] <
250 self.cleaned_data['data_criacao']):
251 msg = _('Data de extinção não pode ser menor que a de criação')
252 raise ValidationError(msg)
253 return self.cleaned_data
254
255 @transaction.atomic
256 def save(self, commit=True):
257 inst = self.instance
258 if not inst.pk:
259 comissao = super(ComissaoForm, self).save(commit)
260 content_type = ContentType.objects.get_for_model(Comissao)
261 object_id = comissao.pk
262 tipo = TipoAutor.objects.get(descricao__icontains='Comiss')
263 nome = comissao.sigla + ' - ' + comissao.nome
264 Autor.objects.create(
265 content_type=content_type,
266 object_id=object_id,
267 tipo=tipo,
268 nome=nome
269 )
270 return comissao
271 else:
272 comissao = super(ComissaoForm, self).save(commit)
273 return comissao
274
275
276 class ReuniaoForm(ModelForm):
277
278 comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),
279 widget=forms.HiddenInput())
280
281 class Meta:
282 model = Reuniao
283 exclude = ['cod_andamento_reuniao']
284
285 def clean(self):
286 super(ReuniaoForm, self).clean()
287
288 if not self.is_valid():
289 return self.cleaned_data
290
291 if self.cleaned_data['hora_fim']:
292 if (self.cleaned_data['hora_fim'] <
293 self.cleaned_data['hora_inicio']):
294 msg = _('A hora de término da reunião não pode ser menor que a de início')
295 raise ValidationError(msg)
296 return self.cleaned_data
297
298 class DocumentoAcessorioCreateForm(forms.ModelForm):
299
300 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
301
302 class Meta:
303 model = DocumentoAcessorio
304 exclude = ['reuniao']
305
306 def __init__(self, user=None, **kwargs):
307 super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)
308
309 if self.instance:
310 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
311 comissao = reuniao.comissao
312 comissao_pk = comissao.id
313 documentos = reuniao.documentoacessorio_set.all()
314 return self.create_documentoacessorio()
315
316
317 def create_documentoacessorio(self):
318 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
319
320
321 class DocumentoAcessorioEditForm(forms.ModelForm):
322
323 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
324
325 class Meta:
326 model = DocumentoAcessorio
327 fields = ['nome', 'data', 'autor', 'ementa',
328 'indexacao', 'arquivo']
329
330 def __init__(self, user=None, **kwargs):
331 super(DocumentoAcessorioEditForm, self).__init__(**kwargs)
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py
--- a/sapl/comissoes/forms.py
+++ b/sapl/comissoes/forms.py
@@ -45,6 +45,7 @@
return cleaned_data
+
class PeriodoForm(forms.ModelForm):
class Meta:
@@ -63,9 +64,20 @@
if data_fim and data_fim < data_inicio:
raise ValidationError('A Data Final não pode ser menor que '
'a Data Inicial')
+
+ legislatura = Legislatura.objects.filter(data_inicio__lte=data_inicio,
+ data_fim__gte=data_fim)
+
+ if not legislatura:
+ raise ValidationError('O período informado '
+ 'deve estar contido em uma única '
+ 'legislatura existente')
+
+
return cleaned_data
+
class ParticipacaoCreateForm(forms.ModelForm):
parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
| {"golden_diff": "diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py\n--- a/sapl/comissoes/forms.py\n+++ b/sapl/comissoes/forms.py\n@@ -45,6 +45,7 @@\n \n return cleaned_data\n \n+\n class PeriodoForm(forms.ModelForm):\n \n class Meta:\n@@ -63,9 +64,20 @@\n if data_fim and data_fim < data_inicio:\n raise ValidationError('A Data Final n\u00e3o pode ser menor que '\n 'a Data Inicial')\n+ \n+ legislatura = Legislatura.objects.filter(data_inicio__lte=data_inicio, \n+ data_fim__gte=data_fim)\n+\n+ if not legislatura:\n+ raise ValidationError('O per\u00edodo informado '\n+ 'deve estar contido em uma \u00fanica '\n+ 'legislatura existente')\n+\n+\n return cleaned_data\n \n \n+\n class ParticipacaoCreateForm(forms.ModelForm):\n \n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n", "issue": "Per\u00edodo de Composi\u00e7\u00e3o de Comiss\u00f5es deve estar dentro de uma legislatura (se n\u00e3o permanente)\nO per\u00edodo indicado deve estar contido no per\u00edodo de uma legislatura.\nPer\u00edodo de Composi\u00e7\u00e3o de Comiss\u00f5es deve estar dentro de uma legislatura (se n\u00e3o permanente)\nO per\u00edodo indicado deve estar contido no per\u00edodo de uma legislatura.\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,\n Participacao, Reuniao, Periodo)\nfrom sapl.parlamentares.models import Legislatura, Mandato, Parlamentar\n\nclass ComposicaoForm(forms.ModelForm):\n\n comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())\n\n class Meta:\n model = Composicao\n exclude = []\n\n def __init__(self, user=None, **kwargs):\n super(ComposicaoForm, self).__init__(**kwargs)\n self.fields['comissao'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ComposicaoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n periodo = cleaned_data['periodo']\n comissao_pk = self.initial['comissao'].id\n intersecao_periodo = Composicao.objects.filter(\n Q(periodo__data_inicio__lte=periodo.data_fim,\n periodo__data_fim__gte=periodo.data_fim) |\n Q(periodo__data_inicio__gte=periodo.data_inicio,\n periodo__data_fim__lte=periodo.data_inicio),\n comissao_id=comissao_pk)\n\n if intersecao_periodo:\n raise ValidationError('O per\u00edodo informado '\n 'choca com per\u00edodos j\u00e1 '\n 'cadastrados para esta comiss\u00e3o')\n\n return cleaned_data\n\nclass PeriodoForm(forms.ModelForm):\n\n class Meta:\n model = Periodo\n exclude = []\n\n def clean(self):\n cleaned_data = super(PeriodoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_inicio = cleaned_data['data_inicio']\n data_fim = cleaned_data['data_fim']\n\n if data_fim and data_fim < data_inicio:\n raise ValidationError('A Data Final n\u00e3o pode ser menor que '\n 'a Data Inicial')\n return cleaned_data\n\n\nclass ParticipacaoCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = Participacao\n fields = '__all__'\n exclude = ['composicao']\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoCreateForm, 
self).__init__(**kwargs)\n\n if self.instance:\n comissao = kwargs['initial']\n comissao_pk = int(comissao['parent_pk'])\n composicao = Composicao.objects.get(id=comissao_pk)\n participantes = composicao.participacao_set.all()\n id_part = [p.parlamentar.id for p in participantes]\n else:\n id_part = []\n\n qs = self.create_participacao()\n\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n values_list('parlamentar',\n flat=True\n ).distinct()\n\n qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n if result == eligible:\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n qs = Parlamentar.objects.filter(id__in=ids)\n self.fields['parlamentar'].queryset = qs\n\n\n def clean(self):\n cleaned_data = super(ParticipacaoCreateForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n return cleaned_data\n\n\n def create_participacao(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n data_inicio_comissao = composicao.periodo.data_inicio\n data_fim_comissao = composicao.periodo.data_fim\n q1 = Q(data_fim_mandato__isnull=False,\n data_fim_mandato__gte=data_inicio_comissao)\n q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \\\n & Q(data_inicio_mandato__lte=data_fim_comissao)\n q3 = Q(data_fim_mandato__isnull=True,\n data_inicio_mandato__lte=data_inicio_comissao)\n qs = q1 | q2 | q3\n return qs\n\n def verifica(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n participantes = composicao.participacao_set.all()\n participantes_id = [p.parlamentar.id for p in participantes]\n parlamentares = Parlamentar.objects.all().exclude(\n id__in=participantes_id).order_by('nome_completo')\n parlamentares = [p for p in parlamentares if p.ativo]\n\n lista = []\n\n for p in parlamentares:\n mandatos = p.mandato_set.all()\n for m in mandatos:\n data_inicio = m.data_inicio_mandato\n data_fim = m.data_fim_mandato\n comp_data_inicio = composicao.periodo.data_inicio\n comp_data_fim = composicao.periodo.data_fim\n if (data_fim and data_fim >= comp_data_inicio)\\\n or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\\\n or (data_fim is None and data_inicio <= comp_data_inicio):\n lista.append(p)\n\n lista = list(set(lista))\n\n return lista\n\n\nclass ParticipacaoEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n nome_parlamentar = forms.CharField(required=False, label='Parlamentar')\n\n class Meta:\n model = Participacao\n fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',\n 'data_designacao', 'data_desligamento',\n 'motivo_desligamento', 'observacao']\n widgets = {\n 'parlamentar': forms.HiddenInput(),\n }\n\n def __init__(self, user=None, **kwargs):\n 
super(ParticipacaoEditForm, self).__init__(**kwargs)\n self.initial['nome_parlamentar'] = Parlamentar.objects.get(\n id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ParticipacaoEditForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao_id = self.instance.composicao_id\n\n composicao = Composicao.objects.get(id=composicao_id)\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n\n return cleaned_data\n\n\nclass ComissaoForm(forms.ModelForm):\n\n class Meta:\n model = Comissao\n fields = '__all__'\n\n def __init__(self, user=None, **kwargs):\n super(ComissaoForm, self).__init__(**kwargs)\n inst = self.instance\n if inst.pk:\n if inst.tipo.natureza == 'P':\n self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'\n\n\n\n def clean(self):\n super(ComissaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if len(self.cleaned_data['nome']) > 100:\n msg = _('Nome da Comiss\u00e3o deve ter no m\u00e1ximo 50 caracteres.')\n raise ValidationError(msg)\n if self.cleaned_data['data_extincao']:\n if (self.cleaned_data['data_extincao'] <\n self.cleaned_data['data_criacao']):\n msg = _('Data de extin\u00e7\u00e3o n\u00e3o pode ser menor que a de cria\u00e7\u00e3o')\n raise ValidationError(msg)\n return self.cleaned_data\n\n @transaction.atomic\n def save(self, commit=True):\n inst = self.instance\n if not inst.pk:\n comissao = super(ComissaoForm, self).save(commit)\n content_type = ContentType.objects.get_for_model(Comissao)\n object_id = comissao.pk\n tipo = TipoAutor.objects.get(descricao__icontains='Comiss')\n nome = comissao.sigla + ' - ' + comissao.nome\n Autor.objects.create(\n content_type=content_type,\n object_id=object_id,\n tipo=tipo,\n nome=nome\n )\n return comissao\n else:\n comissao = super(ComissaoForm, self).save(commit)\n return comissao\n\n\nclass ReuniaoForm(ModelForm):\n\n comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),\n widget=forms.HiddenInput())\n\n class Meta:\n model = Reuniao\n exclude = ['cod_andamento_reuniao']\n\n def clean(self):\n super(ReuniaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de t\u00e9rmino da reuni\u00e3o n\u00e3o pode ser menor que a de in\u00edcio')\n raise ValidationError(msg)\n return self.cleaned_data\n\nclass DocumentoAcessorioCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n exclude = ['reuniao']\n\n def 
__init__(self, user=None, **kwargs):\n super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n comissao = reuniao.comissao\n comissao_pk = comissao.id\n documentos = reuniao.documentoacessorio_set.all()\n return self.create_documentoacessorio()\n\n\n def create_documentoacessorio(self):\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n\n\nclass DocumentoAcessorioEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n fields = ['nome', 'data', 'autor', 'ementa',\n 'indexacao', 'arquivo']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioEditForm, self).__init__(**kwargs)\n", "path": "sapl/comissoes/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,\n Participacao, Reuniao, Periodo)\nfrom sapl.parlamentares.models import Legislatura, Mandato, Parlamentar\n\nclass ComposicaoForm(forms.ModelForm):\n\n comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())\n\n class Meta:\n model = Composicao\n exclude = []\n\n def __init__(self, user=None, **kwargs):\n super(ComposicaoForm, self).__init__(**kwargs)\n self.fields['comissao'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ComposicaoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n periodo = cleaned_data['periodo']\n comissao_pk = self.initial['comissao'].id\n intersecao_periodo = Composicao.objects.filter(\n Q(periodo__data_inicio__lte=periodo.data_fim,\n periodo__data_fim__gte=periodo.data_fim) |\n Q(periodo__data_inicio__gte=periodo.data_inicio,\n periodo__data_fim__lte=periodo.data_inicio),\n comissao_id=comissao_pk)\n\n if intersecao_periodo:\n raise ValidationError('O per\u00edodo informado '\n 'choca com per\u00edodos j\u00e1 '\n 'cadastrados para esta comiss\u00e3o')\n\n return cleaned_data\n\n\nclass PeriodoForm(forms.ModelForm):\n\n class Meta:\n model = Periodo\n exclude = []\n\n def clean(self):\n cleaned_data = super(PeriodoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_inicio = cleaned_data['data_inicio']\n data_fim = cleaned_data['data_fim']\n\n if data_fim and data_fim < data_inicio:\n raise ValidationError('A Data Final n\u00e3o pode ser menor que '\n 'a Data Inicial')\n \n legislatura = Legislatura.objects.filter(data_inicio__lte=data_inicio, \n data_fim__gte=data_fim)\n\n if not legislatura:\n raise ValidationError('O per\u00edodo informado '\n 'deve estar contido em uma \u00fanica '\n 'legislatura existente')\n\n\n return cleaned_data\n\n\n\nclass ParticipacaoCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = Participacao\n fields = '__all__'\n exclude = ['composicao']\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n comissao = kwargs['initial']\n comissao_pk = int(comissao['parent_pk'])\n composicao = 
Composicao.objects.get(id=comissao_pk)\n participantes = composicao.participacao_set.all()\n id_part = [p.parlamentar.id for p in participantes]\n else:\n id_part = []\n\n qs = self.create_participacao()\n\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n values_list('parlamentar',\n flat=True\n ).distinct()\n\n qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n if result == eligible:\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n qs = Parlamentar.objects.filter(id__in=ids)\n self.fields['parlamentar'].queryset = qs\n\n\n def clean(self):\n cleaned_data = super(ParticipacaoCreateForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n return cleaned_data\n\n\n def create_participacao(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n data_inicio_comissao = composicao.periodo.data_inicio\n data_fim_comissao = composicao.periodo.data_fim\n q1 = Q(data_fim_mandato__isnull=False,\n data_fim_mandato__gte=data_inicio_comissao)\n q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \\\n & Q(data_inicio_mandato__lte=data_fim_comissao)\n q3 = Q(data_fim_mandato__isnull=True,\n data_inicio_mandato__lte=data_inicio_comissao)\n qs = q1 | q2 | q3\n return qs\n\n def verifica(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n participantes = composicao.participacao_set.all()\n participantes_id = [p.parlamentar.id for p in participantes]\n parlamentares = Parlamentar.objects.all().exclude(\n id__in=participantes_id).order_by('nome_completo')\n parlamentares = [p for p in parlamentares if p.ativo]\n\n lista = []\n\n for p in parlamentares:\n mandatos = p.mandato_set.all()\n for m in mandatos:\n data_inicio = m.data_inicio_mandato\n data_fim = m.data_fim_mandato\n comp_data_inicio = composicao.periodo.data_inicio\n comp_data_fim = composicao.periodo.data_fim\n if (data_fim and data_fim >= comp_data_inicio)\\\n or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\\\n or (data_fim is None and data_inicio <= comp_data_inicio):\n lista.append(p)\n\n lista = list(set(lista))\n\n return lista\n\n\nclass ParticipacaoEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n nome_parlamentar = forms.CharField(required=False, label='Parlamentar')\n\n class Meta:\n model = Participacao\n fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',\n 'data_designacao', 'data_desligamento',\n 'motivo_desligamento', 'observacao']\n widgets = {\n 'parlamentar': forms.HiddenInput(),\n }\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoEditForm, self).__init__(**kwargs)\n self.initial['nome_parlamentar'] = Parlamentar.objects.get(\n 
id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ParticipacaoEditForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao_id = self.instance.composicao_id\n\n composicao = Composicao.objects.get(id=composicao_id)\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n\n return cleaned_data\n\n\nclass ComissaoForm(forms.ModelForm):\n\n class Meta:\n model = Comissao\n fields = '__all__'\n\n def __init__(self, user=None, **kwargs):\n super(ComissaoForm, self).__init__(**kwargs)\n inst = self.instance\n if inst.pk:\n if inst.tipo.natureza == 'P':\n self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'\n\n\n\n def clean(self):\n super(ComissaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if len(self.cleaned_data['nome']) > 100:\n msg = _('Nome da Comiss\u00e3o deve ter no m\u00e1ximo 50 caracteres.')\n raise ValidationError(msg)\n if self.cleaned_data['data_extincao']:\n if (self.cleaned_data['data_extincao'] <\n self.cleaned_data['data_criacao']):\n msg = _('Data de extin\u00e7\u00e3o n\u00e3o pode ser menor que a de cria\u00e7\u00e3o')\n raise ValidationError(msg)\n return self.cleaned_data\n\n @transaction.atomic\n def save(self, commit=True):\n inst = self.instance\n if not inst.pk:\n comissao = super(ComissaoForm, self).save(commit)\n content_type = ContentType.objects.get_for_model(Comissao)\n object_id = comissao.pk\n tipo = TipoAutor.objects.get(descricao__icontains='Comiss')\n nome = comissao.sigla + ' - ' + comissao.nome\n Autor.objects.create(\n content_type=content_type,\n object_id=object_id,\n tipo=tipo,\n nome=nome\n )\n return comissao\n else:\n comissao = super(ComissaoForm, self).save(commit)\n return comissao\n\n\nclass ReuniaoForm(ModelForm):\n\n comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),\n widget=forms.HiddenInput())\n\n class Meta:\n model = Reuniao\n exclude = ['cod_andamento_reuniao']\n\n def clean(self):\n super(ReuniaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de t\u00e9rmino da reuni\u00e3o n\u00e3o pode ser menor que a de in\u00edcio')\n raise ValidationError(msg)\n return self.cleaned_data\n\nclass DocumentoAcessorioCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n exclude = ['reuniao']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)\n\n if 
self.instance:\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n comissao = reuniao.comissao\n comissao_pk = comissao.id\n documentos = reuniao.documentoacessorio_set.all()\n return self.create_documentoacessorio()\n\n\n def create_documentoacessorio(self):\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n\n\nclass DocumentoAcessorioEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n fields = ['nome', 'data', 'autor', 'ementa',\n 'indexacao', 'arquivo']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioEditForm, self).__init__(**kwargs)\n", "path": "sapl/comissoes/forms.py"}]} | 3,994 | 224 |
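As an aside for readers scanning the row above: the accepted patch boils down to a containment check between the proposed composition period and an existing legislature. The sketch below restates that rule outside of Django as plain Python; the `Legislatura` tuple and the sample dates are assumptions for illustration only, not part of the SAPL codebase.

```python
# Minimal sketch of the containment rule added by the patch above.
# Everything here (the Legislatura tuple, the sample dates) is illustrative only.
from datetime import date
from typing import Iterable, NamedTuple


class Legislatura(NamedTuple):
    data_inicio: date
    data_fim: date


def periodo_em_legislatura(data_inicio: date, data_fim: date,
                           legislaturas: Iterable[Legislatura]) -> bool:
    """True if [data_inicio, data_fim] lies entirely inside one legislature."""
    return any(l.data_inicio <= data_inicio and l.data_fim >= data_fim
               for l in legislaturas)


legislaturas = [Legislatura(date(2021, 1, 1), date(2024, 12, 31))]
assert periodo_em_legislatura(date(2022, 3, 1), date(2023, 3, 1), legislaturas)
assert not periodo_em_legislatura(date(2024, 6, 1), date(2025, 6, 1), legislaturas)
```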
gh_patches_debug_33164 | rasdani/github-patches | git_diff | huggingface__trl-1617 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Speed up ZeRO-3 generation with DPO
Hi, a [recent PR](https://github.com/huggingface/trl/pull/1483) brought large improvements (x10) to PPO generation with ZeRO-3.
@lewtun, you mention on the PR that it can be adapted for other trainers. I gave it a quick shot and it seems that naively applying the context manager to trainers like DPO does not work:
```
in remove_hooks
if model.optimizer is not None and hasattr(
^^^^^^^^^^^^^^^^^^^^
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
AttributeError: 'GPTNeoXForCausalLM' object has no attribute 'optimizer'
```
There seems to be an inconsistency between the base classes. Is there a reason why DPO is based on Trainer from transformers and PPO on BaseTrainer? What would be an easy way to add this feature to other trainers? Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `trl/models/utils.py`
Content:
```
1 from contextlib import contextmanager
2 from dataclasses import dataclass
3 from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union
4
5 from accelerate.utils import is_deepspeed_available
6 from transformers import PreTrainedModel, PreTrainedTokenizer
7
8 from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead
9
10
11 SUPPORTED_ARCHITECTURES = (
12 AutoModelForCausalLMWithValueHead,
13 AutoModelForSeq2SeqLMWithValueHead,
14 )
15
16
17 if is_deepspeed_available():
18 import deepspeed
19
20 if TYPE_CHECKING:
21 from accelerate import Accelerator
22 from deepspeed.runtime.engine import DeepSpeedEngine
23 from torch.nn.parallel.distributed import DistributedDataParallel
24
25 from .modeling_base import PreTrainedModelWrapper
26
27
28 # TODO: Add Abstract Base Class if more formats are added
29 @dataclass
30 class ChatMlSpecialTokens:
31 """Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens."""
32
33 bos_token: str = "<|im_start|>"
34 eos_token: str = "<|im_end|>"
35 pad_token: str = "<|im_end|>"
36
37 @property
38 def system(self):
39 return f"{self.bos_token}system"
40
41 @property
42 def user(self):
43 return f"{self.bos_token}user"
44
45 @property
46 def assistant(self):
47 return f"{self.bos_token}assistant"
48
49 @property
50 def chat_template(self):
51 return (
52 "{% for message in messages %}"
53 f"{{{{'{self.bos_token}' + message['role'] + '\n' + message['content'] + '{self.eos_token}' + '\n'}}}}"
54 "{% endfor %}"
55 "{% if add_generation_prompt %}"
56 f"{{{{ '{self.assistant}\n' }}}}"
57 "{% endif %}"
58 )
59
60
61 FORMAT_MAPPING = {"chatml": ChatMlSpecialTokens}
62
63
64 def setup_chat_format(
65 model: PreTrainedModel,
66 tokenizer: PreTrainedTokenizer,
67 format: Optional[Literal["chatml"]] = "chatml",
68 resize_to_multiple_of: Optional[int] = None,
69 ) -> Tuple[PreTrainedModel, PreTrainedTokenizer]:
70 """
71 Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens.
72
73 Args:
74 model (`~transformers.PreTrainedModel`): The model to be modified.
75 tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified.
76 format (`Optional[Literal["chatml"]]`): The format to be set. Defaults to "chatml".
77 resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None.
78 Returns:
79 model (`~transformers.PreTrainedModel`): The modified model.
80 tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer.
81 """
82 # check if format available and retrieve
83 if format not in FORMAT_MAPPING:
84 raise ValueError(f"Format {format} not available. Please use one of {FORMAT_MAPPING.keys()}")
85
86 chat_format = FORMAT_MAPPING[format]()
87
88 # set special tokens and them
89 tokenizer.eos_token = chat_format.eos_token
90 tokenizer.pad_token = chat_format.pad_token
91 tokenizer.bos_token = chat_format.bos_token
92 tokenizer.add_special_tokens({"additional_special_tokens": [chat_format.bos_token, chat_format.eos_token]})
93 # set chat format for tokenizer
94 tokenizer.chat_template = chat_format.chat_template
95
96 # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377
97 model.resize_token_embeddings(
98 len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None
99 )
100 # Update the model config to use the new eos & bos tokens
101 if getattr(model, "config", None) is not None:
102 model.config.pad_token_id = tokenizer.pad_token_id
103 model.config.bos_token_id = tokenizer.bos_token_id
104 model.config.eos_token_id = tokenizer.eos_token_id
105 # Update the generation config to use the new eos & bos token
106 if getattr(model, "generation_config", None) is not None:
107 model.generation_config.bos_token_id = tokenizer.bos_token_id
108 model.generation_config.eos_token_id = tokenizer.eos_token_id
109 model.generation_config.pad_token_id = tokenizer.pad_token_id
110
111 return model, tokenizer
112
113
114 def remove_hooks(model: "DeepSpeedEngine") -> None:
115 """Removes the optimizer hooks from a DeepSpeed ZeRO-3 model."""
116 if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"):
117 optimizer_offload = model.optimizer.parameter_offload
118 elif model.optimizer is not None:
119 optimizer_offload = model.optimizer
120
121 for hook in optimizer_offload.forward_hooks:
122 hook.remove()
123 for hook in optimizer_offload.backward_hooks:
124 hook.remove()
125
126 optimizer_offload.forward_hooks = []
127 optimizer_offload.backward_hooks = []
128
129
130 def add_hooks(model: "DeepSpeedEngine") -> None:
131 """Adds the optimizer hooks from a DeepSpeed ZeRO-3 model."""
132 if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"):
133 optimizer_offload = model.optimizer.parameter_offload
134 elif model.optimizer is not None:
135 optimizer_offload = model.optimizer
136 optimizer_offload._register_hooks_recursively(optimizer_offload.module)
137
138
139 @contextmanager
140 def unwrap_model_for_generation(
141 model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False
142 ) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]:
143 """Context manager to unwrap a model for generation.
144
145 For ZeRO-3 models, we gather the weights once to speed up generation.
146 """
147 unwrapped_model = accelerator.unwrap_model(model)
148 if is_peft_model:
149 unwrapped_model.pretrained_model.disable_adapter()
150 if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3:
151 with deepspeed.zero.GatheredParameters(model.parameters()):
152 remove_hooks(model)
153 yield model
154 add_hooks(model)
155 else:
156 yield unwrapped_model
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/trl/models/utils.py b/trl/models/utils.py
--- a/trl/models/utils.py
+++ b/trl/models/utils.py
@@ -1,3 +1,4 @@
+import itertools
from contextlib import contextmanager
from dataclasses import dataclass
from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union
@@ -118,6 +119,9 @@
elif model.optimizer is not None:
optimizer_offload = model.optimizer
+ for param in iter_params(optimizer_offload.module, recurse=True):
+ param.ds_active_sub_modules.clear()
+
for hook in optimizer_offload.forward_hooks:
hook.remove()
for hook in optimizer_offload.backward_hooks:
@@ -127,6 +131,14 @@
optimizer_offload.backward_hooks = []
+def get_all_parameters(sub_module, recurse=False):
+ return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())
+
+
+def iter_params(module, recurse=False):
+ return [param for _, param in get_all_parameters(module, recurse)]
+
+
def add_hooks(model: "DeepSpeedEngine") -> None:
"""Adds the optimizer hooks from a DeepSpeed ZeRO-3 model."""
if model.optimizer is not None and hasattr(model.optimizer, "parameter_offload"):
@@ -141,7 +153,6 @@
model: Union["DistributedDataParallel", "DeepSpeedEngine"], accelerator: "Accelerator", is_peft_model: bool = False
) -> Union["PreTrainedModelWrapper", "DeepSpeedEngine"]:
"""Context manager to unwrap a model for generation.
-
For ZeRO-3 models, we gather the weights once to speed up generation.
"""
unwrapped_model = accelerator.unwrap_model(model)
| {"golden_diff": "diff --git a/trl/models/utils.py b/trl/models/utils.py\n--- a/trl/models/utils.py\n+++ b/trl/models/utils.py\n@@ -1,3 +1,4 @@\n+import itertools\n from contextlib import contextmanager\n from dataclasses import dataclass\n from typing import TYPE_CHECKING, Literal, Optional, Tuple, Union\n@@ -118,6 +119,9 @@\n elif model.optimizer is not None:\n optimizer_offload = model.optimizer\n \n+ for param in iter_params(optimizer_offload.module, recurse=True):\n+ param.ds_active_sub_modules.clear()\n+\n for hook in optimizer_offload.forward_hooks:\n hook.remove()\n for hook in optimizer_offload.backward_hooks:\n@@ -127,6 +131,14 @@\n optimizer_offload.backward_hooks = []\n \n \n+def get_all_parameters(sub_module, recurse=False):\n+ return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())\n+\n+\n+def iter_params(module, recurse=False):\n+ return [param for _, param in get_all_parameters(module, recurse)]\n+\n+\n def add_hooks(model: \"DeepSpeedEngine\") -> None:\n \"\"\"Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.\"\"\"\n if model.optimizer is not None and hasattr(model.optimizer, \"parameter_offload\"):\n@@ -141,7 +153,6 @@\n model: Union[\"DistributedDataParallel\", \"DeepSpeedEngine\"], accelerator: \"Accelerator\", is_peft_model: bool = False\n ) -> Union[\"PreTrainedModelWrapper\", \"DeepSpeedEngine\"]:\n \"\"\"Context manager to unwrap a model for generation.\n-\n For ZeRO-3 models, we gather the weights once to speed up generation.\n \"\"\"\n unwrapped_model = accelerator.unwrap_model(model)\n", "issue": "Speed up ZeRO-3 generation with DPO\nHi, a [recent PR](https://github.com/huggingface/trl/pull/1483) brought large improvements (x10) to PPO generation with ZeRO-3.\r\n@lewtun, you mention on the PR that it can be adapted for other trainers. I gave it a quick shot and it seems that naive applying the context manager to trainers like DPO does not work:\r\n```\r\nin remove_hooks\r\n if model.optimizer is not None and hasattr(\r\n ^^^^^^^^^^^^^^^^^^^^\r\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\r\nAttributeError: 'GPTNeoXForCausalLM' object has no attribute 'optimizer'\r\n```\r\nThere seems to be an inconsistency between the base classes. Is there a reason why DPO is based on Trainer from transformers and PPO on BaseTrainer ? What would be the easy way to add this feature to other trainers ? 
Thanks !\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Literal, Optional, Tuple, Union\n\nfrom accelerate.utils import is_deepspeed_available\nfrom transformers import PreTrainedModel, PreTrainedTokenizer\n\nfrom .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead\n\n\nSUPPORTED_ARCHITECTURES = (\n AutoModelForCausalLMWithValueHead,\n AutoModelForSeq2SeqLMWithValueHead,\n)\n\n\nif is_deepspeed_available():\n import deepspeed\n\nif TYPE_CHECKING:\n from accelerate import Accelerator\n from deepspeed.runtime.engine import DeepSpeedEngine\n from torch.nn.parallel.distributed import DistributedDataParallel\n\n from .modeling_base import PreTrainedModelWrapper\n\n\n# TODO: Add Abstract Base Class if more formats are added\n@dataclass\nclass ChatMlSpecialTokens:\n \"\"\"Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens.\"\"\"\n\n bos_token: str = \"<|im_start|>\"\n eos_token: str = \"<|im_end|>\"\n pad_token: str = \"<|im_end|>\"\n\n @property\n def system(self):\n return f\"{self.bos_token}system\"\n\n @property\n def user(self):\n return f\"{self.bos_token}user\"\n\n @property\n def assistant(self):\n return f\"{self.bos_token}assistant\"\n\n @property\n def chat_template(self):\n return (\n \"{% for message in messages %}\"\n f\"{{{{'{self.bos_token}' + message['role'] + '\\n' + message['content'] + '{self.eos_token}' + '\\n'}}}}\"\n \"{% endfor %}\"\n \"{% if add_generation_prompt %}\"\n f\"{{{{ '{self.assistant}\\n' }}}}\"\n \"{% endif %}\"\n )\n\n\nFORMAT_MAPPING = {\"chatml\": ChatMlSpecialTokens}\n\n\ndef setup_chat_format(\n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n format: Optional[Literal[\"chatml\"]] = \"chatml\",\n resize_to_multiple_of: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizer]:\n \"\"\"\n Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens.\n\n Args:\n model (`~transformers.PreTrainedModel`): The model to be modified.\n tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified.\n format (`Optional[Literal[\"chatml\"]]`): The format to be set. Defaults to \"chatml\".\n resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None.\n Returns:\n model (`~transformers.PreTrainedModel`): The modified model.\n tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer.\n \"\"\"\n # check if format available and retrieve\n if format not in FORMAT_MAPPING:\n raise ValueError(f\"Format {format} not available. 
Please use one of {FORMAT_MAPPING.keys()}\")\n\n chat_format = FORMAT_MAPPING[format]()\n\n # set special tokens and them\n tokenizer.eos_token = chat_format.eos_token\n tokenizer.pad_token = chat_format.pad_token\n tokenizer.bos_token = chat_format.bos_token\n tokenizer.add_special_tokens({\"additional_special_tokens\": [chat_format.bos_token, chat_format.eos_token]})\n # set chat format for tokenizer\n tokenizer.chat_template = chat_format.chat_template\n\n # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377\n model.resize_token_embeddings(\n len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None\n )\n # Update the model config to use the new eos & bos tokens\n if getattr(model, \"config\", None) is not None:\n model.config.pad_token_id = tokenizer.pad_token_id\n model.config.bos_token_id = tokenizer.bos_token_id\n model.config.eos_token_id = tokenizer.eos_token_id\n # Update the generation config to use the new eos & bos token\n if getattr(model, \"generation_config\", None) is not None:\n model.generation_config.bos_token_id = tokenizer.bos_token_id\n model.generation_config.eos_token_id = tokenizer.eos_token_id\n model.generation_config.pad_token_id = tokenizer.pad_token_id\n\n return model, tokenizer\n\n\ndef remove_hooks(model: \"DeepSpeedEngine\") -> None:\n \"\"\"Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.\"\"\"\n if model.optimizer is not None and hasattr(model.optimizer, \"parameter_offload\"):\n optimizer_offload = model.optimizer.parameter_offload\n elif model.optimizer is not None:\n optimizer_offload = model.optimizer\n\n for hook in optimizer_offload.forward_hooks:\n hook.remove()\n for hook in optimizer_offload.backward_hooks:\n hook.remove()\n\n optimizer_offload.forward_hooks = []\n optimizer_offload.backward_hooks = []\n\n\ndef add_hooks(model: \"DeepSpeedEngine\") -> None:\n \"\"\"Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.\"\"\"\n if model.optimizer is not None and hasattr(model.optimizer, \"parameter_offload\"):\n optimizer_offload = model.optimizer.parameter_offload\n elif model.optimizer is not None:\n optimizer_offload = model.optimizer\n optimizer_offload._register_hooks_recursively(optimizer_offload.module)\n\n\n@contextmanager\ndef unwrap_model_for_generation(\n model: Union[\"DistributedDataParallel\", \"DeepSpeedEngine\"], accelerator: \"Accelerator\", is_peft_model: bool = False\n) -> Union[\"PreTrainedModelWrapper\", \"DeepSpeedEngine\"]:\n \"\"\"Context manager to unwrap a model for generation.\n\n For ZeRO-3 models, we gather the weights once to speed up generation.\n \"\"\"\n unwrapped_model = accelerator.unwrap_model(model)\n if is_peft_model:\n unwrapped_model.pretrained_model.disable_adapter()\n if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3:\n with deepspeed.zero.GatheredParameters(model.parameters()):\n remove_hooks(model)\n yield model\n add_hooks(model)\n else:\n yield unwrapped_model\n", "path": "trl/models/utils.py"}], "after_files": [{"content": "import itertools\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Literal, Optional, Tuple, Union\n\nfrom accelerate.utils import is_deepspeed_available\nfrom transformers import PreTrainedModel, PreTrainedTokenizer\n\nfrom .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead\n\n\nSUPPORTED_ARCHITECTURES = (\n 
AutoModelForCausalLMWithValueHead,\n AutoModelForSeq2SeqLMWithValueHead,\n)\n\n\nif is_deepspeed_available():\n import deepspeed\n\nif TYPE_CHECKING:\n from accelerate import Accelerator\n from deepspeed.runtime.engine import DeepSpeedEngine\n from torch.nn.parallel.distributed import DistributedDataParallel\n\n from .modeling_base import PreTrainedModelWrapper\n\n\n# TODO: Add Abstract Base Class if more formats are added\n@dataclass\nclass ChatMlSpecialTokens:\n \"\"\"Dataclass for special tokens used in ChatML, including system, user, assistant, bos, eos, and pad tokens.\"\"\"\n\n bos_token: str = \"<|im_start|>\"\n eos_token: str = \"<|im_end|>\"\n pad_token: str = \"<|im_end|>\"\n\n @property\n def system(self):\n return f\"{self.bos_token}system\"\n\n @property\n def user(self):\n return f\"{self.bos_token}user\"\n\n @property\n def assistant(self):\n return f\"{self.bos_token}assistant\"\n\n @property\n def chat_template(self):\n return (\n \"{% for message in messages %}\"\n f\"{{{{'{self.bos_token}' + message['role'] + '\\n' + message['content'] + '{self.eos_token}' + '\\n'}}}}\"\n \"{% endfor %}\"\n \"{% if add_generation_prompt %}\"\n f\"{{{{ '{self.assistant}\\n' }}}}\"\n \"{% endif %}\"\n )\n\n\nFORMAT_MAPPING = {\"chatml\": ChatMlSpecialTokens}\n\n\ndef setup_chat_format(\n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n format: Optional[Literal[\"chatml\"]] = \"chatml\",\n resize_to_multiple_of: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizer]:\n \"\"\"\n Setup chat format by adding special tokens to the tokenizer, setting the correct format, and extending the embedding layer of the model based on the new special tokens.\n\n Args:\n model (`~transformers.PreTrainedModel`): The model to be modified.\n tokenizer (`~transformers.PreTrainedTokenizer`): The tokenizer to be modified.\n format (`Optional[Literal[\"chatml\"]]`): The format to be set. Defaults to \"chatml\".\n resize_to_multiple_of (`Optional[int]`): Number to resize the embedding layer to. Defaults to None.\n Returns:\n model (`~transformers.PreTrainedModel`): The modified model.\n tokenizer (`~transformers.PreTrainedTokenizer`): The modified tokenizer.\n \"\"\"\n # check if format available and retrieve\n if format not in FORMAT_MAPPING:\n raise ValueError(f\"Format {format} not available. 
Please use one of {FORMAT_MAPPING.keys()}\")\n\n chat_format = FORMAT_MAPPING[format]()\n\n # set special tokens and them\n tokenizer.eos_token = chat_format.eos_token\n tokenizer.pad_token = chat_format.pad_token\n tokenizer.bos_token = chat_format.bos_token\n tokenizer.add_special_tokens({\"additional_special_tokens\": [chat_format.bos_token, chat_format.eos_token]})\n # set chat format for tokenizer\n tokenizer.chat_template = chat_format.chat_template\n\n # resize embedding layer to a multiple of 64, https://x.com/karpathy/status/1621578354024677377\n model.resize_token_embeddings(\n len(tokenizer), pad_to_multiple_of=resize_to_multiple_of if resize_to_multiple_of is not None else None\n )\n # Update the model config to use the new eos & bos tokens\n if getattr(model, \"config\", None) is not None:\n model.config.pad_token_id = tokenizer.pad_token_id\n model.config.bos_token_id = tokenizer.bos_token_id\n model.config.eos_token_id = tokenizer.eos_token_id\n # Update the generation config to use the new eos & bos token\n if getattr(model, \"generation_config\", None) is not None:\n model.generation_config.bos_token_id = tokenizer.bos_token_id\n model.generation_config.eos_token_id = tokenizer.eos_token_id\n model.generation_config.pad_token_id = tokenizer.pad_token_id\n\n return model, tokenizer\n\n\ndef remove_hooks(model: \"DeepSpeedEngine\") -> None:\n \"\"\"Removes the optimizer hooks from a DeepSpeed ZeRO-3 model.\"\"\"\n if model.optimizer is not None and hasattr(model.optimizer, \"parameter_offload\"):\n optimizer_offload = model.optimizer.parameter_offload\n elif model.optimizer is not None:\n optimizer_offload = model.optimizer\n\n for param in iter_params(optimizer_offload.module, recurse=True):\n param.ds_active_sub_modules.clear()\n\n for hook in optimizer_offload.forward_hooks:\n hook.remove()\n for hook in optimizer_offload.backward_hooks:\n hook.remove()\n\n optimizer_offload.forward_hooks = []\n optimizer_offload.backward_hooks = []\n\n\ndef get_all_parameters(sub_module, recurse=False):\n return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters())\n\n\ndef iter_params(module, recurse=False):\n return [param for _, param in get_all_parameters(module, recurse)]\n\n\ndef add_hooks(model: \"DeepSpeedEngine\") -> None:\n \"\"\"Adds the optimizer hooks from a DeepSpeed ZeRO-3 model.\"\"\"\n if model.optimizer is not None and hasattr(model.optimizer, \"parameter_offload\"):\n optimizer_offload = model.optimizer.parameter_offload\n elif model.optimizer is not None:\n optimizer_offload = model.optimizer\n optimizer_offload._register_hooks_recursively(optimizer_offload.module)\n\n\n@contextmanager\ndef unwrap_model_for_generation(\n model: Union[\"DistributedDataParallel\", \"DeepSpeedEngine\"], accelerator: \"Accelerator\", is_peft_model: bool = False\n) -> Union[\"PreTrainedModelWrapper\", \"DeepSpeedEngine\"]:\n \"\"\"Context manager to unwrap a model for generation.\n For ZeRO-3 models, we gather the weights once to speed up generation.\n \"\"\"\n unwrapped_model = accelerator.unwrap_model(model)\n if is_peft_model:\n unwrapped_model.pretrained_model.disable_adapter()\n if accelerator.state.deepspeed_plugin is not None and accelerator.state.deepspeed_plugin.zero_stage == 3:\n with deepspeed.zero.GatheredParameters(model.parameters()):\n remove_hooks(model)\n yield model\n add_hooks(model)\n else:\n yield unwrapped_model\n", "path": "trl/models/utils.py"}]} | 2,235 | 387 |
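A side note on the row above: the shape of the fix is "strip the hooks, generate, then put them back". The sketch below reproduces only that shape with ordinary `torch` forward hooks standing in for DeepSpeed's ZeRO-3 offload hooks, so treat it as an analogy rather than the trl or DeepSpeed implementation; the `_forward_hooks` dict it touches is a private torch attribute used here purely for brevity.

```python
# Analogy only: temporarily disable a module's forward hooks, mirroring the
# remove_hooks/add_hooks pair discussed above (the real patch targets DeepSpeed
# ZeRO-3 offload hooks, not these generic torch hooks).
from contextlib import contextmanager

import torch
from torch import nn


@contextmanager
def forward_hooks_disabled(module: nn.Module):
    saved = dict(module._forward_hooks)   # private attr; kept simple for the sketch
    module._forward_hooks.clear()
    try:
        yield module
    finally:
        module._forward_hooks.update(saved)


calls = []
layer = nn.Linear(4, 4)
layer.register_forward_hook(lambda mod, inputs, output: calls.append("hook"))

with forward_hooks_disabled(layer):
    layer(torch.zeros(1, 4))   # hook does not fire inside the block
layer(torch.zeros(1, 4))       # hook fires again after restoration

print(calls)                   # -> ['hook']
```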
gh_patches_debug_23212 | rasdani/github-patches | git_diff | open-mmlab__mmdetection-3966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support for validation with batch size > 1
Hi, glad to know we can run inference with batch size > 1. Is there any plan to support validation during training with batch size > 1? According to
https://github.com/open-mmlab/mmdetection/blob/c8511649550834ea168f610411a47a39cf194767/mmdet/apis/train.py#L113-L120
batch size is set to 1 for validation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/apis/train.py`
Content:
```
1 import random
2
3 import numpy as np
4 import torch
5 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
6 from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
7 Fp16OptimizerHook, OptimizerHook, build_optimizer)
8 from mmcv.utils import build_from_cfg
9
10 from mmdet.core import DistEvalHook, EvalHook
11 from mmdet.datasets import build_dataloader, build_dataset
12 from mmdet.utils import get_root_logger
13
14
15 def set_random_seed(seed, deterministic=False):
16 """Set random seed.
17
18 Args:
19 seed (int): Seed to be used.
20 deterministic (bool): Whether to set the deterministic option for
21 CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
22 to True and `torch.backends.cudnn.benchmark` to False.
23 Default: False.
24 """
25 random.seed(seed)
26 np.random.seed(seed)
27 torch.manual_seed(seed)
28 torch.cuda.manual_seed_all(seed)
29 if deterministic:
30 torch.backends.cudnn.deterministic = True
31 torch.backends.cudnn.benchmark = False
32
33
34 def train_detector(model,
35 dataset,
36 cfg,
37 distributed=False,
38 validate=False,
39 timestamp=None,
40 meta=None):
41 logger = get_root_logger(cfg.log_level)
42
43 # prepare data loaders
44 dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
45 if 'imgs_per_gpu' in cfg.data:
46 logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
47 'Please use "samples_per_gpu" instead')
48 if 'samples_per_gpu' in cfg.data:
49 logger.warning(
50 f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
51 f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
52 f'={cfg.data.imgs_per_gpu} is used in this experiments')
53 else:
54 logger.warning(
55 'Automatically set "samples_per_gpu"="imgs_per_gpu"='
56 f'{cfg.data.imgs_per_gpu} in this experiments')
57 cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
58
59 data_loaders = [
60 build_dataloader(
61 ds,
62 cfg.data.samples_per_gpu,
63 cfg.data.workers_per_gpu,
64 # cfg.gpus will be ignored if distributed
65 len(cfg.gpu_ids),
66 dist=distributed,
67 seed=cfg.seed) for ds in dataset
68 ]
69
70 # put model on gpus
71 if distributed:
72 find_unused_parameters = cfg.get('find_unused_parameters', False)
73 # Sets the `find_unused_parameters` parameter in
74 # torch.nn.parallel.DistributedDataParallel
75 model = MMDistributedDataParallel(
76 model.cuda(),
77 device_ids=[torch.cuda.current_device()],
78 broadcast_buffers=False,
79 find_unused_parameters=find_unused_parameters)
80 else:
81 model = MMDataParallel(
82 model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
83
84 # build runner
85 optimizer = build_optimizer(model, cfg.optimizer)
86 runner = EpochBasedRunner(
87 model,
88 optimizer=optimizer,
89 work_dir=cfg.work_dir,
90 logger=logger,
91 meta=meta)
92 # an ugly workaround to make .log and .log.json filenames the same
93 runner.timestamp = timestamp
94
95 # fp16 setting
96 fp16_cfg = cfg.get('fp16', None)
97 if fp16_cfg is not None:
98 optimizer_config = Fp16OptimizerHook(
99 **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
100 elif distributed and 'type' not in cfg.optimizer_config:
101 optimizer_config = OptimizerHook(**cfg.optimizer_config)
102 else:
103 optimizer_config = cfg.optimizer_config
104
105 # register hooks
106 runner.register_training_hooks(cfg.lr_config, optimizer_config,
107 cfg.checkpoint_config, cfg.log_config,
108 cfg.get('momentum_config', None))
109 if distributed:
110 runner.register_hook(DistSamplerSeedHook())
111
112 # register eval hooks
113 if validate:
114 val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
115 val_dataloader = build_dataloader(
116 val_dataset,
117 samples_per_gpu=1,
118 workers_per_gpu=cfg.data.workers_per_gpu,
119 dist=distributed,
120 shuffle=False)
121 eval_cfg = cfg.get('evaluation', {})
122 eval_hook = DistEvalHook if distributed else EvalHook
123 runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
124
125 # user-defined hooks
126 if cfg.get('custom_hooks', None):
127 custom_hooks = cfg.custom_hooks
128 assert isinstance(custom_hooks, list), \
129 f'custom_hooks expect list type, but got {type(custom_hooks)}'
130 for hook_cfg in cfg.custom_hooks:
131 assert isinstance(hook_cfg, dict), \
132 'Each item in custom_hooks expects dict type, but got ' \
133 f'{type(hook_cfg)}'
134 hook_cfg = hook_cfg.copy()
135 priority = hook_cfg.pop('priority', 'NORMAL')
136 hook = build_from_cfg(hook_cfg, HOOKS)
137 runner.register_hook(hook, priority=priority)
138
139 if cfg.resume_from:
140 runner.resume(cfg.resume_from)
141 elif cfg.load_from:
142 runner.load_checkpoint(cfg.load_from)
143 runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmdet/apis/train.py b/mmdet/apis/train.py
--- a/mmdet/apis/train.py
+++ b/mmdet/apis/train.py
@@ -8,7 +8,8 @@
from mmcv.utils import build_from_cfg
from mmdet.core import DistEvalHook, EvalHook
-from mmdet.datasets import build_dataloader, build_dataset
+from mmdet.datasets import (build_dataloader, build_dataset,
+ replace_ImageToTensor)
from mmdet.utils import get_root_logger
@@ -111,10 +112,16 @@
# register eval hooks
if validate:
+ # Support batch_size > 1 in validation
+ val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
+ if val_samples_per_gpu > 1:
+ # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+ cfg.data.val.pipeline = replace_ImageToTensor(
+ cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
- samples_per_gpu=1,
+ samples_per_gpu=val_samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
| {"golden_diff": "diff --git a/mmdet/apis/train.py b/mmdet/apis/train.py\n--- a/mmdet/apis/train.py\n+++ b/mmdet/apis/train.py\n@@ -8,7 +8,8 @@\n from mmcv.utils import build_from_cfg\n \n from mmdet.core import DistEvalHook, EvalHook\n-from mmdet.datasets import build_dataloader, build_dataset\n+from mmdet.datasets import (build_dataloader, build_dataset,\n+ replace_ImageToTensor)\n from mmdet.utils import get_root_logger\n \n \n@@ -111,10 +112,16 @@\n \n # register eval hooks\n if validate:\n+ # Support batch_size > 1 in validation\n+ val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)\n+ if val_samples_per_gpu > 1:\n+ # Replace 'ImageToTensor' to 'DefaultFormatBundle'\n+ cfg.data.val.pipeline = replace_ImageToTensor(\n+ cfg.data.val.pipeline)\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n- samples_per_gpu=1,\n+ samples_per_gpu=val_samples_per_gpu,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n", "issue": "support for validate with batch size > 1\nHi, glad to know we could run the inference with batch size > 1, any plan to support validation during training with batch size > 1? According to \r\nhttps://github.com/open-mmlab/mmdetection/blob/c8511649550834ea168f610411a47a39cf194767/mmdet/apis/train.py#L113-L120\r\nbatch size is set to 1 for validation.\n", "before_files": [{"content": "import random\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,\n Fp16OptimizerHook, OptimizerHook, build_optimizer)\nfrom mmcv.utils import build_from_cfg\n\nfrom mmdet.core import DistEvalHook, EvalHook\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.utils import get_root_logger\n\n\ndef set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef train_detector(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n if 'imgs_per_gpu' in cfg.data:\n logger.warning('\"imgs_per_gpu\" is deprecated in MMDet V2.0. 
'\n 'Please use \"samples_per_gpu\" instead')\n if 'samples_per_gpu' in cfg.data:\n logger.warning(\n f'Got \"imgs_per_gpu\"={cfg.data.imgs_per_gpu} and '\n f'\"samples_per_gpu\"={cfg.data.samples_per_gpu}, \"imgs_per_gpu\"'\n f'={cfg.data.imgs_per_gpu} is used in this experiments')\n else:\n logger.warning(\n 'Automatically set \"samples_per_gpu\"=\"imgs_per_gpu\"='\n f'{cfg.data.imgs_per_gpu} in this experiments')\n cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu\n\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n eval_cfg = cfg.get('evaluation', {})\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n # user-defined hooks\n if cfg.get('custom_hooks', None):\n custom_hooks = cfg.custom_hooks\n assert isinstance(custom_hooks, list), \\\n f'custom_hooks expect list type, but got {type(custom_hooks)}'\n for hook_cfg in cfg.custom_hooks:\n assert isinstance(hook_cfg, dict), \\\n 'Each item in custom_hooks expects dict type, but got ' \\\n f'{type(hook_cfg)}'\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = build_from_cfg(hook_cfg, HOOKS)\n runner.register_hook(hook, priority=priority)\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmdet/apis/train.py"}], "after_files": [{"content": "import random\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,\n Fp16OptimizerHook, OptimizerHook, build_optimizer)\nfrom mmcv.utils import build_from_cfg\n\nfrom mmdet.core import DistEvalHook, 
EvalHook\nfrom mmdet.datasets import (build_dataloader, build_dataset,\n replace_ImageToTensor)\nfrom mmdet.utils import get_root_logger\n\n\ndef set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef train_detector(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n if 'imgs_per_gpu' in cfg.data:\n logger.warning('\"imgs_per_gpu\" is deprecated in MMDet V2.0. '\n 'Please use \"samples_per_gpu\" instead')\n if 'samples_per_gpu' in cfg.data:\n logger.warning(\n f'Got \"imgs_per_gpu\"={cfg.data.imgs_per_gpu} and '\n f'\"samples_per_gpu\"={cfg.data.samples_per_gpu}, \"imgs_per_gpu\"'\n f'={cfg.data.imgs_per_gpu} is used in this experiments')\n else:\n logger.warning(\n 'Automatically set \"samples_per_gpu\"=\"imgs_per_gpu\"='\n f'{cfg.data.imgs_per_gpu} in this experiments')\n cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu\n\n data_loaders = [\n build_dataloader(\n ds,\n cfg.data.samples_per_gpu,\n cfg.data.workers_per_gpu,\n # cfg.gpus will be ignored if distributed\n len(cfg.gpu_ids),\n dist=distributed,\n seed=cfg.seed) for ds in dataset\n ]\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', False)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizer(model, cfg.optimizer)\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n # Support batch_size > 1 in validation\n val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)\n if val_samples_per_gpu > 1:\n # Replace 'ImageToTensor' to 'DefaultFormatBundle'\n cfg.data.val.pipeline = replace_ImageToTensor(\n cfg.data.val.pipeline)\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n val_dataloader = build_dataloader(\n val_dataset,\n samples_per_gpu=val_samples_per_gpu,\n 
workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n eval_cfg = cfg.get('evaluation', {})\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, **eval_cfg))\n\n # user-defined hooks\n if cfg.get('custom_hooks', None):\n custom_hooks = cfg.custom_hooks\n assert isinstance(custom_hooks, list), \\\n f'custom_hooks expect list type, but got {type(custom_hooks)}'\n for hook_cfg in cfg.custom_hooks:\n assert isinstance(hook_cfg, dict), \\\n 'Each item in custom_hooks expects dict type, but got ' \\\n f'{type(hook_cfg)}'\n hook_cfg = hook_cfg.copy()\n priority = hook_cfg.pop('priority', 'NORMAL')\n hook = build_from_cfg(hook_cfg, HOOKS)\n runner.register_hook(hook, priority=priority)\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmdet/apis/train.py"}]} | 1,856 | 284 |
gh_patches_debug_17635 | rasdani/github-patches | git_diff | networkx__networkx-7255 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docstring of `nonisomorphic_trees` should use `Yields`, not `Returns`
In NetworkX 3.2.1, `nonisomorphic_trees` yields graphs or adjacency matrices, so the docstring should use a `Yields` section, but it uses `Returns`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/generators/nonisomorphic_trees.py`
Content:
```
1 """
2 Implementation of the Wright, Richmond, Odlyzko and McKay (WROM)
3 algorithm for the enumeration of all non-isomorphic free trees of a
4 given order. Rooted trees are represented by level sequences, i.e.,
5 lists in which the i-th element specifies the distance of vertex i to
6 the root.
7
8 """
9
10 __all__ = ["nonisomorphic_trees", "number_of_nonisomorphic_trees"]
11
12 import networkx as nx
13
14
15 @nx._dispatchable(graphs=None)
16 def nonisomorphic_trees(order, create="graph"):
17 """Returns a list of nonisomorphic trees
18
19 Parameters
20 ----------
21 order : int
22 order of the desired tree(s)
23
24 create : graph or matrix (default="Graph)
25 If graph is selected a list of trees will be returned,
26 if matrix is selected a list of adjacency matrix will
27 be returned
28
29 Returns
30 -------
31 G : List of NetworkX Graphs
32
33 M : List of Adjacency matrices
34
35 References
36 ----------
37
38 """
39
40 if order < 2:
41 raise ValueError
42 # start at the path graph rooted at its center
43 layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))
44
45 while layout is not None:
46 layout = _next_tree(layout)
47 if layout is not None:
48 if create == "graph":
49 yield _layout_to_graph(layout)
50 elif create == "matrix":
51 yield _layout_to_matrix(layout)
52 layout = _next_rooted_tree(layout)
53
54
55 @nx._dispatchable(graphs=None)
56 def number_of_nonisomorphic_trees(order):
57 """Returns the number of nonisomorphic trees
58
59 Parameters
60 ----------
61 order : int
62 order of the desired tree(s)
63
64 Returns
65 -------
66 length : Number of nonisomorphic graphs for the given order
67
68 References
69 ----------
70
71 """
72 return sum(1 for _ in nonisomorphic_trees(order))
73
74
75 def _next_rooted_tree(predecessor, p=None):
76 """One iteration of the Beyer-Hedetniemi algorithm."""
77
78 if p is None:
79 p = len(predecessor) - 1
80 while predecessor[p] == 1:
81 p -= 1
82 if p == 0:
83 return None
84
85 q = p - 1
86 while predecessor[q] != predecessor[p] - 1:
87 q -= 1
88 result = list(predecessor)
89 for i in range(p, len(result)):
90 result[i] = result[i - p + q]
91 return result
92
93
94 def _next_tree(candidate):
95 """One iteration of the Wright, Richmond, Odlyzko and McKay
96 algorithm."""
97
98 # valid representation of a free tree if:
99 # there are at least two vertices at layer 1
100 # (this is always the case because we start at the path graph)
101 left, rest = _split_tree(candidate)
102
103 # and the left subtree of the root
104 # is less high than the tree with the left subtree removed
105 left_height = max(left)
106 rest_height = max(rest)
107 valid = rest_height >= left_height
108
109 if valid and rest_height == left_height:
110 # and, if left and rest are of the same height,
111 # if left does not encompass more vertices
112 if len(left) > len(rest):
113 valid = False
114 # and, if they have the same number or vertices,
115 # if left does not come after rest lexicographically
116 elif len(left) == len(rest) and left > rest:
117 valid = False
118
119 if valid:
120 return candidate
121 else:
122 # jump to the next valid free tree
123 p = len(left)
124 new_candidate = _next_rooted_tree(candidate, p)
125 if candidate[p] > 2:
126 new_left, new_rest = _split_tree(new_candidate)
127 new_left_height = max(new_left)
128 suffix = range(1, new_left_height + 2)
129 new_candidate[-len(suffix) :] = suffix
130 return new_candidate
131
132
133 def _split_tree(layout):
134 """Returns a tuple of two layouts, one containing the left
135 subtree of the root vertex, and one containing the original tree
136 with the left subtree removed."""
137
138 one_found = False
139 m = None
140 for i in range(len(layout)):
141 if layout[i] == 1:
142 if one_found:
143 m = i
144 break
145 else:
146 one_found = True
147
148 if m is None:
149 m = len(layout)
150
151 left = [layout[i] - 1 for i in range(1, m)]
152 rest = [0] + [layout[i] for i in range(m, len(layout))]
153 return (left, rest)
154
155
156 def _layout_to_matrix(layout):
157 """Create the adjacency matrix for the tree specified by the
158 given layout (level sequence)."""
159
160 result = [[0] * len(layout) for i in range(len(layout))]
161 stack = []
162 for i in range(len(layout)):
163 i_level = layout[i]
164 if stack:
165 j = stack[-1]
166 j_level = layout[j]
167 while j_level >= i_level:
168 stack.pop()
169 j = stack[-1]
170 j_level = layout[j]
171 result[i][j] = result[j][i] = 1
172 stack.append(i)
173 return result
174
175
176 def _layout_to_graph(layout):
177 """Create a NetworkX Graph for the tree specified by the
178 given layout(level sequence)"""
179 G = nx.Graph()
180 stack = []
181 for i in range(len(layout)):
182 i_level = layout[i]
183 if stack:
184 j = stack[-1]
185 j_level = layout[j]
186 while j_level >= i_level:
187 stack.pop()
188 j = stack[-1]
189 j_level = layout[j]
190 G.add_edge(i, j)
191 stack.append(i)
192 return G
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/generators/nonisomorphic_trees.py b/networkx/generators/nonisomorphic_trees.py
--- a/networkx/generators/nonisomorphic_trees.py
+++ b/networkx/generators/nonisomorphic_trees.py
@@ -19,22 +19,20 @@
Parameters
----------
order : int
- order of the desired tree(s)
-
- create : graph or matrix (default="Graph)
- If graph is selected a list of trees will be returned,
- if matrix is selected a list of adjacency matrix will
- be returned
-
- Returns
- -------
- G : List of NetworkX Graphs
-
- M : List of Adjacency matrices
-
- References
- ----------
-
+ order of the desired tree(s)
+
+ create : one of {"graph", "matrix"} (default="graph")
+ If ``"graph"`` is selected a list of ``Graph`` instances will be returned,
+ if matrix is selected a list of adjacency matrices will be returned.
+
+ Yields
+ ------
+ list
+ A list of nonisomorphic trees, in one of two formats depending on the
+ value of the `create` parameter:
+ - ``create="graph"``: yields a list of `networkx.Graph` instances
+ - ``create="matrix"``: yields a list of list-of-lists representing
+ adjacency matrices
"""
if order < 2:
| {"golden_diff": "diff --git a/networkx/generators/nonisomorphic_trees.py b/networkx/generators/nonisomorphic_trees.py\n--- a/networkx/generators/nonisomorphic_trees.py\n+++ b/networkx/generators/nonisomorphic_trees.py\n@@ -19,22 +19,20 @@\n Parameters\n ----------\n order : int\n- order of the desired tree(s)\n-\n- create : graph or matrix (default=\"Graph)\n- If graph is selected a list of trees will be returned,\n- if matrix is selected a list of adjacency matrix will\n- be returned\n-\n- Returns\n- -------\n- G : List of NetworkX Graphs\n-\n- M : List of Adjacency matrices\n-\n- References\n- ----------\n-\n+ order of the desired tree(s)\n+\n+ create : one of {\"graph\", \"matrix\"} (default=\"graph\")\n+ If ``\"graph\"`` is selected a list of ``Graph`` instances will be returned,\n+ if matrix is selected a list of adjacency matrices will be returned.\n+\n+ Yields\n+ ------\n+ list\n+ A list of nonisomorphic trees, in one of two formats depending on the\n+ value of the `create` parameter:\n+ - ``create=\"graph\"``: yields a list of `networkx.Graph` instances\n+ - ``create=\"matrix\"``: yields a list of list-of-lists representing\n+ adjacency matrices\n \"\"\"\n \n if order < 2:\n", "issue": "Docstring of `nonisomorphic_trees` should use `Yields`, not `Returns`\nIn NetworkX 3.2.1, `nonisomorphic_trees` yields graphs or adjacency matrices, so the docstring should use a `Yields` section, but it uses `Returns`.\n", "before_files": [{"content": "\"\"\"\nImplementation of the Wright, Richmond, Odlyzko and McKay (WROM)\nalgorithm for the enumeration of all non-isomorphic free trees of a\ngiven order. Rooted trees are represented by level sequences, i.e.,\nlists in which the i-th element specifies the distance of vertex i to\nthe root.\n\n\"\"\"\n\n__all__ = [\"nonisomorphic_trees\", \"number_of_nonisomorphic_trees\"]\n\nimport networkx as nx\n\n\n@nx._dispatchable(graphs=None)\ndef nonisomorphic_trees(order, create=\"graph\"):\n \"\"\"Returns a list of nonisomorphic trees\n\n Parameters\n ----------\n order : int\n order of the desired tree(s)\n\n create : graph or matrix (default=\"Graph)\n If graph is selected a list of trees will be returned,\n if matrix is selected a list of adjacency matrix will\n be returned\n\n Returns\n -------\n G : List of NetworkX Graphs\n\n M : List of Adjacency matrices\n\n References\n ----------\n\n \"\"\"\n\n if order < 2:\n raise ValueError\n # start at the path graph rooted at its center\n layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))\n\n while layout is not None:\n layout = _next_tree(layout)\n if layout is not None:\n if create == \"graph\":\n yield _layout_to_graph(layout)\n elif create == \"matrix\":\n yield _layout_to_matrix(layout)\n layout = _next_rooted_tree(layout)\n\n\n@nx._dispatchable(graphs=None)\ndef number_of_nonisomorphic_trees(order):\n \"\"\"Returns the number of nonisomorphic trees\n\n Parameters\n ----------\n order : int\n order of the desired tree(s)\n\n Returns\n -------\n length : Number of nonisomorphic graphs for the given order\n\n References\n ----------\n\n \"\"\"\n return sum(1 for _ in nonisomorphic_trees(order))\n\n\ndef _next_rooted_tree(predecessor, p=None):\n \"\"\"One iteration of the Beyer-Hedetniemi algorithm.\"\"\"\n\n if p is None:\n p = len(predecessor) - 1\n while predecessor[p] == 1:\n p -= 1\n if p == 0:\n return None\n\n q = p - 1\n while predecessor[q] != predecessor[p] - 1:\n q -= 1\n result = list(predecessor)\n for i in range(p, len(result)):\n result[i] = result[i - p + q]\n return 
result\n\n\ndef _next_tree(candidate):\n \"\"\"One iteration of the Wright, Richmond, Odlyzko and McKay\n algorithm.\"\"\"\n\n # valid representation of a free tree if:\n # there are at least two vertices at layer 1\n # (this is always the case because we start at the path graph)\n left, rest = _split_tree(candidate)\n\n # and the left subtree of the root\n # is less high than the tree with the left subtree removed\n left_height = max(left)\n rest_height = max(rest)\n valid = rest_height >= left_height\n\n if valid and rest_height == left_height:\n # and, if left and rest are of the same height,\n # if left does not encompass more vertices\n if len(left) > len(rest):\n valid = False\n # and, if they have the same number or vertices,\n # if left does not come after rest lexicographically\n elif len(left) == len(rest) and left > rest:\n valid = False\n\n if valid:\n return candidate\n else:\n # jump to the next valid free tree\n p = len(left)\n new_candidate = _next_rooted_tree(candidate, p)\n if candidate[p] > 2:\n new_left, new_rest = _split_tree(new_candidate)\n new_left_height = max(new_left)\n suffix = range(1, new_left_height + 2)\n new_candidate[-len(suffix) :] = suffix\n return new_candidate\n\n\ndef _split_tree(layout):\n \"\"\"Returns a tuple of two layouts, one containing the left\n subtree of the root vertex, and one containing the original tree\n with the left subtree removed.\"\"\"\n\n one_found = False\n m = None\n for i in range(len(layout)):\n if layout[i] == 1:\n if one_found:\n m = i\n break\n else:\n one_found = True\n\n if m is None:\n m = len(layout)\n\n left = [layout[i] - 1 for i in range(1, m)]\n rest = [0] + [layout[i] for i in range(m, len(layout))]\n return (left, rest)\n\n\ndef _layout_to_matrix(layout):\n \"\"\"Create the adjacency matrix for the tree specified by the\n given layout (level sequence).\"\"\"\n\n result = [[0] * len(layout) for i in range(len(layout))]\n stack = []\n for i in range(len(layout)):\n i_level = layout[i]\n if stack:\n j = stack[-1]\n j_level = layout[j]\n while j_level >= i_level:\n stack.pop()\n j = stack[-1]\n j_level = layout[j]\n result[i][j] = result[j][i] = 1\n stack.append(i)\n return result\n\n\ndef _layout_to_graph(layout):\n \"\"\"Create a NetworkX Graph for the tree specified by the\n given layout(level sequence)\"\"\"\n G = nx.Graph()\n stack = []\n for i in range(len(layout)):\n i_level = layout[i]\n if stack:\n j = stack[-1]\n j_level = layout[j]\n while j_level >= i_level:\n stack.pop()\n j = stack[-1]\n j_level = layout[j]\n G.add_edge(i, j)\n stack.append(i)\n return G\n", "path": "networkx/generators/nonisomorphic_trees.py"}], "after_files": [{"content": "\"\"\"\nImplementation of the Wright, Richmond, Odlyzko and McKay (WROM)\nalgorithm for the enumeration of all non-isomorphic free trees of a\ngiven order. 
Rooted trees are represented by level sequences, i.e.,\nlists in which the i-th element specifies the distance of vertex i to\nthe root.\n\n\"\"\"\n\n__all__ = [\"nonisomorphic_trees\", \"number_of_nonisomorphic_trees\"]\n\nimport networkx as nx\n\n\n@nx._dispatchable(graphs=None)\ndef nonisomorphic_trees(order, create=\"graph\"):\n \"\"\"Returns a list of nonisomorphic trees\n\n Parameters\n ----------\n order : int\n order of the desired tree(s)\n\n create : one of {\"graph\", \"matrix\"} (default=\"graph\")\n If ``\"graph\"`` is selected a list of ``Graph`` instances will be returned,\n if matrix is selected a list of adjacency matrices will be returned.\n\n Yields\n ------\n list\n A list of nonisomorphic trees, in one of two formats depending on the\n value of the `create` parameter:\n - ``create=\"graph\"``: yields a list of `networkx.Graph` instances\n - ``create=\"matrix\"``: yields a list of list-of-lists representing\n adjacency matrices\n \"\"\"\n\n if order < 2:\n raise ValueError\n # start at the path graph rooted at its center\n layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))\n\n while layout is not None:\n layout = _next_tree(layout)\n if layout is not None:\n if create == \"graph\":\n yield _layout_to_graph(layout)\n elif create == \"matrix\":\n yield _layout_to_matrix(layout)\n layout = _next_rooted_tree(layout)\n\n\n@nx._dispatchable(graphs=None)\ndef number_of_nonisomorphic_trees(order):\n \"\"\"Returns the number of nonisomorphic trees\n\n Parameters\n ----------\n order : int\n order of the desired tree(s)\n\n Returns\n -------\n length : Number of nonisomorphic graphs for the given order\n\n References\n ----------\n\n \"\"\"\n return sum(1 for _ in nonisomorphic_trees(order))\n\n\ndef _next_rooted_tree(predecessor, p=None):\n \"\"\"One iteration of the Beyer-Hedetniemi algorithm.\"\"\"\n\n if p is None:\n p = len(predecessor) - 1\n while predecessor[p] == 1:\n p -= 1\n if p == 0:\n return None\n\n q = p - 1\n while predecessor[q] != predecessor[p] - 1:\n q -= 1\n result = list(predecessor)\n for i in range(p, len(result)):\n result[i] = result[i - p + q]\n return result\n\n\ndef _next_tree(candidate):\n \"\"\"One iteration of the Wright, Richmond, Odlyzko and McKay\n algorithm.\"\"\"\n\n # valid representation of a free tree if:\n # there are at least two vertices at layer 1\n # (this is always the case because we start at the path graph)\n left, rest = _split_tree(candidate)\n\n # and the left subtree of the root\n # is less high than the tree with the left subtree removed\n left_height = max(left)\n rest_height = max(rest)\n valid = rest_height >= left_height\n\n if valid and rest_height == left_height:\n # and, if left and rest are of the same height,\n # if left does not encompass more vertices\n if len(left) > len(rest):\n valid = False\n # and, if they have the same number or vertices,\n # if left does not come after rest lexicographically\n elif len(left) == len(rest) and left > rest:\n valid = False\n\n if valid:\n return candidate\n else:\n # jump to the next valid free tree\n p = len(left)\n new_candidate = _next_rooted_tree(candidate, p)\n if candidate[p] > 2:\n new_left, new_rest = _split_tree(new_candidate)\n new_left_height = max(new_left)\n suffix = range(1, new_left_height + 2)\n new_candidate[-len(suffix) :] = suffix\n return new_candidate\n\n\ndef _split_tree(layout):\n \"\"\"Returns a tuple of two layouts, one containing the left\n subtree of the root vertex, and one containing the original tree\n with the left subtree 
removed.\"\"\"\n\n one_found = False\n m = None\n for i in range(len(layout)):\n if layout[i] == 1:\n if one_found:\n m = i\n break\n else:\n one_found = True\n\n if m is None:\n m = len(layout)\n\n left = [layout[i] - 1 for i in range(1, m)]\n rest = [0] + [layout[i] for i in range(m, len(layout))]\n return (left, rest)\n\n\ndef _layout_to_matrix(layout):\n \"\"\"Create the adjacency matrix for the tree specified by the\n given layout (level sequence).\"\"\"\n\n result = [[0] * len(layout) for i in range(len(layout))]\n stack = []\n for i in range(len(layout)):\n i_level = layout[i]\n if stack:\n j = stack[-1]\n j_level = layout[j]\n while j_level >= i_level:\n stack.pop()\n j = stack[-1]\n j_level = layout[j]\n result[i][j] = result[j][i] = 1\n stack.append(i)\n return result\n\n\ndef _layout_to_graph(layout):\n \"\"\"Create a NetworkX Graph for the tree specified by the\n given layout(level sequence)\"\"\"\n G = nx.Graph()\n stack = []\n for i in range(len(layout)):\n i_level = layout[i]\n if stack:\n j = stack[-1]\n j_level = layout[j]\n while j_level >= i_level:\n stack.pop()\n j = stack[-1]\n j_level = layout[j]\n G.add_edge(i, j)\n stack.append(i)\n return G\n", "path": "networkx/generators/nonisomorphic_trees.py"}]} | 2,091 | 331 |
gh_patches_debug_16108 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E3503 does not match CloudFormation - requires ValidationDomain when CF does not want it
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.33.2
*Description of issue.*
I created an ACM certificate resource, and there were problems configuring the `DomainValidationOptions` block. If using DNS validation, the only properties needed are `DomainName` and `HostedZoneId`. However, cfn-lint was demanding a third property named `ValidationDomain`. When submitting the stack for deployment to CF, it triggered an immediate rollback because CF views `HostedZoneId` and `ValidationDomain` as mutually exclusive.
Adding an ignore rule to skip the E3503 error allowed me to proceed without issues. This rule should be adjusted to match what CF enforces.
**Sample:**
```yaml
Resources:
Certificate:
Type: AWS::CertificateManager::Certificate
Metadata:
cfn-lint:
config:
ignore_checks:
- E3503
Properties:
DomainName: "*.aws.domain.com"
ValidationMethod: DNS
DomainValidationOptions:
- DomainName: aws.domain.com
HostedZoneId: !ImportValue SubdomainHostedZoneId
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import six
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9
10 class DomainValidationOptions(CloudFormationLintRule):
11 """Check if a certificate's domain validation options are set up correctly"""
12 id = 'E3503'
13 shortdesc = 'ValidationDomain is superdomain of DomainName'
14 description = 'In ValidationDomainOptions, the ValidationDomain must be a superdomain of the DomainName being validated'
15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-certificatemanager-certificate-domainvalidationoption.html#cfn-certificatemanager-certificate-domainvalidationoption-validationdomain'
16 tags = ['certificate', 'certificatemanager', 'domainvalidationoptions', 'validationdomain']
17
18 def __init__(self):
19 """ Init """
20 super(DomainValidationOptions, self).__init__()
21 self.resource_property_types = ['AWS::CertificateManager::Certificate']
22
23 def check_value(self, value, path, **kwargs):
24 """ Check value inside the list of DomainValidationOptions"""
25 matches = []
26 cfn = kwargs.get('cfn')
27 if isinstance(value, dict):
28 property_sets = cfn.get_object_without_conditions(value)
29 for property_set in property_sets:
30 properties = property_set.get('Object')
31 scenario = property_set.get('Scenario')
32 domain_name = properties.get('DomainName', '')
33 validation_domain = properties.get('ValidationDomain', '')
34 if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):
35 if domain_name == validation_domain:
36 continue
37
38 if not domain_name.endswith('.' + validation_domain):
39 message = 'ValidationDomain must be a superdomain of DomainName at {}'
40 if scenario is None:
41 matches.append(
42 RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)))))
43 else:
44 scenario_text = ' and '.join(
45 ['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
46 matches.append(
47 RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))
48 return matches
49
50 def match_resource_properties(self, properties, _, path, cfn):
51 matches = []
52 matches.extend(cfn.check_value(
53 properties, 'DomainValidationOptions', path[:],
54 check_value=self.check_value,
55 cfn=cfn,
56 ))
57
58 return matches
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
--- a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
+++ b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py
@@ -29,8 +29,8 @@
for property_set in property_sets:
properties = property_set.get('Object')
scenario = property_set.get('Scenario')
- domain_name = properties.get('DomainName', '')
- validation_domain = properties.get('ValidationDomain', '')
+ domain_name = properties.get('DomainName', None)
+ validation_domain = properties.get('ValidationDomain', None)
if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):
if domain_name == validation_domain:
continue
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n--- a/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n+++ b/src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py\n@@ -29,8 +29,8 @@\n for property_set in property_sets:\n properties = property_set.get('Object')\n scenario = property_set.get('Scenario')\n- domain_name = properties.get('DomainName', '')\n- validation_domain = properties.get('ValidationDomain', '')\n+ domain_name = properties.get('DomainName', None)\n+ validation_domain = properties.get('ValidationDomain', None)\n if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):\n if domain_name == validation_domain:\n continue\n", "issue": "E3503 does not match CloudFormation - requires ValidationDomain when CF does not want it\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\ncfn-lint 0.33.2\r\n\r\n*Description of issue.*\r\n\r\nI created an ACM certificate resource, and there were problems configuring the `DomainValidationOptions` block. If using DNS validation, the only properties needed are `DomainName` and `HostedZoneId`. However, cfn-lint was demanding a third property named `ValidationDomain`. When submitting the stack for deployment to CF, it triggered an immediate rollback because CF views `HostedZoneId` and `ValidationDomain` as mutually exclusive.\r\n\r\nAdding an ignore rule to skip the E3503 error allowed me to proceed without issues. This rule should be adjusted to match what CF enforces.\r\n\r\n**Sample:**\r\n\r\n```yaml\r\nResources:\r\n Certificate:\r\n Type: AWS::CertificateManager::Certificate\r\n Metadata:\r\n cfn-lint:\r\n config:\r\n ignore_checks:\r\n - E3503\r\n Properties:\r\n DomainName: \"*.aws.domain.com\"\r\n ValidationMethod: DNS\r\n DomainValidationOptions:\r\n - DomainName: aws.domain.com\r\n HostedZoneId: !ImportValue SubdomainHostedZoneId\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass DomainValidationOptions(CloudFormationLintRule):\n \"\"\"Check if a certificate's domain validation options are set up correctly\"\"\"\n id = 'E3503'\n shortdesc = 'ValidationDomain is superdomain of DomainName'\n description = 'In ValidationDomainOptions, the ValidationDomain must be a superdomain of the DomainName being validated'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-certificatemanager-certificate-domainvalidationoption.html#cfn-certificatemanager-certificate-domainvalidationoption-validationdomain'\n tags = ['certificate', 'certificatemanager', 'domainvalidationoptions', 'validationdomain']\n\n def __init__(self):\n \"\"\" Init \"\"\"\n super(DomainValidationOptions, self).__init__()\n self.resource_property_types = ['AWS::CertificateManager::Certificate']\n\n def check_value(self, value, path, **kwargs):\n \"\"\" Check value inside the list of DomainValidationOptions\"\"\"\n matches = []\n cfn = kwargs.get('cfn')\n if isinstance(value, dict):\n property_sets = cfn.get_object_without_conditions(value)\n for property_set in property_sets:\n properties = property_set.get('Object')\n scenario = property_set.get('Scenario')\n domain_name = properties.get('DomainName', '')\n validation_domain = properties.get('ValidationDomain', '')\n if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):\n if domain_name == validation_domain:\n continue\n\n if not domain_name.endswith('.' + validation_domain):\n message = 'ValidationDomain must be a superdomain of DomainName at {}'\n if scenario is None:\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)))))\n else:\n scenario_text = ' and '.join(\n ['when condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n matches = []\n matches.extend(cfn.check_value(\n properties, 'DomainValidationOptions', path[:],\n check_value=self.check_value,\n cfn=cfn,\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass DomainValidationOptions(CloudFormationLintRule):\n \"\"\"Check if a certificate's domain validation options are set up correctly\"\"\"\n id = 'E3503'\n shortdesc = 'ValidationDomain is superdomain of DomainName'\n description = 'In ValidationDomainOptions, the ValidationDomain must be a superdomain of the DomainName being validated'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-certificatemanager-certificate-domainvalidationoption.html#cfn-certificatemanager-certificate-domainvalidationoption-validationdomain'\n tags = ['certificate', 'certificatemanager', 'domainvalidationoptions', 'validationdomain']\n\n def __init__(self):\n \"\"\" Init \"\"\"\n super(DomainValidationOptions, self).__init__()\n self.resource_property_types = ['AWS::CertificateManager::Certificate']\n\n def check_value(self, value, path, **kwargs):\n \"\"\" Check value inside the list of DomainValidationOptions\"\"\"\n matches = []\n cfn = kwargs.get('cfn')\n if isinstance(value, dict):\n property_sets = cfn.get_object_without_conditions(value)\n for property_set in property_sets:\n properties = property_set.get('Object')\n scenario = property_set.get('Scenario')\n domain_name = properties.get('DomainName', None)\n validation_domain = properties.get('ValidationDomain', None)\n if isinstance(domain_name, six.string_types) and isinstance(validation_domain, six.string_types):\n if domain_name == validation_domain:\n continue\n\n if not domain_name.endswith('.' + validation_domain):\n message = 'ValidationDomain must be a superdomain of DomainName at {}'\n if scenario is None:\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)))))\n else:\n scenario_text = ' and '.join(\n ['when condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n matches.append(\n RuleMatch(path[:] + ['DomainName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n matches = []\n matches.extend(cfn.check_value(\n properties, 'DomainValidationOptions', path[:],\n check_value=self.check_value,\n cfn=cfn,\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/certificatemanager/DomainValidationOptions.py"}]} | 1,205 | 194 |
gh_patches_debug_18653 | rasdani/github-patches | git_diff | frappe__frappe-26417 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
(List View): In sidebar filter link field show Title of Field if set
**Is your feature request related to a problem? Please describe.**
In the sidebar filter, a link field shows field.name but not field.title.
**Describe the solution you'd like**
If the link field has a title field, show the title instead.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/desk/listview.py`
Content:
```
1 # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe.model import is_default_field
6 from frappe.query_builder import Order
7 from frappe.query_builder.functions import Count
8 from frappe.query_builder.terms import SubQuery
9 from frappe.query_builder.utils import DocType
10
11
12 @frappe.whitelist()
13 def get_list_settings(doctype):
14 try:
15 return frappe.get_cached_doc("List View Settings", doctype)
16 except frappe.DoesNotExistError:
17 frappe.clear_messages()
18
19
20 @frappe.whitelist()
21 def set_list_settings(doctype, values):
22 try:
23 doc = frappe.get_doc("List View Settings", doctype)
24 except frappe.DoesNotExistError:
25 doc = frappe.new_doc("List View Settings")
26 doc.name = doctype
27 frappe.clear_messages()
28 doc.update(frappe.parse_json(values))
29 doc.save()
30
31
32 @frappe.whitelist()
33 def get_group_by_count(doctype: str, current_filters: str, field: str) -> list[dict]:
34 current_filters = frappe.parse_json(current_filters)
35
36 if field == "assigned_to":
37 ToDo = DocType("ToDo")
38 User = DocType("User")
39 count = Count("*").as_("count")
40 filtered_records = frappe.qb.get_query(
41 doctype,
42 filters=current_filters,
43 fields=["name"],
44 validate_filters=True,
45 )
46
47 return (
48 frappe.qb.from_(ToDo)
49 .from_(User)
50 .select(ToDo.allocated_to.as_("name"), count)
51 .where(
52 (ToDo.status != "Cancelled")
53 & (ToDo.allocated_to == User.name)
54 & (User.user_type == "System User")
55 & (ToDo.reference_name.isin(SubQuery(filtered_records)))
56 )
57 .groupby(ToDo.allocated_to)
58 .orderby(count, order=Order.desc)
59 .limit(50)
60 .run(as_dict=True)
61 )
62
63 if not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):
64 raise ValueError("Field does not belong to doctype")
65
66 return frappe.get_list(
67 doctype,
68 filters=current_filters,
69 group_by=f"`tab{doctype}`.{field}",
70 fields=["count(*) as count", f"`{field}` as name"],
71 order_by="count desc",
72 limit=50,
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/frappe/desk/listview.py b/frappe/desk/listview.py
--- a/frappe/desk/listview.py
+++ b/frappe/desk/listview.py
@@ -60,10 +60,12 @@
.run(as_dict=True)
)
- if not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):
+ meta = frappe.get_meta(doctype)
+
+ if not meta.has_field(field) and not is_default_field(field):
raise ValueError("Field does not belong to doctype")
- return frappe.get_list(
+ data = frappe.get_list(
doctype,
filters=current_filters,
group_by=f"`tab{doctype}`.{field}",
@@ -71,3 +73,13 @@
order_by="count desc",
limit=50,
)
+
+ # Add in title if it's a link field and `show_title_field_in_link` is set
+ if (field_meta := meta.get_field(field)) and field_meta.fieldtype == "Link":
+ link_meta = frappe.get_meta(field_meta.options)
+ if link_meta.show_title_field_in_link:
+ title_field = link_meta.get_title_field()
+ for item in data:
+ item.title = frappe.get_value(field_meta.options, item.name, title_field)
+
+ return data
| {"golden_diff": "diff --git a/frappe/desk/listview.py b/frappe/desk/listview.py\n--- a/frappe/desk/listview.py\n+++ b/frappe/desk/listview.py\n@@ -60,10 +60,12 @@\n \t\t\t.run(as_dict=True)\n \t\t)\n \n-\tif not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):\n+\tmeta = frappe.get_meta(doctype)\n+\n+\tif not meta.has_field(field) and not is_default_field(field):\n \t\traise ValueError(\"Field does not belong to doctype\")\n \n-\treturn frappe.get_list(\n+\tdata = frappe.get_list(\n \t\tdoctype,\n \t\tfilters=current_filters,\n \t\tgroup_by=f\"`tab{doctype}`.{field}\",\n@@ -71,3 +73,13 @@\n \t\torder_by=\"count desc\",\n \t\tlimit=50,\n \t)\n+\n+\t# Add in title if it's a link field and `show_title_field_in_link` is set\n+\tif (field_meta := meta.get_field(field)) and field_meta.fieldtype == \"Link\":\n+\t\tlink_meta = frappe.get_meta(field_meta.options)\n+\t\tif link_meta.show_title_field_in_link:\n+\t\t\ttitle_field = link_meta.get_title_field()\n+\t\t\tfor item in data:\n+\t\t\t\titem.title = frappe.get_value(field_meta.options, item.name, title_field)\n+\n+\treturn data\n", "issue": "(List View): In sidebar filter link field show Title of Field if set\n**Is your feature request related to a problem? Please describe.**\r\nIn sidebar filter link field show field.name but not field.title\r\n\r\n**Describe the solution you'd like**\r\nif link field has title field - show title.\r\n\n", "before_files": [{"content": "# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.model import is_default_field\nfrom frappe.query_builder import Order\nfrom frappe.query_builder.functions import Count\nfrom frappe.query_builder.terms import SubQuery\nfrom frappe.query_builder.utils import DocType\n\n\[email protected]()\ndef get_list_settings(doctype):\n\ttry:\n\t\treturn frappe.get_cached_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tfrappe.clear_messages()\n\n\[email protected]()\ndef set_list_settings(doctype, values):\n\ttry:\n\t\tdoc = frappe.get_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tdoc = frappe.new_doc(\"List View Settings\")\n\t\tdoc.name = doctype\n\t\tfrappe.clear_messages()\n\tdoc.update(frappe.parse_json(values))\n\tdoc.save()\n\n\[email protected]()\ndef get_group_by_count(doctype: str, current_filters: str, field: str) -> list[dict]:\n\tcurrent_filters = frappe.parse_json(current_filters)\n\n\tif field == \"assigned_to\":\n\t\tToDo = DocType(\"ToDo\")\n\t\tUser = DocType(\"User\")\n\t\tcount = Count(\"*\").as_(\"count\")\n\t\tfiltered_records = frappe.qb.get_query(\n\t\t\tdoctype,\n\t\t\tfilters=current_filters,\n\t\t\tfields=[\"name\"],\n\t\t\tvalidate_filters=True,\n\t\t)\n\n\t\treturn (\n\t\t\tfrappe.qb.from_(ToDo)\n\t\t\t.from_(User)\n\t\t\t.select(ToDo.allocated_to.as_(\"name\"), count)\n\t\t\t.where(\n\t\t\t\t(ToDo.status != \"Cancelled\")\n\t\t\t\t& (ToDo.allocated_to == User.name)\n\t\t\t\t& (User.user_type == \"System User\")\n\t\t\t\t& (ToDo.reference_name.isin(SubQuery(filtered_records)))\n\t\t\t)\n\t\t\t.groupby(ToDo.allocated_to)\n\t\t\t.orderby(count, order=Order.desc)\n\t\t\t.limit(50)\n\t\t\t.run(as_dict=True)\n\t\t)\n\n\tif not frappe.get_meta(doctype).has_field(field) and not is_default_field(field):\n\t\traise ValueError(\"Field does not belong to doctype\")\n\n\treturn frappe.get_list(\n\t\tdoctype,\n\t\tfilters=current_filters,\n\t\tgroup_by=f\"`tab{doctype}`.{field}\",\n\t\tfields=[\"count(*) as 
count\", f\"`{field}` as name\"],\n\t\torder_by=\"count desc\",\n\t\tlimit=50,\n\t)\n", "path": "frappe/desk/listview.py"}], "after_files": [{"content": "# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.model import is_default_field\nfrom frappe.query_builder import Order\nfrom frappe.query_builder.functions import Count\nfrom frappe.query_builder.terms import SubQuery\nfrom frappe.query_builder.utils import DocType\n\n\[email protected]()\ndef get_list_settings(doctype):\n\ttry:\n\t\treturn frappe.get_cached_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tfrappe.clear_messages()\n\n\[email protected]()\ndef set_list_settings(doctype, values):\n\ttry:\n\t\tdoc = frappe.get_doc(\"List View Settings\", doctype)\n\texcept frappe.DoesNotExistError:\n\t\tdoc = frappe.new_doc(\"List View Settings\")\n\t\tdoc.name = doctype\n\t\tfrappe.clear_messages()\n\tdoc.update(frappe.parse_json(values))\n\tdoc.save()\n\n\[email protected]()\ndef get_group_by_count(doctype: str, current_filters: str, field: str) -> list[dict]:\n\tcurrent_filters = frappe.parse_json(current_filters)\n\n\tif field == \"assigned_to\":\n\t\tToDo = DocType(\"ToDo\")\n\t\tUser = DocType(\"User\")\n\t\tcount = Count(\"*\").as_(\"count\")\n\t\tfiltered_records = frappe.qb.get_query(\n\t\t\tdoctype,\n\t\t\tfilters=current_filters,\n\t\t\tfields=[\"name\"],\n\t\t\tvalidate_filters=True,\n\t\t)\n\n\t\treturn (\n\t\t\tfrappe.qb.from_(ToDo)\n\t\t\t.from_(User)\n\t\t\t.select(ToDo.allocated_to.as_(\"name\"), count)\n\t\t\t.where(\n\t\t\t\t(ToDo.status != \"Cancelled\")\n\t\t\t\t& (ToDo.allocated_to == User.name)\n\t\t\t\t& (User.user_type == \"System User\")\n\t\t\t\t& (ToDo.reference_name.isin(SubQuery(filtered_records)))\n\t\t\t)\n\t\t\t.groupby(ToDo.allocated_to)\n\t\t\t.orderby(count, order=Order.desc)\n\t\t\t.limit(50)\n\t\t\t.run(as_dict=True)\n\t\t)\n\n\tmeta = frappe.get_meta(doctype)\n\n\tif not meta.has_field(field) and not is_default_field(field):\n\t\traise ValueError(\"Field does not belong to doctype\")\n\n\tdata = frappe.get_list(\n\t\tdoctype,\n\t\tfilters=current_filters,\n\t\tgroup_by=f\"`tab{doctype}`.{field}\",\n\t\tfields=[\"count(*) as count\", f\"`{field}` as name\"],\n\t\torder_by=\"count desc\",\n\t\tlimit=50,\n\t)\n\n\t# Add in title if it's a link field and `show_title_field_in_link` is set\n\tif (field_meta := meta.get_field(field)) and field_meta.fieldtype == \"Link\":\n\t\tlink_meta = frappe.get_meta(field_meta.options)\n\t\tif link_meta.show_title_field_in_link:\n\t\t\ttitle_field = link_meta.get_title_field()\n\t\t\tfor item in data:\n\t\t\t\titem.title = frappe.get_value(field_meta.options, item.name, title_field)\n\n\treturn data\n", "path": "frappe/desk/listview.py"}]} | 1,060 | 302 |
gh_patches_debug_31978 | rasdani/github-patches | git_diff | ansible-collections__community.vmware-1862 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
vmware_host_lockdown: Remove states absent and present
##### SUMMARY
Remove the deprecated `state` values `absent` and `present`.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
vmware_host_lockdown
##### ADDITIONAL INFORMATION
#1514
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/vmware_host_lockdown.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r'''
13 ---
14 module: vmware_host_lockdown
15 short_description: Manage administrator permission for the local administrative account for the ESXi host
16 description:
17 - This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.
18 - All parameters and VMware objects values are case sensitive.
19 - This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed.
20 - Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.
21 author:
22 - Abhijeet Kasurde (@Akasurde)
23 options:
24 cluster_name:
25 description:
26 - Name of cluster.
27 - All host systems from given cluster used to manage lockdown.
28 - Required parameter, if C(esxi_hostname) is not set.
29 type: str
30 esxi_hostname:
31 description:
32 - List of ESXi hostname to manage lockdown.
33 - Required parameter, if C(cluster_name) is not set.
34 - See examples for specifications.
35 type: list
36 elements: str
37 state:
38 description:
39 - State of hosts system
40 - If set to C(disabled), all host systems will be removed from lockdown mode.
41 - If host system is already out of lockdown mode and set to C(disabled), no action will be taken.
42 - If set to C(normal), all host systems will be set in lockdown mode.
43 - If host system is already in lockdown mode and set to C(normal), no action will be taken.
44 - If set to C(strict), all host systems will be set in strict lockdown mode.
45 - If host system is already in strict lockdown mode and set to C(strict), no action will be taken.
46 default: normal
47 choices: [ disabled, normal, strict, present, absent ]
48 type: str
49 extends_documentation_fragment:
50 - community.vmware.vmware.documentation
51
52 '''
53
54 EXAMPLES = r'''
55 - name: Enter host system into lockdown mode
56 community.vmware.vmware_host_lockdown:
57 hostname: '{{ vcenter_hostname }}'
58 username: '{{ vcenter_username }}'
59 password: '{{ vcenter_password }}'
60 esxi_hostname: '{{ esxi_hostname }}'
61 state: normal
62 delegate_to: localhost
63
64 - name: Exit host systems from lockdown mode
65 community.vmware.vmware_host_lockdown:
66 hostname: '{{ vcenter_hostname }}'
67 username: '{{ vcenter_username }}'
68 password: '{{ vcenter_password }}'
69 esxi_hostname: '{{ esxi_hostname }}'
70 state: disabled
71 delegate_to: localhost
72
73 - name: Enter host systems into lockdown mode
74 community.vmware.vmware_host_lockdown:
75 hostname: '{{ vcenter_hostname }}'
76 username: '{{ vcenter_username }}'
77 password: '{{ vcenter_password }}'
78 esxi_hostname:
79 - '{{ esxi_hostname_1 }}'
80 - '{{ esxi_hostname_2 }}'
81 state: normal
82 delegate_to: localhost
83
84 - name: Exit host systems from lockdown mode
85 community.vmware.vmware_host_lockdown:
86 hostname: '{{ vcenter_hostname }}'
87 username: '{{ vcenter_username }}'
88 password: '{{ vcenter_password }}'
89 esxi_hostname:
90 - '{{ esxi_hostname_1 }}'
91 - '{{ esxi_hostname_2 }}'
92 state: disabled
93 delegate_to: localhost
94
95 - name: Enter all host system from cluster into lockdown mode
96 community.vmware.vmware_host_lockdown:
97 hostname: '{{ vcenter_hostname }}'
98 username: '{{ vcenter_username }}'
99 password: '{{ vcenter_password }}'
100 cluster_name: '{{ cluster_name }}'
101 state: normal
102 delegate_to: localhost
103 '''
104
105 RETURN = r'''
106 results:
107 description: metadata about state of Host system lock down
108 returned: always
109 type: dict
110 sample: {
111 "host_lockdown_state": {
112 "DC0_C0": {
113 "current_state": "normal",
114 "previous_state": "disabled",
115 "desired_state": "normal",
116 },
117 }
118 }
119 '''
120
121 try:
122 from pyVmomi import vim
123 HAS_PYVMOMI = True
124 except ImportError:
125 HAS_PYVMOMI = False
126
127 from ansible.module_utils.basic import AnsibleModule
128 from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi
129 from ansible.module_utils._text import to_native
130
131
132 class VmwareLockdownManager(PyVmomi):
133 def __init__(self, module):
134 super(VmwareLockdownManager, self).__init__(module)
135 if not self.is_vcenter():
136 self.module.fail_json(msg="Lockdown operations are performed from vCenter only. "
137 "hostname %s is an ESXi server. Please specify hostname "
138 "as vCenter server." % self.module.params['hostname'])
139 cluster_name = self.params.get('cluster_name', None)
140 esxi_host_name = self.params.get('esxi_hostname', None)
141 self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
142
143 def ensure(self):
144 """
145 Function to manage internal state management
146 """
147 results = dict(changed=False, host_lockdown_state=dict())
148 change_list = []
149 desired_state = self.params.get('state')
150
151 if desired_state == 'present':
152 self.module.warn("'present' will be removed in a future version. Please use 'normal' instead.")
153 desired_state = 'normal'
154 elif desired_state == 'absent':
155 self.module.warn("'absent' will be removed in a future version. Please use 'disabled' instead.")
156 desired_state = 'disabled'
157
158 for host in self.hosts:
159 current_state_api = host.configManager.hostAccessManager.lockdownMode
160 current_state = current_state_api[8:].lower()
161 results['host_lockdown_state'][host.name] = dict(current_state=desired_state,
162 desired_state=desired_state,
163 previous_state=current_state
164 )
165 changed = False
166 if current_state != desired_state:
167 changed = True
168 if not self.module.check_mode:
169 try:
170 desired_state_api = 'lockdown' + desired_state.capitalize()
171 host.configManager.hostAccessManager.ChangeLockdownMode(desired_state_api)
172 except vim.fault.HostConfigFault as host_config_fault:
173 self.module.fail_json(msg="Failed to manage lockdown mode for esxi"
174 " hostname %s : %s" % (host.name, to_native(host_config_fault.msg)))
175 except vim.fault.AdminDisabled as admin_disabled:
176 self.module.fail_json(msg="Failed to manage lockdown mode as administrator "
177 "permission has been disabled for "
178 "esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg)))
179 except Exception as generic_exception:
180 self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi "
181 "hostname %s : %s" % (host.name, to_native(generic_exception)))
182
183 change_list.append(changed)
184
185 if any(change_list):
186 results['changed'] = True
187
188 self.module.exit_json(**results)
189
190
191 def main():
192 argument_spec = vmware_argument_spec()
193 argument_spec.update(
194 cluster_name=dict(type='str', required=False),
195 esxi_hostname=dict(type='list', required=False, elements='str'),
196 state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict', 'present', 'absent'], required=False),
197 )
198
199 module = AnsibleModule(
200 argument_spec=argument_spec,
201 supports_check_mode=True,
202 required_one_of=[
203 ['cluster_name', 'esxi_hostname'],
204 ]
205 )
206
207 if not HAS_PYVMOMI:
208 module.fail_json(msg='pyvmomi required for this module')
209
210 vmware_lockdown_mgr = VmwareLockdownManager(module)
211 vmware_lockdown_mgr.ensure()
212
213
214 if __name__ == "__main__":
215 main()
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/vmware_host_lockdown.py b/plugins/modules/vmware_host_lockdown.py
--- a/plugins/modules/vmware_host_lockdown.py
+++ b/plugins/modules/vmware_host_lockdown.py
@@ -44,7 +44,7 @@
- If set to C(strict), all host systems will be set in strict lockdown mode.
- If host system is already in strict lockdown mode and set to C(strict), no action will be taken.
default: normal
- choices: [ disabled, normal, strict, present, absent ]
+ choices: [ disabled, normal, strict ]
type: str
extends_documentation_fragment:
- community.vmware.vmware.documentation
@@ -148,13 +148,6 @@
change_list = []
desired_state = self.params.get('state')
- if desired_state == 'present':
- self.module.warn("'present' will be removed in a future version. Please use 'normal' instead.")
- desired_state = 'normal'
- elif desired_state == 'absent':
- self.module.warn("'absent' will be removed in a future version. Please use 'disabled' instead.")
- desired_state = 'disabled'
-
for host in self.hosts:
current_state_api = host.configManager.hostAccessManager.lockdownMode
current_state = current_state_api[8:].lower()
@@ -193,7 +186,7 @@
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='list', required=False, elements='str'),
- state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict', 'present', 'absent'], required=False),
+ state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict'], required=False),
)
module = AnsibleModule(
| {"golden_diff": "diff --git a/plugins/modules/vmware_host_lockdown.py b/plugins/modules/vmware_host_lockdown.py\n--- a/plugins/modules/vmware_host_lockdown.py\n+++ b/plugins/modules/vmware_host_lockdown.py\n@@ -44,7 +44,7 @@\n - If set to C(strict), all host systems will be set in strict lockdown mode.\n - If host system is already in strict lockdown mode and set to C(strict), no action will be taken.\n default: normal\n- choices: [ disabled, normal, strict, present, absent ]\n+ choices: [ disabled, normal, strict ]\n type: str\n extends_documentation_fragment:\n - community.vmware.vmware.documentation\n@@ -148,13 +148,6 @@\n change_list = []\n desired_state = self.params.get('state')\n \n- if desired_state == 'present':\n- self.module.warn(\"'present' will be removed in a future version. Please use 'normal' instead.\")\n- desired_state = 'normal'\n- elif desired_state == 'absent':\n- self.module.warn(\"'absent' will be removed in a future version. Please use 'disabled' instead.\")\n- desired_state = 'disabled'\n-\n for host in self.hosts:\n current_state_api = host.configManager.hostAccessManager.lockdownMode\n current_state = current_state_api[8:].lower()\n@@ -193,7 +186,7 @@\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='list', required=False, elements='str'),\n- state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict', 'present', 'absent'], required=False),\n+ state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict'], required=False),\n )\n \n module = AnsibleModule(\n", "issue": "vmware_host_lockdown: Remove states absent and present\n##### SUMMARY\r\nRemove deprecated `state`s `absent` and `present`.\r\n\r\n##### ISSUE TYPE\r\n- Feature Idea\r\n\r\n##### COMPONENT NAME\r\nvmware_host_lockdown\r\n\r\n##### ADDITIONAL INFORMATION\r\n#1514 \n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_host_lockdown\nshort_description: Manage administrator permission for the local administrative account for the ESXi host\ndescription:\n- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.\n- All parameters and VMware objects values are case sensitive.\n- This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed.\n- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\noptions:\n cluster_name:\n description:\n - Name of cluster.\n - All host systems from given cluster used to manage lockdown.\n - Required parameter, if C(esxi_hostname) is not set.\n type: str\n esxi_hostname:\n description:\n - List of ESXi hostname to manage lockdown.\n - Required parameter, if C(cluster_name) is not set.\n - See examples for specifications.\n type: list\n elements: str\n state:\n description:\n - State of hosts system\n - If set to C(disabled), all host systems will be removed from lockdown mode.\n - If host system is already out of lockdown mode and set to 
C(disabled), no action will be taken.\n - If set to C(normal), all host systems will be set in lockdown mode.\n - If host system is already in lockdown mode and set to C(normal), no action will be taken.\n - If set to C(strict), all host systems will be set in strict lockdown mode.\n - If host system is already in strict lockdown mode and set to C(strict), no action will be taken.\n default: normal\n choices: [ disabled, normal, strict, present, absent ]\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Enter host system into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: normal\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: disabled\n delegate_to: localhost\n\n- name: Enter host systems into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: normal\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: disabled\n delegate_to: localhost\n\n- name: Enter all host system from cluster into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n cluster_name: '{{ cluster_name }}'\n state: normal\n delegate_to: localhost\n'''\n\nRETURN = r'''\nresults:\n description: metadata about state of Host system lock down\n returned: always\n type: dict\n sample: {\n \"host_lockdown_state\": {\n \"DC0_C0\": {\n \"current_state\": \"normal\",\n \"previous_state\": \"disabled\",\n \"desired_state\": \"normal\",\n },\n }\n }\n'''\n\ntry:\n from pyVmomi import vim\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\nfrom ansible.module_utils._text import to_native\n\n\nclass VmwareLockdownManager(PyVmomi):\n def __init__(self, module):\n super(VmwareLockdownManager, self).__init__(module)\n if not self.is_vcenter():\n self.module.fail_json(msg=\"Lockdown operations are performed from vCenter only. \"\n \"hostname %s is an ESXi server. Please specify hostname \"\n \"as vCenter server.\" % self.module.params['hostname'])\n cluster_name = self.params.get('cluster_name', None)\n esxi_host_name = self.params.get('esxi_hostname', None)\n self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)\n\n def ensure(self):\n \"\"\"\n Function to manage internal state management\n \"\"\"\n results = dict(changed=False, host_lockdown_state=dict())\n change_list = []\n desired_state = self.params.get('state')\n\n if desired_state == 'present':\n self.module.warn(\"'present' will be removed in a future version. 
Please use 'normal' instead.\")\n desired_state = 'normal'\n elif desired_state == 'absent':\n self.module.warn(\"'absent' will be removed in a future version. Please use 'disabled' instead.\")\n desired_state = 'disabled'\n\n for host in self.hosts:\n current_state_api = host.configManager.hostAccessManager.lockdownMode\n current_state = current_state_api[8:].lower()\n results['host_lockdown_state'][host.name] = dict(current_state=desired_state,\n desired_state=desired_state,\n previous_state=current_state\n )\n changed = False\n if current_state != desired_state:\n changed = True\n if not self.module.check_mode:\n try:\n desired_state_api = 'lockdown' + desired_state.capitalize()\n host.configManager.hostAccessManager.ChangeLockdownMode(desired_state_api)\n except vim.fault.HostConfigFault as host_config_fault:\n self.module.fail_json(msg=\"Failed to manage lockdown mode for esxi\"\n \" hostname %s : %s\" % (host.name, to_native(host_config_fault.msg)))\n except vim.fault.AdminDisabled as admin_disabled:\n self.module.fail_json(msg=\"Failed to manage lockdown mode as administrator \"\n \"permission has been disabled for \"\n \"esxi hostname %s : %s\" % (host.name, to_native(admin_disabled.msg)))\n except Exception as generic_exception:\n self.module.fail_json(msg=\"Failed to manage lockdown mode due to generic exception for esxi \"\n \"hostname %s : %s\" % (host.name, to_native(generic_exception)))\n\n change_list.append(changed)\n\n if any(change_list):\n results['changed'] = True\n\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='list', required=False, elements='str'),\n state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict', 'present', 'absent'], required=False),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_one_of=[\n ['cluster_name', 'esxi_hostname'],\n ]\n )\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi required for this module')\n\n vmware_lockdown_mgr = VmwareLockdownManager(module)\n vmware_lockdown_mgr.ensure()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/vmware_host_lockdown.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_host_lockdown\nshort_description: Manage administrator permission for the local administrative account for the ESXi host\ndescription:\n- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.\n- All parameters and VMware objects values are case sensitive.\n- This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed.\n- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\noptions:\n cluster_name:\n description:\n - Name of cluster.\n - All host systems from given cluster used to manage lockdown.\n - Required parameter, if C(esxi_hostname) is not set.\n 
type: str\n esxi_hostname:\n description:\n - List of ESXi hostname to manage lockdown.\n - Required parameter, if C(cluster_name) is not set.\n - See examples for specifications.\n type: list\n elements: str\n state:\n description:\n - State of hosts system\n - If set to C(disabled), all host systems will be removed from lockdown mode.\n - If host system is already out of lockdown mode and set to C(disabled), no action will be taken.\n - If set to C(normal), all host systems will be set in lockdown mode.\n - If host system is already in lockdown mode and set to C(normal), no action will be taken.\n - If set to C(strict), all host systems will be set in strict lockdown mode.\n - If host system is already in strict lockdown mode and set to C(strict), no action will be taken.\n default: normal\n choices: [ disabled, normal, strict ]\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Enter host system into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: normal\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: disabled\n delegate_to: localhost\n\n- name: Enter host systems into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: normal\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: disabled\n delegate_to: localhost\n\n- name: Enter all host system from cluster into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n cluster_name: '{{ cluster_name }}'\n state: normal\n delegate_to: localhost\n'''\n\nRETURN = r'''\nresults:\n description: metadata about state of Host system lock down\n returned: always\n type: dict\n sample: {\n \"host_lockdown_state\": {\n \"DC0_C0\": {\n \"current_state\": \"normal\",\n \"previous_state\": \"disabled\",\n \"desired_state\": \"normal\",\n },\n }\n }\n'''\n\ntry:\n from pyVmomi import vim\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\nfrom ansible.module_utils._text import to_native\n\n\nclass VmwareLockdownManager(PyVmomi):\n def __init__(self, module):\n super(VmwareLockdownManager, self).__init__(module)\n if not self.is_vcenter():\n self.module.fail_json(msg=\"Lockdown operations are performed from vCenter only. \"\n \"hostname %s is an ESXi server. 
Please specify hostname \"\n \"as vCenter server.\" % self.module.params['hostname'])\n cluster_name = self.params.get('cluster_name', None)\n esxi_host_name = self.params.get('esxi_hostname', None)\n self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)\n\n def ensure(self):\n \"\"\"\n Function to manage internal state management\n \"\"\"\n results = dict(changed=False, host_lockdown_state=dict())\n change_list = []\n desired_state = self.params.get('state')\n\n for host in self.hosts:\n current_state_api = host.configManager.hostAccessManager.lockdownMode\n current_state = current_state_api[8:].lower()\n results['host_lockdown_state'][host.name] = dict(current_state=desired_state,\n desired_state=desired_state,\n previous_state=current_state\n )\n changed = False\n if current_state != desired_state:\n changed = True\n if not self.module.check_mode:\n try:\n desired_state_api = 'lockdown' + desired_state.capitalize()\n host.configManager.hostAccessManager.ChangeLockdownMode(desired_state_api)\n except vim.fault.HostConfigFault as host_config_fault:\n self.module.fail_json(msg=\"Failed to manage lockdown mode for esxi\"\n \" hostname %s : %s\" % (host.name, to_native(host_config_fault.msg)))\n except vim.fault.AdminDisabled as admin_disabled:\n self.module.fail_json(msg=\"Failed to manage lockdown mode as administrator \"\n \"permission has been disabled for \"\n \"esxi hostname %s : %s\" % (host.name, to_native(admin_disabled.msg)))\n except Exception as generic_exception:\n self.module.fail_json(msg=\"Failed to manage lockdown mode due to generic exception for esxi \"\n \"hostname %s : %s\" % (host.name, to_native(generic_exception)))\n\n change_list.append(changed)\n\n if any(change_list):\n results['changed'] = True\n\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='list', required=False, elements='str'),\n state=dict(type='str', default='normal', choices=['disabled', 'normal', 'strict'], required=False),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_one_of=[\n ['cluster_name', 'esxi_hostname'],\n ]\n )\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi required for this module')\n\n vmware_lockdown_mgr = VmwareLockdownManager(module)\n vmware_lockdown_mgr.ensure()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/vmware_host_lockdown.py"}]} | 2,690 | 413 |
gh_patches_debug_26252 | rasdani/github-patches | git_diff | gammapy__gammapy-2263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dataset model fitting has a unit issue with PowerLaw2
When jointly fitting CTA DC-1 spectrum observations with a PowerLaw2, the interpreter will eventually reach the method **integrate_model** of the class **SpectrumEvaluator** in **gammapy.spectrum.utils**. In the loop of **line 79**, the code iterates over the bases of "amplitude" to get the **ref_unit**:
```
for unit in self.model.parameters["amplitude"].quantity.unit.bases:
if unit.is_equivalent("eV"):
ref_unit = unit
```
However, this doesn't work for PowerLaw2 since its energy unit is not inside "amplitude". Shouldn't the code, alternatively, evaluate the model to get its output energy unit when this is not inside the "amplitude"?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gammapy/spectrum/utils.py`
Content:
```
1 # Licensed under a 3-clause BSD style license - see LICENSE.rst
2 import numpy as np
3 from astropy.units import Quantity
4
5 __all__ = ["SpectrumEvaluator", "integrate_spectrum"]
6
7
8 class SpectrumEvaluator:
9 """Calculate number of predicted counts (``npred``).
10
11 The true and reconstructed energy binning are inferred from the provided IRFs.
12
13 Parameters
14 ----------
15 model : `~gammapy.spectrum.models.SpectralModel`
16 Spectral model
17 aeff : `~gammapy.irf.EffectiveAreaTable`
18 EffectiveArea
19 edisp : `~gammapy.irf.EnergyDispersion`, optional
20 EnergyDispersion
21 livetime : `~astropy.units.Quantity`
22 Observation duration (may be contained in aeff)
23 e_true : `~astropy.units.Quantity`, optional
24 Desired energy axis of the prediced counts vector if no IRFs are given
25
26 Examples
27 --------
28 Calculate predicted counts in a desired reconstruced energy binning
29
30 .. plot::
31 :include-source:
32
33 from gammapy.irf import EnergyDispersion, EffectiveAreaTable
34 from gammapy.spectrum import models, SpectrumEvaluator
35 import numpy as np
36 import astropy.units as u
37 import matplotlib.pyplot as plt
38
39 e_true = np.logspace(-2, 2.5, 109) * u.TeV
40 e_reco = np.logspace(-2, 2, 73) * u.TeV
41
42 aeff = EffectiveAreaTable.from_parametrization(energy=e_true)
43 edisp = EnergyDispersion.from_gauss(e_true=e_true, e_reco=e_reco,
44 sigma=0.3, bias=0)
45
46 model = models.PowerLaw(index=2.3,
47 amplitude="2.5e-12 cm-2 s-1 TeV-1",
48 reference="1 TeV")
49
50 livetime = 1 * u.h
51
52 predictor = SpectrumEvaluator(model=model,
53 aeff=aeff,
54 edisp=edisp,
55 livetime=livetime)
56 predictor.compute_npred().plot_hist()
57 plt.show()
58 """
59
60 def __init__(self, model, aeff=None, edisp=None, livetime=None, e_true=None):
61 self.model = model
62 self.aeff = aeff
63 self.edisp = edisp
64 self.livetime = livetime
65 self.e_true = e_true
66 self.e_reco = None
67
68 def compute_npred(self):
69 integral_flux = self.integrate_model()
70 true_counts = self.apply_aeff(integral_flux)
71 return self.apply_edisp(true_counts)
72
73 def integrate_model(self):
74 """Integrate model in true energy space."""
75 if self.aeff is not None:
76 # TODO: True energy is converted to model amplitude unit. See issue 869
77 ref_unit = None
78 try:
79 for unit in self.model.parameters["amplitude"].quantity.unit.bases:
80 if unit.is_equivalent("eV"):
81 ref_unit = unit
82 except IndexError:
83 ref_unit = "TeV"
84 self.e_true = self.aeff.energy.edges.to(ref_unit)
85 else:
86 if self.e_true is None:
87 raise ValueError("No true energy binning given")
88
89 return self.model.integral(
90 emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True
91 )
92
93 def apply_aeff(self, integral_flux):
94 if self.aeff is not None:
95 cts = integral_flux * self.aeff.data.data
96 else:
97 cts = integral_flux
98
99 # Multiply with livetime if not already contained in aeff or model
100 if cts.unit.is_equivalent("s-1"):
101 cts *= self.livetime
102
103 return cts.to("")
104
105 def apply_edisp(self, true_counts):
106 from . import CountsSpectrum
107
108 if self.edisp is not None:
109 cts = self.edisp.apply(true_counts)
110 self.e_reco = self.edisp.e_reco.edges
111 else:
112 cts = true_counts
113 self.e_reco = self.e_true
114
115 return CountsSpectrum(
116 data=cts, energy_lo=self.e_reco[:-1], energy_hi=self.e_reco[1:]
117 )
118
119
120 def integrate_spectrum(func, xmin, xmax, ndecade=100, intervals=False):
121 """Integrate 1d function using the log-log trapezoidal rule.
122
123 If scalar values for xmin and xmax are passed an oversampled grid is generated using the
124 ``ndecade`` keyword argument. If xmin and xmax arrays are passed, no
125 oversampling is performed and the integral is computed in the provided
126 grid.
127
128 Parameters
129 ----------
130 func : callable
131 Function to integrate.
132 xmin : `~astropy.units.Quantity` or array-like
133 Integration range minimum
134 xmax : `~astropy.units.Quantity` or array-like
135 Integration range minimum
136 ndecade : int, optional
137 Number of grid points per decade used for the integration.
138 Default : 100.
139 intervals : bool, optional
140 Return integrals in the grid not the sum, default: False
141 """
142 is_quantity = False
143 if isinstance(xmin, Quantity):
144 unit = xmin.unit
145 xmin = xmin.value
146 xmax = xmax.to_value(unit)
147 is_quantity = True
148
149 if np.isscalar(xmin):
150 logmin = np.log10(xmin)
151 logmax = np.log10(xmax)
152 n = int((logmax - logmin) * ndecade)
153 x = np.logspace(logmin, logmax, n)
154 else:
155 x = np.append(xmin, xmax[-1])
156
157 if is_quantity:
158 x = x * unit
159
160 y = func(x)
161
162 val = _trapz_loglog(y, x, intervals=intervals)
163
164 return val
165
166
167 # This function is copied over from https://github.com/zblz/naima/blob/master/naima/utils.py#L261
168 # and slightly modified to allow use with the uncertainties package
169
170
171 def _trapz_loglog(y, x, axis=-1, intervals=False):
172 """Integrate using the composite trapezoidal rule in log-log space.
173
174 Integrate `y` (`x`) along given axis in loglog space.
175
176 Parameters
177 ----------
178 y : array_like
179 Input array to integrate.
180 x : array_like, optional
181 Independent variable to integrate over.
182 axis : int, optional
183 Specify the axis.
184 intervals : bool, optional
185 Return array of shape x not the total integral, default: False
186
187 Returns
188 -------
189 trapz : float
190 Definite integral as approximated by trapezoidal rule in loglog space.
191 """
192 log10 = np.log10
193
194 try:
195 y_unit = y.unit
196 y = y.value
197 except AttributeError:
198 y_unit = 1.0
199 try:
200 x_unit = x.unit
201 x = x.value
202 except AttributeError:
203 x_unit = 1.0
204
205 y = np.asanyarray(y)
206 x = np.asanyarray(x)
207
208 slice1 = [slice(None)] * y.ndim
209 slice2 = [slice(None)] * y.ndim
210 slice1[axis] = slice(None, -1)
211 slice2[axis] = slice(1, None)
212 slice1, slice2 = tuple(slice1), tuple(slice2)
213
214 # arrays with uncertainties contain objects
215 if y.dtype == "O":
216 from uncertainties.unumpy import log10
217
218 # uncertainties.unumpy.log10 can't deal with tiny values see
219 # https://github.com/gammapy/gammapy/issues/687, so we filter out the values
220 # here. As the values are so small it doesn't affect the final result.
221 # the sqrt is taken to create a margin, because of the later division
222 # y[slice2] / y[slice1]
223 valid = y > np.sqrt(np.finfo(float).tiny)
224 x, y = x[valid], y[valid]
225
226 if x.ndim == 1:
227 shape = [1] * y.ndim
228 shape[axis] = x.shape[0]
229 x = x.reshape(shape)
230
231 with np.errstate(invalid="ignore", divide="ignore"):
232 # Compute the power law indices in each integration bin
233 b = log10(y[slice2] / y[slice1]) / log10(x[slice2] / x[slice1])
234
235 # if local powerlaw index is -1, use \int 1/x = log(x); otherwise use normal
236 # powerlaw integration
237 trapzs = np.where(
238 np.abs(b + 1.0) > 1e-10,
239 (y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]))
240 / (b + 1),
241 x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]),
242 )
243
244 tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2])
245 trapzs[tozero] = 0.0
246
247 if intervals:
248 return trapzs * x_unit * y_unit
249
250 ret = np.add.reduce(trapzs, axis) * x_unit * y_unit
251
252 return ret
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gammapy/spectrum/utils.py b/gammapy/spectrum/utils.py
--- a/gammapy/spectrum/utils.py
+++ b/gammapy/spectrum/utils.py
@@ -62,33 +62,21 @@
self.aeff = aeff
self.edisp = edisp
self.livetime = livetime
+
+ if aeff is not None:
+ e_true = self.aeff.energy.edges
+
self.e_true = e_true
self.e_reco = None
def compute_npred(self):
- integral_flux = self.integrate_model()
+ integral_flux = self.model.integral(
+ emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True
+ )
+
true_counts = self.apply_aeff(integral_flux)
return self.apply_edisp(true_counts)
- def integrate_model(self):
- """Integrate model in true energy space."""
- if self.aeff is not None:
- # TODO: True energy is converted to model amplitude unit. See issue 869
- ref_unit = None
- try:
- for unit in self.model.parameters["amplitude"].quantity.unit.bases:
- if unit.is_equivalent("eV"):
- ref_unit = unit
- except IndexError:
- ref_unit = "TeV"
- self.e_true = self.aeff.energy.edges.to(ref_unit)
- else:
- if self.e_true is None:
- raise ValueError("No true energy binning given")
-
- return self.model.integral(
- emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True
- )
def apply_aeff(self, integral_flux):
if self.aeff is not None:
| {"golden_diff": "diff --git a/gammapy/spectrum/utils.py b/gammapy/spectrum/utils.py\n--- a/gammapy/spectrum/utils.py\n+++ b/gammapy/spectrum/utils.py\n@@ -62,33 +62,21 @@\n self.aeff = aeff\n self.edisp = edisp\n self.livetime = livetime\n+\n+ if aeff is not None:\n+ e_true = self.aeff.energy.edges\n+\n self.e_true = e_true\n self.e_reco = None\n \n def compute_npred(self):\n- integral_flux = self.integrate_model()\n+ integral_flux = self.model.integral(\n+ emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True\n+ )\n+\n true_counts = self.apply_aeff(integral_flux)\n return self.apply_edisp(true_counts)\n \n- def integrate_model(self):\n- \"\"\"Integrate model in true energy space.\"\"\"\n- if self.aeff is not None:\n- # TODO: True energy is converted to model amplitude unit. See issue 869\n- ref_unit = None\n- try:\n- for unit in self.model.parameters[\"amplitude\"].quantity.unit.bases:\n- if unit.is_equivalent(\"eV\"):\n- ref_unit = unit\n- except IndexError:\n- ref_unit = \"TeV\"\n- self.e_true = self.aeff.energy.edges.to(ref_unit)\n- else:\n- if self.e_true is None:\n- raise ValueError(\"No true energy binning given\")\n-\n- return self.model.integral(\n- emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True\n- )\n \n def apply_aeff(self, integral_flux):\n if self.aeff is not None:\n", "issue": "Dataset model fitting has a unit issue with PowerLaw2 \nWhen jointly fitting CTA DC-1 spectrum observations with a PowerLaw2, the interpreter will eventually reach the method **integrate_model** of the class **SpectrumEvaluator** in **gammapy.spectrum.utils**. In the loop of **line 79**, the code iterates over the bases of \"amplitude\" to get the **ref_unit**:\r\n```\r\nfor unit in self.model.parameters[\"amplitude\"].quantity.unit.bases:\r\n\tif unit.is_equivalent(\"eV\"):\r\n\t\tref_unit = unit\r\n```\r\nHowever, this doesn't work for PowerLaw2 since its energy unit is not inside \"amplitude\". Shouldn't the code, alternatively, evaluate the model to get its output energy unit when this is not inside the \"amplitude\"?\nDataset model fitting has a unit issue with PowerLaw2 \nWhen jointly fitting CTA DC-1 spectrum observations with a PowerLaw2, the interpreter will eventually reach the method **integrate_model** of the class **SpectrumEvaluator** in **gammapy.spectrum.utils**. In the loop of **line 79**, the code iterates over the bases of \"amplitude\" to get the **ref_unit**:\r\n```\r\nfor unit in self.model.parameters[\"amplitude\"].quantity.unit.bases:\r\n\tif unit.is_equivalent(\"eV\"):\r\n\t\tref_unit = unit\r\n```\r\nHowever, this doesn't work for PowerLaw2 since its energy unit is not inside \"amplitude\". 
Shouldn't the code, alternatively, evaluate the model to get its output energy unit when this is not inside the \"amplitude\"?\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nfrom astropy.units import Quantity\n\n__all__ = [\"SpectrumEvaluator\", \"integrate_spectrum\"]\n\n\nclass SpectrumEvaluator:\n \"\"\"Calculate number of predicted counts (``npred``).\n\n The true and reconstructed energy binning are inferred from the provided IRFs.\n\n Parameters\n ----------\n model : `~gammapy.spectrum.models.SpectralModel`\n Spectral model\n aeff : `~gammapy.irf.EffectiveAreaTable`\n EffectiveArea\n edisp : `~gammapy.irf.EnergyDispersion`, optional\n EnergyDispersion\n livetime : `~astropy.units.Quantity`\n Observation duration (may be contained in aeff)\n e_true : `~astropy.units.Quantity`, optional\n Desired energy axis of the prediced counts vector if no IRFs are given\n\n Examples\n --------\n Calculate predicted counts in a desired reconstruced energy binning\n\n .. plot::\n :include-source:\n\n from gammapy.irf import EnergyDispersion, EffectiveAreaTable\n from gammapy.spectrum import models, SpectrumEvaluator\n import numpy as np\n import astropy.units as u\n import matplotlib.pyplot as plt\n\n e_true = np.logspace(-2, 2.5, 109) * u.TeV\n e_reco = np.logspace(-2, 2, 73) * u.TeV\n\n aeff = EffectiveAreaTable.from_parametrization(energy=e_true)\n edisp = EnergyDispersion.from_gauss(e_true=e_true, e_reco=e_reco,\n sigma=0.3, bias=0)\n\n model = models.PowerLaw(index=2.3,\n amplitude=\"2.5e-12 cm-2 s-1 TeV-1\",\n reference=\"1 TeV\")\n\n livetime = 1 * u.h\n\n predictor = SpectrumEvaluator(model=model,\n aeff=aeff,\n edisp=edisp,\n livetime=livetime)\n predictor.compute_npred().plot_hist()\n plt.show()\n \"\"\"\n\n def __init__(self, model, aeff=None, edisp=None, livetime=None, e_true=None):\n self.model = model\n self.aeff = aeff\n self.edisp = edisp\n self.livetime = livetime\n self.e_true = e_true\n self.e_reco = None\n\n def compute_npred(self):\n integral_flux = self.integrate_model()\n true_counts = self.apply_aeff(integral_flux)\n return self.apply_edisp(true_counts)\n\n def integrate_model(self):\n \"\"\"Integrate model in true energy space.\"\"\"\n if self.aeff is not None:\n # TODO: True energy is converted to model amplitude unit. See issue 869\n ref_unit = None\n try:\n for unit in self.model.parameters[\"amplitude\"].quantity.unit.bases:\n if unit.is_equivalent(\"eV\"):\n ref_unit = unit\n except IndexError:\n ref_unit = \"TeV\"\n self.e_true = self.aeff.energy.edges.to(ref_unit)\n else:\n if self.e_true is None:\n raise ValueError(\"No true energy binning given\")\n\n return self.model.integral(\n emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True\n )\n\n def apply_aeff(self, integral_flux):\n if self.aeff is not None:\n cts = integral_flux * self.aeff.data.data\n else:\n cts = integral_flux\n\n # Multiply with livetime if not already contained in aeff or model\n if cts.unit.is_equivalent(\"s-1\"):\n cts *= self.livetime\n\n return cts.to(\"\")\n\n def apply_edisp(self, true_counts):\n from . 
import CountsSpectrum\n\n if self.edisp is not None:\n cts = self.edisp.apply(true_counts)\n self.e_reco = self.edisp.e_reco.edges\n else:\n cts = true_counts\n self.e_reco = self.e_true\n\n return CountsSpectrum(\n data=cts, energy_lo=self.e_reco[:-1], energy_hi=self.e_reco[1:]\n )\n\n\ndef integrate_spectrum(func, xmin, xmax, ndecade=100, intervals=False):\n \"\"\"Integrate 1d function using the log-log trapezoidal rule.\n\n If scalar values for xmin and xmax are passed an oversampled grid is generated using the\n ``ndecade`` keyword argument. If xmin and xmax arrays are passed, no\n oversampling is performed and the integral is computed in the provided\n grid.\n\n Parameters\n ----------\n func : callable\n Function to integrate.\n xmin : `~astropy.units.Quantity` or array-like\n Integration range minimum\n xmax : `~astropy.units.Quantity` or array-like\n Integration range minimum\n ndecade : int, optional\n Number of grid points per decade used for the integration.\n Default : 100.\n intervals : bool, optional\n Return integrals in the grid not the sum, default: False\n \"\"\"\n is_quantity = False\n if isinstance(xmin, Quantity):\n unit = xmin.unit\n xmin = xmin.value\n xmax = xmax.to_value(unit)\n is_quantity = True\n\n if np.isscalar(xmin):\n logmin = np.log10(xmin)\n logmax = np.log10(xmax)\n n = int((logmax - logmin) * ndecade)\n x = np.logspace(logmin, logmax, n)\n else:\n x = np.append(xmin, xmax[-1])\n\n if is_quantity:\n x = x * unit\n\n y = func(x)\n\n val = _trapz_loglog(y, x, intervals=intervals)\n\n return val\n\n\n# This function is copied over from https://github.com/zblz/naima/blob/master/naima/utils.py#L261\n# and slightly modified to allow use with the uncertainties package\n\n\ndef _trapz_loglog(y, x, axis=-1, intervals=False):\n \"\"\"Integrate using the composite trapezoidal rule in log-log space.\n\n Integrate `y` (`x`) along given axis in loglog space.\n\n Parameters\n ----------\n y : array_like\n Input array to integrate.\n x : array_like, optional\n Independent variable to integrate over.\n axis : int, optional\n Specify the axis.\n intervals : bool, optional\n Return array of shape x not the total integral, default: False\n\n Returns\n -------\n trapz : float\n Definite integral as approximated by trapezoidal rule in loglog space.\n \"\"\"\n log10 = np.log10\n\n try:\n y_unit = y.unit\n y = y.value\n except AttributeError:\n y_unit = 1.0\n try:\n x_unit = x.unit\n x = x.value\n except AttributeError:\n x_unit = 1.0\n\n y = np.asanyarray(y)\n x = np.asanyarray(x)\n\n slice1 = [slice(None)] * y.ndim\n slice2 = [slice(None)] * y.ndim\n slice1[axis] = slice(None, -1)\n slice2[axis] = slice(1, None)\n slice1, slice2 = tuple(slice1), tuple(slice2)\n\n # arrays with uncertainties contain objects\n if y.dtype == \"O\":\n from uncertainties.unumpy import log10\n\n # uncertainties.unumpy.log10 can't deal with tiny values see\n # https://github.com/gammapy/gammapy/issues/687, so we filter out the values\n # here. 
As the values are so small it doesn't affect the final result.\n # the sqrt is taken to create a margin, because of the later division\n # y[slice2] / y[slice1]\n valid = y > np.sqrt(np.finfo(float).tiny)\n x, y = x[valid], y[valid]\n\n if x.ndim == 1:\n shape = [1] * y.ndim\n shape[axis] = x.shape[0]\n x = x.reshape(shape)\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n # Compute the power law indices in each integration bin\n b = log10(y[slice2] / y[slice1]) / log10(x[slice2] / x[slice1])\n\n # if local powerlaw index is -1, use \\int 1/x = log(x); otherwise use normal\n # powerlaw integration\n trapzs = np.where(\n np.abs(b + 1.0) > 1e-10,\n (y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]))\n / (b + 1),\n x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]),\n )\n\n tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2])\n trapzs[tozero] = 0.0\n\n if intervals:\n return trapzs * x_unit * y_unit\n\n ret = np.add.reduce(trapzs, axis) * x_unit * y_unit\n\n return ret\n", "path": "gammapy/spectrum/utils.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport numpy as np\nfrom astropy.units import Quantity\n\n__all__ = [\"SpectrumEvaluator\", \"integrate_spectrum\"]\n\n\nclass SpectrumEvaluator:\n \"\"\"Calculate number of predicted counts (``npred``).\n\n The true and reconstructed energy binning are inferred from the provided IRFs.\n\n Parameters\n ----------\n model : `~gammapy.spectrum.models.SpectralModel`\n Spectral model\n aeff : `~gammapy.irf.EffectiveAreaTable`\n EffectiveArea\n edisp : `~gammapy.irf.EnergyDispersion`, optional\n EnergyDispersion\n livetime : `~astropy.units.Quantity`\n Observation duration (may be contained in aeff)\n e_true : `~astropy.units.Quantity`, optional\n Desired energy axis of the prediced counts vector if no IRFs are given\n\n Examples\n --------\n Calculate predicted counts in a desired reconstruced energy binning\n\n .. 
plot::\n :include-source:\n\n from gammapy.irf import EnergyDispersion, EffectiveAreaTable\n from gammapy.spectrum import models, SpectrumEvaluator\n import numpy as np\n import astropy.units as u\n import matplotlib.pyplot as plt\n\n e_true = np.logspace(-2, 2.5, 109) * u.TeV\n e_reco = np.logspace(-2, 2, 73) * u.TeV\n\n aeff = EffectiveAreaTable.from_parametrization(energy=e_true)\n edisp = EnergyDispersion.from_gauss(e_true=e_true, e_reco=e_reco,\n sigma=0.3, bias=0)\n\n model = models.PowerLaw(index=2.3,\n amplitude=\"2.5e-12 cm-2 s-1 TeV-1\",\n reference=\"1 TeV\")\n\n livetime = 1 * u.h\n\n predictor = SpectrumEvaluator(model=model,\n aeff=aeff,\n edisp=edisp,\n livetime=livetime)\n predictor.compute_npred().plot_hist()\n plt.show()\n \"\"\"\n\n def __init__(self, model, aeff=None, edisp=None, livetime=None, e_true=None):\n self.model = model\n self.aeff = aeff\n self.edisp = edisp\n self.livetime = livetime\n\n if aeff is not None:\n e_true = self.aeff.energy.edges\n\n self.e_true = e_true\n self.e_reco = None\n\n def compute_npred(self):\n integral_flux = self.model.integral(\n emin=self.e_true[:-1], emax=self.e_true[1:], intervals=True\n )\n\n true_counts = self.apply_aeff(integral_flux)\n return self.apply_edisp(true_counts)\n\n\n def apply_aeff(self, integral_flux):\n if self.aeff is not None:\n cts = integral_flux * self.aeff.data.data\n else:\n cts = integral_flux\n\n # Multiply with livetime if not already contained in aeff or model\n if cts.unit.is_equivalent(\"s-1\"):\n cts *= self.livetime\n\n return cts.to(\"\")\n\n def apply_edisp(self, true_counts):\n from . import CountsSpectrum\n\n if self.edisp is not None:\n cts = self.edisp.apply(true_counts)\n self.e_reco = self.edisp.e_reco.edges\n else:\n cts = true_counts\n self.e_reco = self.e_true\n\n return CountsSpectrum(\n data=cts, energy_lo=self.e_reco[:-1], energy_hi=self.e_reco[1:]\n )\n\n\ndef integrate_spectrum(func, xmin, xmax, ndecade=100, intervals=False):\n \"\"\"Integrate 1d function using the log-log trapezoidal rule.\n\n If scalar values for xmin and xmax are passed an oversampled grid is generated using the\n ``ndecade`` keyword argument. 
If xmin and xmax arrays are passed, no\n oversampling is performed and the integral is computed in the provided\n grid.\n\n Parameters\n ----------\n func : callable\n Function to integrate.\n xmin : `~astropy.units.Quantity` or array-like\n Integration range minimum\n xmax : `~astropy.units.Quantity` or array-like\n Integration range minimum\n ndecade : int, optional\n Number of grid points per decade used for the integration.\n Default : 100.\n intervals : bool, optional\n Return integrals in the grid not the sum, default: False\n \"\"\"\n is_quantity = False\n if isinstance(xmin, Quantity):\n unit = xmin.unit\n xmin = xmin.value\n xmax = xmax.to_value(unit)\n is_quantity = True\n\n if np.isscalar(xmin):\n logmin = np.log10(xmin)\n logmax = np.log10(xmax)\n n = int((logmax - logmin) * ndecade)\n x = np.logspace(logmin, logmax, n)\n else:\n x = np.append(xmin, xmax[-1])\n\n if is_quantity:\n x = x * unit\n\n y = func(x)\n\n val = _trapz_loglog(y, x, intervals=intervals)\n\n return val\n\n\n# This function is copied over from https://github.com/zblz/naima/blob/master/naima/utils.py#L261\n# and slightly modified to allow use with the uncertainties package\n\n\ndef _trapz_loglog(y, x, axis=-1, intervals=False):\n \"\"\"Integrate using the composite trapezoidal rule in log-log space.\n\n Integrate `y` (`x`) along given axis in loglog space.\n\n Parameters\n ----------\n y : array_like\n Input array to integrate.\n x : array_like, optional\n Independent variable to integrate over.\n axis : int, optional\n Specify the axis.\n intervals : bool, optional\n Return array of shape x not the total integral, default: False\n\n Returns\n -------\n trapz : float\n Definite integral as approximated by trapezoidal rule in loglog space.\n \"\"\"\n log10 = np.log10\n\n try:\n y_unit = y.unit\n y = y.value\n except AttributeError:\n y_unit = 1.0\n try:\n x_unit = x.unit\n x = x.value\n except AttributeError:\n x_unit = 1.0\n\n y = np.asanyarray(y)\n x = np.asanyarray(x)\n\n slice1 = [slice(None)] * y.ndim\n slice2 = [slice(None)] * y.ndim\n slice1[axis] = slice(None, -1)\n slice2[axis] = slice(1, None)\n slice1, slice2 = tuple(slice1), tuple(slice2)\n\n # arrays with uncertainties contain objects\n if y.dtype == \"O\":\n from uncertainties.unumpy import log10\n\n # uncertainties.unumpy.log10 can't deal with tiny values see\n # https://github.com/gammapy/gammapy/issues/687, so we filter out the values\n # here. As the values are so small it doesn't affect the final result.\n # the sqrt is taken to create a margin, because of the later division\n # y[slice2] / y[slice1]\n valid = y > np.sqrt(np.finfo(float).tiny)\n x, y = x[valid], y[valid]\n\n if x.ndim == 1:\n shape = [1] * y.ndim\n shape[axis] = x.shape[0]\n x = x.reshape(shape)\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n # Compute the power law indices in each integration bin\n b = log10(y[slice2] / y[slice1]) / log10(x[slice2] / x[slice1])\n\n # if local powerlaw index is -1, use \\int 1/x = log(x); otherwise use normal\n # powerlaw integration\n trapzs = np.where(\n np.abs(b + 1.0) > 1e-10,\n (y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]))\n / (b + 1),\n x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]),\n )\n\n tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2])\n trapzs[tozero] = 0.0\n\n if intervals:\n return trapzs * x_unit * y_unit\n\n ret = np.add.reduce(trapzs, axis) * x_unit * y_unit\n\n return ret\n", "path": "gammapy/spectrum/utils.py"}]} | 3,377 | 396 |
gh_patches_debug_8449 | rasdani/github-patches | git_diff | coala__coala-bears-2883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace dep munkres3 with munkres
https://github.com/bmc/munkres now has Python 3 support, so we don't need to use https://github.com/datapublica/munkres
openSUSE already has the former, doesn't have the latter (c.f. https://github.com/coala/coala-bears/issues/2849)
https://packages.ubuntu.com/cosmic/python3-munkres appears to also be the former.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py`
Content:
```
1 import functools
2 from itertools import combinations
3
4 from bears.c_languages.ClangBear import clang_available, ClangBear
5 from bears.c_languages.codeclone_detection.ClangCountingConditions import (
6 condition_dict)
7 from bears.c_languages.codeclone_detection.ClangCountVectorCreator import (
8 ClangCountVectorCreator)
9 from bears.c_languages.codeclone_detection.CloneDetectionRoutines import (
10 compare_functions, get_count_matrices)
11 from coala_utils.string_processing.StringConverter import StringConverter
12 from coalib.bears.GlobalBear import GlobalBear
13 from dependency_management.requirements.PipRequirement import PipRequirement
14 from coalib.collecting.Collectors import collect_dirs
15 from coalib.results.HiddenResult import HiddenResult
16 from coalib.settings.Setting import path_list, typed_ordered_dict
17 from coala_utils.decorators import (enforce_signature, generate_ordering,
18 generate_repr)
19
20 # counting_condition_dict is a function object generated by typed_dict. This
21 # function takes a setting and creates a dictionary out of it while it
22 # converts all keys to counting condition function objects (via the
23 # condition_dict) and all values to floats while unset values default to 1.
24 counting_condition_dict = typed_ordered_dict(
25 lambda setting: condition_dict[str(setting).lower()],
26 float,
27 1)
28
29 default_cc_dict = counting_condition_dict(StringConverter(
30 """
31 used: 0,
32 returned: 1.4,
33 is_condition: 0,
34 in_condition: 1.4,
35 in_second_level_condition: 1.4,
36 in_third_level_condition: 1.0,
37 is_assignee: 0,
38 is_assigner: 0.6,
39 loop_content: 0,
40 second_level_loop_content,
41 third_level_loop_content,
42 is_param: 2,
43 is_called: 1.4,
44 is_call_param: 0.0,
45 in_sum: 2.0,
46 in_product: 0,
47 in_binary_operation,
48 member_accessed"""))
49
50
51 @generate_repr(('id', hex),
52 'origin',
53 'differences',
54 'count_matrices',
55 'message')
56 @generate_ordering('origin',
57 'differences',
58 'count_matrices',
59 'message')
60 class ClangFunctionDifferenceResult(HiddenResult):
61
62 @enforce_signature
63 def __init__(self, origin,
64 differences: list,
65 count_matrices: dict):
66 super().__init__(origin,
67 [differences, count_matrices])
68 self.differences = differences
69 self.count_matrices = count_matrices
70
71
72 def get_difference(function_pair,
73 count_matrices,
74 average_calculation,
75 poly_postprocessing,
76 exp_postprocessing):
77 """
78 Retrieves the difference between two functions using the munkres algorithm.
79
80 :param function_pair: A tuple containing both indices for the
81 count_matrices dictionary.
82 :param count_matrices: A dictionary holding CMs.
83 :param average_calculation: If set to true the difference calculation
84 function will take the average of all variable
85 differences as the difference, else it will
86 normalize the function as a whole and thus
87 weighting in variables dependent on their size.
88 :param poly_postprocessing: If set to true, the difference value of big
89 function pairs will be reduced using a
90 polynomial approach.
91 :param exp_postprocessing: If set to true, the difference value of big
92 function pairs will be reduced using an
93 exponential approach.
94 :return: A tuple containing both function ids and their
95 difference.
96 """
97 function_1, function_2 = function_pair
98 return (function_1,
99 function_2,
100 compare_functions(count_matrices[function_1],
101 count_matrices[function_2],
102 average_calculation,
103 poly_postprocessing,
104 exp_postprocessing))
105
106
107 class ClangFunctionDifferenceBear(GlobalBear):
108 check_prerequisites = classmethod(clang_available)
109 LANGUAGES = ClangBear.LANGUAGES
110 REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}
111
112 def run(self,
113 counting_conditions: counting_condition_dict = default_cc_dict,
114 average_calculation: bool = False,
115 poly_postprocessing: bool = True,
116 exp_postprocessing: bool = False,
117 extra_include_paths: path_list = (),
118 ):
119 """
120 Retrieves similarities for code clone detection. Those can be reused in
121 another bear to produce results.
122
123 Postprocessing may be done because small functions are less likely to
124 be clones at the same difference value than big functions which may
125 provide a better refactoring opportunity for the user.
126
127 :param counting_conditions: A comma seperated list of counting
128 conditions. Possible values are: used,
129 returned, is_condition, in_condition,
130 in_second_level_condition,
131 in_third_level_condition, is_assignee,
132 is_assigner, loop_content,
133 second_level_loop_content,
134 third_level_loop_content, is_param,
135 in_sum, in_product, in_binary_operation,
136 member_accessed.
137 Weightings can be assigned to each
138 condition due to providing a dict
139 value, i.e. having used weighted in
140 half as much as other conditions would
141 simply be: "used: 0.5, is_assignee".
142 Weightings default to 1 if unset.
143 :param average_calculation: If set to true the difference calculation
144 function will take the average of all
145 variable differences as the difference,
146 else it will normalize the function as a
147 whole and thus weighting in variables
148 dependent on their size.
149 :param poly_postprocessing: If set to true, the difference value of big
150 function pairs will be reduced using a
151 polynomial approach.
152 :param extra_include_paths: A list containing additional include paths.
153 :param exp_postprocessing: If set to true, the difference value of big
154 function pairs will be reduced using an
155 exponential approach.
156 """
157 self.debug('Using the following counting conditions:')
158 for key, val in counting_conditions.items():
159 self.debug(' *', key.__name__, '(weighting: {})'.format(val))
160
161 self.debug('Creating count matrices...')
162 count_matrices = get_count_matrices(
163 ClangCountVectorCreator(list(counting_conditions.keys()),
164 list(counting_conditions.values())),
165 list(self.file_dict.keys()),
166 lambda prog: self.debug('{:2.4f}%...'.format(prog)),
167 self.section['files'].origin,
168 collect_dirs(extra_include_paths))
169
170 self.debug('Calculating differences...')
171
172 differences = []
173 function_count = len(count_matrices)
174 # Thats n over 2, hardcoded to simplify calculation
175 combination_length = function_count * (function_count-1) / 2
176 partial_get_difference = functools.partial(
177 get_difference,
178 count_matrices=count_matrices,
179 average_calculation=average_calculation,
180 poly_postprocessing=poly_postprocessing,
181 exp_postprocessing=exp_postprocessing)
182
183 for i, elem in enumerate(
184 map(partial_get_difference,
185 [(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):
186 if i % 50 == 0:
187 self.debug('{:2.4f}%...'.format(100*i/combination_length))
188 differences.append(elem)
189
190 yield ClangFunctionDifferenceResult(self, differences, count_matrices)
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py b/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py
--- a/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py
+++ b/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py
@@ -107,7 +107,7 @@
class ClangFunctionDifferenceBear(GlobalBear):
check_prerequisites = classmethod(clang_available)
LANGUAGES = ClangBear.LANGUAGES
- REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}
+ REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres', '1.1.2')}
def run(self,
counting_conditions: counting_condition_dict = default_cc_dict,
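For context, the patch above only swaps the PipRequirement; the Hungarian-algorithm API that the clone detection relies on is, as far as I know, unchanged between the old `munkres3` fork and `munkres`. A minimal usage sketch (assuming `pip install munkres`):

```
from munkres import Munkres

# Toy cost matrix; compute() returns the optimal (row, column) assignment pairs.
cost_matrix = [[4, 2, 8],
               [4, 3, 7],
               [3, 1, 6]]
indexes = Munkres().compute(cost_matrix)
total = sum(cost_matrix[r][c] for r, c in indexes)
print(indexes, total)
```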
| {"golden_diff": "diff --git a/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py b/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py\n--- a/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py\n+++ b/bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py\n@@ -107,7 +107,7 @@\n class ClangFunctionDifferenceBear(GlobalBear):\n check_prerequisites = classmethod(clang_available)\n LANGUAGES = ClangBear.LANGUAGES\n- REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}\n+ REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres', '1.1.2')}\n \n def run(self,\n counting_conditions: counting_condition_dict = default_cc_dict,\n", "issue": "Replace dep munkres3 with munkres\nhttps://github.com/bmc/munkres now has Python 3 support, so we dont need to use https://github.com/datapublica/munkres\r\n\r\nopenSUSE already has the former, doesnt have the latter (c.f. https://github.com/coala/coala-bears/issues/2849)\r\n\r\nhttps://packages.ubuntu.com/cosmic/python3-munkres appears to also be the former.\n", "before_files": [{"content": "import functools\nfrom itertools import combinations\n\nfrom bears.c_languages.ClangBear import clang_available, ClangBear\nfrom bears.c_languages.codeclone_detection.ClangCountingConditions import (\n condition_dict)\nfrom bears.c_languages.codeclone_detection.ClangCountVectorCreator import (\n ClangCountVectorCreator)\nfrom bears.c_languages.codeclone_detection.CloneDetectionRoutines import (\n compare_functions, get_count_matrices)\nfrom coala_utils.string_processing.StringConverter import StringConverter\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.collecting.Collectors import collect_dirs\nfrom coalib.results.HiddenResult import HiddenResult\nfrom coalib.settings.Setting import path_list, typed_ordered_dict\nfrom coala_utils.decorators import (enforce_signature, generate_ordering,\n generate_repr)\n\n# counting_condition_dict is a function object generated by typed_dict. 
This\n# function takes a setting and creates a dictionary out of it while it\n# converts all keys to counting condition function objects (via the\n# condition_dict) and all values to floats while unset values default to 1.\ncounting_condition_dict = typed_ordered_dict(\n lambda setting: condition_dict[str(setting).lower()],\n float,\n 1)\n\ndefault_cc_dict = counting_condition_dict(StringConverter(\n \"\"\"\nused: 0,\nreturned: 1.4,\nis_condition: 0,\nin_condition: 1.4,\nin_second_level_condition: 1.4,\nin_third_level_condition: 1.0,\nis_assignee: 0,\nis_assigner: 0.6,\nloop_content: 0,\nsecond_level_loop_content,\nthird_level_loop_content,\nis_param: 2,\nis_called: 1.4,\nis_call_param: 0.0,\nin_sum: 2.0,\nin_product: 0,\nin_binary_operation,\nmember_accessed\"\"\"))\n\n\n@generate_repr(('id', hex),\n 'origin',\n 'differences',\n 'count_matrices',\n 'message')\n@generate_ordering('origin',\n 'differences',\n 'count_matrices',\n 'message')\nclass ClangFunctionDifferenceResult(HiddenResult):\n\n @enforce_signature\n def __init__(self, origin,\n differences: list,\n count_matrices: dict):\n super().__init__(origin,\n [differences, count_matrices])\n self.differences = differences\n self.count_matrices = count_matrices\n\n\ndef get_difference(function_pair,\n count_matrices,\n average_calculation,\n poly_postprocessing,\n exp_postprocessing):\n \"\"\"\n Retrieves the difference between two functions using the munkres algorithm.\n\n :param function_pair: A tuple containing both indices for the\n count_matrices dictionary.\n :param count_matrices: A dictionary holding CMs.\n :param average_calculation: If set to true the difference calculation\n function will take the average of all variable\n differences as the difference, else it will\n normalize the function as a whole and thus\n weighting in variables dependent on their size.\n :param poly_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using a\n polynomial approach.\n :param exp_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using an\n exponential approach.\n :return: A tuple containing both function ids and their\n difference.\n \"\"\"\n function_1, function_2 = function_pair\n return (function_1,\n function_2,\n compare_functions(count_matrices[function_1],\n count_matrices[function_2],\n average_calculation,\n poly_postprocessing,\n exp_postprocessing))\n\n\nclass ClangFunctionDifferenceBear(GlobalBear):\n check_prerequisites = classmethod(clang_available)\n LANGUAGES = ClangBear.LANGUAGES\n REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}\n\n def run(self,\n counting_conditions: counting_condition_dict = default_cc_dict,\n average_calculation: bool = False,\n poly_postprocessing: bool = True,\n exp_postprocessing: bool = False,\n extra_include_paths: path_list = (),\n ):\n \"\"\"\n Retrieves similarities for code clone detection. Those can be reused in\n another bear to produce results.\n\n Postprocessing may be done because small functions are less likely to\n be clones at the same difference value than big functions which may\n provide a better refactoring opportunity for the user.\n\n :param counting_conditions: A comma seperated list of counting\n conditions. 
Possible values are: used,\n returned, is_condition, in_condition,\n in_second_level_condition,\n in_third_level_condition, is_assignee,\n is_assigner, loop_content,\n second_level_loop_content,\n third_level_loop_content, is_param,\n in_sum, in_product, in_binary_operation,\n member_accessed.\n Weightings can be assigned to each\n condition due to providing a dict\n value, i.e. having used weighted in\n half as much as other conditions would\n simply be: \"used: 0.5, is_assignee\".\n Weightings default to 1 if unset.\n :param average_calculation: If set to true the difference calculation\n function will take the average of all\n variable differences as the difference,\n else it will normalize the function as a\n whole and thus weighting in variables\n dependent on their size.\n :param poly_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using a\n polynomial approach.\n :param extra_include_paths: A list containing additional include paths.\n :param exp_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using an\n exponential approach.\n \"\"\"\n self.debug('Using the following counting conditions:')\n for key, val in counting_conditions.items():\n self.debug(' *', key.__name__, '(weighting: {})'.format(val))\n\n self.debug('Creating count matrices...')\n count_matrices = get_count_matrices(\n ClangCountVectorCreator(list(counting_conditions.keys()),\n list(counting_conditions.values())),\n list(self.file_dict.keys()),\n lambda prog: self.debug('{:2.4f}%...'.format(prog)),\n self.section['files'].origin,\n collect_dirs(extra_include_paths))\n\n self.debug('Calculating differences...')\n\n differences = []\n function_count = len(count_matrices)\n # Thats n over 2, hardcoded to simplify calculation\n combination_length = function_count * (function_count-1) / 2\n partial_get_difference = functools.partial(\n get_difference,\n count_matrices=count_matrices,\n average_calculation=average_calculation,\n poly_postprocessing=poly_postprocessing,\n exp_postprocessing=exp_postprocessing)\n\n for i, elem in enumerate(\n map(partial_get_difference,\n [(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):\n if i % 50 == 0:\n self.debug('{:2.4f}%...'.format(100*i/combination_length))\n differences.append(elem)\n\n yield ClangFunctionDifferenceResult(self, differences, count_matrices)\n", "path": "bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py"}], "after_files": [{"content": "import functools\nfrom itertools import combinations\n\nfrom bears.c_languages.ClangBear import clang_available, ClangBear\nfrom bears.c_languages.codeclone_detection.ClangCountingConditions import (\n condition_dict)\nfrom bears.c_languages.codeclone_detection.ClangCountVectorCreator import (\n ClangCountVectorCreator)\nfrom bears.c_languages.codeclone_detection.CloneDetectionRoutines import (\n compare_functions, get_count_matrices)\nfrom coala_utils.string_processing.StringConverter import StringConverter\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom dependency_management.requirements.PipRequirement import PipRequirement\nfrom coalib.collecting.Collectors import collect_dirs\nfrom coalib.results.HiddenResult import HiddenResult\nfrom coalib.settings.Setting import path_list, typed_ordered_dict\nfrom coala_utils.decorators import (enforce_signature, generate_ordering,\n generate_repr)\n\n# counting_condition_dict is a function object generated by typed_dict. 
This\n# function takes a setting and creates a dictionary out of it while it\n# converts all keys to counting condition function objects (via the\n# condition_dict) and all values to floats while unset values default to 1.\ncounting_condition_dict = typed_ordered_dict(\n lambda setting: condition_dict[str(setting).lower()],\n float,\n 1)\n\ndefault_cc_dict = counting_condition_dict(StringConverter(\n \"\"\"\nused: 0,\nreturned: 1.4,\nis_condition: 0,\nin_condition: 1.4,\nin_second_level_condition: 1.4,\nin_third_level_condition: 1.0,\nis_assignee: 0,\nis_assigner: 0.6,\nloop_content: 0,\nsecond_level_loop_content,\nthird_level_loop_content,\nis_param: 2,\nis_called: 1.4,\nis_call_param: 0.0,\nin_sum: 2.0,\nin_product: 0,\nin_binary_operation,\nmember_accessed\"\"\"))\n\n\n@generate_repr(('id', hex),\n 'origin',\n 'differences',\n 'count_matrices',\n 'message')\n@generate_ordering('origin',\n 'differences',\n 'count_matrices',\n 'message')\nclass ClangFunctionDifferenceResult(HiddenResult):\n\n @enforce_signature\n def __init__(self, origin,\n differences: list,\n count_matrices: dict):\n super().__init__(origin,\n [differences, count_matrices])\n self.differences = differences\n self.count_matrices = count_matrices\n\n\ndef get_difference(function_pair,\n count_matrices,\n average_calculation,\n poly_postprocessing,\n exp_postprocessing):\n \"\"\"\n Retrieves the difference between two functions using the munkres algorithm.\n\n :param function_pair: A tuple containing both indices for the\n count_matrices dictionary.\n :param count_matrices: A dictionary holding CMs.\n :param average_calculation: If set to true the difference calculation\n function will take the average of all variable\n differences as the difference, else it will\n normalize the function as a whole and thus\n weighting in variables dependent on their size.\n :param poly_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using a\n polynomial approach.\n :param exp_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using an\n exponential approach.\n :return: A tuple containing both function ids and their\n difference.\n \"\"\"\n function_1, function_2 = function_pair\n return (function_1,\n function_2,\n compare_functions(count_matrices[function_1],\n count_matrices[function_2],\n average_calculation,\n poly_postprocessing,\n exp_postprocessing))\n\n\nclass ClangFunctionDifferenceBear(GlobalBear):\n check_prerequisites = classmethod(clang_available)\n LANGUAGES = ClangBear.LANGUAGES\n REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres', '1.1.2')}\n\n def run(self,\n counting_conditions: counting_condition_dict = default_cc_dict,\n average_calculation: bool = False,\n poly_postprocessing: bool = True,\n exp_postprocessing: bool = False,\n extra_include_paths: path_list = (),\n ):\n \"\"\"\n Retrieves similarities for code clone detection. Those can be reused in\n another bear to produce results.\n\n Postprocessing may be done because small functions are less likely to\n be clones at the same difference value than big functions which may\n provide a better refactoring opportunity for the user.\n\n :param counting_conditions: A comma seperated list of counting\n conditions. 
Possible values are: used,\n returned, is_condition, in_condition,\n in_second_level_condition,\n in_third_level_condition, is_assignee,\n is_assigner, loop_content,\n second_level_loop_content,\n third_level_loop_content, is_param,\n in_sum, in_product, in_binary_operation,\n member_accessed.\n Weightings can be assigned to each\n condition due to providing a dict\n value, i.e. having used weighted in\n half as much as other conditions would\n simply be: \"used: 0.5, is_assignee\".\n Weightings default to 1 if unset.\n :param average_calculation: If set to true the difference calculation\n function will take the average of all\n variable differences as the difference,\n else it will normalize the function as a\n whole and thus weighting in variables\n dependent on their size.\n :param poly_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using a\n polynomial approach.\n :param extra_include_paths: A list containing additional include paths.\n :param exp_postprocessing: If set to true, the difference value of big\n function pairs will be reduced using an\n exponential approach.\n \"\"\"\n self.debug('Using the following counting conditions:')\n for key, val in counting_conditions.items():\n self.debug(' *', key.__name__, '(weighting: {})'.format(val))\n\n self.debug('Creating count matrices...')\n count_matrices = get_count_matrices(\n ClangCountVectorCreator(list(counting_conditions.keys()),\n list(counting_conditions.values())),\n list(self.file_dict.keys()),\n lambda prog: self.debug('{:2.4f}%...'.format(prog)),\n self.section['files'].origin,\n collect_dirs(extra_include_paths))\n\n self.debug('Calculating differences...')\n\n differences = []\n function_count = len(count_matrices)\n # Thats n over 2, hardcoded to simplify calculation\n combination_length = function_count * (function_count-1) / 2\n partial_get_difference = functools.partial(\n get_difference,\n count_matrices=count_matrices,\n average_calculation=average_calculation,\n poly_postprocessing=poly_postprocessing,\n exp_postprocessing=exp_postprocessing)\n\n for i, elem in enumerate(\n map(partial_get_difference,\n [(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):\n if i % 50 == 0:\n self.debug('{:2.4f}%...'.format(100*i/combination_length))\n differences.append(elem)\n\n yield ClangFunctionDifferenceResult(self, differences, count_matrices)\n", "path": "bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py"}]} | 2,384 | 188 |
gh_patches_debug_9424 | rasdani/github-patches | git_diff | napari__napari-6057 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ipython] File > Save Screenshot... warns about replacing if extension is provided
## 🐛 Bug
If a napari viewer is launched from ipython and you use File > Save Screenshot... then everything works fine if you just enter a file name, **but if you specify the extension**—out of habit or to change the file format—then napari will warn that the file already exists, even if it does not.
Regardless of choice, the file is created and everything works.
Looking carefully, when one clicks Save the screen flashes, the file appears in the file list of the dialog, and the warning pops up while the dialog remains open.
It's as if the file is created once and then the whole accept step runs a second time when the dialog goes to close.
Edit:
On the other hand, *if you pass a filename that exists,* **but without extension**, it will correctly warn that the file exists, but if you say `no` to overwriting, the dialog closes with: `WARNING: QDialog::exec: Recursive call detected`
If you *do pass a full name with extension*, it will correctly warn that the file exists, and saying `no` will return you to the dialog to modify the name—as expected.
## To Reproduce
Steps to reproduce the behavior:
1. launch ipython and use
```
import napari
viewer = napari.Viewer()
```
2. open any sample image or add any layer
3. File > Save Screenshot...
4. enter a file name **with an extension**, but make sure it's a unique name.
## Expected behavior
No warning should be raised if the file doesn't exist; the dialog should just disappear and the file should be created.
If the warning is raised and the user clicks `no` to not overwrite, the dialog should remain open so the user can change the name.
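For reference, the `accept()` shape that produces this behavior is roughly the following (a sketch assuming PyQt5, not napari's actual implementation; the point is to return early instead of starting a second `exec_()` loop):

```
import os
import sys

from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox


class SaveScreenshotDialog(QFileDialog):
    """Non-native save dialog that confirms overwrites without nesting exec_()."""

    def __init__(self, parent=None):
        super().__init__(parent, "Save screenshot")
        self.setAcceptMode(QFileDialog.AcceptSave)
        self.setOption(QFileDialog.DontUseNativeDialog, True)
        self.setNameFilter("Image files (*.png *.bmp *.gif *.tif *.tiff)")

    def accept(self):
        path = self.selectedFiles()[0]
        if os.path.splitext(path)[1] == "":
            path += ".png"
        if os.path.exists(path):
            answer = QMessageBox.warning(
                self, "Confirm overwrite",
                f"{path} already exists. Do you want to replace it?",
                QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
            if answer != QMessageBox.Yes:
                # Just return: the dialog stays open and the user can edit the
                # name. Re-running exec_() here is what triggers the
                # "Recursive call detected" warning and the double handling.
                return
        super().accept()  # closes the dialog exactly once


if __name__ == "__main__":
    app = QApplication(sys.argv)
    dialog = SaveScreenshotDialog()
    if dialog.exec_():
        print("would save screenshot to", dialog.selectedFiles()[0])
```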
## Environment
```
napari: 0.4.18
Platform: macOS-13.4.1-arm64-arm-64bit
System: MacOS 13.4.1
Python: 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:41:52) [Clang 15.0.7 ]
Qt: 5.15.6
PyQt5: 5.15.7
NumPy: 1.25.1
SciPy: 1.11.1
Dask: 2023.7.0
VisPy: 0.12.2
magicgui: 0.7.2
superqt: unknown
in-n-out: 0.1.8
app-model: 0.2.0
npe2: 0.7.0
OpenGL:
- GL version: 2.1 Metal - 83.1
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1800x1169, scale 2.0
Settings path:
- /Users/sobolp/Library/Application Support/napari/napari-418_d279d6cf5d4193876ed97858e757fe322037331b/settings.yaml
```
## Additional context
Other dialogs, like Save Selected Layer, work fine.
Using the native dialog (when running napari from the command line) does not have this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_qt/dialogs/screenshot_dialog.py`
Content:
```
1 import os
2 from pathlib import Path
3 from typing import Any, Callable
4
5 from qtpy.QtWidgets import QFileDialog, QMessageBox
6
7 from napari.utils.misc import in_ipython
8 from napari.utils.translations import trans
9
10 HOME_DIRECTORY = str(Path.home())
11
12
13 class ScreenshotDialog(QFileDialog):
14 """
15 Dialog to chose save location of screenshot.
16
17 Parameters
18 ----------
19 save_function : Callable[[str], Any],
20 Function to be called on success of selecting save location
21 parent : QWidget, optional
22 Optional parent widget for this widget..
23 directory : str, optional
24 Starting directory to be set to File Dialog
25
26 """
27
28 def __init__(
29 self,
30 save_function: Callable[[str], Any],
31 parent=None,
32 directory=HOME_DIRECTORY,
33 history=None,
34 ) -> None:
35 super().__init__(parent, trans._("Save screenshot"))
36 self.setAcceptMode(QFileDialog.AcceptSave)
37 self.setFileMode(QFileDialog.AnyFile)
38 self.setNameFilter(
39 trans._("Image files (*.png *.bmp *.gif *.tif *.tiff)")
40 )
41 self.setDirectory(directory)
42 self.setHistory(history)
43
44 if in_ipython():
45 self.setOptions(QFileDialog.DontUseNativeDialog)
46
47 self.save_function = save_function
48
49 def accept(self):
50 save_path = self.selectedFiles()[0]
51 if os.path.splitext(save_path)[1] == "":
52 save_path = save_path + ".png"
53 if os.path.exists(save_path):
54 res = QMessageBox().warning(
55 self,
56 trans._("Confirm overwrite"),
57 trans._(
58 "{save_path} already exists. Do you want to replace it?",
59 save_path=save_path,
60 ),
61 QMessageBox.Yes | QMessageBox.No,
62 QMessageBox.No,
63 )
64 if res != QMessageBox.Yes:
65 # standard accept return 1, reject 0. This inform that dialog should be reopened
66 super().accept()
67 self.exec_()
68 self.save_function(save_path)
69 return super().accept()
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_qt/dialogs/screenshot_dialog.py b/napari/_qt/dialogs/screenshot_dialog.py
--- a/napari/_qt/dialogs/screenshot_dialog.py
+++ b/napari/_qt/dialogs/screenshot_dialog.py
@@ -62,8 +62,9 @@
QMessageBox.No,
)
if res != QMessageBox.Yes:
- # standard accept return 1, reject 0. This inform that dialog should be reopened
- super().accept()
- self.exec_()
- self.save_function(save_path)
- return super().accept()
+ # return in this case since a valid name for the
+ # file is needed so the dialog needs to be visible
+ return
+ super().accept()
+ if self.result():
+ self.save_function(save_path)
| {"golden_diff": "diff --git a/napari/_qt/dialogs/screenshot_dialog.py b/napari/_qt/dialogs/screenshot_dialog.py\n--- a/napari/_qt/dialogs/screenshot_dialog.py\n+++ b/napari/_qt/dialogs/screenshot_dialog.py\n@@ -62,8 +62,9 @@\n QMessageBox.No,\n )\n if res != QMessageBox.Yes:\n- # standard accept return 1, reject 0. This inform that dialog should be reopened\n- super().accept()\n- self.exec_()\n- self.save_function(save_path)\n- return super().accept()\n+ # return in this case since a valid name for the\n+ # file is needed so the dialog needs to be visible\n+ return\n+ super().accept()\n+ if self.result():\n+ self.save_function(save_path)\n", "issue": "[ipython] File > Save Screenshot... warns about replacing if extension is provided\n## \ud83d\udc1b Bug\r\n\r\nIf a napari viewer is launched from ipython and you use File > Save Screenshot... then everything works fine if you just enter a file name, **but if you specify the extension**\u2014out of habit or to change the file format\u2014then napari will warn that the file already exists, even if it does not.\r\nRegardless of choice, the file is created and everything works.\r\n\r\nLooking carefully, when one clicks Save the screen flashes, the file appears in the list in the dialog, and the warning pops up--the dialog remains up.\r\nIt's like the file is created and then it tries a second time when it goes to close the dialog.\r\n\r\nEdit:\r\nOn the other hand, *if you pass a filename that exists,* **but without extension**, it will correctly warn that the file exists, but if say `no` to overwriting, the dialog closes with: `WARNING: QDialog::exec: Recursive call detected`\r\nIf you *do pass a full name with extension*, it will correctly warn that the file exists, and saying `no` will return you to the dialog to modify the name\u2014as expected.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. launch ipython and use \r\n```\r\nimport napari\r\nviewer = napari.Viewer()\r\n```\r\n3. open any sample image or add any layer\r\n4. File > Save Screenshot...\r\n5. 
enter a file name **with an extension** but make sure it's a unique name.\r\n\r\n## Expected behavior\r\n\r\nNo warning should be raised if the file doesn't exist, the dialog should just disappear and the file should be created.\r\nIf the warning is raised and the user clicks `no` to not overwrite, the dialog should remain open so the user can change the name.\r\n\r\n## Environment\r\n\r\n```\r\nnapari: 0.4.18\r\nPlatform: macOS-13.4.1-arm64-arm-64bit\r\nSystem: MacOS 13.4.1\r\nPython: 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:41:52) [Clang 15.0.7 ]\r\nQt: 5.15.6\r\nPyQt5: 5.15.7\r\nNumPy: 1.25.1\r\nSciPy: 1.11.1\r\nDask: 2023.7.0\r\nVisPy: 0.12.2\r\nmagicgui: 0.7.2\r\nsuperqt: unknown\r\nin-n-out: 0.1.8\r\napp-model: 0.2.0\r\nnpe2: 0.7.0\r\n\r\nOpenGL:\r\n- GL version: 2.1 Metal - 83.1\r\n- MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n- screen 1: resolution 1800x1169, scale 2.0\r\n\r\nSettings path:\r\n- /Users/sobolp/Library/Application Support/napari/napari-418_d279d6cf5d4193876ed97858e757fe322037331b/settings.yaml\r\n\r\n```\r\n\r\n## Additional context\r\n\r\nOther dialogs, like Save Selected Layer work fine.\r\nUsing the native dialog (when running napari from the command line) does not have this issue.\n", "before_files": [{"content": "import os\nfrom pathlib import Path\nfrom typing import Any, Callable\n\nfrom qtpy.QtWidgets import QFileDialog, QMessageBox\n\nfrom napari.utils.misc import in_ipython\nfrom napari.utils.translations import trans\n\nHOME_DIRECTORY = str(Path.home())\n\n\nclass ScreenshotDialog(QFileDialog):\n \"\"\"\n Dialog to chose save location of screenshot.\n\n Parameters\n ----------\n save_function : Callable[[str], Any],\n Function to be called on success of selecting save location\n parent : QWidget, optional\n Optional parent widget for this widget..\n directory : str, optional\n Starting directory to be set to File Dialog\n\n \"\"\"\n\n def __init__(\n self,\n save_function: Callable[[str], Any],\n parent=None,\n directory=HOME_DIRECTORY,\n history=None,\n ) -> None:\n super().__init__(parent, trans._(\"Save screenshot\"))\n self.setAcceptMode(QFileDialog.AcceptSave)\n self.setFileMode(QFileDialog.AnyFile)\n self.setNameFilter(\n trans._(\"Image files (*.png *.bmp *.gif *.tif *.tiff)\")\n )\n self.setDirectory(directory)\n self.setHistory(history)\n\n if in_ipython():\n self.setOptions(QFileDialog.DontUseNativeDialog)\n\n self.save_function = save_function\n\n def accept(self):\n save_path = self.selectedFiles()[0]\n if os.path.splitext(save_path)[1] == \"\":\n save_path = save_path + \".png\"\n if os.path.exists(save_path):\n res = QMessageBox().warning(\n self,\n trans._(\"Confirm overwrite\"),\n trans._(\n \"{save_path} already exists. Do you want to replace it?\",\n save_path=save_path,\n ),\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No,\n )\n if res != QMessageBox.Yes:\n # standard accept return 1, reject 0. 
This inform that dialog should be reopened\n super().accept()\n self.exec_()\n self.save_function(save_path)\n return super().accept()\n", "path": "napari/_qt/dialogs/screenshot_dialog.py"}], "after_files": [{"content": "import os\nfrom pathlib import Path\nfrom typing import Any, Callable\n\nfrom qtpy.QtWidgets import QFileDialog, QMessageBox\n\nfrom napari.utils.misc import in_ipython\nfrom napari.utils.translations import trans\n\nHOME_DIRECTORY = str(Path.home())\n\n\nclass ScreenshotDialog(QFileDialog):\n \"\"\"\n Dialog to chose save location of screenshot.\n\n Parameters\n ----------\n save_function : Callable[[str], Any],\n Function to be called on success of selecting save location\n parent : QWidget, optional\n Optional parent widget for this widget..\n directory : str, optional\n Starting directory to be set to File Dialog\n\n \"\"\"\n\n def __init__(\n self,\n save_function: Callable[[str], Any],\n parent=None,\n directory=HOME_DIRECTORY,\n history=None,\n ) -> None:\n super().__init__(parent, trans._(\"Save screenshot\"))\n self.setAcceptMode(QFileDialog.AcceptSave)\n self.setFileMode(QFileDialog.AnyFile)\n self.setNameFilter(\n trans._(\"Image files (*.png *.bmp *.gif *.tif *.tiff)\")\n )\n self.setDirectory(directory)\n self.setHistory(history)\n\n if in_ipython():\n self.setOptions(QFileDialog.DontUseNativeDialog)\n\n self.save_function = save_function\n\n def accept(self):\n save_path = self.selectedFiles()[0]\n if os.path.splitext(save_path)[1] == \"\":\n save_path = save_path + \".png\"\n if os.path.exists(save_path):\n res = QMessageBox().warning(\n self,\n trans._(\"Confirm overwrite\"),\n trans._(\n \"{save_path} already exists. Do you want to replace it?\",\n save_path=save_path,\n ),\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No,\n )\n if res != QMessageBox.Yes:\n # return in this case since a valid name for the\n # file is needed so the dialog needs to be visible\n return\n super().accept()\n if self.result():\n self.save_function(save_path)\n", "path": "napari/_qt/dialogs/screenshot_dialog.py"}]} | 1,571 | 180 |
gh_patches_debug_24519 | rasdani/github-patches | git_diff | ray-project__ray-5208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] utils.debug.summarize() dies on empty arrays
# System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04
- **Ray installed from (source or binary)**: binary (pypi)
- **Ray version**: 0.7.1 & 0.7.2
- **Python version**: 3.6
- **Exact command to reproduce**: N/A
### Describe the problem
I'm running rllib on an environment that returns weird zero-length observations. RLlib chokes when it tries to summarise them because the arrays don't have a min/max/mean. Example traceback below.
### Source code / logs
```
2019-07-16 14:10:09,054 ERROR trial_runner.py:487 -- Error processing event.
Traceback (most recent call last):
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 436, in _process_trial
result = self.trial_executor.fetch_result(trial)
…snip…
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 308, in _env_runner
summarize(unfiltered_obs)))
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py", line 65, in summarize
return _printer.pformat(_summarize(obj))
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py", line 70, in _summarize
return {k: _summarize(v) for k, v in obj.items()}
…snip…
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py", line 87, in _summarize
obj.shape, obj.dtype, round(float(np.min(obj)), 3),
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 2618, in amin
initial=initial)
File "/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 86, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
ValueError: zero-size array to reduction operation minimum which has no identity
```
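The failure is easy to reproduce outside RLlib; any zero-size array hits the same reduction error, so the pretty-printer presumably needs a `size == 0` check before calling `np.min`/`np.max`/`np.mean`. A minimal sketch (NumPy only):

```
import numpy as np

obs = np.zeros((0, 4), dtype=np.float32)  # a zero-length observation batch

try:
    np.min(obs)
except ValueError as err:
    print(err)  # zero-size array to reduction operation minimum which has no identity

# A guard along these lines avoids the crash when summarizing:
if obs.size == 0:
    summary = "np.ndarray({}, dtype={})".format(obs.shape, obs.dtype)
else:
    summary = "np.ndarray({}, dtype={}, min={}, max={}, mean={})".format(
        obs.shape, obs.dtype, np.min(obs), np.max(obs), np.mean(obs))
print(summary)
```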
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/rllib/utils/debug.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import numpy as np
6 import pprint
7 import time
8
9 from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
10
11 _logged = set()
12 _disabled = False
13 _periodic_log = False
14 _last_logged = 0.0
15 _printer = pprint.PrettyPrinter(indent=2, width=60)
16
17
18 def log_once(key):
19 """Returns True if this is the "first" call for a given key.
20
21 Various logging settings can adjust the definition of "first".
22
23 Example:
24 >>> if log_once("some_key"):
25 ... logger.info("Some verbose logging statement")
26 """
27
28 global _last_logged
29
30 if _disabled:
31 return False
32 elif key not in _logged:
33 _logged.add(key)
34 _last_logged = time.time()
35 return True
36 elif _periodic_log and time.time() - _last_logged > 60.0:
37 _logged.clear()
38 _last_logged = time.time()
39 return False
40 else:
41 return False
42
43
44 def disable_log_once_globally():
45 """Make log_once() return False in this process."""
46
47 global _disabled
48 _disabled = True
49
50
51 def enable_periodic_logging():
52 """Make log_once() periodically return True in this process."""
53
54 global _periodic_log
55 _periodic_log = True
56
57
58 def summarize(obj):
59 """Return a pretty-formatted string for an object.
60
61 This has special handling for pretty-formatting of commonly used data types
62 in RLlib, such as SampleBatch, numpy arrays, etc.
63 """
64
65 return _printer.pformat(_summarize(obj))
66
67
68 def _summarize(obj):
69 if isinstance(obj, dict):
70 return {k: _summarize(v) for k, v in obj.items()}
71 elif hasattr(obj, "_asdict"):
72 return {
73 "type": obj.__class__.__name__,
74 "data": _summarize(obj._asdict()),
75 }
76 elif isinstance(obj, list):
77 return [_summarize(x) for x in obj]
78 elif isinstance(obj, tuple):
79 return tuple(_summarize(x) for x in obj)
80 elif isinstance(obj, np.ndarray):
81 if obj.dtype == np.object:
82 return _StringValue("np.ndarray({}, dtype={}, head={})".format(
83 obj.shape, obj.dtype, _summarize(obj[0])))
84 else:
85 return _StringValue(
86 "np.ndarray({}, dtype={}, min={}, max={}, mean={})".format(
87 obj.shape, obj.dtype, round(float(np.min(obj)), 3),
88 round(float(np.max(obj)), 3), round(
89 float(np.mean(obj)), 3)))
90 elif isinstance(obj, MultiAgentBatch):
91 return {
92 "type": "MultiAgentBatch",
93 "policy_batches": _summarize(obj.policy_batches),
94 "count": obj.count,
95 }
96 elif isinstance(obj, SampleBatch):
97 return {
98 "type": "SampleBatch",
99 "data": {k: _summarize(v)
100 for k, v in obj.items()},
101 }
102 else:
103 return obj
104
105
106 class _StringValue(object):
107 def __init__(self, value):
108 self.value = value
109
110 def __repr__(self):
111 return self.value
112
```
Path: `python/ray/rllib/utils/memory.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import numpy as np
6 import time
7
8 import ray
9
10 FREE_DELAY_S = 10.0
11 MAX_FREE_QUEUE_SIZE = 100
12 _last_free_time = 0.0
13 _to_free = []
14
15
16 def ray_get_and_free(object_ids):
17 """Call ray.get and then queue the object ids for deletion.
18
19 This function should be used whenever possible in RLlib, to optimize
20 memory usage. The only exception is when an object_id is shared among
21 multiple readers.
22
23 Args:
24 object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.
25
26 Returns:
27 The result of ray.get(object_ids).
28 """
29
30 global _last_free_time
31 global _to_free
32
33 result = ray.get(object_ids)
34 if type(object_ids) is not list:
35 object_ids = [object_ids]
36 _to_free.extend(object_ids)
37
38 # batch calls to free to reduce overheads
39 now = time.time()
40 if (len(_to_free) > MAX_FREE_QUEUE_SIZE
41 or now - _last_free_time > FREE_DELAY_S):
42 ray.internal.free(_to_free)
43 _to_free = []
44 _last_free_time = now
45
46 return result
47
48
49 def aligned_array(size, dtype, align=64):
50 """Returns an array of a given size that is 64-byte aligned.
51
52 The returned array can be efficiently copied into GPU memory by TensorFlow.
53 """
54
55 n = size * dtype.itemsize
56 empty = np.empty(n + (align - 1), dtype=np.uint8)
57 data_align = empty.ctypes.data % align
58 offset = 0 if data_align == 0 else (align - data_align)
59 output = empty[offset:offset + n].view(dtype)
60
61 assert len(output) == size, len(output)
62 assert output.ctypes.data % align == 0, output.ctypes.data
63 return output
64
65
66 def concat_aligned(items):
67 """Concatenate arrays, ensuring the output is 64-byte aligned.
68
69 We only align float arrays; other arrays are concatenated as normal.
70
71 This should be used instead of np.concatenate() to improve performance
72 when the output array is likely to be fed into TensorFlow.
73 """
74
75 if len(items) == 0:
76 return []
77 elif len(items) == 1:
78 # we assume the input is aligned. In any case, it doesn't help
79 # performance to force align it since that incurs a needless copy.
80 return items[0]
81 elif (isinstance(items[0], np.ndarray)
82 and items[0].dtype in [np.float32, np.float64, np.uint8]):
83 dtype = items[0].dtype
84 flat = aligned_array(sum(s.size for s in items), dtype)
85 batch_dim = sum(s.shape[0] for s in items)
86 new_shape = (batch_dim, ) + items[0].shape[1:]
87 output = flat.reshape(new_shape)
88 assert output.ctypes.data % 64 == 0, output.ctypes.data
89 np.concatenate(items, out=output)
90 return output
91 else:
92 return np.concatenate(items)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/rllib/utils/debug.py b/python/ray/rllib/utils/debug.py
--- a/python/ray/rllib/utils/debug.py
+++ b/python/ray/rllib/utils/debug.py
@@ -78,7 +78,10 @@
elif isinstance(obj, tuple):
return tuple(_summarize(x) for x in obj)
elif isinstance(obj, np.ndarray):
- if obj.dtype == np.object:
+ if obj.size == 0:
+ return _StringValue("np.ndarray({}, dtype={})".format(
+ obj.shape, obj.dtype))
+ elif obj.dtype == np.object:
return _StringValue("np.ndarray({}, dtype={}, head={})".format(
obj.shape, obj.dtype, _summarize(obj[0])))
else:
diff --git a/python/ray/rllib/utils/memory.py b/python/ray/rllib/utils/memory.py
--- a/python/ray/rllib/utils/memory.py
+++ b/python/ray/rllib/utils/memory.py
@@ -56,7 +56,11 @@
empty = np.empty(n + (align - 1), dtype=np.uint8)
data_align = empty.ctypes.data % align
offset = 0 if data_align == 0 else (align - data_align)
- output = empty[offset:offset + n].view(dtype)
+ if n == 0:
+ # stop np from optimising out empty slice reference
+ output = empty[offset:offset + 1][0:0].view(dtype)
+ else:
+ output = empty[offset:offset + n].view(dtype)
assert len(output) == size, len(output)
assert output.ctypes.data % align == 0, output.ctypes.data
| {"golden_diff": "diff --git a/python/ray/rllib/utils/debug.py b/python/ray/rllib/utils/debug.py\n--- a/python/ray/rllib/utils/debug.py\n+++ b/python/ray/rllib/utils/debug.py\n@@ -78,7 +78,10 @@\n elif isinstance(obj, tuple):\n return tuple(_summarize(x) for x in obj)\n elif isinstance(obj, np.ndarray):\n- if obj.dtype == np.object:\n+ if obj.size == 0:\n+ return _StringValue(\"np.ndarray({}, dtype={})\".format(\n+ obj.shape, obj.dtype))\n+ elif obj.dtype == np.object:\n return _StringValue(\"np.ndarray({}, dtype={}, head={})\".format(\n obj.shape, obj.dtype, _summarize(obj[0])))\n else:\ndiff --git a/python/ray/rllib/utils/memory.py b/python/ray/rllib/utils/memory.py\n--- a/python/ray/rllib/utils/memory.py\n+++ b/python/ray/rllib/utils/memory.py\n@@ -56,7 +56,11 @@\n empty = np.empty(n + (align - 1), dtype=np.uint8)\n data_align = empty.ctypes.data % align\n offset = 0 if data_align == 0 else (align - data_align)\n- output = empty[offset:offset + n].view(dtype)\n+ if n == 0:\n+ # stop np from optimising out empty slice reference\n+ output = empty[offset:offset + 1][0:0].view(dtype)\n+ else:\n+ output = empty[offset:offset + n].view(dtype)\n \n assert len(output) == size, len(output)\n assert output.ctypes.data % align == 0, output.ctypes.data\n", "issue": "[rllib] utils.debug.summarize() dies on empty arrays\n# System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04\r\n- **Ray installed from (source or binary)**: binary (pypi)\r\n- **Ray version**: 0.7.1 & 0.7.2\r\n- **Python version**: 3.6\r\n- **Exact command to reproduce**: N/A\r\n\r\n### Describe the problem\r\n\r\nI'm running rllib on an environment that returns weird zero-length observations. RLLib chokes when it tries to summarise them because the arrays don't have a min/max/mean. 
Example traceback below.\r\n\r\n### Source code / logs\r\n\r\n```\r\n2019-07-16 14:10:09,054 ERROR trial_runner.py:487 -- Error processing event.\r\nTraceback (most recent call last):\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/tune/trial_runner.py\", line 436, in _process_trial\r\n result = self.trial_executor.fetch_result(trial)\r\n\u2026snip\u2026\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py\", line 308, in _env_runner\r\n summarize(unfiltered_obs)))\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py\", line 65, in summarize\r\n return _printer.pformat(_summarize(obj))\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py\", line 70, in _summarize\r\n return {k: _summarize(v) for k, v in obj.items()}\r\n\u2026snip\u2026\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/ray/rllib/utils/debug.py\", line 87, in _summarize\r\n obj.shape, obj.dtype, round(float(np.min(obj)), 3),\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/numpy/core/fromnumeric.py\", line 2618, in amin\r\n initial=initial)\r\n File \"/home/sam/.virtualenvs/weird-env/lib/python3.6/site-packages/numpy/core/fromnumeric.py\", line 86, in _wrapreduction\r\n return ufunc.reduce(obj, axis, dtype, out, **passkwargs)\r\nValueError: zero-size array to reduction operation minimum which has no identity\r\n\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pprint\nimport time\n\nfrom ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch\n\n_logged = set()\n_disabled = False\n_periodic_log = False\n_last_logged = 0.0\n_printer = pprint.PrettyPrinter(indent=2, width=60)\n\n\ndef log_once(key):\n \"\"\"Returns True if this is the \"first\" call for a given key.\n\n Various logging settings can adjust the definition of \"first\".\n\n Example:\n >>> if log_once(\"some_key\"):\n ... 
logger.info(\"Some verbose logging statement\")\n \"\"\"\n\n global _last_logged\n\n if _disabled:\n return False\n elif key not in _logged:\n _logged.add(key)\n _last_logged = time.time()\n return True\n elif _periodic_log and time.time() - _last_logged > 60.0:\n _logged.clear()\n _last_logged = time.time()\n return False\n else:\n return False\n\n\ndef disable_log_once_globally():\n \"\"\"Make log_once() return False in this process.\"\"\"\n\n global _disabled\n _disabled = True\n\n\ndef enable_periodic_logging():\n \"\"\"Make log_once() periodically return True in this process.\"\"\"\n\n global _periodic_log\n _periodic_log = True\n\n\ndef summarize(obj):\n \"\"\"Return a pretty-formatted string for an object.\n\n This has special handling for pretty-formatting of commonly used data types\n in RLlib, such as SampleBatch, numpy arrays, etc.\n \"\"\"\n\n return _printer.pformat(_summarize(obj))\n\n\ndef _summarize(obj):\n if isinstance(obj, dict):\n return {k: _summarize(v) for k, v in obj.items()}\n elif hasattr(obj, \"_asdict\"):\n return {\n \"type\": obj.__class__.__name__,\n \"data\": _summarize(obj._asdict()),\n }\n elif isinstance(obj, list):\n return [_summarize(x) for x in obj]\n elif isinstance(obj, tuple):\n return tuple(_summarize(x) for x in obj)\n elif isinstance(obj, np.ndarray):\n if obj.dtype == np.object:\n return _StringValue(\"np.ndarray({}, dtype={}, head={})\".format(\n obj.shape, obj.dtype, _summarize(obj[0])))\n else:\n return _StringValue(\n \"np.ndarray({}, dtype={}, min={}, max={}, mean={})\".format(\n obj.shape, obj.dtype, round(float(np.min(obj)), 3),\n round(float(np.max(obj)), 3), round(\n float(np.mean(obj)), 3)))\n elif isinstance(obj, MultiAgentBatch):\n return {\n \"type\": \"MultiAgentBatch\",\n \"policy_batches\": _summarize(obj.policy_batches),\n \"count\": obj.count,\n }\n elif isinstance(obj, SampleBatch):\n return {\n \"type\": \"SampleBatch\",\n \"data\": {k: _summarize(v)\n for k, v in obj.items()},\n }\n else:\n return obj\n\n\nclass _StringValue(object):\n def __init__(self, value):\n self.value = value\n\n def __repr__(self):\n return self.value\n", "path": "python/ray/rllib/utils/debug.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport time\n\nimport ray\n\nFREE_DELAY_S = 10.0\nMAX_FREE_QUEUE_SIZE = 100\n_last_free_time = 0.0\n_to_free = []\n\n\ndef ray_get_and_free(object_ids):\n \"\"\"Call ray.get and then queue the object ids for deletion.\n\n This function should be used whenever possible in RLlib, to optimize\n memory usage. 
The only exception is when an object_id is shared among\n multiple readers.\n\n Args:\n object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.\n\n Returns:\n The result of ray.get(object_ids).\n \"\"\"\n\n global _last_free_time\n global _to_free\n\n result = ray.get(object_ids)\n if type(object_ids) is not list:\n object_ids = [object_ids]\n _to_free.extend(object_ids)\n\n # batch calls to free to reduce overheads\n now = time.time()\n if (len(_to_free) > MAX_FREE_QUEUE_SIZE\n or now - _last_free_time > FREE_DELAY_S):\n ray.internal.free(_to_free)\n _to_free = []\n _last_free_time = now\n\n return result\n\n\ndef aligned_array(size, dtype, align=64):\n \"\"\"Returns an array of a given size that is 64-byte aligned.\n\n The returned array can be efficiently copied into GPU memory by TensorFlow.\n \"\"\"\n\n n = size * dtype.itemsize\n empty = np.empty(n + (align - 1), dtype=np.uint8)\n data_align = empty.ctypes.data % align\n offset = 0 if data_align == 0 else (align - data_align)\n output = empty[offset:offset + n].view(dtype)\n\n assert len(output) == size, len(output)\n assert output.ctypes.data % align == 0, output.ctypes.data\n return output\n\n\ndef concat_aligned(items):\n \"\"\"Concatenate arrays, ensuring the output is 64-byte aligned.\n\n We only align float arrays; other arrays are concatenated as normal.\n\n This should be used instead of np.concatenate() to improve performance\n when the output array is likely to be fed into TensorFlow.\n \"\"\"\n\n if len(items) == 0:\n return []\n elif len(items) == 1:\n # we assume the input is aligned. In any case, it doesn't help\n # performance to force align it since that incurs a needless copy.\n return items[0]\n elif (isinstance(items[0], np.ndarray)\n and items[0].dtype in [np.float32, np.float64, np.uint8]):\n dtype = items[0].dtype\n flat = aligned_array(sum(s.size for s in items), dtype)\n batch_dim = sum(s.shape[0] for s in items)\n new_shape = (batch_dim, ) + items[0].shape[1:]\n output = flat.reshape(new_shape)\n assert output.ctypes.data % 64 == 0, output.ctypes.data\n np.concatenate(items, out=output)\n return output\n else:\n return np.concatenate(items)\n", "path": "python/ray/rllib/utils/memory.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pprint\nimport time\n\nfrom ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch\n\n_logged = set()\n_disabled = False\n_periodic_log = False\n_last_logged = 0.0\n_printer = pprint.PrettyPrinter(indent=2, width=60)\n\n\ndef log_once(key):\n \"\"\"Returns True if this is the \"first\" call for a given key.\n\n Various logging settings can adjust the definition of \"first\".\n\n Example:\n >>> if log_once(\"some_key\"):\n ... 
logger.info(\"Some verbose logging statement\")\n \"\"\"\n\n global _last_logged\n\n if _disabled:\n return False\n elif key not in _logged:\n _logged.add(key)\n _last_logged = time.time()\n return True\n elif _periodic_log and time.time() - _last_logged > 60.0:\n _logged.clear()\n _last_logged = time.time()\n return False\n else:\n return False\n\n\ndef disable_log_once_globally():\n \"\"\"Make log_once() return False in this process.\"\"\"\n\n global _disabled\n _disabled = True\n\n\ndef enable_periodic_logging():\n \"\"\"Make log_once() periodically return True in this process.\"\"\"\n\n global _periodic_log\n _periodic_log = True\n\n\ndef summarize(obj):\n \"\"\"Return a pretty-formatted string for an object.\n\n This has special handling for pretty-formatting of commonly used data types\n in RLlib, such as SampleBatch, numpy arrays, etc.\n \"\"\"\n\n return _printer.pformat(_summarize(obj))\n\n\ndef _summarize(obj):\n if isinstance(obj, dict):\n return {k: _summarize(v) for k, v in obj.items()}\n elif hasattr(obj, \"_asdict\"):\n return {\n \"type\": obj.__class__.__name__,\n \"data\": _summarize(obj._asdict()),\n }\n elif isinstance(obj, list):\n return [_summarize(x) for x in obj]\n elif isinstance(obj, tuple):\n return tuple(_summarize(x) for x in obj)\n elif isinstance(obj, np.ndarray):\n if obj.size == 0:\n return _StringValue(\"np.ndarray({}, dtype={})\".format(\n obj.shape, obj.dtype))\n elif obj.dtype == np.object:\n return _StringValue(\"np.ndarray({}, dtype={}, head={})\".format(\n obj.shape, obj.dtype, _summarize(obj[0])))\n else:\n return _StringValue(\n \"np.ndarray({}, dtype={}, min={}, max={}, mean={})\".format(\n obj.shape, obj.dtype, round(float(np.min(obj)), 3),\n round(float(np.max(obj)), 3), round(\n float(np.mean(obj)), 3)))\n elif isinstance(obj, MultiAgentBatch):\n return {\n \"type\": \"MultiAgentBatch\",\n \"policy_batches\": _summarize(obj.policy_batches),\n \"count\": obj.count,\n }\n elif isinstance(obj, SampleBatch):\n return {\n \"type\": \"SampleBatch\",\n \"data\": {k: _summarize(v)\n for k, v in obj.items()},\n }\n else:\n return obj\n\n\nclass _StringValue(object):\n def __init__(self, value):\n self.value = value\n\n def __repr__(self):\n return self.value\n", "path": "python/ray/rllib/utils/debug.py"}, {"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport time\n\nimport ray\n\nFREE_DELAY_S = 10.0\nMAX_FREE_QUEUE_SIZE = 100\n_last_free_time = 0.0\n_to_free = []\n\n\ndef ray_get_and_free(object_ids):\n \"\"\"Call ray.get and then queue the object ids for deletion.\n\n This function should be used whenever possible in RLlib, to optimize\n memory usage. 
The only exception is when an object_id is shared among\n multiple readers.\n\n Args:\n object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.\n\n Returns:\n The result of ray.get(object_ids).\n \"\"\"\n\n global _last_free_time\n global _to_free\n\n result = ray.get(object_ids)\n if type(object_ids) is not list:\n object_ids = [object_ids]\n _to_free.extend(object_ids)\n\n # batch calls to free to reduce overheads\n now = time.time()\n if (len(_to_free) > MAX_FREE_QUEUE_SIZE\n or now - _last_free_time > FREE_DELAY_S):\n ray.internal.free(_to_free)\n _to_free = []\n _last_free_time = now\n\n return result\n\n\ndef aligned_array(size, dtype, align=64):\n \"\"\"Returns an array of a given size that is 64-byte aligned.\n\n The returned array can be efficiently copied into GPU memory by TensorFlow.\n \"\"\"\n\n n = size * dtype.itemsize\n empty = np.empty(n + (align - 1), dtype=np.uint8)\n data_align = empty.ctypes.data % align\n offset = 0 if data_align == 0 else (align - data_align)\n if n == 0:\n # stop np from optimising out empty slice reference\n output = empty[offset:offset + 1][0:0].view(dtype)\n else:\n output = empty[offset:offset + n].view(dtype)\n\n assert len(output) == size, len(output)\n assert output.ctypes.data % align == 0, output.ctypes.data\n return output\n\n\ndef concat_aligned(items):\n \"\"\"Concatenate arrays, ensuring the output is 64-byte aligned.\n\n We only align float arrays; other arrays are concatenated as normal.\n\n This should be used instead of np.concatenate() to improve performance\n when the output array is likely to be fed into TensorFlow.\n \"\"\"\n\n if len(items) == 0:\n return []\n elif len(items) == 1:\n # we assume the input is aligned. In any case, it doesn't help\n # performance to force align it since that incurs a needless copy.\n return items[0]\n elif (isinstance(items[0], np.ndarray)\n and items[0].dtype in [np.float32, np.float64, np.uint8]):\n dtype = items[0].dtype\n flat = aligned_array(sum(s.size for s in items), dtype)\n batch_dim = sum(s.shape[0] for s in items)\n new_shape = (batch_dim, ) + items[0].shape[1:]\n output = flat.reshape(new_shape)\n assert output.ctypes.data % 64 == 0, output.ctypes.data\n np.concatenate(items, out=output)\n return output\n else:\n return np.concatenate(items)\n", "path": "python/ray/rllib/utils/memory.py"}]} | 2,713 | 375 |
gh_patches_debug_14917 | rasdani/github-patches | git_diff | chainer__chainer-317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`create_huffman_tree` fails when two entries have the same counts in py3
The function uses a priority queue in its implementation. When two entries have the same count, the queue has to break the tie by comparing the tuples' second elements, which is not possible in Python 3 when those elements are of incompatible types.
For example, `create_huffman_tree({'x': 1, 'y': 2, 'z': 3})` fails because the function tries to compare `(3, ('x', 'y'))` and `(3, 'z')`: the counts tie, so the comparison falls through to the tuple `('x', 'y')` versus the string `'z'`, which raises a `TypeError`.
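A quick illustration of both the failure and the usual remedy, a tie-breaking counter so the payloads are never compared (sketched with `heapq` instead of the module's `PriorityQueue`, purely to keep it short):

```
import heapq

# Python 3: when the first elements tie, tuple comparison falls back to the
# second elements, and tuple vs. str is not orderable.
try:
    (3, ('x', 'y')) < (3, 'z')
except TypeError as err:
    print(err)  # '<' not supported between instances of 'tuple' and 'str'

# Adding a unique, monotonically increasing id as the second element breaks
# ties before the payloads are ever compared.
counts = {'x': 1, 'y': 2, 'z': 3}
heap = [(c, uid, w) for uid, (w, c) in enumerate(counts.items())]
heapq.heapify(heap)
while len(heap) >= 2:
    c1, id1, w1 = heapq.heappop(heap)
    c2, id2, w2 = heapq.heappop(heap)
    heapq.heappush(heap, (c1 + c2, min(id1, id2), (w1, w2)))
print(heap[0][2])  # a nested tuple such as (('x', 'y'), 'z')
```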
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/hierarchical_softmax.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class TreeParser(object):
9
10 def __init__(self):
11 self.next_id = 0
12
13 def size(self):
14 return self.next_id
15
16 def get_paths(self):
17 return self.paths
18
19 def get_codes(self):
20 return self.codes
21
22 def parse(self, tree):
23 self.next_id = 0
24 self.path = []
25 self.code = []
26 self.paths = {}
27 self.codes = {}
28 self._parse(tree)
29
30 assert(len(self.path) == 0)
31 assert(len(self.code) == 0)
32 assert(len(self.paths) == len(self.codes))
33
34 def _parse(self, node):
35 if isinstance(node, tuple):
36 # internal node
37 if len(node) != 2:
38 raise ValueError(
39 'All internal nodes must have two child nodes')
40 left, right = node
41 self.path.append(self.next_id)
42 self.next_id += 1
43 self.code.append(1.0)
44 self._parse(left)
45
46 self.code[-1] = -1.0
47 self._parse(right)
48
49 self.path.pop()
50 self.code.pop()
51
52 else:
53 # leaf node
54 self.paths[node] = numpy.array(self.path).astype(numpy.int32)
55 self.codes[node] = numpy.array(self.code).astype(numpy.float32)
56
57
58 class BinaryHierarchicalSoftmax(function.Function):
59
60 """Implementation of hierarchical softmax (HSM).
61
62 In natural language applications, vocabulary size is too large to use
63 softmax loss.
64 Instead, the hierarchical softmax uses product of sigmoid functions.
65 It costs only :math:`O(\log(n))` time where :math:`n` is the vocabulary
66 size in average.
67
68 At first a user need to prepare a binary tree whose each leaf is
69 corresponding to a word in a vocabulary.
70 When a word :math:`x` is given, exactly one path from the root of the tree
71 to the leaf of the word exists.
72 Let :math:`\mbox{path}(x) = ((e_1, b_1), \dots, (e_m, b_m))` be the path of
73 :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and
74 :math:`b_i \in \{-1, 1\}` indicates direction to move at :math:`i`-th
75 internal node (-1 is left, and 1 is right).
76 Then, the probability of :math:`x` is given as below:
77
78 .. math::
79
80 P(x) &= \prod_{(e_i, b_i) \in \mbox{path}(x)}P(b_i | e_i) \\\\
81 &= \prod_{(e_i, b_i) \in \mbox{path}(x)}\sigma(b_i x^\\top
82 w_{e_i}),
83
84 where :math:`\sigma(\\cdot)` is a sigmoid function, and :math:`w` is a
85 weight matrix.
86
87 This function costs :math:`O(\log(n))` time as an average length of paths
88 is :math:`O(\log(n))`, and :math:`O(n)` memory as the number of internal
89 nodes equals :math:`n - 1`.
90
91 Args:
92 in_size (int): Dimension of input vectors.
93 tree: A binary tree made with tuples like `((1, 2), 3)`.
94
95 See: Hierarchical Probabilistic Neural Network Language Model [Morin+,
96 AISTAT2005].
97
98 """
99
100 parameter_names = ('W',)
101 gradient_names = ('gW',)
102
103 def __init__(self, in_size, tree):
104 parser = TreeParser()
105 parser.parse(tree)
106 self.paths = parser.get_paths()
107 self.codes = parser.get_codes()
108
109 self.W = numpy.random.uniform(
110 -1, 1, (parser.size(), in_size)).astype(numpy.float32)
111 self.gW = numpy.zeros(self.W.shape, numpy.float32)
112
113 def check_type_forward(self, in_types):
114 type_check.expect(in_types.size() == 2)
115 x_type, t_type = in_types
116
117 type_check.expect(
118 x_type.dtype == numpy.float32,
119 x_type.ndim == 2,
120 t_type.dtype == numpy.int32,
121 t_type.ndim == 1,
122 x_type.shape[0] == t_type.shape[0]
123 )
124
125 def check_type_backward(self, in_types, out_types):
126 type_check.expect(
127 out_types.size() == 1,
128 out_types[0].dtype == numpy.float32,
129 out_types[0].ndim == 0
130 )
131
132 def forward_cpu(self, args):
133 x, t = args
134
135 loss = numpy.float32(0.0)
136 for ix, it in six.moves.zip(x, t):
137 loss += self._forward_cpu_one(ix, it)
138 return numpy.array(loss),
139
140 def _forward_cpu_one(self, x, t):
141 assert t in self.paths
142
143 w = self.W[self.paths[t]]
144 wxy = w.dot(x) * self.codes[t]
145 loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
146 return numpy.sum(loss)
147
148 def backward_cpu(self, args, loss):
149 x, t = args
150 gloss, = loss
151 gx = numpy.empty_like(x)
152 for i, (ix, it) in enumerate(six.moves.zip(x, t)):
153 gx[i] = self._backward_cpu_one(ix, it, gloss)
154 return gx, None
155
156 def _backward_cpu_one(self, x, t, gloss):
157 path = self.paths[t]
158 w = self.W[path]
159 wxy = w.dot(x) * self.codes[t]
160 g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))
161 gx = g.dot(w)
162 gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
163 self.gW[path] += gw
164 return gx
165
166
167 def create_huffman_tree(word_counts):
168 """Make a huffman tree from a dictionary containing word counts.
169
170 This method creates a binary huffman tree, that is required for
171 :class:`BinaryHierarchicalSoftmax`.
172 For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to
173 ``((3, 1), (2, 0))``.
174
175 Args:
176 word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):
177 Dictionary representing counts of words.
178
179 Returns:
180 Binary huffman tree with tuples and keys of ``word_coutns``.
181
182 """
183 if len(word_counts) == 0:
184 raise ValueError('Empty vocabulary')
185
186 q = six.moves.queue.PriorityQueue()
187 for w, c in six.iteritems(word_counts):
188 q.put((c, w))
189
190 while q.qsize() >= 2:
191 (count1, word1) = q.get()
192 (count2, word2) = q.get()
193 count = count1 + count2
194 tree = (word1, word2)
195 q.put((count, tree))
196
197 return q.get()[1]
198
```
--- END FILES ---
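As a side note, the per-word loss that `_forward_cpu_one` computes from a word's path and code (the product of sigmoids in the docstring, rewritten as a sum of `logaddexp` terms) can be reproduced standalone. The sketch below uses made-up path, code, weight, and input values purely for illustration; none of these numbers come from the library.

```python
import numpy as np

# Hypothetical word whose leaf sits two internal nodes deep in the tree.
path = np.array([0, 2], dtype=np.int32)         # indices of the internal nodes on the path
code = np.array([1.0, -1.0], dtype=np.float32)  # +1 / -1 = direction taken at each node
W = np.random.uniform(-1, 1, (4, 3)).astype(np.float32)  # one weight row per internal node
x = np.random.uniform(-1, 1, (3,)).astype(np.float32)    # input feature vector

# Same computation as _forward_cpu_one: -log prod_i sigmoid(b_i * w_{e_i} . x)
wxy = W[path].dot(x) * code
loss = np.sum(np.logaddexp(0.0, -wxy))  # logaddexp(0, -z) == -log(sigmoid(z))
print(float(loss))
```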
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py
--- a/chainer/functions/hierarchical_softmax.py
+++ b/chainer/functions/hierarchical_softmax.py
@@ -184,14 +184,17 @@
raise ValueError('Empty vocabulary')
q = six.moves.queue.PriorityQueue()
- for w, c in six.iteritems(word_counts):
- q.put((c, w))
+ # Add unique id to each entry so that we can compare two entries with same
+ # counts.
+ # Note that itreitems randomly order the entries.
+ for uid, (w, c) in enumerate(six.iteritems(word_counts)):
+ q.put((c, uid, w))
while q.qsize() >= 2:
- (count1, word1) = q.get()
- (count2, word2) = q.get()
+ (count1, id1, word1) = q.get()
+ (count2, id2, word2) = q.get()
count = count1 + count2
tree = (word1, word2)
- q.put((count, tree))
+ q.put((count, min(id1, id2), tree))
- return q.get()[1]
+ return q.get()[2]
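The extra id matters because Python 3 falls back to the second tuple element whenever the counts tie, and ordering a tuple against a plain key raises `TypeError`; with a unique integer in the middle position the tie is always broken before the heterogeneous payloads are compared. A quick standalone illustration:

```python
# Python 3: equal counts force a comparison of the payloads,
# and tuple vs. str ordering is undefined -> TypeError.
try:
    (3, ('x', 'y')) < (3, 'z')
except TypeError as exc:
    print(exc)  # "'<' not supported between instances of 'tuple' and 'str'"

# With a unique id in the middle, the ids break the tie and the
# payloads in the last position are never compared.
print((3, 0, ('x', 'y')) < (3, 1, 'z'))  # True
```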
| {"golden_diff": "diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py\n--- a/chainer/functions/hierarchical_softmax.py\n+++ b/chainer/functions/hierarchical_softmax.py\n@@ -184,14 +184,17 @@\n raise ValueError('Empty vocabulary')\n \n q = six.moves.queue.PriorityQueue()\n- for w, c in six.iteritems(word_counts):\n- q.put((c, w))\n+ # Add unique id to each entry so that we can compare two entries with same\n+ # counts.\n+ # Note that itreitems randomly order the entries.\n+ for uid, (w, c) in enumerate(six.iteritems(word_counts)):\n+ q.put((c, uid, w))\n \n while q.qsize() >= 2:\n- (count1, word1) = q.get()\n- (count2, word2) = q.get()\n+ (count1, id1, word1) = q.get()\n+ (count2, id2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n- q.put((count, tree))\n+ q.put((count, min(id1, id2), tree))\n \n- return q.get()[1]\n+ return q.get()[2]\n", "issue": "`create_huffman_tree` fails when two entries have the same counts in py3\nThe function uses a priority queue in its implementation. When two entries have the same counts, the queue tries to compare two tuples but it is not possible in py3.\n\nFor example, `create_huffman_tree({'x': 1, 'y': 2, 'z': 3})` fails because the function tries to compare `(3, ('x', 'y'))` and `(3, 'z')`, and compares a tuple and a str. It causes an error.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass TreeParser(object):\n\n def __init__(self):\n self.next_id = 0\n\n def size(self):\n return self.next_id\n\n def get_paths(self):\n return self.paths\n\n def get_codes(self):\n return self.codes\n\n def parse(self, tree):\n self.next_id = 0\n self.path = []\n self.code = []\n self.paths = {}\n self.codes = {}\n self._parse(tree)\n\n assert(len(self.path) == 0)\n assert(len(self.code) == 0)\n assert(len(self.paths) == len(self.codes))\n\n def _parse(self, node):\n if isinstance(node, tuple):\n # internal node\n if len(node) != 2:\n raise ValueError(\n 'All internal nodes must have two child nodes')\n left, right = node\n self.path.append(self.next_id)\n self.next_id += 1\n self.code.append(1.0)\n self._parse(left)\n\n self.code[-1] = -1.0\n self._parse(right)\n\n self.path.pop()\n self.code.pop()\n\n else:\n # leaf node\n self.paths[node] = numpy.array(self.path).astype(numpy.int32)\n self.codes[node] = numpy.array(self.code).astype(numpy.float32)\n\n\nclass BinaryHierarchicalSoftmax(function.Function):\n\n \"\"\"Implementation of hierarchical softmax (HSM).\n\n In natural language applications, vocabulary size is too large to use\n softmax loss.\n Instead, the hierarchical softmax uses product of sigmoid functions.\n It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary\n size in average.\n\n At first a user need to prepare a binary tree whose each leaf is\n corresponding to a word in a vocabulary.\n When a word :math:`x` is given, exactly one path from the root of the tree\n to the leaf of the word exists.\n Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path of\n :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and\n :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at :math:`i`-th\n internal node (-1 is left, and 1 is right).\n Then, the probability of :math:`x` is given as below:\n\n .. 
math::\n\n P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\\\\\\n &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\\\top\n w_{e_i}),\n\n where :math:`\\sigma(\\\\cdot)` is a sigmoid function, and :math:`w` is a\n weight matrix.\n\n This function costs :math:`O(\\log(n))` time as an average length of paths\n is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of internal\n nodes equals :math:`n - 1`.\n\n Args:\n in_size (int): Dimension of input vectors.\n tree: A binary tree made with tuples like `((1, 2), 3)`.\n\n See: Hierarchical Probabilistic Neural Network Language Model [Morin+,\n AISTAT2005].\n\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n self.paths = parser.get_paths()\n self.codes = parser.get_codes()\n\n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n self.gW = numpy.zeros(self.W.shape, numpy.float32)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def forward_cpu(self, args):\n x, t = args\n\n loss = numpy.float32(0.0)\n for ix, it in six.moves.zip(x, t):\n loss += self._forward_cpu_one(ix, it)\n return numpy.array(loss),\n\n def _forward_cpu_one(self, x, t):\n assert t in self.paths\n\n w = self.W[self.paths[t]]\n wxy = w.dot(x) * self.codes[t]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n\n def backward_cpu(self, args, loss):\n x, t = args\n gloss, = loss\n gx = numpy.empty_like(x)\n for i, (ix, it) in enumerate(six.moves.zip(x, t)):\n gx[i] = self._backward_cpu_one(ix, it, gloss)\n return gx, None\n\n def _backward_cpu_one(self, x, t, gloss):\n path = self.paths[t]\n w = self.W[path]\n wxy = w.dot(x) * self.codes[t]\n g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n return gx\n\n\ndef create_huffman_tree(word_counts):\n \"\"\"Make a huffman tree from a dictionary containing word counts.\n\n This method creates a binary huffman tree, that is required for\n :class:`BinaryHierarchicalSoftmax`.\n For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to\n ``((3, 1), (2, 0))``.\n\n Args:\n word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):\n Dictionary representing counts of words.\n\n Returns:\n Binary huffman tree with tuples and keys of ``word_coutns``.\n\n \"\"\"\n if len(word_counts) == 0:\n raise ValueError('Empty vocabulary')\n\n q = six.moves.queue.PriorityQueue()\n for w, c in six.iteritems(word_counts):\n q.put((c, w))\n\n while q.qsize() >= 2:\n (count1, word1) = q.get()\n (count2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n q.put((count, tree))\n\n return q.get()[1]\n", "path": "chainer/functions/hierarchical_softmax.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass TreeParser(object):\n\n def __init__(self):\n self.next_id = 0\n\n def size(self):\n return self.next_id\n\n def get_paths(self):\n return 
self.paths\n\n def get_codes(self):\n return self.codes\n\n def parse(self, tree):\n self.next_id = 0\n self.path = []\n self.code = []\n self.paths = {}\n self.codes = {}\n self._parse(tree)\n\n assert(len(self.path) == 0)\n assert(len(self.code) == 0)\n assert(len(self.paths) == len(self.codes))\n\n def _parse(self, node):\n if isinstance(node, tuple):\n # internal node\n if len(node) != 2:\n raise ValueError(\n 'All internal nodes must have two child nodes')\n left, right = node\n self.path.append(self.next_id)\n self.next_id += 1\n self.code.append(1.0)\n self._parse(left)\n\n self.code[-1] = -1.0\n self._parse(right)\n\n self.path.pop()\n self.code.pop()\n\n else:\n # leaf node\n self.paths[node] = numpy.array(self.path).astype(numpy.int32)\n self.codes[node] = numpy.array(self.code).astype(numpy.float32)\n\n\nclass BinaryHierarchicalSoftmax(function.Function):\n\n \"\"\"Implementation of hierarchical softmax (HSM).\n\n In natural language applications, vocabulary size is too large to use\n softmax loss.\n Instead, the hierarchical softmax uses product of sigmoid functions.\n It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary\n size in average.\n\n At first a user need to prepare a binary tree whose each leaf is\n corresponding to a word in a vocabulary.\n When a word :math:`x` is given, exactly one path from the root of the tree\n to the leaf of the word exists.\n Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path of\n :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and\n :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at :math:`i`-th\n internal node (-1 is left, and 1 is right).\n Then, the probability of :math:`x` is given as below:\n\n .. math::\n\n P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\\\\\\n &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\\\top\n w_{e_i}),\n\n where :math:`\\sigma(\\\\cdot)` is a sigmoid function, and :math:`w` is a\n weight matrix.\n\n This function costs :math:`O(\\log(n))` time as an average length of paths\n is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of internal\n nodes equals :math:`n - 1`.\n\n Args:\n in_size (int): Dimension of input vectors.\n tree: A binary tree made with tuples like `((1, 2), 3)`.\n\n See: Hierarchical Probabilistic Neural Network Language Model [Morin+,\n AISTAT2005].\n\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n self.paths = parser.get_paths()\n self.codes = parser.get_codes()\n\n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n self.gW = numpy.zeros(self.W.shape, numpy.float32)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def forward_cpu(self, args):\n x, t = args\n\n loss = numpy.float32(0.0)\n for ix, it in six.moves.zip(x, t):\n loss += self._forward_cpu_one(ix, it)\n return numpy.array(loss),\n\n def _forward_cpu_one(self, x, t):\n assert t in self.paths\n\n w = self.W[self.paths[t]]\n wxy = w.dot(x) * self.codes[t]\n loss = numpy.logaddexp(0.0, 
-wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n\n def backward_cpu(self, args, loss):\n x, t = args\n gloss, = loss\n gx = numpy.empty_like(x)\n for i, (ix, it) in enumerate(six.moves.zip(x, t)):\n gx[i] = self._backward_cpu_one(ix, it, gloss)\n return gx, None\n\n def _backward_cpu_one(self, x, t, gloss):\n path = self.paths[t]\n w = self.W[path]\n wxy = w.dot(x) * self.codes[t]\n g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n return gx\n\n\ndef create_huffman_tree(word_counts):\n \"\"\"Make a huffman tree from a dictionary containing word counts.\n\n This method creates a binary huffman tree, that is required for\n :class:`BinaryHierarchicalSoftmax`.\n For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to\n ``((3, 1), (2, 0))``.\n\n Args:\n word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):\n Dictionary representing counts of words.\n\n Returns:\n Binary huffman tree with tuples and keys of ``word_coutns``.\n\n \"\"\"\n if len(word_counts) == 0:\n raise ValueError('Empty vocabulary')\n\n q = six.moves.queue.PriorityQueue()\n # Add unique id to each entry so that we can compare two entries with same\n # counts.\n # Note that itreitems randomly order the entries.\n for uid, (w, c) in enumerate(six.iteritems(word_counts)):\n q.put((c, uid, w))\n\n while q.qsize() >= 2:\n (count1, id1, word1) = q.get()\n (count2, id2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n q.put((count, min(id1, id2), tree))\n\n return q.get()[2]\n", "path": "chainer/functions/hierarchical_softmax.py"}]} | 2,513 | 290 |
gh_patches_debug_14823 | rasdani/github-patches | git_diff | kornia__kornia-2620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
expose `average_endpoint_error` on `kornia.metrics.__init__.py`
can you expose it on kornia.metrics.__init__.py ? - to use as `kornia.metrics.average_endpoint_error` too
_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2615#discussion_r1351007042_
--- END ISSUE ---
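For context, the request is the usual package-level re-export: `average_endpoint_error` already lives in `kornia/metrics/endpoint_error.py`, and importing it in `kornia/metrics/__init__.py` makes it reachable from the subpackage itself. A minimal sketch of the intended call site follows — the tensor shapes and exact signature are assumptions for illustration, not taken from the kornia documentation:

```python
import torch
import kornia

# Dummy optical-flow fields; shapes are illustrative only.
flow_pred = torch.rand(1, 8, 8, 2)
flow_gt = torch.rand(1, 8, 8, 2)

# With the re-export in place, no explicit submodule import is needed.
error = kornia.metrics.average_endpoint_error(flow_pred, flow_gt)
print(error)
```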
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/metrics/__init__.py`
Content:
```
1 from .accuracy import accuracy
2 from .average_meter import AverageMeter
3 from .confusion_matrix import confusion_matrix
4 from .endpoint_error import AEPE, aepe
5 from .mean_average_precision import mean_average_precision
6 from .mean_iou import mean_iou, mean_iou_bbox
7 from .psnr import psnr
8 from .ssim import SSIM, ssim
9 from .ssim3d import SSIM3D, ssim3d
10
11 __all__ = [
12 "accuracy",
13 "AverageMeter",
14 "confusion_matrix",
15 "aepe",
16 "AEPE",
17 "mean_iou",
18 "mean_iou_bbox",
19 "mean_average_precision",
20 "psnr",
21 "ssim",
22 "ssim3d",
23 "SSIM",
24 "SSIM3D",
25 ]
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/metrics/__init__.py b/kornia/metrics/__init__.py
--- a/kornia/metrics/__init__.py
+++ b/kornia/metrics/__init__.py
@@ -1,7 +1,7 @@
from .accuracy import accuracy
from .average_meter import AverageMeter
from .confusion_matrix import confusion_matrix
-from .endpoint_error import AEPE, aepe
+from .endpoint_error import AEPE, aepe, average_endpoint_error
from .mean_average_precision import mean_average_precision
from .mean_iou import mean_iou, mean_iou_bbox
from .psnr import psnr
@@ -14,6 +14,7 @@
"confusion_matrix",
"aepe",
"AEPE",
+ "average_endpoint_error",
"mean_iou",
"mean_iou_bbox",
"mean_average_precision",
| {"golden_diff": "diff --git a/kornia/metrics/__init__.py b/kornia/metrics/__init__.py\n--- a/kornia/metrics/__init__.py\n+++ b/kornia/metrics/__init__.py\n@@ -1,7 +1,7 @@\n from .accuracy import accuracy\n from .average_meter import AverageMeter\n from .confusion_matrix import confusion_matrix\n-from .endpoint_error import AEPE, aepe\n+from .endpoint_error import AEPE, aepe, average_endpoint_error\n from .mean_average_precision import mean_average_precision\n from .mean_iou import mean_iou, mean_iou_bbox\n from .psnr import psnr\n@@ -14,6 +14,7 @@\n \"confusion_matrix\",\n \"aepe\",\n \"AEPE\",\n+ \"average_endpoint_error\",\n \"mean_iou\",\n \"mean_iou_bbox\",\n \"mean_average_precision\",\n", "issue": "expose `average_endpoint_error` on `kornia.metrics.__init__.py`\n can you expose it on kornia.metrics.__init__.py ? - to use as `kornia.metrics.average_endpoint_error` too\r\n\r\n_Originally posted by @johnnv1 in https://github.com/kornia/kornia/pull/2615#discussion_r1351007042_\r\n \n", "before_files": [{"content": "from .accuracy import accuracy\nfrom .average_meter import AverageMeter\nfrom .confusion_matrix import confusion_matrix\nfrom .endpoint_error import AEPE, aepe\nfrom .mean_average_precision import mean_average_precision\nfrom .mean_iou import mean_iou, mean_iou_bbox\nfrom .psnr import psnr\nfrom .ssim import SSIM, ssim\nfrom .ssim3d import SSIM3D, ssim3d\n\n__all__ = [\n \"accuracy\",\n \"AverageMeter\",\n \"confusion_matrix\",\n \"aepe\",\n \"AEPE\",\n \"mean_iou\",\n \"mean_iou_bbox\",\n \"mean_average_precision\",\n \"psnr\",\n \"ssim\",\n \"ssim3d\",\n \"SSIM\",\n \"SSIM3D\",\n]\n", "path": "kornia/metrics/__init__.py"}], "after_files": [{"content": "from .accuracy import accuracy\nfrom .average_meter import AverageMeter\nfrom .confusion_matrix import confusion_matrix\nfrom .endpoint_error import AEPE, aepe, average_endpoint_error\nfrom .mean_average_precision import mean_average_precision\nfrom .mean_iou import mean_iou, mean_iou_bbox\nfrom .psnr import psnr\nfrom .ssim import SSIM, ssim\nfrom .ssim3d import SSIM3D, ssim3d\n\n__all__ = [\n \"accuracy\",\n \"AverageMeter\",\n \"confusion_matrix\",\n \"aepe\",\n \"AEPE\",\n \"average_endpoint_error\",\n \"mean_iou\",\n \"mean_iou_bbox\",\n \"mean_average_precision\",\n \"psnr\",\n \"ssim\",\n \"ssim3d\",\n \"SSIM\",\n \"SSIM3D\",\n]\n", "path": "kornia/metrics/__init__.py"}]} | 570 | 195 |
gh_patches_debug_1128 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-2698 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] the `benchmarks` folder is included upon installation
I noticed that while inspecting the conda package during my attempt to create a conda forge build.

The fix is likely as simple as adding `benchmarks` to `packages=find_packages(exclude=[....])` in the `setup.py` file.
--- END ISSUE ---
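A quick way to check the effect of such an exclusion before building is to call `find_packages` directly from the repository root. Note that setuptools exclude patterns only match the names listed, so a `benchmarks.*` pattern is needed as well if the folder contains subpackages; the exclude list below is illustrative, not the project's actual list.

```python
from setuptools import find_packages

# Run from the repository root: lists every package that would be bundled.
packages = find_packages(exclude=["benchmarks", "benchmarks.*", "tests", "tests.*"])

assert not any(p == "benchmarks" or p.startswith("benchmarks.") for p in packages)
print(packages)
```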
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 Copyright 2020 The Microsoft DeepSpeed Team
3
4 DeepSpeed library
5
6 To build wheel on Windows:
7 1. Install pytorch, such as pytorch 1.12 + cuda 11.6
8 2. Install visual cpp build tool
9 3. Include cuda toolkit
10 4. Launch cmd console with Administrator privilege for creating required symlink folders
11
12 Create a new wheel via the following command:
13 build_win.bat
14
15 The wheel will be located at: dist/*.whl
16 """
17
18 import os
19 import sys
20 import subprocess
21 from setuptools import setup, find_packages
22 from setuptools.command import egg_info
23 import time
24
25 torch_available = True
26 try:
27 import torch
28 except ImportError:
29 torch_available = False
30 print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \
31 'Please visit https://pytorch.org/ to see how to properly install torch on your system.')
32
33 from op_builder import get_default_compute_capabilities, OpBuilder
34 from op_builder.all_ops import ALL_OPS
35 from op_builder.builder import installed_cuda_version
36
37 # fetch rocm state
38 is_rocm_pytorch = OpBuilder.is_rocm_pytorch()
39 rocm_version = OpBuilder.installed_rocm_version()
40
41 RED_START = '\033[31m'
42 RED_END = '\033[0m'
43 ERROR = f"{RED_START} [ERROR] {RED_END}"
44
45
46 def abort(msg):
47 print(f"{ERROR} {msg}")
48 assert False, msg
49
50
51 def fetch_requirements(path):
52 with open(path, 'r') as fd:
53 return [r.strip() for r in fd.readlines()]
54
55
56 install_requires = fetch_requirements('requirements/requirements.txt')
57 extras_require = {
58 '1bit': [], # add cupy based on cuda/rocm version
59 '1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),
60 'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),
61 'dev': fetch_requirements('requirements/requirements-dev.txt'),
62 'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),
63 'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),
64 'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),
65 'inf': fetch_requirements('requirements/requirements-inf.txt'),
66 'sd': fetch_requirements('requirements/requirements-sd.txt')
67 }
68
69 # Add specific cupy version to both onebit extension variants
70 if torch_available and torch.cuda.is_available():
71 cupy = None
72 if is_rocm_pytorch:
73 rocm_major, rocm_minor = rocm_version
74 # XXX cupy support for rocm 5 is not available yet
75 if rocm_major <= 4:
76 cupy = f"cupy-rocm-{rocm_major}-{rocm_minor}"
77 else:
78 cupy = f"cupy-cuda{''.join(map(str,installed_cuda_version()))}"
79 if cupy:
80 extras_require['1bit'].append(cupy)
81 extras_require['1bit_mpi'].append(cupy)
82
83 # Make an [all] extra that installs all needed dependencies
84 all_extras = set()
85 for extra in extras_require.items():
86 for req in extra[1]:
87 all_extras.add(req)
88 extras_require['all'] = list(all_extras)
89
90 cmdclass = {}
91
92 # For any pre-installed ops force disable ninja
93 if torch_available:
94 from accelerator import get_accelerator
95 cmdclass['build_ext'] = get_accelerator().build_extension().with_options(
96 use_ninja=False)
97
98 if torch_available:
99 TORCH_MAJOR = torch.__version__.split('.')[0]
100 TORCH_MINOR = torch.__version__.split('.')[1]
101 else:
102 TORCH_MAJOR = "0"
103 TORCH_MINOR = "0"
104
105 if torch_available and not torch.cuda.is_available():
106 # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486
107 print(
108 "[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only "
109 "you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
110 "(compute capabilities 6.0, 6.1, 6.2)")
111 if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
112 os.environ["TORCH_CUDA_ARCH_LIST"] = get_default_compute_capabilities()
113
114 ext_modules = []
115
116 # Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.
117 BUILD_OP_PLATFORM = 1 if sys.platform == "win32" else 0
118 BUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))
119 print(f"DS_BUILD_OPS={BUILD_OP_DEFAULT}")
120
121 if BUILD_OP_DEFAULT:
122 assert torch_available, "Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops."
123
124
125 def command_exists(cmd):
126 if sys.platform == "win32":
127 result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)
128 return result.wait() == 1
129 else:
130 result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)
131 return result.wait() == 0
132
133
134 def op_envvar(op_name):
135 assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \
136 f"{op_name} is missing BUILD_VAR field"
137 return ALL_OPS[op_name].BUILD_VAR
138
139
140 def op_enabled(op_name):
141 env_var = op_envvar(op_name)
142 return int(os.environ.get(env_var, BUILD_OP_DEFAULT))
143
144
145 compatible_ops = dict.fromkeys(ALL_OPS.keys(), False)
146 install_ops = dict.fromkeys(ALL_OPS.keys(), False)
147 for op_name, builder in ALL_OPS.items():
148 op_compatible = builder.is_compatible()
149 compatible_ops[op_name] = op_compatible
150
151 # If op is requested but not available, throw an error
152 if op_enabled(op_name) and not op_compatible:
153 env_var = op_envvar(op_name)
154 if env_var not in os.environ:
155 builder.warning(f"One can disable {op_name} with {env_var}=0")
156 abort(f"Unable to pre-compile {op_name}")
157
158 # if op is compatible but install is not enabled (JIT mode)
159 if is_rocm_pytorch and op_compatible and not op_enabled(op_name):
160 builder.hipify_extension()
161
162 # If op install enabled, add builder to extensions
163 if op_enabled(op_name) and op_compatible:
164 assert torch_available, f"Unable to pre-compile {op_name}, please first install torch"
165 install_ops[op_name] = op_enabled(op_name)
166 ext_modules.append(builder.builder())
167
168 print(f'Install Ops={install_ops}')
169
170 # Write out version/git info
171 git_hash_cmd = "git rev-parse --short HEAD"
172 git_branch_cmd = "git rev-parse --abbrev-ref HEAD"
173 if command_exists('git') and 'DS_BUILD_STRING' not in os.environ:
174 try:
175 result = subprocess.check_output(git_hash_cmd, shell=True)
176 git_hash = result.decode('utf-8').strip()
177 result = subprocess.check_output(git_branch_cmd, shell=True)
178 git_branch = result.decode('utf-8').strip()
179 except subprocess.CalledProcessError:
180 git_hash = "unknown"
181 git_branch = "unknown"
182 else:
183 git_hash = "unknown"
184 git_branch = "unknown"
185
186
187 def create_dir_symlink(src, dest):
188 if not os.path.islink(dest):
189 if os.path.exists(dest):
190 os.remove(dest)
191 assert not os.path.exists(dest)
192 os.symlink(src, dest)
193
194
195 if sys.platform == "win32":
196 # This creates a symbolic links on Windows.
197 # It needs Administrator privilege to create symlinks on Windows.
198 create_dir_symlink('..\\..\\csrc', '.\\deepspeed\\ops\\csrc')
199 create_dir_symlink('..\\..\\op_builder', '.\\deepspeed\\ops\\op_builder')
200 create_dir_symlink('..\\accelerator', '.\\deepspeed\\accelerator')
201 egg_info.manifest_maker.template = 'MANIFEST_win.in'
202
203 # Parse the DeepSpeed version string from version.txt
204 version_str = open('version.txt', 'r').read().strip()
205
206 # Build specifiers like .devX can be added at install time. Otherwise, add the git hash.
207 # example: DS_BUILD_STR=".dev20201022" python setup.py sdist bdist_wheel
208
209 # Building wheel for distribution, update version file
210 if 'DS_BUILD_STRING' in os.environ:
211 # Build string env specified, probably building for distribution
212 with open('build.txt', 'w') as fd:
213 fd.write(os.environ.get('DS_BUILD_STRING'))
214 version_str += os.environ.get('DS_BUILD_STRING')
215 elif os.path.isfile('build.txt'):
216 # build.txt exists, probably installing from distribution
217 with open('build.txt', 'r') as fd:
218 version_str += fd.read().strip()
219 else:
220 # None of the above, probably installing from source
221 version_str += f'+{git_hash}'
222
223 torch_version = ".".join([TORCH_MAJOR, TORCH_MINOR])
224 bf16_support = False
225 # Set cuda_version to 0.0 if cpu-only
226 cuda_version = "0.0"
227 nccl_version = "0.0"
228 # Set hip_version to 0.0 if cpu-only
229 hip_version = "0.0"
230 if torch_available and torch.version.cuda is not None:
231 cuda_version = ".".join(torch.version.cuda.split('.')[:2])
232 if sys.platform != "win32":
233 if isinstance(torch.cuda.nccl.version(), int):
234 # This will break if minor version > 9
235 nccl_version = ".".join(str(torch.cuda.nccl.version())[:2])
236 else:
237 nccl_version = ".".join(map(str, torch.cuda.nccl.version()[:2]))
238 if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():
239 bf16_support = torch.cuda.is_bf16_supported()
240 if torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:
241 hip_version = ".".join(torch.version.hip.split('.')[:2])
242 torch_info = {
243 "version": torch_version,
244 "bf16_support": bf16_support,
245 "cuda_version": cuda_version,
246 "nccl_version": nccl_version,
247 "hip_version": hip_version
248 }
249
250 print(f"version={version_str}, git_hash={git_hash}, git_branch={git_branch}")
251 with open('deepspeed/git_version_info_installed.py', 'w') as fd:
252 fd.write(f"version='{version_str}'\n")
253 fd.write(f"git_hash='{git_hash}'\n")
254 fd.write(f"git_branch='{git_branch}'\n")
255 fd.write(f"installed_ops={install_ops}\n")
256 fd.write(f"compatible_ops={compatible_ops}\n")
257 fd.write(f"torch_info={torch_info}\n")
258
259 print(f'install_requires={install_requires}')
260 print(f'compatible_ops={compatible_ops}')
261 print(f'ext_modules={ext_modules}')
262
263 # Parse README.md to make long_description for PyPI page.
264 thisdir = os.path.abspath(os.path.dirname(__file__))
265 with open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:
266 readme_text = fin.read()
267
268 start_time = time.time()
269
270 setup(name='deepspeed',
271 version=version_str,
272 description='DeepSpeed library',
273 long_description=readme_text,
274 long_description_content_type='text/markdown',
275 author='DeepSpeed Team',
276 author_email='[email protected]',
277 url='http://deepspeed.ai',
278 project_urls={
279 'Documentation': 'https://deepspeed.readthedocs.io',
280 'Source': 'https://github.com/microsoft/DeepSpeed',
281 },
282 install_requires=install_requires,
283 extras_require=extras_require,
284 packages=find_packages(exclude=[
285 "azure",
286 "csrc",
287 "docker",
288 "docs",
289 "examples",
290 "op_builder",
291 "release",
292 "requirements",
293 "scripts",
294 "tests"
295 ]),
296 include_package_data=True,
297 scripts=[
298 'bin/deepspeed',
299 'bin/deepspeed.pt',
300 'bin/ds',
301 'bin/ds_ssh',
302 'bin/ds_report',
303 'bin/ds_bench',
304 'bin/dsr',
305 'bin/ds_elastic'
306 ],
307 classifiers=[
308 'Programming Language :: Python :: 3.6',
309 'Programming Language :: Python :: 3.7',
310 'Programming Language :: Python :: 3.8',
311 'Programming Language :: Python :: 3.9',
312 'Programming Language :: Python :: 3.10'
313 ],
314 license='MIT',
315 ext_modules=ext_modules,
316 cmdclass=cmdclass)
317
318 end_time = time.time()
319 print(f'deepspeed build time = {end_time - start_time} secs')
320
```
--- END FILES ---
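As an aside, the env-var gating in the file above resolves in two steps: `DS_BUILD_OPS` (or the platform default) sets `BUILD_OP_DEFAULT`, and each op's own `BUILD_VAR` can override it. A tiny standalone sketch of that precedence — the per-op variable names here are illustrative; the real names come from each builder's `BUILD_VAR`:

```python
import os

def op_enabled(env_var, build_default):
    # Mirrors op_enabled()/BUILD_OP_DEFAULT above: per-op variable wins,
    # otherwise the global default decides.
    return int(os.environ.get(env_var, build_default))

os.environ["DS_BUILD_OPS"] = "0"            # global default: leave ops to JIT
os.environ["DS_BUILD_EXAMPLE_OP"] = "1"     # hypothetical per-op override
default = int(os.environ.get("DS_BUILD_OPS", 0))
print(op_enabled("DS_BUILD_EXAMPLE_OP", default))  # 1 -> this op is pre-built
print(op_enabled("DS_BUILD_OTHER_OP", default))    # 0 -> left to JIT
```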
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -291,7 +291,9 @@
"release",
"requirements",
"scripts",
- "tests"
+ "tests",
+ "benchmarks",
+ "accelerator"
]),
include_package_data=True,
scripts=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -291,7 +291,9 @@\n \"release\",\n \"requirements\",\n \"scripts\",\n- \"tests\"\n+ \"tests\",\n+ \"benchmarks\",\n+ \"accelerator\"\n ]),\n include_package_data=True,\n scripts=[\n", "issue": "[BUG] the `benchmarks` folder is included upon installation\nI noticed that while inspecting the conda package during my attempt to create a conda forge build.\r\n\r\n\r\n\r\nThe fix is likely as simple as adding `benchmarks` to `packages=find_packages(exclude=[....])` in the `setup.py` file.\n", "before_files": [{"content": "\"\"\"\nCopyright 2020 The Microsoft DeepSpeed Team\n\nDeepSpeed library\n\nTo build wheel on Windows:\n 1. Install pytorch, such as pytorch 1.12 + cuda 11.6\n 2. Install visual cpp build tool\n 3. Include cuda toolkit\n 4. Launch cmd console with Administrator privilege for creating required symlink folders\n\nCreate a new wheel via the following command:\n build_win.bat\n\nThe wheel will be located at: dist/*.whl\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\nfrom setuptools.command import egg_info\nimport time\n\ntorch_available = True\ntry:\n import torch\nexcept ImportError:\n torch_available = False\n print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \\\n 'Please visit https://pytorch.org/ to see how to properly install torch on your system.')\n\nfrom op_builder import get_default_compute_capabilities, OpBuilder\nfrom op_builder.all_ops import ALL_OPS\nfrom op_builder.builder import installed_cuda_version\n\n# fetch rocm state\nis_rocm_pytorch = OpBuilder.is_rocm_pytorch()\nrocm_version = OpBuilder.installed_rocm_version()\n\nRED_START = '\\033[31m'\nRED_END = '\\033[0m'\nERROR = f\"{RED_START} [ERROR] {RED_END}\"\n\n\ndef abort(msg):\n print(f\"{ERROR} {msg}\")\n assert False, msg\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ninstall_requires = fetch_requirements('requirements/requirements.txt')\nextras_require = {\n '1bit': [], # add cupy based on cuda/rocm version\n '1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),\n 'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),\n 'dev': fetch_requirements('requirements/requirements-dev.txt'),\n 'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),\n 'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),\n 'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),\n 'inf': fetch_requirements('requirements/requirements-inf.txt'),\n 'sd': fetch_requirements('requirements/requirements-sd.txt')\n}\n\n# Add specific cupy version to both onebit extension variants\nif torch_available and torch.cuda.is_available():\n cupy = None\n if is_rocm_pytorch:\n rocm_major, rocm_minor = rocm_version\n # XXX cupy support for rocm 5 is not available yet\n if rocm_major <= 4:\n cupy = f\"cupy-rocm-{rocm_major}-{rocm_minor}\"\n else:\n cupy = f\"cupy-cuda{''.join(map(str,installed_cuda_version()))}\"\n if cupy:\n extras_require['1bit'].append(cupy)\n extras_require['1bit_mpi'].append(cupy)\n\n# Make an [all] extra that installs all needed dependencies\nall_extras = set()\nfor extra in extras_require.items():\n for req in extra[1]:\n all_extras.add(req)\nextras_require['all'] = list(all_extras)\n\ncmdclass = {}\n\n# For any pre-installed ops force disable ninja\nif torch_available:\n from 
accelerator import get_accelerator\n cmdclass['build_ext'] = get_accelerator().build_extension().with_options(\n use_ninja=False)\n\nif torch_available:\n TORCH_MAJOR = torch.__version__.split('.')[0]\n TORCH_MINOR = torch.__version__.split('.')[1]\nelse:\n TORCH_MAJOR = \"0\"\n TORCH_MINOR = \"0\"\n\nif torch_available and not torch.cuda.is_available():\n # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486\n print(\n \"[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only \"\n \"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing \"\n \"(compute capabilities 6.0, 6.1, 6.2)\")\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = get_default_compute_capabilities()\n\next_modules = []\n\n# Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.\nBUILD_OP_PLATFORM = 1 if sys.platform == \"win32\" else 0\nBUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))\nprint(f\"DS_BUILD_OPS={BUILD_OP_DEFAULT}\")\n\nif BUILD_OP_DEFAULT:\n assert torch_available, \"Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops.\"\n\n\ndef command_exists(cmd):\n if sys.platform == \"win32\":\n result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 1\n else:\n result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 0\n\n\ndef op_envvar(op_name):\n assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \\\n f\"{op_name} is missing BUILD_VAR field\"\n return ALL_OPS[op_name].BUILD_VAR\n\n\ndef op_enabled(op_name):\n env_var = op_envvar(op_name)\n return int(os.environ.get(env_var, BUILD_OP_DEFAULT))\n\n\ncompatible_ops = dict.fromkeys(ALL_OPS.keys(), False)\ninstall_ops = dict.fromkeys(ALL_OPS.keys(), False)\nfor op_name, builder in ALL_OPS.items():\n op_compatible = builder.is_compatible()\n compatible_ops[op_name] = op_compatible\n\n # If op is requested but not available, throw an error\n if op_enabled(op_name) and not op_compatible:\n env_var = op_envvar(op_name)\n if env_var not in os.environ:\n builder.warning(f\"One can disable {op_name} with {env_var}=0\")\n abort(f\"Unable to pre-compile {op_name}\")\n\n # if op is compatible but install is not enabled (JIT mode)\n if is_rocm_pytorch and op_compatible and not op_enabled(op_name):\n builder.hipify_extension()\n\n # If op install enabled, add builder to extensions\n if op_enabled(op_name) and op_compatible:\n assert torch_available, f\"Unable to pre-compile {op_name}, please first install torch\"\n install_ops[op_name] = op_enabled(op_name)\n ext_modules.append(builder.builder())\n\nprint(f'Install Ops={install_ops}')\n\n# Write out version/git info\ngit_hash_cmd = \"git rev-parse --short HEAD\"\ngit_branch_cmd = \"git rev-parse --abbrev-ref HEAD\"\nif command_exists('git') and 'DS_BUILD_STRING' not in os.environ:\n try:\n result = subprocess.check_output(git_hash_cmd, shell=True)\n git_hash = result.decode('utf-8').strip()\n result = subprocess.check_output(git_branch_cmd, shell=True)\n git_branch = result.decode('utf-8').strip()\n except subprocess.CalledProcessError:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\nelse:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\n\n\ndef create_dir_symlink(src, dest):\n if not os.path.islink(dest):\n if os.path.exists(dest):\n os.remove(dest)\n assert not os.path.exists(dest)\n 
os.symlink(src, dest)\n\n\nif sys.platform == \"win32\":\n # This creates a symbolic links on Windows.\n # It needs Administrator privilege to create symlinks on Windows.\n create_dir_symlink('..\\\\..\\\\csrc', '.\\\\deepspeed\\\\ops\\\\csrc')\n create_dir_symlink('..\\\\..\\\\op_builder', '.\\\\deepspeed\\\\ops\\\\op_builder')\n create_dir_symlink('..\\\\accelerator', '.\\\\deepspeed\\\\accelerator')\n egg_info.manifest_maker.template = 'MANIFEST_win.in'\n\n# Parse the DeepSpeed version string from version.txt\nversion_str = open('version.txt', 'r').read().strip()\n\n# Build specifiers like .devX can be added at install time. Otherwise, add the git hash.\n# example: DS_BUILD_STR=\".dev20201022\" python setup.py sdist bdist_wheel\n\n# Building wheel for distribution, update version file\nif 'DS_BUILD_STRING' in os.environ:\n # Build string env specified, probably building for distribution\n with open('build.txt', 'w') as fd:\n fd.write(os.environ.get('DS_BUILD_STRING'))\n version_str += os.environ.get('DS_BUILD_STRING')\nelif os.path.isfile('build.txt'):\n # build.txt exists, probably installing from distribution\n with open('build.txt', 'r') as fd:\n version_str += fd.read().strip()\nelse:\n # None of the above, probably installing from source\n version_str += f'+{git_hash}'\n\ntorch_version = \".\".join([TORCH_MAJOR, TORCH_MINOR])\nbf16_support = False\n# Set cuda_version to 0.0 if cpu-only\ncuda_version = \"0.0\"\nnccl_version = \"0.0\"\n# Set hip_version to 0.0 if cpu-only\nhip_version = \"0.0\"\nif torch_available and torch.version.cuda is not None:\n cuda_version = \".\".join(torch.version.cuda.split('.')[:2])\n if sys.platform != \"win32\":\n if isinstance(torch.cuda.nccl.version(), int):\n # This will break if minor version > 9\n nccl_version = \".\".join(str(torch.cuda.nccl.version())[:2])\n else:\n nccl_version = \".\".join(map(str, torch.cuda.nccl.version()[:2]))\n if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():\n bf16_support = torch.cuda.is_bf16_supported()\nif torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:\n hip_version = \".\".join(torch.version.hip.split('.')[:2])\ntorch_info = {\n \"version\": torch_version,\n \"bf16_support\": bf16_support,\n \"cuda_version\": cuda_version,\n \"nccl_version\": nccl_version,\n \"hip_version\": hip_version\n}\n\nprint(f\"version={version_str}, git_hash={git_hash}, git_branch={git_branch}\")\nwith open('deepspeed/git_version_info_installed.py', 'w') as fd:\n fd.write(f\"version='{version_str}'\\n\")\n fd.write(f\"git_hash='{git_hash}'\\n\")\n fd.write(f\"git_branch='{git_branch}'\\n\")\n fd.write(f\"installed_ops={install_ops}\\n\")\n fd.write(f\"compatible_ops={compatible_ops}\\n\")\n fd.write(f\"torch_info={torch_info}\\n\")\n\nprint(f'install_requires={install_requires}')\nprint(f'compatible_ops={compatible_ops}')\nprint(f'ext_modules={ext_modules}')\n\n# Parse README.md to make long_description for PyPI page.\nthisdir = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:\n readme_text = fin.read()\n\nstart_time = time.time()\n\nsetup(name='deepspeed',\n version=version_str,\n description='DeepSpeed library',\n long_description=readme_text,\n long_description_content_type='text/markdown',\n author='DeepSpeed Team',\n author_email='[email protected]',\n url='http://deepspeed.ai',\n project_urls={\n 'Documentation': 'https://deepspeed.readthedocs.io',\n 'Source': 'https://github.com/microsoft/DeepSpeed',\n },\n 
install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(exclude=[\n \"azure\",\n \"csrc\",\n \"docker\",\n \"docs\",\n \"examples\",\n \"op_builder\",\n \"release\",\n \"requirements\",\n \"scripts\",\n \"tests\"\n ]),\n include_package_data=True,\n scripts=[\n 'bin/deepspeed',\n 'bin/deepspeed.pt',\n 'bin/ds',\n 'bin/ds_ssh',\n 'bin/ds_report',\n 'bin/ds_bench',\n 'bin/dsr',\n 'bin/ds_elastic'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10'\n ],\n license='MIT',\n ext_modules=ext_modules,\n cmdclass=cmdclass)\n\nend_time = time.time()\nprint(f'deepspeed build time = {end_time - start_time} secs')\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2020 The Microsoft DeepSpeed Team\n\nDeepSpeed library\n\nTo build wheel on Windows:\n 1. Install pytorch, such as pytorch 1.12 + cuda 11.6\n 2. Install visual cpp build tool\n 3. Include cuda toolkit\n 4. Launch cmd console with Administrator privilege for creating required symlink folders\n\nCreate a new wheel via the following command:\n build_win.bat\n\nThe wheel will be located at: dist/*.whl\n\"\"\"\n\nimport os\nimport sys\nimport subprocess\nfrom setuptools import setup, find_packages\nfrom setuptools.command import egg_info\nimport time\n\ntorch_available = True\ntry:\n import torch\nexcept ImportError:\n torch_available = False\n print('[WARNING] Unable to import torch, pre-compiling ops will be disabled. ' \\\n 'Please visit https://pytorch.org/ to see how to properly install torch on your system.')\n\nfrom op_builder import get_default_compute_capabilities, OpBuilder\nfrom op_builder.all_ops import ALL_OPS\nfrom op_builder.builder import installed_cuda_version\n\n# fetch rocm state\nis_rocm_pytorch = OpBuilder.is_rocm_pytorch()\nrocm_version = OpBuilder.installed_rocm_version()\n\nRED_START = '\\033[31m'\nRED_END = '\\033[0m'\nERROR = f\"{RED_START} [ERROR] {RED_END}\"\n\n\ndef abort(msg):\n print(f\"{ERROR} {msg}\")\n assert False, msg\n\n\ndef fetch_requirements(path):\n with open(path, 'r') as fd:\n return [r.strip() for r in fd.readlines()]\n\n\ninstall_requires = fetch_requirements('requirements/requirements.txt')\nextras_require = {\n '1bit': [], # add cupy based on cuda/rocm version\n '1bit_mpi': fetch_requirements('requirements/requirements-1bit-mpi.txt'),\n 'readthedocs': fetch_requirements('requirements/requirements-readthedocs.txt'),\n 'dev': fetch_requirements('requirements/requirements-dev.txt'),\n 'autotuning': fetch_requirements('requirements/requirements-autotuning.txt'),\n 'autotuning_ml': fetch_requirements('requirements/requirements-autotuning-ml.txt'),\n 'sparse_attn': fetch_requirements('requirements/requirements-sparse_attn.txt'),\n 'inf': fetch_requirements('requirements/requirements-inf.txt'),\n 'sd': fetch_requirements('requirements/requirements-sd.txt')\n}\n\n# Add specific cupy version to both onebit extension variants\nif torch_available and torch.cuda.is_available():\n cupy = None\n if is_rocm_pytorch:\n rocm_major, rocm_minor = rocm_version\n # XXX cupy support for rocm 5 is not available yet\n if rocm_major <= 4:\n cupy = f\"cupy-rocm-{rocm_major}-{rocm_minor}\"\n else:\n cupy = f\"cupy-cuda{''.join(map(str,installed_cuda_version()))}\"\n if cupy:\n extras_require['1bit'].append(cupy)\n extras_require['1bit_mpi'].append(cupy)\n\n# Make an [all] extra that 
installs all needed dependencies\nall_extras = set()\nfor extra in extras_require.items():\n for req in extra[1]:\n all_extras.add(req)\nextras_require['all'] = list(all_extras)\n\ncmdclass = {}\n\n# For any pre-installed ops force disable ninja\nif torch_available:\n from accelerator import get_accelerator\n cmdclass['build_ext'] = get_accelerator().build_extension().with_options(\n use_ninja=False)\n\nif torch_available:\n TORCH_MAJOR = torch.__version__.split('.')[0]\n TORCH_MINOR = torch.__version__.split('.')[1]\nelse:\n TORCH_MAJOR = \"0\"\n TORCH_MINOR = \"0\"\n\nif torch_available and not torch.cuda.is_available():\n # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486\n print(\n \"[WARNING] Torch did not find cuda available, if cross-compiling or running with cpu only \"\n \"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing \"\n \"(compute capabilities 6.0, 6.1, 6.2)\")\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = get_default_compute_capabilities()\n\next_modules = []\n\n# Default to pre-install kernels to false so we rely on JIT on Linux, opposite on Windows.\nBUILD_OP_PLATFORM = 1 if sys.platform == \"win32\" else 0\nBUILD_OP_DEFAULT = int(os.environ.get('DS_BUILD_OPS', BUILD_OP_PLATFORM))\nprint(f\"DS_BUILD_OPS={BUILD_OP_DEFAULT}\")\n\nif BUILD_OP_DEFAULT:\n assert torch_available, \"Unable to pre-compile ops without torch installed. Please install torch before attempting to pre-compile ops.\"\n\n\ndef command_exists(cmd):\n if sys.platform == \"win32\":\n result = subprocess.Popen(f'{cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 1\n else:\n result = subprocess.Popen(f'type {cmd}', stdout=subprocess.PIPE, shell=True)\n return result.wait() == 0\n\n\ndef op_envvar(op_name):\n assert hasattr(ALL_OPS[op_name], 'BUILD_VAR'), \\\n f\"{op_name} is missing BUILD_VAR field\"\n return ALL_OPS[op_name].BUILD_VAR\n\n\ndef op_enabled(op_name):\n env_var = op_envvar(op_name)\n return int(os.environ.get(env_var, BUILD_OP_DEFAULT))\n\n\ncompatible_ops = dict.fromkeys(ALL_OPS.keys(), False)\ninstall_ops = dict.fromkeys(ALL_OPS.keys(), False)\nfor op_name, builder in ALL_OPS.items():\n op_compatible = builder.is_compatible()\n compatible_ops[op_name] = op_compatible\n\n # If op is requested but not available, throw an error\n if op_enabled(op_name) and not op_compatible:\n env_var = op_envvar(op_name)\n if env_var not in os.environ:\n builder.warning(f\"One can disable {op_name} with {env_var}=0\")\n abort(f\"Unable to pre-compile {op_name}\")\n\n # if op is compatible but install is not enabled (JIT mode)\n if is_rocm_pytorch and op_compatible and not op_enabled(op_name):\n builder.hipify_extension()\n\n # If op install enabled, add builder to extensions\n if op_enabled(op_name) and op_compatible:\n assert torch_available, f\"Unable to pre-compile {op_name}, please first install torch\"\n install_ops[op_name] = op_enabled(op_name)\n ext_modules.append(builder.builder())\n\nprint(f'Install Ops={install_ops}')\n\n# Write out version/git info\ngit_hash_cmd = \"git rev-parse --short HEAD\"\ngit_branch_cmd = \"git rev-parse --abbrev-ref HEAD\"\nif command_exists('git') and 'DS_BUILD_STRING' not in os.environ:\n try:\n result = subprocess.check_output(git_hash_cmd, shell=True)\n git_hash = result.decode('utf-8').strip()\n result = subprocess.check_output(git_branch_cmd, shell=True)\n git_branch = result.decode('utf-8').strip()\n except 
subprocess.CalledProcessError:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\nelse:\n git_hash = \"unknown\"\n git_branch = \"unknown\"\n\n\ndef create_dir_symlink(src, dest):\n if not os.path.islink(dest):\n if os.path.exists(dest):\n os.remove(dest)\n assert not os.path.exists(dest)\n os.symlink(src, dest)\n\n\nif sys.platform == \"win32\":\n # This creates a symbolic links on Windows.\n # It needs Administrator privilege to create symlinks on Windows.\n create_dir_symlink('..\\\\..\\\\csrc', '.\\\\deepspeed\\\\ops\\\\csrc')\n create_dir_symlink('..\\\\..\\\\op_builder', '.\\\\deepspeed\\\\ops\\\\op_builder')\n create_dir_symlink('..\\\\accelerator', '.\\\\deepspeed\\\\accelerator')\n egg_info.manifest_maker.template = 'MANIFEST_win.in'\n\n# Parse the DeepSpeed version string from version.txt\nversion_str = open('version.txt', 'r').read().strip()\n\n# Build specifiers like .devX can be added at install time. Otherwise, add the git hash.\n# example: DS_BUILD_STR=\".dev20201022\" python setup.py sdist bdist_wheel\n\n# Building wheel for distribution, update version file\nif 'DS_BUILD_STRING' in os.environ:\n # Build string env specified, probably building for distribution\n with open('build.txt', 'w') as fd:\n fd.write(os.environ.get('DS_BUILD_STRING'))\n version_str += os.environ.get('DS_BUILD_STRING')\nelif os.path.isfile('build.txt'):\n # build.txt exists, probably installing from distribution\n with open('build.txt', 'r') as fd:\n version_str += fd.read().strip()\nelse:\n # None of the above, probably installing from source\n version_str += f'+{git_hash}'\n\ntorch_version = \".\".join([TORCH_MAJOR, TORCH_MINOR])\nbf16_support = False\n# Set cuda_version to 0.0 if cpu-only\ncuda_version = \"0.0\"\nnccl_version = \"0.0\"\n# Set hip_version to 0.0 if cpu-only\nhip_version = \"0.0\"\nif torch_available and torch.version.cuda is not None:\n cuda_version = \".\".join(torch.version.cuda.split('.')[:2])\n if sys.platform != \"win32\":\n if isinstance(torch.cuda.nccl.version(), int):\n # This will break if minor version > 9\n nccl_version = \".\".join(str(torch.cuda.nccl.version())[:2])\n else:\n nccl_version = \".\".join(map(str, torch.cuda.nccl.version()[:2]))\n if hasattr(torch.cuda, 'is_bf16_supported') and torch.cuda.is_available():\n bf16_support = torch.cuda.is_bf16_supported()\nif torch_available and hasattr(torch.version, 'hip') and torch.version.hip is not None:\n hip_version = \".\".join(torch.version.hip.split('.')[:2])\ntorch_info = {\n \"version\": torch_version,\n \"bf16_support\": bf16_support,\n \"cuda_version\": cuda_version,\n \"nccl_version\": nccl_version,\n \"hip_version\": hip_version\n}\n\nprint(f\"version={version_str}, git_hash={git_hash}, git_branch={git_branch}\")\nwith open('deepspeed/git_version_info_installed.py', 'w') as fd:\n fd.write(f\"version='{version_str}'\\n\")\n fd.write(f\"git_hash='{git_hash}'\\n\")\n fd.write(f\"git_branch='{git_branch}'\\n\")\n fd.write(f\"installed_ops={install_ops}\\n\")\n fd.write(f\"compatible_ops={compatible_ops}\\n\")\n fd.write(f\"torch_info={torch_info}\\n\")\n\nprint(f'install_requires={install_requires}')\nprint(f'compatible_ops={compatible_ops}')\nprint(f'ext_modules={ext_modules}')\n\n# Parse README.md to make long_description for PyPI page.\nthisdir = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(thisdir, 'README.md'), encoding='utf-8') as fin:\n readme_text = fin.read()\n\nstart_time = time.time()\n\nsetup(name='deepspeed',\n version=version_str,\n description='DeepSpeed library',\n 
long_description=readme_text,\n long_description_content_type='text/markdown',\n author='DeepSpeed Team',\n author_email='[email protected]',\n url='http://deepspeed.ai',\n project_urls={\n 'Documentation': 'https://deepspeed.readthedocs.io',\n 'Source': 'https://github.com/microsoft/DeepSpeed',\n },\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(exclude=[\n \"azure\",\n \"csrc\",\n \"docker\",\n \"docs\",\n \"examples\",\n \"op_builder\",\n \"release\",\n \"requirements\",\n \"scripts\",\n \"tests\",\n \"benchmarks\",\n \"accelerator\"\n ]),\n include_package_data=True,\n scripts=[\n 'bin/deepspeed',\n 'bin/deepspeed.pt',\n 'bin/ds',\n 'bin/ds_ssh',\n 'bin/ds_report',\n 'bin/ds_bench',\n 'bin/dsr',\n 'bin/ds_elastic'\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10'\n ],\n license='MIT',\n ext_modules=ext_modules,\n cmdclass=cmdclass)\n\nend_time = time.time()\nprint(f'deepspeed build time = {end_time - start_time} secs')\n", "path": "setup.py"}]} | 4,073 | 79 |
gh_patches_debug_56929 | rasdani/github-patches | git_diff | kivy__python-for-android-575 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTP 302 recipe download file
Downloading from SourceForge with an auto-selected mirror results in an HTTP 302 redirect instead of a direct download.
Trying to build this clean recipe below results in:
```
RAN: '/bin/tar xzf ~/.local/share/python-for-android/packages/boost/boost_1_58_0.tar.gz'
STDOUT:
STDERR:
gzip: stdin: not in gzip format
/bin/tar: Child returned status 1
/bin/tar: Error is not recoverable: exiting now
```
This happens because the downloaded .tar.gz file is actually the HTML (ASCII) page returned with the 302 redirect.
```
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
from os.path import exists, join
class BoostRecipe(Recipe):
version = '1.58.0'
url = 'http://downloads.sourceforge.net/project/boost/boost/{version}/boost_1_58_0.tar.gz'
recipe = BoostRecipe()
```
--- END ISSUE ---
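Two checks make this failure easy to reproduce and to guard against: the fetch can be done with a client that follows the SourceForge mirror redirect, and the result can be tested for the gzip magic bytes before handing it to `tar`. The sketch below only illustrates those two checks; it is not the downloader used by python-for-android.

```python
try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen         # Python 2

def fetch(url, dest):
    # urlopen follows 3xx redirects, so the mirror hop ends at the real file.
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    resp = urlopen(req)
    try:
        data = resp.read()
    finally:
        resp.close()
    with open(dest, 'wb') as out:
        out.write(data)

def looks_like_gzip(path):
    # A genuine .tar.gz starts with the gzip magic bytes 1f 8b;
    # an HTML mirror page does not, which matches the tar error above.
    with open(path, 'rb') as fd:
        return fd.read(2) == b'\x1f\x8b'
```

If `looks_like_gzip` returns `False` for the downloaded `boost_1_58_0.tar.gz`, the file is the redirect page rather than the archive.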
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/util.py`
Content:
```
1 import contextlib
2 from os.path import exists
3 from os import getcwd, chdir, makedirs
4 import io
5 import json
6 import shutil
7 import sys
8 from tempfile import mkdtemp
9 try:
10 from urllib.request import FancyURLopener
11 except ImportError:
12 from urllib import FancyURLopener
13
14 from pythonforandroid.logger import (logger, Err_Fore)
15
16 IS_PY3 = sys.version_info[0] >= 3
17
18 if IS_PY3:
19 unistr = str
20 else:
21 unistr = unicode
22
23
24 class ChromeDownloader(FancyURLopener):
25 version = (
26 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
27 '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')
28
29 urlretrieve = ChromeDownloader().retrieve
30
31
32 @contextlib.contextmanager
33 def current_directory(new_dir):
34 cur_dir = getcwd()
35 logger.info(''.join((Err_Fore.CYAN, '-> directory context ', new_dir,
36 Err_Fore.RESET)))
37 chdir(new_dir)
38 yield
39 logger.info(''.join((Err_Fore.CYAN, '<- directory context ', cur_dir,
40 Err_Fore.RESET)))
41 chdir(cur_dir)
42
43
44 @contextlib.contextmanager
45 def temp_directory():
46 temp_dir = mkdtemp()
47 try:
48 logger.debug(''.join((Err_Fore.CYAN, ' + temp directory used ',
49 temp_dir, Err_Fore.RESET)))
50 yield temp_dir
51 finally:
52 shutil.rmtree(temp_dir)
53 logger.debug(''.join((Err_Fore.CYAN, ' - temp directory deleted ',
54 temp_dir, Err_Fore.RESET)))
55
56
57 def ensure_dir(filename):
58 if not exists(filename):
59 makedirs(filename)
60
61
62 class JsonStore(object):
63 """Replacement of shelve using json, needed for support python 2 and 3.
64 """
65
66 def __init__(self, filename):
67 super(JsonStore, self).__init__()
68 self.filename = filename
69 self.data = {}
70 if exists(filename):
71 try:
72 with io.open(filename, encoding='utf-8') as fd:
73 self.data = json.load(fd)
74 except ValueError:
75 print("Unable to read the state.db, content will be replaced.")
76
77 def __getitem__(self, key):
78 return self.data[key]
79
80 def __setitem__(self, key, value):
81 self.data[key] = value
82 self.sync()
83
84 def __delitem__(self, key):
85 del self.data[key]
86 self.sync()
87
88 def __contains__(self, item):
89 return item in self.data
90
91 def get(self, item, default=None):
92 return self.data.get(item, default)
93
94 def keys(self):
95 return self.data.keys()
96
97 def remove_all(self, prefix):
98 for key in self.data.keys()[:]:
99 if not key.startswith(prefix):
100 continue
101 del self.data[key]
102 self.sync()
103
104 def sync(self):
105 # http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531
106 if IS_PY3:
107 with open(self.filename, 'w') as fd:
108 json.dump(self.data, fd, ensure_ascii=False)
109 else:
110 with io.open(self.filename, 'w', encoding='utf-8') as fd:
111 fd.write(unicode(json.dumps(self.data, ensure_ascii=False)))
112
113
114 def which(program, path_env):
115 '''Locate an executable in the system.'''
116 import os
117
118 def is_exe(fpath):
119 return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
120
121 fpath, fname = os.path.split(program)
122 if fpath:
123 if is_exe(program):
124 return program
125 else:
126 for path in path_env.split(os.pathsep):
127 path = path.strip('"')
128 exe_file = os.path.join(path, program)
129 if is_exe(exe_file):
130 return exe_file
131
132 return None
133
134
135 def get_directory(filename):
136 '''If the filename ends with a recognised file extension, return the
137 filename without this extension.'''
138 if filename.endswith('.tar.gz'):
139 return basename(filename[:-7])
140 elif filename.endswith('.tgz'):
141 return basename(filename[:-4])
142 elif filename.endswith('.tar.bz2'):
143 return basename(filename[:-8])
144 elif filename.endswith('.tbz2'):
145 return basename(filename[:-5])
146 elif filename.endswith('.zip'):
147 return basename(filename[:-4])
148 info('Unknown file extension for {}'.format(filename))
149 exit(1)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/util.py b/pythonforandroid/util.py
--- a/pythonforandroid/util.py
+++ b/pythonforandroid/util.py
@@ -21,12 +21,10 @@
unistr = unicode
-class ChromeDownloader(FancyURLopener):
- version = (
- 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
- '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')
+class WgetDownloader(FancyURLopener):
+ version = ('Wget/1.17.1')
-urlretrieve = ChromeDownloader().retrieve
+urlretrieve = WgetDownloader().retrieve
@contextlib.contextmanager
| {"golden_diff": "diff --git a/pythonforandroid/util.py b/pythonforandroid/util.py\n--- a/pythonforandroid/util.py\n+++ b/pythonforandroid/util.py\n@@ -21,12 +21,10 @@\n unistr = unicode\n \n \n-class ChromeDownloader(FancyURLopener):\n- version = (\n- 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '\n- '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')\n+class WgetDownloader(FancyURLopener):\n+ version = ('Wget/1.17.1')\n \n-urlretrieve = ChromeDownloader().retrieve\n+urlretrieve = WgetDownloader().retrieve\n \n \n @contextlib.contextmanager\n", "issue": "HTTP 302 recipe download file\nDownloading from sourceforge autoselecting an mirror results in a HTTP 302 instead of a direct download.\nTrying to build this clean recipe below results in:\n\n```\nRAN: '/bin/tar xzf ~/.local/share/python-for-android/packages/boost/boost_1_58_0.tar.gz'\n STDOUT:\n STDERR:\ngzip: stdin: not in gzip format\n/bin/tar: Child returned status 1\n/bin/tar: Error is not recoverable: exiting now\n```\n\nBecause this .tar.gz file is in reality the 302 HTML (ASCII) page.\n\n```\nfrom pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\nfrom os.path import exists, join\n\nclass BoostRecipe(Recipe):\n version = '1.58.0'\n url = 'http://downloads.sourceforge.net/project/boost/boost/{version}/boost_1_58_0.tar.gz'\n\nrecipe = BoostRecipe()\n```\n\n", "before_files": [{"content": "import contextlib\nfrom os.path import exists\nfrom os import getcwd, chdir, makedirs\nimport io\nimport json\nimport shutil\nimport sys\nfrom tempfile import mkdtemp\ntry:\n from urllib.request import FancyURLopener\nexcept ImportError:\n from urllib import FancyURLopener\n\nfrom pythonforandroid.logger import (logger, Err_Fore)\n\nIS_PY3 = sys.version_info[0] >= 3\n\nif IS_PY3:\n unistr = str\nelse:\n unistr = unicode\n\n\nclass ChromeDownloader(FancyURLopener):\n version = (\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/28.0.1500.71 Safari/537.36')\n\nurlretrieve = ChromeDownloader().retrieve\n\n\[email protected]\ndef current_directory(new_dir):\n cur_dir = getcwd()\n logger.info(''.join((Err_Fore.CYAN, '-> directory context ', new_dir,\n Err_Fore.RESET)))\n chdir(new_dir)\n yield\n logger.info(''.join((Err_Fore.CYAN, '<- directory context ', cur_dir,\n Err_Fore.RESET)))\n chdir(cur_dir)\n\n\[email protected]\ndef temp_directory():\n temp_dir = mkdtemp()\n try:\n logger.debug(''.join((Err_Fore.CYAN, ' + temp directory used ',\n temp_dir, Err_Fore.RESET)))\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)\n logger.debug(''.join((Err_Fore.CYAN, ' - temp directory deleted ',\n temp_dir, Err_Fore.RESET)))\n\n\ndef ensure_dir(filename):\n if not exists(filename):\n makedirs(filename)\n\n\nclass JsonStore(object):\n \"\"\"Replacement of shelve using json, needed for support python 2 and 3.\n \"\"\"\n\n def __init__(self, filename):\n super(JsonStore, self).__init__()\n self.filename = filename\n self.data = {}\n if exists(filename):\n try:\n with io.open(filename, encoding='utf-8') as fd:\n self.data = json.load(fd)\n except ValueError:\n print(\"Unable to read the state.db, content will be replaced.\")\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n self.data[key] = value\n self.sync()\n\n def __delitem__(self, key):\n del self.data[key]\n self.sync()\n\n def __contains__(self, item):\n return item in self.data\n\n def get(self, item, default=None):\n return self.data.get(item, default)\n\n def keys(self):\n return 
self.data.keys()\n\n def remove_all(self, prefix):\n for key in self.data.keys()[:]:\n if not key.startswith(prefix):\n continue\n del self.data[key]\n self.sync()\n\n def sync(self):\n # http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531\n if IS_PY3:\n with open(self.filename, 'w') as fd:\n json.dump(self.data, fd, ensure_ascii=False)\n else:\n with io.open(self.filename, 'w', encoding='utf-8') as fd:\n fd.write(unicode(json.dumps(self.data, ensure_ascii=False)))\n\n\ndef which(program, path_env):\n '''Locate an executable in the system.'''\n import os\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in path_env.split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\n\ndef get_directory(filename):\n '''If the filename ends with a recognised file extension, return the\n filename without this extension.'''\n if filename.endswith('.tar.gz'):\n return basename(filename[:-7])\n elif filename.endswith('.tgz'):\n return basename(filename[:-4])\n elif filename.endswith('.tar.bz2'):\n return basename(filename[:-8])\n elif filename.endswith('.tbz2'):\n return basename(filename[:-5])\n elif filename.endswith('.zip'):\n return basename(filename[:-4])\n info('Unknown file extension for {}'.format(filename))\n exit(1)\n", "path": "pythonforandroid/util.py"}], "after_files": [{"content": "import contextlib\nfrom os.path import exists\nfrom os import getcwd, chdir, makedirs\nimport io\nimport json\nimport shutil\nimport sys\nfrom tempfile import mkdtemp\ntry:\n from urllib.request import FancyURLopener\nexcept ImportError:\n from urllib import FancyURLopener\n\nfrom pythonforandroid.logger import (logger, Err_Fore)\n\nIS_PY3 = sys.version_info[0] >= 3\n\nif IS_PY3:\n unistr = str\nelse:\n unistr = unicode\n\n\nclass WgetDownloader(FancyURLopener):\n version = ('Wget/1.17.1')\n\nurlretrieve = WgetDownloader().retrieve\n\n\[email protected]\ndef current_directory(new_dir):\n cur_dir = getcwd()\n logger.info(''.join((Err_Fore.CYAN, '-> directory context ', new_dir,\n Err_Fore.RESET)))\n chdir(new_dir)\n yield\n logger.info(''.join((Err_Fore.CYAN, '<- directory context ', cur_dir,\n Err_Fore.RESET)))\n chdir(cur_dir)\n\n\[email protected]\ndef temp_directory():\n temp_dir = mkdtemp()\n try:\n logger.debug(''.join((Err_Fore.CYAN, ' + temp directory used ',\n temp_dir, Err_Fore.RESET)))\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)\n logger.debug(''.join((Err_Fore.CYAN, ' - temp directory deleted ',\n temp_dir, Err_Fore.RESET)))\n\n\ndef ensure_dir(filename):\n if not exists(filename):\n makedirs(filename)\n\n\nclass JsonStore(object):\n \"\"\"Replacement of shelve using json, needed for support python 2 and 3.\n \"\"\"\n\n def __init__(self, filename):\n super(JsonStore, self).__init__()\n self.filename = filename\n self.data = {}\n if exists(filename):\n try:\n with io.open(filename, encoding='utf-8') as fd:\n self.data = json.load(fd)\n except ValueError:\n print(\"Unable to read the state.db, content will be replaced.\")\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n self.data[key] = value\n self.sync()\n\n def __delitem__(self, key):\n del self.data[key]\n self.sync()\n\n def __contains__(self, item):\n return item in self.data\n\n def get(self, item, default=None):\n 
return self.data.get(item, default)\n\n def keys(self):\n return self.data.keys()\n\n def remove_all(self, prefix):\n for key in self.data.keys()[:]:\n if not key.startswith(prefix):\n continue\n del self.data[key]\n self.sync()\n\n def sync(self):\n # http://stackoverflow.com/questions/12309269/write-json-data-to-file-in-python/14870531#14870531\n if IS_PY3:\n with open(self.filename, 'w') as fd:\n json.dump(self.data, fd, ensure_ascii=False)\n else:\n with io.open(self.filename, 'w', encoding='utf-8') as fd:\n fd.write(unicode(json.dumps(self.data, ensure_ascii=False)))\n\n\ndef which(program, path_env):\n '''Locate an executable in the system.'''\n import os\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in path_env.split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\n\ndef get_directory(filename):\n '''If the filename ends with a recognised file extension, return the\n filename without this extension.'''\n if filename.endswith('.tar.gz'):\n return basename(filename[:-7])\n elif filename.endswith('.tgz'):\n return basename(filename[:-4])\n elif filename.endswith('.tar.bz2'):\n return basename(filename[:-8])\n elif filename.endswith('.tbz2'):\n return basename(filename[:-5])\n elif filename.endswith('.zip'):\n return basename(filename[:-4])\n info('Unknown file extension for {}'.format(filename))\n exit(1)\n", "path": "pythonforandroid/util.py"}]} | 1,835 | 180 |
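The accepted patch for this record swaps the Chrome-style User-Agent for a wget-style one. A minimal sketch of how that could be verified, assuming (as the patch implies) that SourceForge's mirror redirect returns the file directly to wget-like clients; the URL is the one from the issue and `FancyURLopener` is the same stdlib class the module already relies on:

```python
try:
    from urllib.request import FancyURLopener  # Python 3
except ImportError:
    from urllib import FancyURLopener          # Python 2

class WgetDownloader(FancyURLopener):
    # Assumption from the patch: advertising a wget-style client makes
    # SourceForge skip the HTML mirror-selection page.
    version = 'Wget/1.17.1'

url = ('http://downloads.sourceforge.net/project/boost/boost/'
       '1.58.0/boost_1_58_0.tar.gz')
path, headers = WgetDownloader().retrieve(url, 'boost_1_58_0.tar.gz')
print(headers.get('Content-Type'))  # expect a gzip type, not text/html
```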
gh_patches_debug_26242 | rasdani/github-patches | git_diff | bokeh__bokeh-4929 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checkbox example is not working as expected
The new checkbox example in master (examples/plotting/file/line_on_off.py) is not working as expected: the plotted lines depend on how many checkboxes are ticked, not on which ones. The reason is that the JS code is checking for the existence of the index rather than for the values.
I have a fix for this with a PR coming, and am opening this only as an associated issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/line_on_off.py`
Content:
```
1 """ Example demonstrating turning lines on and off - with JS only
2
3 """
4
5 import numpy as np
6
7 from bokeh.io import output_file, show
8 from bokeh.layouts import row
9 from bokeh.palettes import Viridis3
10 from bokeh.plotting import figure
11 from bokeh.models import CheckboxGroup, CustomJS
12
13 output_file("line_on_off.html", title="line_on_off.py example")
14
15 code = """
16 if (0 in checkbox.active) {
17 l0.visible = true
18 } else {
19 l0.visible = false
20 }
21 if (1 in checkbox.active) {
22 l1.visible = true
23 } else {
24 l1.visible = false
25 }
26 if (2 in checkbox.active) {
27 l2.visible = true
28 } else {
29 l2.visible = false
30 }
31 """
32
33 p = figure()
34 props = dict(line_width=4, line_alpha=0.7)
35 x = np.linspace(0, 4 * np.pi, 100)
36 l0 = p.line(x, np.sin(x), color=Viridis3[0], legend="Line 0", **props)
37 l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
38 l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)
39
40 callback = CustomJS(code=code, args={})
41 checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], callback=callback, width=100)
42 callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)
43
44 layout = row(checkbox, p)
45 show(layout)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/plotting/file/line_on_off.py b/examples/plotting/file/line_on_off.py
--- a/examples/plotting/file/line_on_off.py
+++ b/examples/plotting/file/line_on_off.py
@@ -12,24 +12,6 @@
output_file("line_on_off.html", title="line_on_off.py example")
-code = """
- if (0 in checkbox.active) {
- l0.visible = true
- } else {
- l0.visible = false
- }
- if (1 in checkbox.active) {
- l1.visible = true
- } else {
- l1.visible = false
- }
- if (2 in checkbox.active) {
- l2.visible = true
- } else {
- l2.visible = false
- }
-"""
-
p = figure()
props = dict(line_width=4, line_alpha=0.7)
x = np.linspace(0, 4 * np.pi, 100)
@@ -37,9 +19,14 @@
l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)
-callback = CustomJS(code=code, args={})
-checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"], active=[0, 1, 2], callback=callback, width=100)
-callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)
+checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"],
+ active=[0, 1, 2], width=100)
+checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),
+ lang="coffeescript", code="""
+l0.visible = 0 in checkbox.active;
+l1.visible = 1 in checkbox.active;
+l2.visible = 2 in checkbox.active;
+""")
layout = row(checkbox, p)
show(layout)
| {"golden_diff": "diff --git a/examples/plotting/file/line_on_off.py b/examples/plotting/file/line_on_off.py\n--- a/examples/plotting/file/line_on_off.py\n+++ b/examples/plotting/file/line_on_off.py\n@@ -12,24 +12,6 @@\n \n output_file(\"line_on_off.html\", title=\"line_on_off.py example\")\n \n-code = \"\"\"\n- if (0 in checkbox.active) {\n- l0.visible = true\n- } else {\n- l0.visible = false\n- }\n- if (1 in checkbox.active) {\n- l1.visible = true\n- } else {\n- l1.visible = false\n- }\n- if (2 in checkbox.active) {\n- l2.visible = true\n- } else {\n- l2.visible = false\n- }\n-\"\"\"\n-\n p = figure()\n props = dict(line_width=4, line_alpha=0.7)\n x = np.linspace(0, 4 * np.pi, 100)\n@@ -37,9 +19,14 @@\n l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend=\"Line 1\", **props)\n l2 = p.line(x, np.tan(x), color=Viridis3[2], legend=\"Line 2\", **props)\n \n-callback = CustomJS(code=code, args={})\n-checkbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"], active=[0, 1, 2], callback=callback, width=100)\n-callback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)\n+checkbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"],\n+ active=[0, 1, 2], width=100)\n+checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),\n+ lang=\"coffeescript\", code=\"\"\"\n+l0.visible = 0 in checkbox.active;\n+l1.visible = 1 in checkbox.active;\n+l2.visible = 2 in checkbox.active;\n+\"\"\")\n \n layout = row(checkbox, p)\n show(layout)\n", "issue": "Checkbox example is not working as expected\nThe new checkbox example in master (examples/plotting/file/line_on_off.py) is not working as expected, the plotted lines are depend on how many checkbox are ticked and not on which one. The reason is that the js code is not checking for the values but for the existence of the index.\n\nI have a fix for this with a PR coming, and opening this only as an associated issue.\n\n", "before_files": [{"content": "\"\"\" Example demonstrating turning lines on and off - with JS only\n\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import row\nfrom bokeh.palettes import Viridis3\nfrom bokeh.plotting import figure\nfrom bokeh.models import CheckboxGroup, CustomJS\n\noutput_file(\"line_on_off.html\", title=\"line_on_off.py example\")\n\ncode = \"\"\"\n if (0 in checkbox.active) {\n l0.visible = true\n } else {\n l0.visible = false\n }\n if (1 in checkbox.active) {\n l1.visible = true\n } else {\n l1.visible = false\n }\n if (2 in checkbox.active) {\n l2.visible = true\n } else {\n l2.visible = false\n }\n\"\"\"\n\np = figure()\nprops = dict(line_width=4, line_alpha=0.7)\nx = np.linspace(0, 4 * np.pi, 100)\nl0 = p.line(x, np.sin(x), color=Viridis3[0], legend=\"Line 0\", **props)\nl1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend=\"Line 1\", **props)\nl2 = p.line(x, np.tan(x), color=Viridis3[2], legend=\"Line 2\", **props)\n\ncallback = CustomJS(code=code, args={})\ncheckbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"], active=[0, 1, 2], callback=callback, width=100)\ncallback.args = dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox)\n\nlayout = row(checkbox, p)\nshow(layout)\n", "path": "examples/plotting/file/line_on_off.py"}], "after_files": [{"content": "\"\"\" Example demonstrating turning lines on and off - with JS only\n\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import row\nfrom bokeh.palettes import Viridis3\nfrom bokeh.plotting import figure\nfrom bokeh.models import 
CheckboxGroup, CustomJS\n\noutput_file(\"line_on_off.html\", title=\"line_on_off.py example\")\n\np = figure()\nprops = dict(line_width=4, line_alpha=0.7)\nx = np.linspace(0, 4 * np.pi, 100)\nl0 = p.line(x, np.sin(x), color=Viridis3[0], legend=\"Line 0\", **props)\nl1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend=\"Line 1\", **props)\nl2 = p.line(x, np.tan(x), color=Viridis3[2], legend=\"Line 2\", **props)\n\ncheckbox = CheckboxGroup(labels=[\"Line 0\", \"Line 1\", \"Line 2\"],\n active=[0, 1, 2], width=100)\ncheckbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),\n lang=\"coffeescript\", code=\"\"\"\nl0.visible = 0 in checkbox.active;\nl1.visible = 1 in checkbox.active;\nl2.visible = 2 in checkbox.active;\n\"\"\")\n\nlayout = row(checkbox, p)\nshow(layout)\n", "path": "examples/plotting/file/line_on_off.py"}]} | 828 | 500 |
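The merged fix for this record moves the callback to CoffeeScript, where `x in array` is a value-membership test. An alternative sketch that stays with plain JavaScript would use `indexOf` instead, since JavaScript's `in` operator tests for the presence of an index (so `0 in checkbox.active` is true whenever at least one box is ticked). This is only an illustration of the same idea, not the patch that was applied:

```python
import numpy as np

from bokeh.io import output_file, show
from bokeh.layouts import row
from bokeh.models import CheckboxGroup, CustomJS
from bokeh.palettes import Viridis3
from bokeh.plotting import figure

output_file("line_on_off.html", title="line_on_off.py example")

p = figure()
props = dict(line_width=4, line_alpha=0.7)
x = np.linspace(0, 4 * np.pi, 100)
l0 = p.line(x, np.sin(x), color=Viridis3[0], legend="Line 0", **props)
l1 = p.line(x, 4 * np.cos(x), color=Viridis3[1], legend="Line 1", **props)
l2 = p.line(x, np.tan(x), color=Viridis3[2], legend="Line 2", **props)

checkbox = CheckboxGroup(labels=["Line 0", "Line 1", "Line 2"],
                         active=[0, 1, 2], width=100)
# Plain-JS variant: test membership of the value, not existence of the index.
checkbox.callback = CustomJS(args=dict(l0=l0, l1=l1, l2=l2, checkbox=checkbox),
                             code="""
l0.visible = checkbox.active.indexOf(0) >= 0;
l1.visible = checkbox.active.indexOf(1) >= 0;
l2.visible = checkbox.active.indexOf(2) >= 0;
""")

show(row(checkbox, p))
```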
gh_patches_debug_33573 | rasdani/github-patches | git_diff | pre-commit__pre-commit-966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve platform command line length limits
I have a question related to #510
When I run `pre-commit run`, all hooks work as expected (processing just the changed files), but `pre-commit run -a` divides the list of files into 5 chunks and runs the same hook in parallel executions. This creates unnecessary checks of files which belong to the same directory.
Is it possible to specify that certain hooks should not be executed in parallel but instead receive all filenames in a single invocation?
Alternatively, is there an environment variable or something like that which says that pre-commit is running a hook for all files?
--- END ISSUE ---
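The splitting described here comes from the `partition()` helper in the `pre_commit/xargs.py` file shown below: it caps each command near the platform length limit (hard-coded to 4 KiB before the fix) and aims for roughly `target_concurrency` chunks. A rough, hypothetical illustration of how a large file list turns into several hook invocations:

```python
from pre_commit.xargs import partition

# 2000 made-up file names; a real run uses the repository's tracked files.
files = ['src/pkg_{0}/module_{0}.py'.format(i) for i in range(2000)]

chunks = partition(('flake8',), files, target_concurrency=5)
print('invocations:', len(chunks))
print('files per invocation:', [len(c) - 1 for c in chunks[:5]])
```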
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/xargs.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import unicode_literals
4
5 import concurrent.futures
6 import contextlib
7 import math
8 import sys
9
10 import six
11
12 from pre_commit import parse_shebang
13 from pre_commit.util import cmd_output
14
15
16 # TODO: properly compute max_length value
17 def _get_platform_max_length():
18 # posix minimum
19 return 4 * 1024
20
21
22 def _command_length(*cmd):
23 full_cmd = ' '.join(cmd)
24
25 # win32 uses the amount of characters, more details at:
26 # https://github.com/pre-commit/pre-commit/pull/839
27 if sys.platform == 'win32':
28 # the python2.x apis require bytes, we encode as UTF-8
29 if six.PY2:
30 return len(full_cmd.encode('utf-8'))
31 else:
32 return len(full_cmd.encode('utf-16le')) // 2
33 else:
34 return len(full_cmd.encode(sys.getfilesystemencoding()))
35
36
37 class ArgumentTooLongError(RuntimeError):
38 pass
39
40
41 def partition(cmd, varargs, target_concurrency, _max_length=None):
42 _max_length = _max_length or _get_platform_max_length()
43
44 # Generally, we try to partition evenly into at least `target_concurrency`
45 # partitions, but we don't want a bunch of tiny partitions.
46 max_args = max(4, math.ceil(len(varargs) / target_concurrency))
47
48 cmd = tuple(cmd)
49 ret = []
50
51 ret_cmd = []
52 # Reversed so arguments are in order
53 varargs = list(reversed(varargs))
54
55 total_length = _command_length(*cmd)
56 while varargs:
57 arg = varargs.pop()
58
59 arg_length = _command_length(arg) + 1
60 if (
61 total_length + arg_length <= _max_length and
62 len(ret_cmd) < max_args
63 ):
64 ret_cmd.append(arg)
65 total_length += arg_length
66 elif not ret_cmd:
67 raise ArgumentTooLongError(arg)
68 else:
69 # We've exceeded the length, yield a command
70 ret.append(cmd + tuple(ret_cmd))
71 ret_cmd = []
72 total_length = _command_length(*cmd)
73 varargs.append(arg)
74
75 ret.append(cmd + tuple(ret_cmd))
76
77 return tuple(ret)
78
79
80 @contextlib.contextmanager
81 def _thread_mapper(maxsize):
82 if maxsize == 1:
83 yield map
84 else:
85 with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:
86 yield ex.map
87
88
89 def xargs(cmd, varargs, **kwargs):
90 """A simplified implementation of xargs.
91
92 negate: Make nonzero successful and zero a failure
93 target_concurrency: Target number of partitions to run concurrently
94 """
95 negate = kwargs.pop('negate', False)
96 target_concurrency = kwargs.pop('target_concurrency', 1)
97 retcode = 0
98 stdout = b''
99 stderr = b''
100
101 try:
102 parse_shebang.normexe(cmd[0])
103 except parse_shebang.ExecutableNotFoundError as e:
104 return e.to_output()
105
106 partitions = partition(cmd, varargs, target_concurrency, **kwargs)
107
108 def run_cmd_partition(run_cmd):
109 return cmd_output(*run_cmd, encoding=None, retcode=None)
110
111 threads = min(len(partitions), target_concurrency)
112 with _thread_mapper(threads) as thread_map:
113 results = thread_map(run_cmd_partition, partitions)
114
115 for proc_retcode, proc_out, proc_err in results:
116 # This is *slightly* too clever so I'll explain it.
117 # First the xor boolean table:
118 # T | F |
119 # +-------+
120 # T | F | T |
121 # --+-------+
122 # F | T | F |
123 # --+-------+
124 # When negate is True, it has the effect of flipping the return
125 # code. Otherwise, the returncode is unchanged.
126 retcode |= bool(proc_retcode) ^ negate
127 stdout += proc_out
128 stderr += proc_err
129
130 return retcode, stdout, stderr
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/xargs.py b/pre_commit/xargs.py
--- a/pre_commit/xargs.py
+++ b/pre_commit/xargs.py
@@ -5,6 +5,7 @@
import concurrent.futures
import contextlib
import math
+import os
import sys
import six
@@ -13,10 +14,24 @@
from pre_commit.util import cmd_output
-# TODO: properly compute max_length value
-def _get_platform_max_length():
- # posix minimum
- return 4 * 1024
+def _environ_size(_env=None):
+ environ = _env if _env is not None else getattr(os, 'environb', os.environ)
+ size = 8 * len(environ) # number of pointers in `envp`
+ for k, v in environ.items():
+ size += len(k) + len(v) + 2 # c strings in `envp`
+ return size
+
+
+def _get_platform_max_length(): # pragma: no cover (platform specific)
+ if os.name == 'posix':
+ maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()
+ maximum = min(maximum, 2 ** 17)
+ return maximum
+ elif os.name == 'nt':
+ return 2 ** 15 - 2048 # UNICODE_STRING max - headroom
+ else:
+ # posix minimum
+ return 2 ** 12
def _command_length(*cmd):
@@ -52,7 +67,7 @@
# Reversed so arguments are in order
varargs = list(reversed(varargs))
- total_length = _command_length(*cmd)
+ total_length = _command_length(*cmd) + 1
while varargs:
arg = varargs.pop()
@@ -69,7 +84,7 @@
# We've exceeded the length, yield a command
ret.append(cmd + tuple(ret_cmd))
ret_cmd = []
- total_length = _command_length(*cmd)
+ total_length = _command_length(*cmd) + 1
varargs.append(arg)
ret.append(cmd + tuple(ret_cmd))
@@ -99,7 +114,7 @@
stderr = b''
try:
- parse_shebang.normexe(cmd[0])
+ cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
| {"golden_diff": "diff --git a/pre_commit/xargs.py b/pre_commit/xargs.py\n--- a/pre_commit/xargs.py\n+++ b/pre_commit/xargs.py\n@@ -5,6 +5,7 @@\n import concurrent.futures\n import contextlib\n import math\n+import os\n import sys\n \n import six\n@@ -13,10 +14,24 @@\n from pre_commit.util import cmd_output\n \n \n-# TODO: properly compute max_length value\n-def _get_platform_max_length():\n- # posix minimum\n- return 4 * 1024\n+def _environ_size(_env=None):\n+ environ = _env if _env is not None else getattr(os, 'environb', os.environ)\n+ size = 8 * len(environ) # number of pointers in `envp`\n+ for k, v in environ.items():\n+ size += len(k) + len(v) + 2 # c strings in `envp`\n+ return size\n+\n+\n+def _get_platform_max_length(): # pragma: no cover (platform specific)\n+ if os.name == 'posix':\n+ maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()\n+ maximum = min(maximum, 2 ** 17)\n+ return maximum\n+ elif os.name == 'nt':\n+ return 2 ** 15 - 2048 # UNICODE_STRING max - headroom\n+ else:\n+ # posix minimum\n+ return 2 ** 12\n \n \n def _command_length(*cmd):\n@@ -52,7 +67,7 @@\n # Reversed so arguments are in order\n varargs = list(reversed(varargs))\n \n- total_length = _command_length(*cmd)\n+ total_length = _command_length(*cmd) + 1\n while varargs:\n arg = varargs.pop()\n \n@@ -69,7 +84,7 @@\n # We've exceeded the length, yield a command\n ret.append(cmd + tuple(ret_cmd))\n ret_cmd = []\n- total_length = _command_length(*cmd)\n+ total_length = _command_length(*cmd) + 1\n varargs.append(arg)\n \n ret.append(cmd + tuple(ret_cmd))\n@@ -99,7 +114,7 @@\n stderr = b''\n \n try:\n- parse_shebang.normexe(cmd[0])\n+ cmd = parse_shebang.normalize_cmd(cmd)\n except parse_shebang.ExecutableNotFoundError as e:\n return e.to_output()\n", "issue": "Improve platform command line length limits\nI have a question related to #510 \r\n\r\nWhen I run `pre-commit run` all hooks work as expected (processing just changed files), but `pre-commit run -a` divides a list of files into 5 bulks and run the same hook in parallel executions. 
This creates unnecessary checks of files which belong to the same directory.\r\n\r\nIs it possible to control that certain hooks should not be executed in parallel but rather pass all filenames to it?\r\n\r\nAlternatively, is there an environment variable or something like that which says that pre-commit is running a hook for all files?\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport concurrent.futures\nimport contextlib\nimport math\nimport sys\n\nimport six\n\nfrom pre_commit import parse_shebang\nfrom pre_commit.util import cmd_output\n\n\n# TODO: properly compute max_length value\ndef _get_platform_max_length():\n # posix minimum\n return 4 * 1024\n\n\ndef _command_length(*cmd):\n full_cmd = ' '.join(cmd)\n\n # win32 uses the amount of characters, more details at:\n # https://github.com/pre-commit/pre-commit/pull/839\n if sys.platform == 'win32':\n # the python2.x apis require bytes, we encode as UTF-8\n if six.PY2:\n return len(full_cmd.encode('utf-8'))\n else:\n return len(full_cmd.encode('utf-16le')) // 2\n else:\n return len(full_cmd.encode(sys.getfilesystemencoding()))\n\n\nclass ArgumentTooLongError(RuntimeError):\n pass\n\n\ndef partition(cmd, varargs, target_concurrency, _max_length=None):\n _max_length = _max_length or _get_platform_max_length()\n\n # Generally, we try to partition evenly into at least `target_concurrency`\n # partitions, but we don't want a bunch of tiny partitions.\n max_args = max(4, math.ceil(len(varargs) / target_concurrency))\n\n cmd = tuple(cmd)\n ret = []\n\n ret_cmd = []\n # Reversed so arguments are in order\n varargs = list(reversed(varargs))\n\n total_length = _command_length(*cmd)\n while varargs:\n arg = varargs.pop()\n\n arg_length = _command_length(arg) + 1\n if (\n total_length + arg_length <= _max_length and\n len(ret_cmd) < max_args\n ):\n ret_cmd.append(arg)\n total_length += arg_length\n elif not ret_cmd:\n raise ArgumentTooLongError(arg)\n else:\n # We've exceeded the length, yield a command\n ret.append(cmd + tuple(ret_cmd))\n ret_cmd = []\n total_length = _command_length(*cmd)\n varargs.append(arg)\n\n ret.append(cmd + tuple(ret_cmd))\n\n return tuple(ret)\n\n\[email protected]\ndef _thread_mapper(maxsize):\n if maxsize == 1:\n yield map\n else:\n with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:\n yield ex.map\n\n\ndef xargs(cmd, varargs, **kwargs):\n \"\"\"A simplified implementation of xargs.\n\n negate: Make nonzero successful and zero a failure\n target_concurrency: Target number of partitions to run concurrently\n \"\"\"\n negate = kwargs.pop('negate', False)\n target_concurrency = kwargs.pop('target_concurrency', 1)\n retcode = 0\n stdout = b''\n stderr = b''\n\n try:\n parse_shebang.normexe(cmd[0])\n except parse_shebang.ExecutableNotFoundError as e:\n return e.to_output()\n\n partitions = partition(cmd, varargs, target_concurrency, **kwargs)\n\n def run_cmd_partition(run_cmd):\n return cmd_output(*run_cmd, encoding=None, retcode=None)\n\n threads = min(len(partitions), target_concurrency)\n with _thread_mapper(threads) as thread_map:\n results = thread_map(run_cmd_partition, partitions)\n\n for proc_retcode, proc_out, proc_err in results:\n # This is *slightly* too clever so I'll explain it.\n # First the xor boolean table:\n # T | F |\n # +-------+\n # T | F | T |\n # --+-------+\n # F | T | F |\n # --+-------+\n # When negate is True, it has the effect of flipping the return\n # code. 
Otherwise, the returncode is unchanged.\n retcode |= bool(proc_retcode) ^ negate\n stdout += proc_out\n stderr += proc_err\n\n return retcode, stdout, stderr\n", "path": "pre_commit/xargs.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport concurrent.futures\nimport contextlib\nimport math\nimport os\nimport sys\n\nimport six\n\nfrom pre_commit import parse_shebang\nfrom pre_commit.util import cmd_output\n\n\ndef _environ_size(_env=None):\n environ = _env if _env is not None else getattr(os, 'environb', os.environ)\n size = 8 * len(environ) # number of pointers in `envp`\n for k, v in environ.items():\n size += len(k) + len(v) + 2 # c strings in `envp`\n return size\n\n\ndef _get_platform_max_length(): # pragma: no cover (platform specific)\n if os.name == 'posix':\n maximum = os.sysconf(str('SC_ARG_MAX')) - 2048 - _environ_size()\n maximum = min(maximum, 2 ** 17)\n return maximum\n elif os.name == 'nt':\n return 2 ** 15 - 2048 # UNICODE_STRING max - headroom\n else:\n # posix minimum\n return 2 ** 12\n\n\ndef _command_length(*cmd):\n full_cmd = ' '.join(cmd)\n\n # win32 uses the amount of characters, more details at:\n # https://github.com/pre-commit/pre-commit/pull/839\n if sys.platform == 'win32':\n # the python2.x apis require bytes, we encode as UTF-8\n if six.PY2:\n return len(full_cmd.encode('utf-8'))\n else:\n return len(full_cmd.encode('utf-16le')) // 2\n else:\n return len(full_cmd.encode(sys.getfilesystemencoding()))\n\n\nclass ArgumentTooLongError(RuntimeError):\n pass\n\n\ndef partition(cmd, varargs, target_concurrency, _max_length=None):\n _max_length = _max_length or _get_platform_max_length()\n\n # Generally, we try to partition evenly into at least `target_concurrency`\n # partitions, but we don't want a bunch of tiny partitions.\n max_args = max(4, math.ceil(len(varargs) / target_concurrency))\n\n cmd = tuple(cmd)\n ret = []\n\n ret_cmd = []\n # Reversed so arguments are in order\n varargs = list(reversed(varargs))\n\n total_length = _command_length(*cmd) + 1\n while varargs:\n arg = varargs.pop()\n\n arg_length = _command_length(arg) + 1\n if (\n total_length + arg_length <= _max_length and\n len(ret_cmd) < max_args\n ):\n ret_cmd.append(arg)\n total_length += arg_length\n elif not ret_cmd:\n raise ArgumentTooLongError(arg)\n else:\n # We've exceeded the length, yield a command\n ret.append(cmd + tuple(ret_cmd))\n ret_cmd = []\n total_length = _command_length(*cmd) + 1\n varargs.append(arg)\n\n ret.append(cmd + tuple(ret_cmd))\n\n return tuple(ret)\n\n\[email protected]\ndef _thread_mapper(maxsize):\n if maxsize == 1:\n yield map\n else:\n with concurrent.futures.ThreadPoolExecutor(maxsize) as ex:\n yield ex.map\n\n\ndef xargs(cmd, varargs, **kwargs):\n \"\"\"A simplified implementation of xargs.\n\n negate: Make nonzero successful and zero a failure\n target_concurrency: Target number of partitions to run concurrently\n \"\"\"\n negate = kwargs.pop('negate', False)\n target_concurrency = kwargs.pop('target_concurrency', 1)\n retcode = 0\n stdout = b''\n stderr = b''\n\n try:\n cmd = parse_shebang.normalize_cmd(cmd)\n except parse_shebang.ExecutableNotFoundError as e:\n return e.to_output()\n\n partitions = partition(cmd, varargs, target_concurrency, **kwargs)\n\n def run_cmd_partition(run_cmd):\n return cmd_output(*run_cmd, encoding=None, retcode=None)\n\n threads = min(len(partitions), target_concurrency)\n with _thread_mapper(threads) as thread_map:\n results = 
thread_map(run_cmd_partition, partitions)\n\n for proc_retcode, proc_out, proc_err in results:\n # This is *slightly* too clever so I'll explain it.\n # First the xor boolean table:\n # T | F |\n # +-------+\n # T | F | T |\n # --+-------+\n # F | T | F |\n # --+-------+\n # When negate is True, it has the effect of flipping the return\n # code. Otherwise, the returncode is unchanged.\n retcode |= bool(proc_retcode) ^ negate\n stdout += proc_out\n stderr += proc_err\n\n return retcode, stdout, stderr\n", "path": "pre_commit/xargs.py"}]} | 1,573 | 566 |
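A back-of-the-envelope check of the limit the patch above computes on POSIX: `SC_ARG_MAX`, minus 2 KiB of headroom, minus the size of the environment block, capped at 128 KiB. The numbers are machine-dependent; this only mirrors the patched `_environ_size`/`_get_platform_max_length` logic:

```python
import os

def environ_size(env=None):
    environ = env if env is not None else getattr(os, 'environb', os.environ)
    size = 8 * len(environ)             # one pointer per entry in envp
    for k, v in environ.items():
        size += len(k) + len(v) + 2     # "KEY=VALUE\0" C strings
    return size

if os.name == 'posix':
    arg_max = os.sysconf('SC_ARG_MAX')
    usable = min(arg_max - 2048 - environ_size(), 2 ** 17)
    print('ARG_MAX:', arg_max, 'usable command length:', usable)
else:
    print('non-POSIX; the patch uses 2 ** 15 - 2048 on Windows (os.name == "nt")')
```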
gh_patches_debug_39362 | rasdani/github-patches | git_diff | pytorch__ignite-2490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure that all core and contrib metrics can work in DDP
## 🚀 Feature
Tracking issue. All core metrics are working in DDP, except special case #1283.
The idea is to check whether all core and contrib metrics work in DDP and open new issues if we need to work on some of them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/epoch_metric.py`
Content:
```
1 import warnings
2 from typing import Callable, cast, List, Tuple, Union
3
4 import torch
5
6 import ignite.distributed as idist
7 from ignite.exceptions import NotComputableError
8 from ignite.metrics.metric import Metric, reinit__is_reduced
9
10 __all__ = ["EpochMetric"]
11
12
13 class EpochMetric(Metric):
14 """Class for metrics that should be computed on the entire output history of a model.
15 Model's output and targets are restricted to be of shape ``(batch_size, n_targets)``. Output
16 datatype should be `float32`. Target datatype should be `long` for classification and `float` for regression.
17
18 .. warning::
19
20 Current implementation stores all input data (output and target) in as tensors before computing a metric.
21 This can potentially lead to a memory error if the input data is larger than available RAM.
22
23 In distributed configuration, all stored data (output and target) is mutually collected across all processes
24 using all gather collective operation. This can potentially lead to a memory error.
25 Compute method executes ``compute_fn`` on zero rank process only and final result is broadcasted to
26 all processes.
27
28 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
29
30 Args:
31 compute_fn: a callable with the signature (`torch.tensor`, `torch.tensor`) takes as the input
32 `predictions` and `targets` and returns a scalar. Input tensors will be on specified ``device``
33 (see arg below).
34 output_transform: a callable that is used to transform the
35 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
36 form expected by the metric. This can be useful if, for example, you have a multi-output model and
37 you want to compute the metric with respect to one of the outputs.
38 check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no
39 issues. If issues exist, user is warned that there might be an issue with the ``compute_fn``.
40 Default, True.
41 device: optional device specification for internal storage.
42
43 Example:
44
45 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
46
47 .. include:: defaults.rst
48 :start-after: :orphan:
49
50 .. testcode::
51
52 def mse_fn(y_preds, y_targets):
53 return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()
54
55 metric = EpochMetric(mse_fn)
56 metric.attach(default_evaluator, "mse")
57 y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
58 y_pred = y_true * 0.75
59 state = default_evaluator.run([[y_pred, y_true]])
60 print(state.metrics["mse"])
61
62 .. testoutput::
63
64 0.5729...
65
66 Warnings:
67 EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed.
68 To disable the warning, set ``check_compute_fn=False``.
69 """
70
71 def __init__(
72 self,
73 compute_fn: Callable,
74 output_transform: Callable = lambda x: x,
75 check_compute_fn: bool = True,
76 device: Union[str, torch.device] = torch.device("cpu"),
77 ) -> None:
78
79 if not callable(compute_fn):
80 raise TypeError("Argument compute_fn should be callable.")
81
82 self.compute_fn = compute_fn
83 self._check_compute_fn = check_compute_fn
84
85 super(EpochMetric, self).__init__(output_transform=output_transform, device=device)
86
87 @reinit__is_reduced
88 def reset(self) -> None:
89 self._predictions = [] # type: List[torch.Tensor]
90 self._targets = [] # type: List[torch.Tensor]
91
92 def _check_shape(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
93 y_pred, y = output
94 if y_pred.ndimension() not in (1, 2):
95 raise ValueError("Predictions should be of shape (batch_size, n_targets) or (batch_size, ).")
96
97 if y.ndimension() not in (1, 2):
98 raise ValueError("Targets should be of shape (batch_size, n_targets) or (batch_size, ).")
99
100 def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
101 y_pred, y = output
102 if len(self._predictions) < 1:
103 return
104 dtype_preds = self._predictions[-1].dtype
105 if dtype_preds != y_pred.dtype:
106 raise ValueError(
107 f"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}"
108 )
109
110 dtype_targets = self._targets[-1].dtype
111 if dtype_targets != y.dtype:
112 raise ValueError(f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}")
113
114 @reinit__is_reduced
115 def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
116 self._check_shape(output)
117 y_pred, y = output[0].detach(), output[1].detach()
118
119 if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
120 y_pred = y_pred.squeeze(dim=-1)
121
122 if y.ndimension() == 2 and y.shape[1] == 1:
123 y = y.squeeze(dim=-1)
124
125 y_pred = y_pred.clone().to(self._device)
126 y = y.clone().to(self._device)
127
128 self._check_type((y_pred, y))
129 self._predictions.append(y_pred)
130 self._targets.append(y)
131
132 # Check once the signature and execution of compute_fn
133 if len(self._predictions) == 1 and self._check_compute_fn:
134 try:
135 self.compute_fn(self._predictions[0], self._targets[0])
136 except Exception as e:
137 warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning)
138
139 def compute(self) -> float:
140 if len(self._predictions) < 1 or len(self._targets) < 1:
141 raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
142
143 _prediction_tensor = torch.cat(self._predictions, dim=0)
144 _target_tensor = torch.cat(self._targets, dim=0)
145
146 ws = idist.get_world_size()
147
148 if ws > 1 and not self._is_reduced:
149 # All gather across all processes
150 _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
151 _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
152 self._is_reduced = True
153
154 result = 0.0
155 if idist.get_rank() == 0:
156 # Run compute_fn on zero rank only
157 result = self.compute_fn(_prediction_tensor, _target_tensor)
158
159 if ws > 1:
160 # broadcast result to all processes
161 result = cast(float, idist.broadcast(result, src=0))
162
163 return result
164
165
166 class EpochMetricWarning(UserWarning):
167 pass
168
```
Path: `ignite/contrib/metrics/precision_recall_curve.py`
Content:
```
1 from typing import Any, Callable, Tuple
2
3 import torch
4
5 from ignite.metrics import EpochMetric
6
7
8 def precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:
9 try:
10 from sklearn.metrics import precision_recall_curve
11 except ImportError:
12 raise RuntimeError("This contrib module requires sklearn to be installed.")
13
14 y_true = y_targets.numpy()
15 y_pred = y_preds.numpy()
16 return precision_recall_curve(y_true, y_pred)
17
18
19 class PrecisionRecallCurve(EpochMetric):
20 """Compute precision-recall pairs for different probability thresholds for binary classification task
21 by accumulating predictions and the ground-truth during an epoch and applying
22 `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/
23 sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .
24
25 Args:
26 output_transform: a callable that is used to transform the
27 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
28 form expected by the metric. This can be useful if, for example, you have a multi-output model and
29 you want to compute the metric with respect to one of the outputs.
30 check_compute_fn: Default False. If True, `precision_recall_curve
31 <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
32 #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
33 no issues. User will be warned in case there are any issues computing the function.
34
35 Note:
36 PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
37 or confidence values. To apply an activation to y_pred, use output_transform as shown below:
38
39 .. code-block:: python
40
41 def sigmoid_output_transform(output):
42 y_pred, y = output
43 y_pred = torch.sigmoid(y_pred)
44 return y_pred, y
45 avg_precision = PrecisionRecallCurve(sigmoid_output_transform)
46
47 Examples:
48
49 .. include:: defaults.rst
50 :start-after: :orphan:
51
52 .. testcode::
53
54 y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
55 y_true = torch.tensor([0, 0, 1, 1])
56 prec_recall_curve = PrecisionRecallCurve()
57 prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
58 state = default_evaluator.run([[y_pred, y_true]])
59
60 print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
61 print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
62 print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
63
64 .. testoutput::
65
66 Precision [1.0, 1.0, 1.0]
67 Recall [1.0, 0.5, 0.0]
68 Thresholds [0.7109, 0.9997]
69
70 """
71
72 def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
73 super(PrecisionRecallCurve, self).__init__(
74 precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py
--- a/ignite/contrib/metrics/precision_recall_curve.py
+++ b/ignite/contrib/metrics/precision_recall_curve.py
@@ -1,7 +1,9 @@
-from typing import Any, Callable, Tuple
+from typing import Any, Callable, cast, Tuple, Union
import torch
+import ignite.distributed as idist
+from ignite.exceptions import NotComputableError
from ignite.metrics import EpochMetric
@@ -69,7 +71,48 @@
"""
- def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
+ def __init__(
+ self,
+ output_transform: Callable = lambda x: x,
+ check_compute_fn: bool = False,
+ device: Union[str, torch.device] = torch.device("cpu"),
+ ) -> None:
super(PrecisionRecallCurve, self).__init__(
- precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn
+ precision_recall_curve_compute_fn,
+ output_transform=output_transform,
+ check_compute_fn=check_compute_fn,
+ device=device,
)
+
+ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ if len(self._predictions) < 1 or len(self._targets) < 1:
+ raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
+
+ _prediction_tensor = torch.cat(self._predictions, dim=0)
+ _target_tensor = torch.cat(self._targets, dim=0)
+
+ ws = idist.get_world_size()
+ if ws > 1 and not self._is_reduced:
+ # All gather across all processes
+ _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
+ _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
+ self._is_reduced = True
+
+ if idist.get_rank() == 0:
+ # Run compute_fn on zero rank only
+ precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)
+ precision = torch.Tensor(precision)
+ recall = torch.Tensor(recall)
+ # thresholds can have negative strides, not compatible with torch tensors
+ # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2
+ thresholds = torch.Tensor(thresholds.copy())
+ else:
+ precision, recall, thresholds = None, None, None
+
+ if ws > 1:
+ # broadcast result to all processes
+ precision = idist.broadcast(precision, src=0, safe_mode=True)
+ recall = idist.broadcast(recall, src=0, safe_mode=True)
+ thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)
+
+ return precision, recall, thresholds
diff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py
--- a/ignite/metrics/epoch_metric.py
+++ b/ignite/metrics/epoch_metric.py
@@ -1,5 +1,5 @@
import warnings
-from typing import Callable, cast, List, Tuple, Union
+from typing import Any, Callable, cast, List, Tuple, Union
import torch
@@ -136,7 +136,7 @@
except Exception as e:
warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning)
- def compute(self) -> float:
+ def compute(self) -> Any:
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
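The new `PrecisionRecallCurve.compute` above follows the same collective pattern `EpochMetric.compute` already uses: gather the per-process tensors, run the sklearn-backed computation on rank 0 only, then broadcast the result to every process. A condensed, hypothetical sketch of that flow with the same `ignite.distributed` helpers:

```python
import ignite.distributed as idist

def compute_on_rank_zero(local_preds, local_targets, compute_fn):
    # Gather what every process accumulated during the epoch.
    preds = idist.all_gather(local_preds)
    targets = idist.all_gather(local_targets)

    # Only rank 0 runs the (possibly sklearn-backed) computation.
    result = compute_fn(preds, targets) if idist.get_rank() == 0 else None

    # safe_mode=True lets the other ranks pass None and still receive the value.
    return idist.broadcast(result, src=0, safe_mode=True)
```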
| {"golden_diff": "diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py\n--- a/ignite/contrib/metrics/precision_recall_curve.py\n+++ b/ignite/contrib/metrics/precision_recall_curve.py\n@@ -1,7 +1,9 @@\n-from typing import Any, Callable, Tuple\n+from typing import Any, Callable, cast, Tuple, Union\n \n import torch\n \n+import ignite.distributed as idist\n+from ignite.exceptions import NotComputableError\n from ignite.metrics import EpochMetric\n \n \n@@ -69,7 +71,48 @@\n \n \"\"\"\n \n- def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n+ def __init__(\n+ self,\n+ output_transform: Callable = lambda x: x,\n+ check_compute_fn: bool = False,\n+ device: Union[str, torch.device] = torch.device(\"cpu\"),\n+ ) -> None:\n super(PrecisionRecallCurve, self).__init__(\n- precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n+ precision_recall_curve_compute_fn,\n+ output_transform=output_transform,\n+ check_compute_fn=check_compute_fn,\n+ device=device,\n )\n+\n+ def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n+ if len(self._predictions) < 1 or len(self._targets) < 1:\n+ raise NotComputableError(\"EpochMetric must have at least one example before it can be computed.\")\n+\n+ _prediction_tensor = torch.cat(self._predictions, dim=0)\n+ _target_tensor = torch.cat(self._targets, dim=0)\n+\n+ ws = idist.get_world_size()\n+ if ws > 1 and not self._is_reduced:\n+ # All gather across all processes\n+ _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n+ _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n+ self._is_reduced = True\n+\n+ if idist.get_rank() == 0:\n+ # Run compute_fn on zero rank only\n+ precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)\n+ precision = torch.Tensor(precision)\n+ recall = torch.Tensor(recall)\n+ # thresholds can have negative strides, not compatible with torch tensors\n+ # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2\n+ thresholds = torch.Tensor(thresholds.copy())\n+ else:\n+ precision, recall, thresholds = None, None, None\n+\n+ if ws > 1:\n+ # broadcast result to all processes\n+ precision = idist.broadcast(precision, src=0, safe_mode=True)\n+ recall = idist.broadcast(recall, src=0, safe_mode=True)\n+ thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)\n+\n+ return precision, recall, thresholds\ndiff --git a/ignite/metrics/epoch_metric.py b/ignite/metrics/epoch_metric.py\n--- a/ignite/metrics/epoch_metric.py\n+++ b/ignite/metrics/epoch_metric.py\n@@ -1,5 +1,5 @@\n import warnings\n-from typing import Callable, cast, List, Tuple, Union\n+from typing import Any, Callable, cast, List, Tuple, Union\n \n import torch\n \n@@ -136,7 +136,7 @@\n except Exception as e:\n warnings.warn(f\"Probably, there can be a problem with `compute_fn`:\\n {e}.\", EpochMetricWarning)\n \n- def compute(self) -> float:\n+ def compute(self) -> Any:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"EpochMetric must have at least one example before it can be computed.\")\n", "issue": "Ensure that all core and contrib metrics can work in DDP\n## \ud83d\ude80 Feature\r\n\r\nTracking issue. 
All core metrics are working in DDP, except special case #1283.\r\nThe idea is to check all core and contrib metrics if work in DDP and open new issues if we need to work on some metric.\r\n\n", "before_files": [{"content": "import warnings\nfrom typing import Callable, cast, List, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced\n\n__all__ = [\"EpochMetric\"]\n\n\nclass EpochMetric(Metric):\n \"\"\"Class for metrics that should be computed on the entire output history of a model.\n Model's output and targets are restricted to be of shape ``(batch_size, n_targets)``. Output\n datatype should be `float32`. Target datatype should be `long` for classification and `float` for regression.\n\n .. warning::\n\n Current implementation stores all input data (output and target) in as tensors before computing a metric.\n This can potentially lead to a memory error if the input data is larger than available RAM.\n\n In distributed configuration, all stored data (output and target) is mutually collected across all processes\n using all gather collective operation. This can potentially lead to a memory error.\n Compute method executes ``compute_fn`` on zero rank process only and final result is broadcasted to\n all processes.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n\n Args:\n compute_fn: a callable with the signature (`torch.tensor`, `torch.tensor`) takes as the input\n `predictions` and `targets` and returns a scalar. Input tensors will be on specified ``device``\n (see arg below).\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no\n issues. If issues exist, user is warned that there might be an issue with the ``compute_fn``.\n Default, True.\n device: optional device specification for internal storage.\n\n Example:\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n def mse_fn(y_preds, y_targets):\n return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()\n\n metric = EpochMetric(mse_fn)\n metric.attach(default_evaluator, \"mse\")\n y_true = torch.Tensor([0, 1, 2, 3, 4, 5])\n y_pred = y_true * 0.75\n state = default_evaluator.run([[y_pred, y_true]])\n print(state.metrics[\"mse\"])\n\n .. 
testoutput::\n\n 0.5729...\n\n Warnings:\n EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed.\n To disable the warning, set ``check_compute_fn=False``.\n \"\"\"\n\n def __init__(\n self,\n compute_fn: Callable,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = True,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if not callable(compute_fn):\n raise TypeError(\"Argument compute_fn should be callable.\")\n\n self.compute_fn = compute_fn\n self._check_compute_fn = check_compute_fn\n\n super(EpochMetric, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._predictions = [] # type: List[torch.Tensor]\n self._targets = [] # type: List[torch.Tensor]\n\n def _check_shape(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.ndimension() not in (1, 2):\n raise ValueError(\"Predictions should be of shape (batch_size, n_targets) or (batch_size, ).\")\n\n if y.ndimension() not in (1, 2):\n raise ValueError(\"Targets should be of shape (batch_size, n_targets) or (batch_size, ).\")\n\n def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output\n if len(self._predictions) < 1:\n return\n dtype_preds = self._predictions[-1].dtype\n if dtype_preds != y_pred.dtype:\n raise ValueError(\n f\"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}\"\n )\n\n dtype_targets = self._targets[-1].dtype\n if dtype_targets != y.dtype:\n raise ValueError(f\"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}\")\n\n @reinit__is_reduced\n def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n self._check_shape(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:\n y_pred = y_pred.squeeze(dim=-1)\n\n if y.ndimension() == 2 and y.shape[1] == 1:\n y = y.squeeze(dim=-1)\n\n y_pred = y_pred.clone().to(self._device)\n y = y.clone().to(self._device)\n\n self._check_type((y_pred, y))\n self._predictions.append(y_pred)\n self._targets.append(y)\n\n # Check once the signature and execution of compute_fn\n if len(self._predictions) == 1 and self._check_compute_fn:\n try:\n self.compute_fn(self._predictions[0], self._targets[0])\n except Exception as e:\n warnings.warn(f\"Probably, there can be a problem with `compute_fn`:\\n {e}.\", EpochMetricWarning)\n\n def compute(self) -> float:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"EpochMetric must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n result = 0.0\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n result = self.compute_fn(_prediction_tensor, _target_tensor)\n\n if ws > 1:\n # broadcast result to all processes\n result = cast(float, idist.broadcast(result, src=0))\n\n return result\n\n\nclass EpochMetricWarning(UserWarning):\n pass\n", "path": "ignite/metrics/epoch_metric.py"}, {"content": "from typing import Any, 
Callable, Tuple\n\nimport torch\n\nfrom ignite.metrics import EpochMetric\n\n\ndef precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:\n try:\n from sklearn.metrics import precision_recall_curve\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n y_pred = y_preds.numpy()\n return precision_recall_curve(y_true, y_pred)\n\n\nclass PrecisionRecallCurve(EpochMetric):\n \"\"\"Compute precision-recall pairs for different probability thresholds for binary classification task\n by accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `precision_recall_curve\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html\n #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n Note:\n PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates\n or confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def sigmoid_output_transform(output):\n y_pred, y = output\n y_pred = torch.sigmoid(y_pred)\n return y_pred, y\n avg_precision = PrecisionRecallCurve(sigmoid_output_transform)\n\n Examples:\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\n y_true = torch.tensor([0, 0, 1, 1])\n prec_recall_curve = PrecisionRecallCurve()\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\n state = default_evaluator.run([[y_pred, y_true]])\n\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\n\n .. 
testoutput::\n\n Precision [1.0, 1.0, 1.0]\n Recall [1.0, 0.5, 0.0]\n Thresholds [0.7109, 0.9997]\n\n \"\"\"\n\n def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:\n super(PrecisionRecallCurve, self).__init__(\n precision_recall_curve_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn\n )\n", "path": "ignite/contrib/metrics/precision_recall_curve.py"}], "after_files": [{"content": "import warnings\nfrom typing import Any, Callable, cast, List, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced\n\n__all__ = [\"EpochMetric\"]\n\n\nclass EpochMetric(Metric):\n \"\"\"Class for metrics that should be computed on the entire output history of a model.\n Model's output and targets are restricted to be of shape ``(batch_size, n_targets)``. Output\n datatype should be `float32`. Target datatype should be `long` for classification and `float` for regression.\n\n .. warning::\n\n Current implementation stores all input data (output and target) in as tensors before computing a metric.\n This can potentially lead to a memory error if the input data is larger than available RAM.\n\n In distributed configuration, all stored data (output and target) is mutually collected across all processes\n using all gather collective operation. This can potentially lead to a memory error.\n Compute method executes ``compute_fn`` on zero rank process only and final result is broadcasted to\n all processes.\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n\n Args:\n compute_fn: a callable with the signature (`torch.tensor`, `torch.tensor`) takes as the input\n `predictions` and `targets` and returns a scalar. Input tensors will be on specified ``device``\n (see arg below).\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no\n issues. If issues exist, user is warned that there might be an issue with the ``compute_fn``.\n Default, True.\n device: optional device specification for internal storage.\n\n Example:\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n def mse_fn(y_preds, y_targets):\n return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()\n\n metric = EpochMetric(mse_fn)\n metric.attach(default_evaluator, \"mse\")\n y_true = torch.Tensor([0, 1, 2, 3, 4, 5])\n y_pred = y_true * 0.75\n state = default_evaluator.run([[y_pred, y_true]])\n print(state.metrics[\"mse\"])\n\n .. 
testoutput::\n\n 0.5729...\n\n Warnings:\n EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed.\n To disable the warning, set ``check_compute_fn=False``.\n \"\"\"\n\n def __init__(\n self,\n compute_fn: Callable,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = True,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n\n if not callable(compute_fn):\n raise TypeError(\"Argument compute_fn should be callable.\")\n\n self.compute_fn = compute_fn\n self._check_compute_fn = check_compute_fn\n\n super(EpochMetric, self).__init__(output_transform=output_transform, device=device)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._predictions = [] # type: List[torch.Tensor]\n self._targets = [] # type: List[torch.Tensor]\n\n def _check_shape(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output\n if y_pred.ndimension() not in (1, 2):\n raise ValueError(\"Predictions should be of shape (batch_size, n_targets) or (batch_size, ).\")\n\n if y.ndimension() not in (1, 2):\n raise ValueError(\"Targets should be of shape (batch_size, n_targets) or (batch_size, ).\")\n\n def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n y_pred, y = output\n if len(self._predictions) < 1:\n return\n dtype_preds = self._predictions[-1].dtype\n if dtype_preds != y_pred.dtype:\n raise ValueError(\n f\"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}\"\n )\n\n dtype_targets = self._targets[-1].dtype\n if dtype_targets != y.dtype:\n raise ValueError(f\"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}\")\n\n @reinit__is_reduced\n def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:\n self._check_shape(output)\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:\n y_pred = y_pred.squeeze(dim=-1)\n\n if y.ndimension() == 2 and y.shape[1] == 1:\n y = y.squeeze(dim=-1)\n\n y_pred = y_pred.clone().to(self._device)\n y = y.clone().to(self._device)\n\n self._check_type((y_pred, y))\n self._predictions.append(y_pred)\n self._targets.append(y)\n\n # Check once the signature and execution of compute_fn\n if len(self._predictions) == 1 and self._check_compute_fn:\n try:\n self.compute_fn(self._predictions[0], self._targets[0])\n except Exception as e:\n warnings.warn(f\"Probably, there can be a problem with `compute_fn`:\\n {e}.\", EpochMetricWarning)\n\n def compute(self) -> Any:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"EpochMetric must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n result = 0.0\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n result = self.compute_fn(_prediction_tensor, _target_tensor)\n\n if ws > 1:\n # broadcast result to all processes\n result = cast(float, idist.broadcast(result, src=0))\n\n return result\n\n\nclass EpochMetricWarning(UserWarning):\n pass\n", "path": "ignite/metrics/epoch_metric.py"}, {"content": "from typing import Any, Callable, 
cast, Tuple, Union\n\nimport torch\n\nimport ignite.distributed as idist\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics import EpochMetric\n\n\ndef precision_recall_curve_compute_fn(y_preds: torch.Tensor, y_targets: torch.Tensor) -> Tuple[Any, Any, Any]:\n try:\n from sklearn.metrics import precision_recall_curve\n except ImportError:\n raise RuntimeError(\"This contrib module requires sklearn to be installed.\")\n\n y_true = y_targets.numpy()\n y_pred = y_preds.numpy()\n return precision_recall_curve(y_true, y_pred)\n\n\nclass PrecisionRecallCurve(EpochMetric):\n \"\"\"Compute precision-recall pairs for different probability thresholds for binary classification task\n by accumulating predictions and the ground-truth during an epoch and applying\n `sklearn.metrics.precision_recall_curve <https://scikit-learn.org/stable/modules/generated/\n sklearn.metrics.precision_recall_curve.html#sklearn.metrics.precision_recall_curve>`_ .\n\n Args:\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n check_compute_fn: Default False. If True, `precision_recall_curve\n <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html\n #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are\n no issues. User will be warned in case there are any issues computing the function.\n\n Note:\n PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates\n or confidence values. To apply an activation to y_pred, use output_transform as shown below:\n\n .. code-block:: python\n\n def sigmoid_output_transform(output):\n y_pred, y = output\n y_pred = torch.sigmoid(y_pred)\n return y_pred, y\n avg_precision = PrecisionRecallCurve(sigmoid_output_transform)\n\n Examples:\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])\n y_true = torch.tensor([0, 0, 1, 1])\n prec_recall_curve = PrecisionRecallCurve()\n prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')\n state = default_evaluator.run([[y_pred, y_true]])\n\n print(\"Precision\", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])\n print(\"Recall\", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])\n print(\"Thresholds\", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])\n\n .. 
testoutput::\n\n Precision [1.0, 1.0, 1.0]\n Recall [1.0, 0.5, 0.0]\n Thresholds [0.7109, 0.9997]\n\n \"\"\"\n\n def __init__(\n self,\n output_transform: Callable = lambda x: x,\n check_compute_fn: bool = False,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ) -> None:\n super(PrecisionRecallCurve, self).__init__(\n precision_recall_curve_compute_fn,\n output_transform=output_transform,\n check_compute_fn=check_compute_fn,\n device=device,\n )\n\n def compute(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n if len(self._predictions) < 1 or len(self._targets) < 1:\n raise NotComputableError(\"EpochMetric must have at least one example before it can be computed.\")\n\n _prediction_tensor = torch.cat(self._predictions, dim=0)\n _target_tensor = torch.cat(self._targets, dim=0)\n\n ws = idist.get_world_size()\n if ws > 1 and not self._is_reduced:\n # All gather across all processes\n _prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))\n _target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))\n self._is_reduced = True\n\n if idist.get_rank() == 0:\n # Run compute_fn on zero rank only\n precision, recall, thresholds = self.compute_fn(_prediction_tensor, _target_tensor)\n precision = torch.Tensor(precision)\n recall = torch.Tensor(recall)\n # thresholds can have negative strides, not compatible with torch tensors\n # https://discuss.pytorch.org/t/negative-strides-in-tensor-error/134287/2\n thresholds = torch.Tensor(thresholds.copy())\n else:\n precision, recall, thresholds = None, None, None\n\n if ws > 1:\n # broadcast result to all processes\n precision = idist.broadcast(precision, src=0, safe_mode=True)\n recall = idist.broadcast(recall, src=0, safe_mode=True)\n thresholds = idist.broadcast(thresholds, src=0, safe_mode=True)\n\n return precision, recall, thresholds\n", "path": "ignite/contrib/metrics/precision_recall_curve.py"}]} | 3,267 | 875 |
gh_patches_debug_4652 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-231 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The pkgutil provider reports state: present when a package is not in the catalog
When I try to install a non-existing package with something like
```
# ansible unstable9s -m pkgutil -a "name=CSWlibpsl-dev state=present"
```
The result is success and the state "present" is reported:
```
unstable9s | success >> {
"changed": true,
"name": "CSWlibpsl-dev",
"state": "present",
"stderr": "Package CSWlibpsl-dev not in catalog. Exiting.\n",
"stdout": "Solving needed dependencies ...\n"
}
```
The package was not installed on the machine and is not installed afterwards:
```
unstable9s% pkginfo -x CSWlibpsl-dev
ERROR: information for "CSWlibpsl-dev" was not found
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packaging/os/pkgutil.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
5 # based on svr4pkg by
6 # Boyd Adamson <boyd () boydadamson.com> (2012)
7 #
8 # This file is part of Ansible
9 #
10 # Ansible is free software: you can redistribute it and/or modify
11 # it under the terms of the GNU General Public License as published by
12 # the Free Software Foundation, either version 3 of the License, or
13 # (at your option) any later version.
14 #
15 # Ansible is distributed in the hope that it will be useful,
16 # but WITHOUT ANY WARRANTY; without even the implied warranty of
17 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 # GNU General Public License for more details.
19 #
20 # You should have received a copy of the GNU General Public License
21 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
22 #
23
24 DOCUMENTATION = '''
25 ---
26 module: pkgutil
27 short_description: Manage CSW-Packages on Solaris
28 description:
29 - Manages CSW packages (SVR4 format) on Solaris 10 and 11.
30 - These were the native packages on Solaris <= 10 and are available
31 as a legacy feature in Solaris 11.
32 - Pkgutil is an advanced packaging system, which resolves dependency on installation.
33 It is designed for CSW packages.
34 version_added: "1.3"
35 author: Alexander Winkler
36 options:
37 name:
38 description:
39 - Package name, e.g. (C(CSWnrpe))
40 required: true
41 site:
42 description:
43 - Specifies the repository path to install the package from.
44 - Its global definition is done in C(/etc/opt/csw/pkgutil.conf).
45 state:
46 description:
47 - Whether to install (C(present)), or remove (C(absent)) a package.
48 - The upgrade (C(latest)) operation will update/install the package to the latest version available.
49 - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them."
50 required: true
51 choices: ["present", "absent", "latest"]
52 '''
53
54 EXAMPLES = '''
55 # Install a package
56 pkgutil: name=CSWcommon state=present
57
58 # Install a package from a specific repository
59 pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest'
60 '''
61
62 import os
63 import pipes
64
65 def package_installed(module, name):
66 cmd = [module.get_bin_path('pkginfo', True)]
67 cmd.append('-q')
68 cmd.append(name)
69 rc, out, err = module.run_command(' '.join(cmd))
70 if rc == 0:
71 return True
72 else:
73 return False
74
75 def package_latest(module, name, site):
76 # Only supports one package
77 cmd = [ 'pkgutil', '--single', '-c' ]
78 if site is not None:
79 cmd += [ '-t', pipes.quote(site) ]
80 cmd.append(pipes.quote(name))
81 cmd += [ '| tail -1 | grep -v SAME' ]
82 rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)
83 if rc == 1:
84 return True
85 else:
86 return False
87
88 def run_command(module, cmd):
89 progname = cmd[0]
90 cmd[0] = module.get_bin_path(progname, True)
91 return module.run_command(cmd)
92
93 def package_install(module, state, name, site):
94 cmd = [ 'pkgutil', '-iy' ]
95 if site is not None:
96 cmd += [ '-t', site ]
97 if state == 'latest':
98 cmd += [ '-f' ]
99 cmd.append(name)
100 (rc, out, err) = run_command(module, cmd)
101 return (rc, out, err)
102
103 def package_upgrade(module, name, site):
104 cmd = [ 'pkgutil', '-ufy' ]
105 if site is not None:
106 cmd += [ '-t', site ]
107 cmd.append(name)
108 (rc, out, err) = run_command(module, cmd)
109 return (rc, out, err)
110
111 def package_uninstall(module, name):
112 cmd = [ 'pkgutil', '-ry', name]
113 (rc, out, err) = run_command(module, cmd)
114 return (rc, out, err)
115
116 def main():
117 module = AnsibleModule(
118 argument_spec = dict(
119 name = dict(required = True),
120 state = dict(required = True, choices=['present', 'absent','latest']),
121 site = dict(default = None),
122 ),
123 supports_check_mode=True
124 )
125 name = module.params['name']
126 state = module.params['state']
127 site = module.params['site']
128 rc = None
129 out = ''
130 err = ''
131 result = {}
132 result['name'] = name
133 result['state'] = state
134
135 if state == 'present':
136 if not package_installed(module, name):
137 if module.check_mode:
138 module.exit_json(changed=True)
139 (rc, out, err) = package_install(module, state, name, site)
140 # Stdout is normally empty but for some packages can be
141 # very long and is not often useful
142 if len(out) > 75:
143 out = out[:75] + '...'
144
145 elif state == 'latest':
146 if not package_installed(module, name):
147 if module.check_mode:
148 module.exit_json(changed=True)
149 (rc, out, err) = package_install(module, state, name, site)
150 else:
151 if not package_latest(module, name, site):
152 if module.check_mode:
153 module.exit_json(changed=True)
154 (rc, out, err) = package_upgrade(module, name, site)
155 if len(out) > 75:
156 out = out[:75] + '...'
157
158 elif state == 'absent':
159 if package_installed(module, name):
160 if module.check_mode:
161 module.exit_json(changed=True)
162 (rc, out, err) = package_uninstall(module, name)
163 out = out[:75]
164
165 if rc is None:
166 result['changed'] = False
167 else:
168 result['changed'] = True
169
170 if out:
171 result['stdout'] = out
172 if err:
173 result['stderr'] = err
174
175 module.exit_json(**result)
176
177 # import module snippets
178 from ansible.module_utils.basic import *
179 main()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py
--- a/packaging/os/pkgutil.py
+++ b/packaging/os/pkgutil.py
@@ -163,9 +163,13 @@
         out = out[:75]
 
     if rc is None:
+        # pkgutil was not executed because the package was already present/absent
        result['changed'] = False
-    else:
+    elif rc == 0
        result['changed'] = True
+    else
+        result['changed'] = False
+        result['failed'] = True
 
    if out:
        result['stdout'] = out
| {"golden_diff": "diff --git a/packaging/os/pkgutil.py b/packaging/os/pkgutil.py\n--- a/packaging/os/pkgutil.py\n+++ b/packaging/os/pkgutil.py\n@@ -163,9 +163,13 @@\n out = out[:75]\n \n if rc is None:\n+ # pkgutil was not executed because the package was already present/absent\n result['changed'] = False\n- else:\n+ elif rc == 0\n result['changed'] = True\n+ else\n+ result['changed'] = False\n+ result['failed'] = True\n \n if out:\n result['stdout'] = out\n", "issue": "The pkgutil provider reports state: present when a package is not in the catalog\nWhen I try to install a non-existing package with something like\n\n```\n# ansible unstable9s -m pkgutil -a \"name=CSWlibpsl-dev state=present\"\n```\n\nThe result is success and the state \"present\" is reported:\n\n```\nunstable9s | success >> {\n \"changed\": true, \n \"name\": \"CSWlibpsl-dev\", \n \"state\": \"present\", \n \"stderr\": \"Package CSWlibpsl-dev not in catalog. Exiting.\\n\", \n \"stdout\": \"Solving needed dependencies ...\\n\"\n}\n```\n\nThe package was not installed on the machine and is not installed afterwards:\n\n```\nunstable9s% pkginfo -x CSWlibpsl-dev\nERROR: information for \"CSWlibpsl-dev\" was not found\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Alexander Winkler <mail () winkler-alexander.de>\n# \tbased on svr4pkg by\n# \t\tBoyd Adamson <boyd () boydadamson.com> (2012)\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: pkgutil \nshort_description: Manage CSW-Packages on Solaris\ndescription:\n - Manages CSW packages (SVR4 format) on Solaris 10 and 11.\n - These were the native packages on Solaris <= 10 and are available\n as a legacy feature in Solaris 11.\n - Pkgutil is an advanced packaging system, which resolves dependency on installation.\n It is designed for CSW packages.\nversion_added: \"1.3\"\nauthor: Alexander Winkler\noptions:\n name:\n description:\n - Package name, e.g. 
(C(CSWnrpe))\n required: true\n site:\n description:\n - Specifies the repository path to install the package from.\n - Its global definition is done in C(/etc/opt/csw/pkgutil.conf).\n state:\n description:\n - Whether to install (C(present)), or remove (C(absent)) a package.\n - The upgrade (C(latest)) operation will update/install the package to the latest version available.\n - \"Note: The module has a limitation that (C(latest)) only works for one package, not lists of them.\"\n required: true\n choices: [\"present\", \"absent\", \"latest\"]\n'''\n\nEXAMPLES = '''\n# Install a package\npkgutil: name=CSWcommon state=present\n\n# Install a package from a specific repository\npkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest'\n'''\n\nimport os\nimport pipes\n\ndef package_installed(module, name):\n cmd = [module.get_bin_path('pkginfo', True)]\n cmd.append('-q')\n cmd.append(name)\n rc, out, err = module.run_command(' '.join(cmd))\n if rc == 0:\n return True\n else:\n return False\n\ndef package_latest(module, name, site):\n # Only supports one package\n cmd = [ 'pkgutil', '--single', '-c' ]\n if site is not None:\n cmd += [ '-t', pipes.quote(site) ]\n cmd.append(pipes.quote(name))\n cmd += [ '| tail -1 | grep -v SAME' ]\n rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)\n if rc == 1:\n return True\n else:\n return False\n\ndef run_command(module, cmd):\n progname = cmd[0]\n cmd[0] = module.get_bin_path(progname, True)\n return module.run_command(cmd)\n\ndef package_install(module, state, name, site):\n cmd = [ 'pkgutil', '-iy' ]\n if site is not None:\n cmd += [ '-t', site ]\n if state == 'latest':\n cmd += [ '-f' ] \n cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef package_upgrade(module, name, site):\n cmd = [ 'pkgutil', '-ufy' ]\n if site is not None:\n cmd += [ '-t', site ]\n cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef package_uninstall(module, name):\n cmd = [ 'pkgutil', '-ry', name]\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(required = True),\n state = dict(required = True, choices=['present', 'absent','latest']),\n site = dict(default = None),\n ),\n supports_check_mode=True\n )\n name = module.params['name']\n state = module.params['state']\n site = module.params['site']\n rc = None\n out = ''\n err = ''\n result = {}\n result['name'] = name\n result['state'] = state\n\n if state == 'present':\n if not package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, state, name, site)\n # Stdout is normally empty but for some packages can be\n # very long and is not often useful\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'latest':\n if not package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, state, name, site)\n else:\n if not package_latest(module, name, site):\n if module.check_mode:\n module.exit_json(changed=True) \n (rc, out, err) = package_upgrade(module, name, site)\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'absent':\n if package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_uninstall(module, name)\n out = out[:75]\n\n if rc is None:\n result['changed'] = False\n else:\n 
result['changed'] = True\n\n if out:\n result['stdout'] = out\n if err:\n result['stderr'] = err\n\n module.exit_json(**result)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/os/pkgutil.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Alexander Winkler <mail () winkler-alexander.de>\n# \tbased on svr4pkg by\n# \t\tBoyd Adamson <boyd () boydadamson.com> (2012)\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: pkgutil \nshort_description: Manage CSW-Packages on Solaris\ndescription:\n - Manages CSW packages (SVR4 format) on Solaris 10 and 11.\n - These were the native packages on Solaris <= 10 and are available\n as a legacy feature in Solaris 11.\n - Pkgutil is an advanced packaging system, which resolves dependency on installation.\n It is designed for CSW packages.\nversion_added: \"1.3\"\nauthor: Alexander Winkler\noptions:\n name:\n description:\n - Package name, e.g. (C(CSWnrpe))\n required: true\n site:\n description:\n - Specifies the repository path to install the package from.\n - Its global definition is done in C(/etc/opt/csw/pkgutil.conf).\n state:\n description:\n - Whether to install (C(present)), or remove (C(absent)) a package.\n - The upgrade (C(latest)) operation will update/install the package to the latest version available.\n - \"Note: The module has a limitation that (C(latest)) only works for one package, not lists of them.\"\n required: true\n choices: [\"present\", \"absent\", \"latest\"]\n'''\n\nEXAMPLES = '''\n# Install a package\npkgutil: name=CSWcommon state=present\n\n# Install a package from a specific repository\npkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest'\n'''\n\nimport os\nimport pipes\n\ndef package_installed(module, name):\n cmd = [module.get_bin_path('pkginfo', True)]\n cmd.append('-q')\n cmd.append(name)\n rc, out, err = module.run_command(' '.join(cmd))\n if rc == 0:\n return True\n else:\n return False\n\ndef package_latest(module, name, site):\n # Only supports one package\n cmd = [ 'pkgutil', '--single', '-c' ]\n if site is not None:\n cmd += [ '-t', pipes.quote(site) ]\n cmd.append(pipes.quote(name))\n cmd += [ '| tail -1 | grep -v SAME' ]\n rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)\n if rc == 1:\n return True\n else:\n return False\n\ndef run_command(module, cmd):\n progname = cmd[0]\n cmd[0] = module.get_bin_path(progname, True)\n return module.run_command(cmd)\n\ndef package_install(module, state, name, site):\n cmd = [ 'pkgutil', '-iy' ]\n if site is not None:\n cmd += [ '-t', site ]\n if state == 'latest':\n cmd += [ '-f' ] \n cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef package_upgrade(module, name, site):\n cmd = [ 'pkgutil', '-ufy' ]\n if site is not None:\n cmd += [ '-t', site ]\n 
cmd.append(name)\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef package_uninstall(module, name):\n cmd = [ 'pkgutil', '-ry', name]\n (rc, out, err) = run_command(module, cmd)\n return (rc, out, err)\n\ndef main():\n module = AnsibleModule(\n argument_spec = dict(\n name = dict(required = True),\n state = dict(required = True, choices=['present', 'absent','latest']),\n site = dict(default = None),\n ),\n supports_check_mode=True\n )\n name = module.params['name']\n state = module.params['state']\n site = module.params['site']\n rc = None\n out = ''\n err = ''\n result = {}\n result['name'] = name\n result['state'] = state\n\n if state == 'present':\n if not package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, state, name, site)\n # Stdout is normally empty but for some packages can be\n # very long and is not often useful\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'latest':\n if not package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_install(module, state, name, site)\n else:\n if not package_latest(module, name, site):\n if module.check_mode:\n module.exit_json(changed=True) \n (rc, out, err) = package_upgrade(module, name, site)\n if len(out) > 75:\n out = out[:75] + '...'\n\n elif state == 'absent':\n if package_installed(module, name):\n if module.check_mode:\n module.exit_json(changed=True)\n (rc, out, err) = package_uninstall(module, name)\n out = out[:75]\n\n if rc is None:\n # pkgutil was not executed because the package was already present/absent\n result['changed'] = False\n elif rc == 0\n result['changed'] = True\n else\n result['changed'] = False\n result['failed'] = True\n\n if out:\n result['stdout'] = out\n if err:\n result['stderr'] = err\n\n module.exit_json(**result)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/os/pkgutil.py"}]} | 2,330 | 150 |
gh_patches_debug_43340 | rasdani/github-patches | git_diff | mozilla__bugbug-622 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add options to the Duplicate classifier to select number of bugs in the training set
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bugbug/models/duplicate.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import random
7
8 from sklearn.calibration import CalibratedClassifierCV
9 from sklearn.compose import ColumnTransformer
10 from sklearn.pipeline import Pipeline
11 from sklearn.preprocessing import LabelEncoder
12 from sklearn.svm import LinearSVC
13
14 from bugbug import bug_features, bugzilla, feature_cleanup
15 from bugbug.model import BugCoupleModel
16
17 NUM_DUPLICATES = 7000
18 NUM_DUP_NONDUPS = 3500
19 NUM_NONDUPS_NONDUPS = 3500
20
21 REPORTERS_TO_IGNORE = {"[email protected]", "[email protected]"}
22
23
24 class LinearSVCWithLabelEncoding(CalibratedClassifierCV):
25 def __init__(self, clf):
26 super().__init__(clf)
27 self._le = LabelEncoder()
28
29 def fit(self, X, y):
30 super().fit(X, y)
31 self._le.fit(y)
32
33
34 class DuplicateModel(BugCoupleModel):
35 def __init__(self, lemmatization=False):
36 BugCoupleModel.__init__(self, lemmatization)
37
38 self.calculate_importance = False
39
40 cleanup_functions = [
41 feature_cleanup.responses(),
42 feature_cleanup.hex(),
43 feature_cleanup.dll(),
44 feature_cleanup.fileref(),
45 feature_cleanup.url(),
46 feature_cleanup.synonyms(),
47 feature_cleanup.crash(),
48 ]
49
50 self.extraction_pipeline = Pipeline(
51 [
52 ("bug_extractor", bug_features.BugExtractor([], cleanup_functions)),
53 (
54 "union",
55 ColumnTransformer([("text", self.text_vectorizer(), "text")]),
56 ),
57 ]
58 )
59
60 self.clf = LinearSVCWithLabelEncoding(LinearSVC())
61
62 def get_labels(self):
63
64 random.seed(4)
65
66 all_ids = set(
67 bug["id"]
68 for bug in bugzilla.get_bugs()
69 if bug["creator"] not in REPORTERS_TO_IGNORE
70 and "dupeme" not in bug["keywords"]
71 )
72
73 classes = {}
74
75 # Only store ids of bugs that have duplicates or are duplicates
76 duplicate_ids = []
77
78 duplicates_num = 0
79 for bug_data in bugzilla.get_bugs():
80 bug_id = bug_data["id"]
81 if bug_id not in all_ids:
82 continue
83
84 if bug_data["dupe_of"] or len(bug_data["duplicates"]) > 0:
85 duplicate_ids.append(bug_id)
86
87 for duplicate_bug_id in bug_data["duplicates"]:
88 if duplicate_bug_id not in all_ids:
89 continue
90
91 duplicate_ids.append(duplicate_bug_id)
92
93 if duplicates_num < NUM_DUPLICATES:
94 classes[(bug_id, duplicate_bug_id)] = 1
95 duplicates_num += 1
96
97 # Remove duplicate duplicate IDs.
98 duplicate_ids = list(set(duplicate_ids))
99
100 # Store all remaining ids
101 non_duplicate_ids = list(all_ids - set(duplicate_ids))
102
103 print(f"Number of duplicate labels is: {NUM_DUPLICATES}")
104
105 # When the bug has no duplicates, we create dup-nondup labels.
106 dup_nondup_num = 0
107 while dup_nondup_num < NUM_DUP_NONDUPS:
108 bug_id1 = random.choice(duplicate_ids)
109 bug_id2 = random.choice(non_duplicate_ids)
110
111 classes[(bug_id1, bug_id2)] = 0
112 dup_nondup_num += 1
113
114 print(f"Number of hybrid labels is: {NUM_DUP_NONDUPS}")
115
116 # Now we map non-dup to non-dup bug.
117 nondup_nondup_num = 0
118 while nondup_nondup_num < NUM_DUP_NONDUPS:
119 bug_id1 = random.choice(non_duplicate_ids)
120 bug_id2 = random.choice(non_duplicate_ids)
121 if bug_id1 != bug_id2:
122 classes[(bug_id1, bug_id2)] = 0
123 nondup_nondup_num += 1
124
125 print(f"Number of purely non-duplicate labels is: {NUM_NONDUPS_NONDUPS}")
126
127 return classes, [0, 1]
128
129 def get_feature_names(self):
130 return self.extraction_pipeline.named_steps["union"].get_feature_names()
131
```
Path: `run.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import argparse
7 import csv
8 import os
9 import sys
10 from datetime import datetime, timedelta
11
12 import numpy as np
13
14 from bugbug import bugzilla, db, repository
15 from bugbug.models import MODELS, get_model_class
16
17
18 def parse_args(args):
19 parser = argparse.ArgumentParser()
20 parser.add_argument(
21 "--lemmatization",
22 help="Perform lemmatization (using spaCy)",
23 action="store_true",
24 )
25 parser.add_argument("--train", help="Perform training", action="store_true")
26 parser.add_argument(
27 "--goal", help="Goal of the classifier", choices=MODELS.keys(), default="defect"
28 )
29 parser.add_argument(
30 "--classifier",
31 help="Type of the classifier. Only used for component classification.",
32 choices=["default", "nn"],
33 default="default",
34 )
35 parser.add_argument("--classify", help="Perform evaluation", action="store_true")
36 parser.add_argument(
37 "--generate-sheet",
38 help="Perform evaluation on bugs from last week and generate a csv file",
39 action="store_true",
40 )
41 parser.add_argument("--token", help="Bugzilla token", action="store")
42 parser.add_argument(
43 "--historical",
44 help="""Analyze historical bugs. Only used for defect, bugtype,
45 defectenhancementtask and regression tasks.""",
46 action="store_true",
47 )
48 return parser.parse_args(args)
49
50
51 def main(args):
52 model_file_name = "{}{}model".format(
53 args.goal, "" if args.classifier == "default" else args.classifier
54 )
55
56 if args.goal == "component":
57 if args.classifier == "default":
58 model_class_name = "component"
59 else:
60 model_class_name = "component_nn"
61 else:
62 model_class_name = args.goal
63
64 model_class = get_model_class(model_class_name)
65
66 if args.train:
67 db.download(bugzilla.BUGS_DB)
68 db.download(repository.COMMITS_DB)
69
70 historical_supported_tasks = [
71 "defect",
72 "bugtype",
73 "defectenhancementtask",
74 "regression",
75 ]
76
77 if args.goal in historical_supported_tasks:
78 model = model_class(args.lemmatization, args.historical)
79 else:
80 model = model_class(args.lemmatization)
81 model.train()
82 else:
83 model = model_class.load(model_file_name)
84
85 if args.classify:
86 for bug in bugzilla.get_bugs():
87 print(
88 f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug["id"] } - { bug["summary"]} '
89 )
90
91 if model.calculate_importance:
92 probas, importances = model.classify(
93 bug, probabilities=True, importances=True
94 )
95
96 feature_names = model.get_feature_names()
97 for i, (importance, index, is_positive) in enumerate(importances):
98 print(
99 f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})'
100 )
101 else:
102 probas = model.classify(bug, probabilities=True, importances=False)
103
104 if np.argmax(probas) == 1:
105 print(f"Positive! {probas}")
106 else:
107 print(f"Negative! {probas}")
108 input()
109
110 if args.generate_sheet:
111 assert (
112 args.token is not None
113 ), "A Bugzilla token should be set in order to download bugs"
114 today = datetime.utcnow()
115 a_week_ago = today - timedelta(7)
116 bugzilla.set_token(args.token)
117 bugs = bugzilla.download_bugs_between(a_week_ago, today)
118
119 print(f"Classifying {len(bugs)} bugs...")
120
121 rows = [["Bug", f"{args.goal}(model)", args.goal, "Title"]]
122
123 for bug in bugs:
124 p = model.classify(bug, probabilities=True)
125 rows.append(
126 [
127 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]}',
128 "y" if p[0][1] >= 0.7 else "n",
129 "",
130 bug["summary"],
131 ]
132 )
133
134 os.makedirs("sheets", exist_ok=True)
135 with open(
136 os.path.join(
137 "sheets",
138 f'{args.goal}-{datetime.utcnow().strftime("%Y-%m-%d")}-labels.csv',
139 ),
140 "w",
141 ) as f:
142 writer = csv.writer(f)
143 writer.writerows(rows)
144
145
146 if __name__ == "__main__":
147 main(parse_args(sys.argv[1:]))
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bugbug/models/duplicate.py b/bugbug/models/duplicate.py
--- a/bugbug/models/duplicate.py
+++ b/bugbug/models/duplicate.py
@@ -14,10 +14,6 @@
from bugbug import bug_features, bugzilla, feature_cleanup
from bugbug.model import BugCoupleModel
-NUM_DUPLICATES = 7000
-NUM_DUP_NONDUPS = 3500
-NUM_NONDUPS_NONDUPS = 3500
-
REPORTERS_TO_IGNORE = {"[email protected]", "[email protected]"}
@@ -32,7 +28,10 @@
class DuplicateModel(BugCoupleModel):
- def __init__(self, lemmatization=False):
+ def __init__(self, training_size=14000, lemmatization=False):
+ self.num_duplicates = training_size // 2
+ self.num_nondups_nondups = self.num_dup_nondups = training_size // 4
+
BugCoupleModel.__init__(self, lemmatization)
self.calculate_importance = False
@@ -90,7 +89,7 @@
duplicate_ids.append(duplicate_bug_id)
- if duplicates_num < NUM_DUPLICATES:
+ if duplicates_num < self.num_duplicates:
classes[(bug_id, duplicate_bug_id)] = 1
duplicates_num += 1
@@ -100,29 +99,29 @@
# Store all remaining ids
non_duplicate_ids = list(all_ids - set(duplicate_ids))
- print(f"Number of duplicate labels is: {NUM_DUPLICATES}")
+ print(f"Number of duplicate labels is: {self.num_duplicates}")
# When the bug has no duplicates, we create dup-nondup labels.
dup_nondup_num = 0
- while dup_nondup_num < NUM_DUP_NONDUPS:
+ while dup_nondup_num < self.num_dup_nondups:
bug_id1 = random.choice(duplicate_ids)
bug_id2 = random.choice(non_duplicate_ids)
classes[(bug_id1, bug_id2)] = 0
dup_nondup_num += 1
- print(f"Number of hybrid labels is: {NUM_DUP_NONDUPS}")
+ print(f"Number of hybrid labels is: {self.num_dup_nondups}")
# Now we map non-dup to non-dup bug.
nondup_nondup_num = 0
- while nondup_nondup_num < NUM_DUP_NONDUPS:
+ while nondup_nondup_num < self.num_nondups_nondups:
bug_id1 = random.choice(non_duplicate_ids)
bug_id2 = random.choice(non_duplicate_ids)
if bug_id1 != bug_id2:
classes[(bug_id1, bug_id2)] = 0
nondup_nondup_num += 1
- print(f"Number of purely non-duplicate labels is: {NUM_NONDUPS_NONDUPS}")
+ print(f"Number of purely non-duplicate labels is: {self.num_nondups_nondups}")
return classes, [0, 1]
diff --git a/run.py b/run.py
--- a/run.py
+++ b/run.py
@@ -22,6 +22,13 @@
help="Perform lemmatization (using spaCy)",
action="store_true",
)
+ parser.add_argument(
+ "--training-set-size",
+ nargs="?",
+ default=14000,
+ type=int,
+ help="The size of the training set for the duplicate model",
+ )
parser.add_argument("--train", help="Perform training", action="store_true")
parser.add_argument(
"--goal", help="Goal of the classifier", choices=MODELS.keys(), default="defect"
@@ -76,6 +83,8 @@
if args.goal in historical_supported_tasks:
model = model_class(args.lemmatization, args.historical)
+ elif args.goal == "duplicate":
+ model = model_class(args.training_set_size, args.lemmatization)
else:
model = model_class(args.lemmatization)
model.train()
| {"golden_diff": "diff --git a/bugbug/models/duplicate.py b/bugbug/models/duplicate.py\n--- a/bugbug/models/duplicate.py\n+++ b/bugbug/models/duplicate.py\n@@ -14,10 +14,6 @@\n from bugbug import bug_features, bugzilla, feature_cleanup\n from bugbug.model import BugCoupleModel\n \n-NUM_DUPLICATES = 7000\n-NUM_DUP_NONDUPS = 3500\n-NUM_NONDUPS_NONDUPS = 3500\n-\n REPORTERS_TO_IGNORE = {\"[email protected]\", \"[email protected]\"}\n \n \n@@ -32,7 +28,10 @@\n \n \n class DuplicateModel(BugCoupleModel):\n- def __init__(self, lemmatization=False):\n+ def __init__(self, training_size=14000, lemmatization=False):\n+ self.num_duplicates = training_size // 2\n+ self.num_nondups_nondups = self.num_dup_nondups = training_size // 4\n+\n BugCoupleModel.__init__(self, lemmatization)\n \n self.calculate_importance = False\n@@ -90,7 +89,7 @@\n \n duplicate_ids.append(duplicate_bug_id)\n \n- if duplicates_num < NUM_DUPLICATES:\n+ if duplicates_num < self.num_duplicates:\n classes[(bug_id, duplicate_bug_id)] = 1\n duplicates_num += 1\n \n@@ -100,29 +99,29 @@\n # Store all remaining ids\n non_duplicate_ids = list(all_ids - set(duplicate_ids))\n \n- print(f\"Number of duplicate labels is: {NUM_DUPLICATES}\")\n+ print(f\"Number of duplicate labels is: {self.num_duplicates}\")\n \n # When the bug has no duplicates, we create dup-nondup labels.\n dup_nondup_num = 0\n- while dup_nondup_num < NUM_DUP_NONDUPS:\n+ while dup_nondup_num < self.num_dup_nondups:\n bug_id1 = random.choice(duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n \n classes[(bug_id1, bug_id2)] = 0\n dup_nondup_num += 1\n \n- print(f\"Number of hybrid labels is: {NUM_DUP_NONDUPS}\")\n+ print(f\"Number of hybrid labels is: {self.num_dup_nondups}\")\n \n # Now we map non-dup to non-dup bug.\n nondup_nondup_num = 0\n- while nondup_nondup_num < NUM_DUP_NONDUPS:\n+ while nondup_nondup_num < self.num_nondups_nondups:\n bug_id1 = random.choice(non_duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n if bug_id1 != bug_id2:\n classes[(bug_id1, bug_id2)] = 0\n nondup_nondup_num += 1\n \n- print(f\"Number of purely non-duplicate labels is: {NUM_NONDUPS_NONDUPS}\")\n+ print(f\"Number of purely non-duplicate labels is: {self.num_nondups_nondups}\")\n \n return classes, [0, 1]\n \ndiff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -22,6 +22,13 @@\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n+ parser.add_argument(\n+ \"--training-set-size\",\n+ nargs=\"?\",\n+ default=14000,\n+ type=int,\n+ help=\"The size of the training set for the duplicate model\",\n+ )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\", help=\"Goal of the classifier\", choices=MODELS.keys(), default=\"defect\"\n@@ -76,6 +83,8 @@\n \n if args.goal in historical_supported_tasks:\n model = model_class(args.lemmatization, args.historical)\n+ elif args.goal == \"duplicate\":\n+ model = model_class(args.training_set_size, args.lemmatization)\n else:\n model = model_class(args.lemmatization)\n model.train()\n", "issue": "Add options to the Duplicate classifier to select number of bugs in the training set\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport random\n\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import LinearSVC\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugCoupleModel\n\nNUM_DUPLICATES = 7000\nNUM_DUP_NONDUPS = 3500\nNUM_NONDUPS_NONDUPS = 3500\n\nREPORTERS_TO_IGNORE = {\"[email protected]\", \"[email protected]\"}\n\n\nclass LinearSVCWithLabelEncoding(CalibratedClassifierCV):\n def __init__(self, clf):\n super().__init__(clf)\n self._le = LabelEncoder()\n\n def fit(self, X, y):\n super().fit(X, y)\n self._le.fit(y)\n\n\nclass DuplicateModel(BugCoupleModel):\n def __init__(self, lemmatization=False):\n BugCoupleModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n cleanup_functions = [\n feature_cleanup.responses(),\n feature_cleanup.hex(),\n feature_cleanup.dll(),\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n feature_cleanup.crash(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\"bug_extractor\", bug_features.BugExtractor([], cleanup_functions)),\n (\n \"union\",\n ColumnTransformer([(\"text\", self.text_vectorizer(), \"text\")]),\n ),\n ]\n )\n\n self.clf = LinearSVCWithLabelEncoding(LinearSVC())\n\n def get_labels(self):\n\n random.seed(4)\n\n all_ids = set(\n bug[\"id\"]\n for bug in bugzilla.get_bugs()\n if bug[\"creator\"] not in REPORTERS_TO_IGNORE\n and \"dupeme\" not in bug[\"keywords\"]\n )\n\n classes = {}\n\n # Only store ids of bugs that have duplicates or are duplicates\n duplicate_ids = []\n\n duplicates_num = 0\n for bug_data in bugzilla.get_bugs():\n bug_id = bug_data[\"id\"]\n if bug_id not in all_ids:\n continue\n\n if bug_data[\"dupe_of\"] or len(bug_data[\"duplicates\"]) > 0:\n duplicate_ids.append(bug_id)\n\n for duplicate_bug_id in bug_data[\"duplicates\"]:\n if duplicate_bug_id not in all_ids:\n continue\n\n duplicate_ids.append(duplicate_bug_id)\n\n if duplicates_num < NUM_DUPLICATES:\n classes[(bug_id, duplicate_bug_id)] = 1\n duplicates_num += 1\n\n # Remove duplicate duplicate IDs.\n duplicate_ids = list(set(duplicate_ids))\n\n # Store all remaining ids\n non_duplicate_ids = list(all_ids - set(duplicate_ids))\n\n print(f\"Number of duplicate labels is: {NUM_DUPLICATES}\")\n\n # When the bug has no duplicates, we create dup-nondup labels.\n dup_nondup_num = 0\n while dup_nondup_num < NUM_DUP_NONDUPS:\n bug_id1 = random.choice(duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n\n classes[(bug_id1, bug_id2)] = 0\n dup_nondup_num += 1\n\n print(f\"Number of hybrid labels is: {NUM_DUP_NONDUPS}\")\n\n # Now we map non-dup to non-dup bug.\n nondup_nondup_num = 0\n while nondup_nondup_num < NUM_DUP_NONDUPS:\n bug_id1 = random.choice(non_duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n if bug_id1 != bug_id2:\n classes[(bug_id1, bug_id2)] = 0\n nondup_nondup_num += 1\n\n print(f\"Number of purely non-duplicate labels is: {NUM_NONDUPS_NONDUPS}\")\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/duplicate.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import bugzilla, db, repository\nfrom bugbug.models import MODELS, get_model_class\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\", help=\"Goal of the classifier\", choices=MODELS.keys(), default=\"defect\"\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\",\n help=\"\"\"Analyze historical bugs. Only used for defect, bugtype,\n defectenhancementtask and regression tasks.\"\"\",\n action=\"store_true\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n else:\n model_class_name = \"component_nn\"\n else:\n model_class_name = args.goal\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download(bugzilla.BUGS_DB)\n db.download(repository.COMMITS_DB)\n\n historical_supported_tasks = [\n \"defect\",\n \"bugtype\",\n \"defectenhancementtask\",\n \"regression\",\n ]\n\n if args.goal in historical_supported_tasks:\n model = model_class(args.lemmatization, args.historical)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importances = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "run.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport random\n\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import LinearSVC\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup\nfrom bugbug.model import BugCoupleModel\n\nREPORTERS_TO_IGNORE = {\"[email protected]\", \"[email protected]\"}\n\n\nclass LinearSVCWithLabelEncoding(CalibratedClassifierCV):\n def __init__(self, clf):\n super().__init__(clf)\n self._le = LabelEncoder()\n\n def fit(self, X, y):\n super().fit(X, y)\n self._le.fit(y)\n\n\nclass DuplicateModel(BugCoupleModel):\n def __init__(self, training_size=14000, lemmatization=False):\n self.num_duplicates = training_size // 2\n self.num_nondups_nondups = self.num_dup_nondups = training_size // 4\n\n BugCoupleModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n cleanup_functions = [\n feature_cleanup.responses(),\n feature_cleanup.hex(),\n feature_cleanup.dll(),\n feature_cleanup.fileref(),\n feature_cleanup.url(),\n feature_cleanup.synonyms(),\n feature_cleanup.crash(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\"bug_extractor\", bug_features.BugExtractor([], cleanup_functions)),\n (\n \"union\",\n ColumnTransformer([(\"text\", self.text_vectorizer(), \"text\")]),\n ),\n ]\n )\n\n self.clf = LinearSVCWithLabelEncoding(LinearSVC())\n\n def get_labels(self):\n\n random.seed(4)\n\n all_ids = set(\n bug[\"id\"]\n for bug in bugzilla.get_bugs()\n if bug[\"creator\"] not in REPORTERS_TO_IGNORE\n and \"dupeme\" not in bug[\"keywords\"]\n )\n\n classes = {}\n\n # Only store ids of bugs that have duplicates or are duplicates\n duplicate_ids = []\n\n duplicates_num = 0\n for bug_data in bugzilla.get_bugs():\n bug_id = bug_data[\"id\"]\n if bug_id not in all_ids:\n continue\n\n if bug_data[\"dupe_of\"] or len(bug_data[\"duplicates\"]) > 0:\n duplicate_ids.append(bug_id)\n\n for duplicate_bug_id in bug_data[\"duplicates\"]:\n if duplicate_bug_id not in all_ids:\n continue\n\n duplicate_ids.append(duplicate_bug_id)\n\n if duplicates_num < self.num_duplicates:\n classes[(bug_id, duplicate_bug_id)] = 1\n duplicates_num += 1\n\n # Remove duplicate duplicate IDs.\n duplicate_ids = list(set(duplicate_ids))\n\n # Store all 
remaining ids\n non_duplicate_ids = list(all_ids - set(duplicate_ids))\n\n print(f\"Number of duplicate labels is: {self.num_duplicates}\")\n\n # When the bug has no duplicates, we create dup-nondup labels.\n dup_nondup_num = 0\n while dup_nondup_num < self.num_dup_nondups:\n bug_id1 = random.choice(duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n\n classes[(bug_id1, bug_id2)] = 0\n dup_nondup_num += 1\n\n print(f\"Number of hybrid labels is: {self.num_dup_nondups}\")\n\n # Now we map non-dup to non-dup bug.\n nondup_nondup_num = 0\n while nondup_nondup_num < self.num_nondups_nondups:\n bug_id1 = random.choice(non_duplicate_ids)\n bug_id2 = random.choice(non_duplicate_ids)\n if bug_id1 != bug_id2:\n classes[(bug_id1, bug_id2)] = 0\n nondup_nondup_num += 1\n\n print(f\"Number of purely non-duplicate labels is: {self.num_nondups_nondups}\")\n\n return classes, [0, 1]\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps[\"union\"].get_feature_names()\n", "path": "bugbug/models/duplicate.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nimport sys\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import bugzilla, db, repository\nfrom bugbug.models import MODELS, get_model_class\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--training-set-size\",\n nargs=\"?\",\n default=14000,\n type=int,\n help=\"The size of the training set for the duplicate model\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\", help=\"Goal of the classifier\", choices=MODELS.keys(), default=\"defect\"\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\",\n help=\"\"\"Analyze historical bugs. 
Only used for defect, bugtype,\n defectenhancementtask and regression tasks.\"\"\",\n action=\"store_true\",\n )\n return parser.parse_args(args)\n\n\ndef main(args):\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n else:\n model_class_name = \"component_nn\"\n else:\n model_class_name = args.goal\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download(bugzilla.BUGS_DB)\n db.download(repository.COMMITS_DB)\n\n historical_supported_tasks = [\n \"defect\",\n \"bugtype\",\n \"defectenhancementtask\",\n \"regression\",\n ]\n\n if args.goal in historical_supported_tasks:\n model = model_class(args.lemmatization, args.historical)\n elif args.goal == \"duplicate\":\n model = model_class(args.training_set_size, args.lemmatization)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importances = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n\n\nif __name__ == \"__main__\":\n main(parse_args(sys.argv[1:]))\n", "path": "run.py"}]} | 2,931 | 957 |
gh_patches_debug_22557 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] swig/4.0.1, swig/4.0.2: invalid swiglib path makes installed swig unusable
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **swig/4.0.1**, **swig/4.0.2**
* Operating System+version: **Linux Ubuntu 20.04**
* Compiler+version: **GCC 9**
* Conan version: **conan 1.34.1**
* Python version: **Python 3.8.2**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=9
compiler.libcxx=libstdc++
build_type=Release
[options]
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
conanfile.txt contents:
```
[build_requires]
swig/4.0.1 # 4.0.2 has same behaviour
[generators]
virtualenv
```
Run the following commands:
```
mkdir ~/build
cd ~/build
conan install <folder-containing-above-file>
. activate.sh
which swig
swig -swiglib
```
On my machine, I see:
```
#### which swig
/home/vagrant/.conan/data/swig/4.0.2/_/_/package/4a3775e698f76c30fab71ea08d47f61f009d1176/bin/swig
#### swig -swiglib
/home/conan/w/BuildSingleReference/.conan/data/swig/4.0.2/_/_/package/4a3775e698f76c30fab71ea08d47f61f009d1176/bin/swiglib
```
The initial segment of the swiglib directory path is wrong (and non-existent); it appears to come from the package build machine.
The invalid swiglib path results in messages like the following when attempting to process a .i file:
```
:1. Unable to find 'swig.swg'
:3. Unable to find 'python.swg'
```
### In comparison to swig_installer/4.0.1@bincrafters/stable
I have successfully used swig_installer/4.0.1@bincrafters/stable (https://github.com/bincrafters/conan-swig_installer/blob/testing/4.0.1/conanfile.py) across Windows 10, various Ubuntu (16.04, 18.04, 20.04), Debian (9, 10) and MacOS. Some significant differences:
* the bincrafters folders are organized differently, with `bin` and `share` as peer directories
```
├── bin
├── licenses
└── share
└── swig
└── 4.0.1
```
* With this recipe, the layout is:
```
├── bin
│ └── swiglib
├── lib
│ └── cmake
└── licenses
```
* whether it is the current directory arrangement or the inclusion of `--with-swiglibdir` in your configuration, the net result is an invalid swiglib path
* the bincrafters recipe, even without running the activate.sh script, correctly reports the swiglib location; the current recipe does not
* Although it appears to be completely unnecessary, the bincrafters recipe also sets the SWIG_LIB environment variable
* Setting SWIG_LIB in your recipe would be a viable workaround for the problem, although I am not sure that it would be the best approach (a minimal sketch follows below)
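A minimal sketch of that recipe-side idea, assuming Conan 1.x `env_info` semantics and the `bin/swiglib` layout shown above; everything except the two `SWIG_LIB` lines mirrors what the recipe already does for `PATH`, and the placement inside `package_info()` is an assumption rather than a confirmed fix:

```python
import os

from conans import ConanFile


class SwigConan(ConanFile):
    name = "swig"
    settings = "os", "arch", "compiler", "build_type"
    # ... rest of the recipe as in the package ...

    def package_info(self):
        # Existing behaviour: expose the package's bin directory on PATH.
        bindir = os.path.join(self.package_folder, "bin")
        self.env_info.PATH.append(bindir)

        # Hypothetical addition: export SWIG_LIB so consumers resolve the
        # swiglib directory of *this* installed package instead of the
        # path baked in on the package build machine.
        swiglib = os.path.join(self.package_folder, "bin", "swiglib")
        self.env_info.SWIG_LIB = swiglib
```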
### Tested workarounds
* set the SWIG_LIB environment variable locally prior to running swig
* use --build to force a local build: ```conan install --build=swig <path-to-conanfile-dir>```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/swig/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools, AutoToolsBuildEnvironment
2 from contextlib import contextmanager
3 import os
4
5
6 class SwigConan(ConanFile):
7 name = "swig"
8 description = "SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages."
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "http://www.swig.org"
11 license = "GPL-3.0-or-later"
12 topics = ("conan", "swig", "python", "java", "wrapper")
13 exports_sources = "patches/**", "cmake/*"
14 settings = "os", "arch", "compiler", "build_type"
15
16 _autotools = None
17
18 @property
19 def _source_subfolder(self):
20 return "source_subfolder"
21
22 def configure(self):
23 del self.settings.compiler.libcxx
24 del self.settings.compiler.cppstd
25
26 def build_requirements(self):
27 if tools.os_info.is_windows and not tools.get_env("CONAN_BASH_PATH") \
28 and tools.os_info.detect_windows_subsystem() != "msys2":
29 self.build_requires("msys2/20190524")
30 if self.settings.compiler == "Visual Studio":
31 self.build_requires("winflexbison/2.5.22")
32 else:
33 self.build_requires("bison/3.7.1")
34 self.build_requires("automake/1.16.2")
35
36 def requirements(self):
37 self.requires("pcre/8.41")
38
39 def source(self):
40 tools.get(**self.conan_data["sources"][self.version])
41 os.rename("swig-rel-{}".format(self.version), self._source_subfolder)
42
43 @property
44 def _user_info_build(self):
45 # If using the experimental feature with different context for host and
46 # build, the 'user_info' attributes of the 'build_requires' packages
47 # will be located into the 'user_info_build' object. In other cases they
48 # will be located into the 'deps_user_info' object.
49 return getattr(self, "user_info_build", None) or self.deps_user_info
50
51 @contextmanager
52 def _build_context(self):
53 env = {}
54 if self.settings.compiler != "Visual Studio":
55 env["YACC"] = self._user_info_build["bison"].YACC
56 if self.settings.compiler == "Visual Studio":
57 with tools.vcvars(self.settings):
58 env.update({
59 "CC": "{} cl -nologo".format(tools.unix_path(self._user_info_build["automake"].compile)),
60 "CXX": "{} cl -nologo".format(tools.unix_path(self._user_info_build["automake"].compile)),
61 "AR": "{} link".format(self._user_info_build["automake"].ar_lib),
62 "LD": "link",
63 })
64 with tools.environment_append(env):
65 yield
66 else:
67 with tools.environment_append(env):
68 yield
69
70 def _configure_autotools(self):
71 if self._autotools:
72 return self._autotools
73
74 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
75 deps_libpaths = self._autotools.library_paths
76 deps_libs = self._autotools.libs
77 deps_defines = self._autotools.defines
78 if self.settings.os == "Windows" and self.settings.compiler != "Visual Studio":
79 self._autotools.link_flags.append("-static")
80
81 libargs = list("-L\"{}\"".format(p) for p in deps_libpaths) + list("-l\"{}\"".format(l) for l in deps_libs)
82 args = [
83 "PCRE_LIBS={}".format(" ".join(libargs)),
84 "PCRE_CPPFLAGS={}".format(" ".join("-D{}".format(define) for define in deps_defines)),
85 "--host={}".format(self.settings.arch),
86 "--with-swiglibdir={}".format(self._swiglibdir),
87 ]
88
89 host, build = None, None
90
91 if self.settings.compiler == "Visual Studio":
92 self.output.warn("Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.")
93 args.append("--disable-ccache")
94 self._autotools.flags.append("-FS")
95 # MSVC canonical names aren't understood
96 host, build = False, False
97
98 self._autotools.libs = []
99 self._autotools.library_paths = []
100
101 self._autotools.configure(args=args, configure_dir=self._source_subfolder,
102 host=host, build=build)
103 return self._autotools
104
105 def _patch_sources(self):
106 for patch in self.conan_data["patches"][self.version]:
107 tools.patch(**patch)
108
109 def build(self):
110 self._patch_sources()
111 with tools.chdir(os.path.join(self._source_subfolder)):
112 self.run("./autogen.sh", win_bash=tools.os_info.is_windows)
113 with self._build_context():
114 autotools = self._configure_autotools()
115 autotools.make()
116
117 def package(self):
118 self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
119 self.copy(pattern="COPYRIGHT", dst="licenses", src=self._source_subfolder)
120 self.copy("*", src="cmake", dst=self._module_subfolder)
121 with self._build_context():
122 autotools = self._configure_autotools()
123 autotools.install()
124
125 @property
126 def _swiglibdir(self):
127 return os.path.join(self.package_folder, "bin", "swiglib").replace("\\", "/")
128
129 @property
130 def _module_subfolder(self):
131 return os.path.join("lib", "cmake")
132
133 @property
134 def _module_file(self):
135 return "conan-official-{}-targets.cmake".format(self.name)
136
137 def package_info(self):
138 self.cpp_info.names["cmake_find_package"] = "SWIG"
139 self.cpp_info.names["cmake_find_package_multi"] = "SWIG"
140 self.cpp_info.builddirs = [self._module_subfolder]
141 self.cpp_info.build_modules = [os.path.join(self._module_subfolder, self._module_file)]
142
143 bindir = os.path.join(self.package_folder, "bin")
144 self.output.info("Appending PATH environment variable: {}".format(bindir))
145 self.env_info.PATH.append(bindir)
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/swig/all/conanfile.py b/recipes/swig/all/conanfile.py
--- a/recipes/swig/all/conanfile.py
+++ b/recipes/swig/all/conanfile.py
@@ -23,18 +23,21 @@
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
+ def package_id(self):
+ del self.info.settings.compiler
+
def build_requirements(self):
if tools.os_info.is_windows and not tools.get_env("CONAN_BASH_PATH") \
and tools.os_info.detect_windows_subsystem() != "msys2":
- self.build_requires("msys2/20190524")
+ self.build_requires("msys2/cci.latest")
if self.settings.compiler == "Visual Studio":
- self.build_requires("winflexbison/2.5.22")
+ self.build_requires("winflexbison/2.5.24")
else:
self.build_requires("bison/3.7.1")
- self.build_requires("automake/1.16.2")
+ self.build_requires("automake/1.16.3")
def requirements(self):
- self.requires("pcre/8.41")
+ self.requires("pcre/8.44")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
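For context, the `package_id()` method added above is the usual Conan 1.x idiom for tool recipes: the swig binary a consumer downloads does not depend on the consumer's compiler, so that setting is removed from the binary package id (the remaining hunk lines bump build requirements and pcre to newer versions). A generic, self-contained sketch of the idiom with placeholder name and version:

```python
from conans import ConanFile


class ToolRecipeSketch(ConanFile):
    name = "some-tool"   # placeholder
    version = "1.0"      # placeholder
    settings = "os", "arch", "compiler", "build_type"

    def package_id(self):
        # Binaries of a command-line tool built with different compilers are
        # interchangeable, so dropping the compiler setting collapses them
        # into a single binary package id.
        del self.info.settings.compiler
```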
| {"golden_diff": "diff --git a/recipes/swig/all/conanfile.py b/recipes/swig/all/conanfile.py\n--- a/recipes/swig/all/conanfile.py\n+++ b/recipes/swig/all/conanfile.py\n@@ -23,18 +23,21 @@\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n \n+ def package_id(self):\n+ del self.info.settings.compiler\n+\n def build_requirements(self):\n if tools.os_info.is_windows and not tools.get_env(\"CONAN_BASH_PATH\") \\\n and tools.os_info.detect_windows_subsystem() != \"msys2\":\n- self.build_requires(\"msys2/20190524\")\n+ self.build_requires(\"msys2/cci.latest\")\n if self.settings.compiler == \"Visual Studio\":\n- self.build_requires(\"winflexbison/2.5.22\")\n+ self.build_requires(\"winflexbison/2.5.24\")\n else:\n self.build_requires(\"bison/3.7.1\")\n- self.build_requires(\"automake/1.16.2\")\n+ self.build_requires(\"automake/1.16.3\")\n \n def requirements(self):\n- self.requires(\"pcre/8.41\")\n+ self.requires(\"pcre/8.44\")\n \n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n", "issue": "[package] swig/4.0.1, swig/4.0.2: invalid swiglib path makes installed swig unusable\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **swig/4.0.1**, **swig/4.02**\r\n * Operating System+version: **Linux Ubuntu 20.04**\r\n * Compiler+version: **GCC 9**\r\n * Conan version: **conan 1.34.1**\r\n * Python version: **Python 3.8.2**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=9\r\ncompiler.libcxx=libstdc++\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nconanfile.txt contents:\r\n```\r\n[build_requires]\r\nswig/4.0.1 # 4.0.2 has same behaviour\r\n\r\n[generators]\r\nvirtualenv\r\n```\r\n\r\nRun the following commands:\r\n```\r\nmkdir ~/build\r\ncd ~/build\r\nconan install <folder-containing-above-file>\r\n. activate.sh\r\nwhich swig\r\nswig -swiglib\r\n```\r\nOn my machine, I see:\r\n```\r\n#### which swig\r\n/home/vagrant/.conan/data/swig/4.0.2/_/_/package/4a3775e698f76c30fab71ea08d47f61f009d1176/bin/swig\r\n\r\n#### swig -swiglib\r\n/home/conan/w/BuildSingleReference/.conan/data/swig/4.0.2/_/_/package/4a3775e698f76c30fab71ea08d47f61f009d1176/bin/swiglib\r\n```\r\nThe initial segment of swiglib directory path is wrong (and non-existent); it appears to be from the package build machine.\r\n\r\nThe invalid swiglib path results in the following messages like the following when attempting to process a .i file:\r\n\r\n```\r\n :1. Unable to find 'swig.swg'\r\n :3. Unable to find 'python.swg'\r\n```\r\n\r\n### In comparison to swig_installer/4.0.1@bincrafters/stable\r\n\r\nI have successfully used swig_installer/4.0.1@bincrafters/stable (https://github.com/bincrafters/conan-swig_installer/blob/testing/4.0.1/conanfile.py) across Windows 10, various Ubuntu (16.04, 18.04, 20.04), Debian (9, 10) and MacOS. 
Some significant differences:\r\n\r\n* the bincrafters folders are organized differently, with bin and shared as peer directories\r\n```\r\n\u251c\u2500\u2500 bin\r\n\u251c\u2500\u2500 licenses\r\n\u2514\u2500\u2500 share\r\n \u2514\u2500\u2500 swig\r\n \u2514\u2500\u2500 4.0.1\r\n```\r\n* With this recipe, the layout is:\r\n```\r\n\u251c\u2500\u2500 bin\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 swiglib\r\n\u251c\u2500\u2500 lib\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 cmake\r\n\u2514\u2500\u2500 licenses\r\n```\r\n* whether it is the current directory arrangement or the inclusion of --with-swiglibdir in your configuration, the net result is an invalid swiglib path\r\n* the bincrafter's recipe, even without running the activate.sh script correctly reports the swiglib location. The current recipe does not\r\n* Although it appears to be completely unnecessary, the bincrafters recipe also sets the SWIG_LIB environment variable\r\n* Setting SWIG_LIB in your recipe would be a viable work around to the problem, although I am not sure that it would be the best\r\n\r\n### Tested work arounds\r\n* set SWIG_LIB environment variable locally prior to running swig\r\n* use --build to force a local build: ```conan install --build=swig <path-to-conanfile-dir>```\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom contextlib import contextmanager\nimport os\n\n\nclass SwigConan(ConanFile):\n name = \"swig\"\n description = \"SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.swig.org\"\n license = \"GPL-3.0-or-later\"\n topics = (\"conan\", \"swig\", \"python\", \"java\", \"wrapper\")\n exports_sources = \"patches/**\", \"cmake/*\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n if tools.os_info.is_windows and not tools.get_env(\"CONAN_BASH_PATH\") \\\n and tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/20190524\")\n if self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"winflexbison/2.5.22\")\n else:\n self.build_requires(\"bison/3.7.1\")\n self.build_requires(\"automake/1.16.2\")\n\n def requirements(self):\n self.requires(\"pcre/8.41\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"swig-rel-{}\".format(self.version), self._source_subfolder)\n\n @property\n def _user_info_build(self):\n # If using the experimental feature with different context for host and\n # build, the 'user_info' attributes of the 'build_requires' packages\n # will be located into the 'user_info_build' object. 
In other cases they\n # will be located into the 'deps_user_info' object.\n return getattr(self, \"user_info_build\", None) or self.deps_user_info\n\n @contextmanager\n def _build_context(self):\n env = {}\n if self.settings.compiler != \"Visual Studio\":\n env[\"YACC\"] = self._user_info_build[\"bison\"].YACC\n if self.settings.compiler == \"Visual Studio\":\n with tools.vcvars(self.settings):\n env.update({\n \"CC\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"CXX\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"AR\": \"{} link\".format(self._user_info_build[\"automake\"].ar_lib),\n \"LD\": \"link\",\n })\n with tools.environment_append(env):\n yield\n else:\n with tools.environment_append(env):\n yield\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n deps_libpaths = self._autotools.library_paths\n deps_libs = self._autotools.libs\n deps_defines = self._autotools.defines\n if self.settings.os == \"Windows\" and self.settings.compiler != \"Visual Studio\":\n self._autotools.link_flags.append(\"-static\")\n\n libargs = list(\"-L\\\"{}\\\"\".format(p) for p in deps_libpaths) + list(\"-l\\\"{}\\\"\".format(l) for l in deps_libs)\n args = [\n \"PCRE_LIBS={}\".format(\" \".join(libargs)),\n \"PCRE_CPPFLAGS={}\".format(\" \".join(\"-D{}\".format(define) for define in deps_defines)),\n \"--host={}\".format(self.settings.arch),\n \"--with-swiglibdir={}\".format(self._swiglibdir),\n ]\n\n host, build = None, None\n\n if self.settings.compiler == \"Visual Studio\":\n self.output.warn(\"Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.\")\n args.append(\"--disable-ccache\")\n self._autotools.flags.append(\"-FS\")\n # MSVC canonical names aren't understood\n host, build = False, False\n\n self._autotools.libs = []\n self._autotools.library_paths = []\n\n self._autotools.configure(args=args, configure_dir=self._source_subfolder,\n host=host, build=build)\n return self._autotools\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def build(self):\n self._patch_sources()\n with tools.chdir(os.path.join(self._source_subfolder)):\n self.run(\"./autogen.sh\", win_bash=tools.os_info.is_windows)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"COPYRIGHT\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*\", src=\"cmake\", dst=self._module_subfolder)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.install()\n\n @property\n def _swiglibdir(self):\n return os.path.join(self.package_folder, \"bin\", \"swiglib\").replace(\"\\\\\", \"/\")\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-targets.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"SWIG\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SWIG\"\n self.cpp_info.builddirs = [self._module_subfolder]\n self.cpp_info.build_modules = [os.path.join(self._module_subfolder, self._module_file)]\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH 
environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/swig/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom contextlib import contextmanager\nimport os\n\n\nclass SwigConan(ConanFile):\n name = \"swig\"\n description = \"SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.swig.org\"\n license = \"GPL-3.0-or-later\"\n topics = (\"conan\", \"swig\", \"python\", \"java\", \"wrapper\")\n exports_sources = \"patches/**\", \"cmake/*\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def package_id(self):\n del self.info.settings.compiler\n\n def build_requirements(self):\n if tools.os_info.is_windows and not tools.get_env(\"CONAN_BASH_PATH\") \\\n and tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/cci.latest\")\n if self.settings.compiler == \"Visual Studio\":\n self.build_requires(\"winflexbison/2.5.24\")\n else:\n self.build_requires(\"bison/3.7.1\")\n self.build_requires(\"automake/1.16.3\")\n\n def requirements(self):\n self.requires(\"pcre/8.44\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"swig-rel-{}\".format(self.version), self._source_subfolder)\n\n @property\n def _user_info_build(self):\n # If using the experimental feature with different context for host and\n # build, the 'user_info' attributes of the 'build_requires' packages\n # will be located into the 'user_info_build' object. 
In other cases they\n # will be located into the 'deps_user_info' object.\n return getattr(self, \"user_info_build\", None) or self.deps_user_info\n\n @contextmanager\n def _build_context(self):\n env = {}\n if self.settings.compiler != \"Visual Studio\":\n env[\"YACC\"] = self._user_info_build[\"bison\"].YACC\n if self.settings.compiler == \"Visual Studio\":\n with tools.vcvars(self.settings):\n env.update({\n \"CC\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"CXX\": \"{} cl -nologo\".format(tools.unix_path(self._user_info_build[\"automake\"].compile)),\n \"AR\": \"{} link\".format(self._user_info_build[\"automake\"].ar_lib),\n \"LD\": \"link\",\n })\n with tools.environment_append(env):\n yield\n else:\n with tools.environment_append(env):\n yield\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n deps_libpaths = self._autotools.library_paths\n deps_libs = self._autotools.libs\n deps_defines = self._autotools.defines\n if self.settings.os == \"Windows\" and self.settings.compiler != \"Visual Studio\":\n self._autotools.link_flags.append(\"-static\")\n\n libargs = list(\"-L\\\"{}\\\"\".format(p) for p in deps_libpaths) + list(\"-l\\\"{}\\\"\".format(l) for l in deps_libs)\n args = [\n \"PCRE_LIBS={}\".format(\" \".join(libargs)),\n \"PCRE_CPPFLAGS={}\".format(\" \".join(\"-D{}\".format(define) for define in deps_defines)),\n \"--host={}\".format(self.settings.arch),\n \"--with-swiglibdir={}\".format(self._swiglibdir),\n ]\n\n host, build = None, None\n\n if self.settings.compiler == \"Visual Studio\":\n self.output.warn(\"Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.\")\n args.append(\"--disable-ccache\")\n self._autotools.flags.append(\"-FS\")\n # MSVC canonical names aren't understood\n host, build = False, False\n\n self._autotools.libs = []\n self._autotools.library_paths = []\n\n self._autotools.configure(args=args, configure_dir=self._source_subfolder,\n host=host, build=build)\n return self._autotools\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def build(self):\n self._patch_sources()\n with tools.chdir(os.path.join(self._source_subfolder)):\n self.run(\"./autogen.sh\", win_bash=tools.os_info.is_windows)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"LICENSE*\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(pattern=\"COPYRIGHT\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*\", src=\"cmake\", dst=self._module_subfolder)\n with self._build_context():\n autotools = self._configure_autotools()\n autotools.install()\n\n @property\n def _swiglibdir(self):\n return os.path.join(self.package_folder, \"bin\", \"swiglib\").replace(\"\\\\\", \"/\")\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file(self):\n return \"conan-official-{}-targets.cmake\".format(self.name)\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"SWIG\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SWIG\"\n self.cpp_info.builddirs = [self._module_subfolder]\n self.cpp_info.build_modules = [os.path.join(self._module_subfolder, self._module_file)]\n\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH 
environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/swig/all/conanfile.py"}]} | 2,968 | 315 |
gh_patches_debug_47840 | rasdani/github-patches | git_diff | holoviz__panel-2814 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add light to list of button types
Bokeh now supports a `light` button. Please add this to the list of `button_type`.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/widgets/misc.py`
Content:
```
1 """
2 Miscellaneous widgets which do not fit into the other main categories.
3 """
4 import os
5
6 from base64 import b64encode
7
8 import param
9
10 from ..io.notebook import push
11 from ..io.state import state
12 from ..models import (
13 VideoStream as _BkVideoStream, FileDownload as _BkFileDownload
14 )
15 from .base import Widget
16 from .indicators import Progress # noqa
17
18
19 class VideoStream(Widget):
20
21 format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],
22 doc="""
23 The file format as which the video is returned.""")
24
25 paused = param.Boolean(default=False, doc="""
26 Whether the video is currently paused""")
27
28 timeout = param.Number(default=None, doc="""
29 Interval between snapshots in millisecons""")
30
31 value = param.String(default='', doc="""
32 A base64 representation of the video stream snapshot.""")
33
34 _widget_type = _BkVideoStream
35
36 _rename = {'name': None}
37
38 def snapshot(self):
39 """
40 Triggers a snapshot of the current VideoStream state to sync
41 the widget value.
42 """
43 for ref, (m, _) in self._models.items():
44 m.snapshot = not m.snapshot
45 (self, root, doc, comm) = state._views[ref]
46 if comm and 'embedded' not in root.tags:
47 push(doc, comm)
48
49
50 class FileDownload(Widget):
51
52 auto = param.Boolean(default=True, doc="""
53 Whether to download on the initial click or allow for
54 right-click save as.""")
55
56 button_type = param.ObjectSelector(default='default', objects=[
57 'default', 'primary', 'success', 'warning', 'danger'])
58
59 callback = param.Callable(default=None, doc="""
60 A callable that returns the file path or file-like object.""")
61
62 data = param.String(default=None, doc="""
63 The data being transferred.""")
64
65 embed = param.Boolean(default=False, doc="""
66 Whether to embed the file on initialization.""")
67
68 file = param.Parameter(default=None, doc="""
69 The file, file-like object or file contents to transfer. If
70 the file is not pointing to a file on disk a filename must
71 also be provided.""")
72
73 filename = param.String(default=None, doc="""
74 A filename which will also be the default name when downloading
75 the file.""")
76
77 label = param.String(default="Download file", doc="""
78 The label of the download button""")
79
80 _clicks = param.Integer(default=0)
81
82 _transfers = param.Integer(default=0)
83
84 _mime_types = {
85 'application': {
86 'pdf': 'pdf', 'zip': 'zip'
87 },
88 'audio': {
89 'mp3': 'mp3', 'ogg': 'ogg', 'wav': 'wav', 'webm': 'webm'
90 },
91 'image': {
92 'apng': 'apng', 'bmp': 'bmp', 'gif': 'gif', 'ico': 'x-icon',
93 'cur': 'x-icon', 'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png',
94 'svg': 'svg+xml', 'tif': 'tiff', 'tiff': 'tiff', 'webp': 'webp'
95 },
96 'text': {
97 'css': 'css', 'csv': 'plain;charset=UTF-8', 'js': 'javascript',
98 'html': 'html', 'txt': 'plain;charset=UTF-8'
99 },
100 'video': {
101 'mp4': 'mp4', 'ogg': 'ogg', 'webm': 'webm'
102 }
103 }
104
105 _widget_type = _BkFileDownload
106
107 _rename = {
108 'callback': None, 'embed': None, 'file': None,
109 '_clicks': 'clicks', 'name': 'title'
110 }
111
112 def __init__(self, file=None, **params):
113 self._default_label = 'label' not in params
114 self._synced = False
115 super().__init__(file=file, **params)
116 if self.embed:
117 self._transfer()
118 self._update_label()
119
120 @param.depends('label', watch=True)
121 def _update_default(self):
122 self._default_label = False
123
124 @param.depends('file', watch=True)
125 def _update_filename(self):
126 if isinstance(self.file, str):
127 self.filename = os.path.basename(self.file)
128
129 @param.depends('auto', 'file', 'filename', watch=True)
130 def _update_label(self):
131 label = 'Download' if self._synced or self.auto else 'Transfer'
132 if self._default_label:
133 if self.file is None and self.callback is None:
134 label = 'No file set'
135 else:
136 try:
137 filename = self.filename or os.path.basename(self.file)
138 except TypeError:
139 raise ValueError('Must provide filename if file-like '
140 'object is provided.')
141 label = '%s %s' % (label, filename)
142 self.label = label
143 self._default_label = True
144
145 @param.depends('embed', 'file', 'callback', watch=True)
146 def _update_embed(self):
147 if self.embed:
148 self._transfer()
149
150 @param.depends('_clicks', watch=True)
151 def _transfer(self):
152 if self.file is None and self.callback is None:
153 if self.embed:
154 raise ValueError('Must provide a file or a callback '
155 'if it is to be embedded.')
156 return
157
158 from ..param import ParamFunction
159 if self.callback is None:
160 fileobj = self.file
161 else:
162 fileobj = ParamFunction.eval(self.callback)
163 filename = self.filename
164 if isinstance(fileobj, str):
165 if not os.path.isfile(fileobj):
166 raise FileNotFoundError('File "%s" not found.' % fileobj)
167 with open(fileobj, 'rb') as f:
168 b64 = b64encode(f.read()).decode("utf-8")
169 if filename is None:
170 filename = os.path.basename(fileobj)
171 elif hasattr(fileobj, 'read'):
172 bdata = fileobj.read()
173 if not isinstance(bdata, bytes):
174 bdata = bdata.encode("utf-8")
175 b64 = b64encode(bdata).decode("utf-8")
176 if filename is None:
177 raise ValueError('Must provide filename if file-like '
178 'object is provided.')
179 else:
180 raise ValueError('Cannot transfer unknown object of type %s' %
181 type(fileobj).__name__)
182
183 ext = filename.split('.')[-1]
184 for mtype, subtypes in self._mime_types.items():
185 stype = None
186 if ext in subtypes:
187 stype = subtypes[ext]
188 break
189 if stype is None:
190 mime = 'application/octet-stream'
191 else:
192 mime = '{type}/{subtype}'.format(type=mtype, subtype=stype)
193
194 data = "data:{mime};base64,{b64}".format(mime=mime, b64=b64)
195 self._synced = True
196
197 self.param.set_param(data=data, filename=filename)
198 self._update_label()
199 self._transfers += 1
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py
--- a/panel/widgets/misc.py
+++ b/panel/widgets/misc.py
@@ -54,7 +54,7 @@
right-click save as.""")
button_type = param.ObjectSelector(default='default', objects=[
- 'default', 'primary', 'success', 'warning', 'danger'])
+ 'default', 'primary', 'success', 'warning', 'danger', 'light'])
callback = param.Callable(default=None, doc="""
A callable that returns the file path or file-like object.""")
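As a usage illustration once this patch is applied, the sketch below constructs a `FileDownload` with the newly allowed `button_type='light'`; the callback and filename are made up for the example, and serving via `.servable()` assumes a normal `panel serve` setup:

```python
import io

import panel as pn

pn.extension()


def make_report():
    # Illustrative callback returning a file-like object.
    return io.BytesIO(b"hello from panel")


download = pn.widgets.FileDownload(
    callback=make_report,
    filename="report.txt",
    button_type="light",  # value accepted after the patch above
)
download.servable()
```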
| {"golden_diff": "diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py\n--- a/panel/widgets/misc.py\n+++ b/panel/widgets/misc.py\n@@ -54,7 +54,7 @@\n right-click save as.\"\"\")\n \n button_type = param.ObjectSelector(default='default', objects=[\n- 'default', 'primary', 'success', 'warning', 'danger'])\n+ 'default', 'primary', 'success', 'warning', 'danger', 'light'])\n \n callback = param.Callable(default=None, doc=\"\"\"\n A callable that returns the file path or file-like object.\"\"\")\n", "issue": "Add light to list of button types\nBokeh now supports a `light` button. Please add this to the list of `button_type`.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nMiscellaneous widgets which do not fit into the other main categories.\n\"\"\"\nimport os\n\nfrom base64 import b64encode\n\nimport param\n\nfrom ..io.notebook import push\nfrom ..io.state import state\nfrom ..models import (\n VideoStream as _BkVideoStream, FileDownload as _BkFileDownload\n)\nfrom .base import Widget\nfrom .indicators import Progress # noqa\n\n\nclass VideoStream(Widget):\n\n format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],\n doc=\"\"\"\n The file format as which the video is returned.\"\"\")\n\n paused = param.Boolean(default=False, doc=\"\"\"\n Whether the video is currently paused\"\"\")\n\n timeout = param.Number(default=None, doc=\"\"\"\n Interval between snapshots in millisecons\"\"\")\n\n value = param.String(default='', doc=\"\"\"\n A base64 representation of the video stream snapshot.\"\"\")\n\n _widget_type = _BkVideoStream\n\n _rename = {'name': None}\n\n def snapshot(self):\n \"\"\"\n Triggers a snapshot of the current VideoStream state to sync\n the widget value.\n \"\"\"\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n\n\nclass FileDownload(Widget):\n\n auto = param.Boolean(default=True, doc=\"\"\"\n Whether to download on the initial click or allow for\n right-click save as.\"\"\")\n\n button_type = param.ObjectSelector(default='default', objects=[\n 'default', 'primary', 'success', 'warning', 'danger'])\n\n callback = param.Callable(default=None, doc=\"\"\"\n A callable that returns the file path or file-like object.\"\"\")\n\n data = param.String(default=None, doc=\"\"\"\n The data being transferred.\"\"\")\n\n embed = param.Boolean(default=False, doc=\"\"\"\n Whether to embed the file on initialization.\"\"\")\n\n file = param.Parameter(default=None, doc=\"\"\"\n The file, file-like object or file contents to transfer. 
If\n the file is not pointing to a file on disk a filename must\n also be provided.\"\"\")\n\n filename = param.String(default=None, doc=\"\"\"\n A filename which will also be the default name when downloading\n the file.\"\"\")\n\n label = param.String(default=\"Download file\", doc=\"\"\"\n The label of the download button\"\"\")\n\n _clicks = param.Integer(default=0)\n\n _transfers = param.Integer(default=0)\n\n _mime_types = {\n 'application': {\n 'pdf': 'pdf', 'zip': 'zip'\n },\n 'audio': {\n 'mp3': 'mp3', 'ogg': 'ogg', 'wav': 'wav', 'webm': 'webm'\n },\n 'image': {\n 'apng': 'apng', 'bmp': 'bmp', 'gif': 'gif', 'ico': 'x-icon',\n 'cur': 'x-icon', 'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png',\n 'svg': 'svg+xml', 'tif': 'tiff', 'tiff': 'tiff', 'webp': 'webp'\n },\n 'text': {\n 'css': 'css', 'csv': 'plain;charset=UTF-8', 'js': 'javascript',\n 'html': 'html', 'txt': 'plain;charset=UTF-8'\n },\n 'video': {\n 'mp4': 'mp4', 'ogg': 'ogg', 'webm': 'webm'\n }\n }\n\n _widget_type = _BkFileDownload\n\n _rename = {\n 'callback': None, 'embed': None, 'file': None,\n '_clicks': 'clicks', 'name': 'title'\n }\n\n def __init__(self, file=None, **params):\n self._default_label = 'label' not in params\n self._synced = False\n super().__init__(file=file, **params)\n if self.embed:\n self._transfer()\n self._update_label()\n\n @param.depends('label', watch=True)\n def _update_default(self):\n self._default_label = False\n\n @param.depends('file', watch=True)\n def _update_filename(self):\n if isinstance(self.file, str):\n self.filename = os.path.basename(self.file)\n\n @param.depends('auto', 'file', 'filename', watch=True)\n def _update_label(self):\n label = 'Download' if self._synced or self.auto else 'Transfer'\n if self._default_label:\n if self.file is None and self.callback is None:\n label = 'No file set'\n else:\n try:\n filename = self.filename or os.path.basename(self.file)\n except TypeError:\n raise ValueError('Must provide filename if file-like '\n 'object is provided.')\n label = '%s %s' % (label, filename)\n self.label = label\n self._default_label = True\n\n @param.depends('embed', 'file', 'callback', watch=True)\n def _update_embed(self):\n if self.embed:\n self._transfer()\n\n @param.depends('_clicks', watch=True)\n def _transfer(self):\n if self.file is None and self.callback is None:\n if self.embed:\n raise ValueError('Must provide a file or a callback '\n 'if it is to be embedded.')\n return\n\n from ..param import ParamFunction\n if self.callback is None:\n fileobj = self.file\n else:\n fileobj = ParamFunction.eval(self.callback)\n filename = self.filename\n if isinstance(fileobj, str):\n if not os.path.isfile(fileobj):\n raise FileNotFoundError('File \"%s\" not found.' 
% fileobj)\n with open(fileobj, 'rb') as f:\n b64 = b64encode(f.read()).decode(\"utf-8\")\n if filename is None:\n filename = os.path.basename(fileobj)\n elif hasattr(fileobj, 'read'):\n bdata = fileobj.read()\n if not isinstance(bdata, bytes):\n bdata = bdata.encode(\"utf-8\")\n b64 = b64encode(bdata).decode(\"utf-8\")\n if filename is None:\n raise ValueError('Must provide filename if file-like '\n 'object is provided.')\n else:\n raise ValueError('Cannot transfer unknown object of type %s' %\n type(fileobj).__name__)\n\n ext = filename.split('.')[-1]\n for mtype, subtypes in self._mime_types.items():\n stype = None\n if ext in subtypes:\n stype = subtypes[ext]\n break\n if stype is None:\n mime = 'application/octet-stream'\n else:\n mime = '{type}/{subtype}'.format(type=mtype, subtype=stype)\n\n data = \"data:{mime};base64,{b64}\".format(mime=mime, b64=b64)\n self._synced = True\n\n self.param.set_param(data=data, filename=filename)\n self._update_label()\n self._transfers += 1\n", "path": "panel/widgets/misc.py"}], "after_files": [{"content": "\"\"\"\nMiscellaneous widgets which do not fit into the other main categories.\n\"\"\"\nimport os\n\nfrom base64 import b64encode\n\nimport param\n\nfrom ..io.notebook import push\nfrom ..io.state import state\nfrom ..models import (\n VideoStream as _BkVideoStream, FileDownload as _BkFileDownload\n)\nfrom .base import Widget\nfrom .indicators import Progress # noqa\n\n\nclass VideoStream(Widget):\n\n format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],\n doc=\"\"\"\n The file format as which the video is returned.\"\"\")\n\n paused = param.Boolean(default=False, doc=\"\"\"\n Whether the video is currently paused\"\"\")\n\n timeout = param.Number(default=None, doc=\"\"\"\n Interval between snapshots in millisecons\"\"\")\n\n value = param.String(default='', doc=\"\"\"\n A base64 representation of the video stream snapshot.\"\"\")\n\n _widget_type = _BkVideoStream\n\n _rename = {'name': None}\n\n def snapshot(self):\n \"\"\"\n Triggers a snapshot of the current VideoStream state to sync\n the widget value.\n \"\"\"\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n\n\nclass FileDownload(Widget):\n\n auto = param.Boolean(default=True, doc=\"\"\"\n Whether to download on the initial click or allow for\n right-click save as.\"\"\")\n\n button_type = param.ObjectSelector(default='default', objects=[\n 'default', 'primary', 'success', 'warning', 'danger', 'light'])\n\n callback = param.Callable(default=None, doc=\"\"\"\n A callable that returns the file path or file-like object.\"\"\")\n\n data = param.String(default=None, doc=\"\"\"\n The data being transferred.\"\"\")\n\n embed = param.Boolean(default=False, doc=\"\"\"\n Whether to embed the file on initialization.\"\"\")\n\n file = param.Parameter(default=None, doc=\"\"\"\n The file, file-like object or file contents to transfer. 
If\n the file is not pointing to a file on disk a filename must\n also be provided.\"\"\")\n\n filename = param.String(default=None, doc=\"\"\"\n A filename which will also be the default name when downloading\n the file.\"\"\")\n\n label = param.String(default=\"Download file\", doc=\"\"\"\n The label of the download button\"\"\")\n\n _clicks = param.Integer(default=0)\n\n _transfers = param.Integer(default=0)\n\n _mime_types = {\n 'application': {\n 'pdf': 'pdf', 'zip': 'zip'\n },\n 'audio': {\n 'mp3': 'mp3', 'ogg': 'ogg', 'wav': 'wav', 'webm': 'webm'\n },\n 'image': {\n 'apng': 'apng', 'bmp': 'bmp', 'gif': 'gif', 'ico': 'x-icon',\n 'cur': 'x-icon', 'jpg': 'jpeg', 'jpeg': 'jpeg', 'png': 'png',\n 'svg': 'svg+xml', 'tif': 'tiff', 'tiff': 'tiff', 'webp': 'webp'\n },\n 'text': {\n 'css': 'css', 'csv': 'plain;charset=UTF-8', 'js': 'javascript',\n 'html': 'html', 'txt': 'plain;charset=UTF-8'\n },\n 'video': {\n 'mp4': 'mp4', 'ogg': 'ogg', 'webm': 'webm'\n }\n }\n\n _widget_type = _BkFileDownload\n\n _rename = {\n 'callback': None, 'embed': None, 'file': None,\n '_clicks': 'clicks', 'name': 'title'\n }\n\n def __init__(self, file=None, **params):\n self._default_label = 'label' not in params\n self._synced = False\n super().__init__(file=file, **params)\n if self.embed:\n self._transfer()\n self._update_label()\n\n @param.depends('label', watch=True)\n def _update_default(self):\n self._default_label = False\n\n @param.depends('file', watch=True)\n def _update_filename(self):\n if isinstance(self.file, str):\n self.filename = os.path.basename(self.file)\n\n @param.depends('auto', 'file', 'filename', watch=True)\n def _update_label(self):\n label = 'Download' if self._synced or self.auto else 'Transfer'\n if self._default_label:\n if self.file is None and self.callback is None:\n label = 'No file set'\n else:\n try:\n filename = self.filename or os.path.basename(self.file)\n except TypeError:\n raise ValueError('Must provide filename if file-like '\n 'object is provided.')\n label = '%s %s' % (label, filename)\n self.label = label\n self._default_label = True\n\n @param.depends('embed', 'file', 'callback', watch=True)\n def _update_embed(self):\n if self.embed:\n self._transfer()\n\n @param.depends('_clicks', watch=True)\n def _transfer(self):\n if self.file is None and self.callback is None:\n if self.embed:\n raise ValueError('Must provide a file or a callback '\n 'if it is to be embedded.')\n return\n\n from ..param import ParamFunction\n if self.callback is None:\n fileobj = self.file\n else:\n fileobj = ParamFunction.eval(self.callback)\n filename = self.filename\n if isinstance(fileobj, str):\n if not os.path.isfile(fileobj):\n raise FileNotFoundError('File \"%s\" not found.' 
% fileobj)\n with open(fileobj, 'rb') as f:\n b64 = b64encode(f.read()).decode(\"utf-8\")\n if filename is None:\n filename = os.path.basename(fileobj)\n elif hasattr(fileobj, 'read'):\n bdata = fileobj.read()\n if not isinstance(bdata, bytes):\n bdata = bdata.encode(\"utf-8\")\n b64 = b64encode(bdata).decode(\"utf-8\")\n if filename is None:\n raise ValueError('Must provide filename if file-like '\n 'object is provided.')\n else:\n raise ValueError('Cannot transfer unknown object of type %s' %\n type(fileobj).__name__)\n\n ext = filename.split('.')[-1]\n for mtype, subtypes in self._mime_types.items():\n stype = None\n if ext in subtypes:\n stype = subtypes[ext]\n break\n if stype is None:\n mime = 'application/octet-stream'\n else:\n mime = '{type}/{subtype}'.format(type=mtype, subtype=stype)\n\n data = \"data:{mime};base64,{b64}\".format(mime=mime, b64=b64)\n self._synced = True\n\n self.param.set_param(data=data, filename=filename)\n self._update_label()\n self._transfers += 1\n", "path": "panel/widgets/misc.py"}]} | 2,425 | 129 |
gh_patches_debug_97 | rasdani/github-patches | git_diff | uccser__cs-unplugged-434 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check desired orientation of binary to alphabet resource
Currently the resource is displayed in portrait, but half the page is unused. It may be better to switch to landscape, which will increase the size of the table cells.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/resources/views/binary_to_alphabet.py`
Content:
```
1 """Module for generating Binary to Alphabet resource."""
2
3 from PIL import Image, ImageDraw, ImageFont
4 from utils.retrieve_query_parameter import retrieve_query_parameter
5
6
7 def resource_image(request, resource):
8 """Create a image for Binary to Alphabet resource.
9
10 Args:
11 request: HTTP request object
12 resource: Object of resource data.
13
14 Returns:
15 A Pillow image object.
16 """
17 # Retrieve relevant image
18 parameter_options = valid_options()
19 worksheet_version = retrieve_query_parameter(request, "worksheet_version", parameter_options["worksheet_version"])
20 if worksheet_version == "student":
21 image_path = "static/img/resources/binary-to-alphabet/table.png"
22 else:
23 image_path = "static/img/resources/binary-to-alphabet/table-teacher.png"
24 image = Image.open(image_path)
25 draw = ImageDraw.Draw(image)
26
27 font_size = 30
28 font_path = "static/fonts/PatrickHand-Regular.ttf"
29 font = ImageFont.truetype(font_path, font_size)
30
31 # Draw headings
32 column_headings = ["Base 10", "Binary", "Letter"]
33 heading_coord_x = 18
34 heading_coord_y = 6
35
36 i = 0
37 while i < 9: # 9 = number of columns
38
39 if i % 3 == 0:
40 text = str(column_headings[0])
41 elif i % 3 == 1:
42 text = str(column_headings[1])
43 else:
44 text = str(column_headings[2])
45
46 draw.text(
47 (heading_coord_x, heading_coord_y),
48 text,
49 font=font,
50 fill="#000"
51 )
52
53 heading_coord_x += 113
54
55 i += 1
56
57 # Draw numbers
58 # Column data: (min number, max number), x coord
59 columns_data = [((0, 9), 58), ((9, 18), 397), ((18, 27), 736)]
60
61 for column_set in columns_data:
62 start, end = column_set[0]
63 base_coord_x = column_set[1]
64 base_coord_y = 75
65
66 for number in range(start, end):
67 text = str(number)
68 text_width, text_height = draw.textsize(text, font=font)
69 coord_x = base_coord_x - (text_width / 2)
70 coord_y = base_coord_y - (text_height / 2)
71
72 draw.text(
73 (coord_x, coord_y),
74 text,
75 font=font,
76 fill="#000"
77 )
78
79 base_coord_y += 54
80
81 return image
82
83
84 def subtitle(request, resource):
85 """Return the subtitle string of the resource.
86
87 Used after the resource name in the filename, and
88 also on the resource image.
89
90 Args:
91 request: HTTP request object
92 resource: Object of resource data.
93
94 Returns:
95 text for subtitle (string)
96 """
97 text = "{} - {}".format(
98 retrieve_query_parameter(request, "worksheet_version"),
99 retrieve_query_parameter(request, "paper_size")
100 )
101 return text
102
103
104 def valid_options():
105 """Provide dictionary of all valid parameters.
106
107 This excludes the header text parameter.
108
109 Returns:
110 All valid options (dict).
111 """
112 return {
113 "worksheet_version": ["student", "teacher"],
114 "paper_size": ["a4", "letter"]
115 }
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/resources/views/binary_to_alphabet.py b/csunplugged/resources/views/binary_to_alphabet.py
--- a/csunplugged/resources/views/binary_to_alphabet.py
+++ b/csunplugged/resources/views/binary_to_alphabet.py
@@ -78,6 +78,7 @@
base_coord_y += 54
+ image = image.rotate(90, expand=True)
return image
| {"golden_diff": "diff --git a/csunplugged/resources/views/binary_to_alphabet.py b/csunplugged/resources/views/binary_to_alphabet.py\n--- a/csunplugged/resources/views/binary_to_alphabet.py\n+++ b/csunplugged/resources/views/binary_to_alphabet.py\n@@ -78,6 +78,7 @@\n \n base_coord_y += 54\n \n+ image = image.rotate(90, expand=True)\n return image\n", "issue": "Check desired orientation of binary to alphabet resource\nCurrently is displayed in portrait but half the page is unused. May be better to switch to landscape which will increase the size of table cells.\n", "before_files": [{"content": "\"\"\"Module for generating Binary to Alphabet resource.\"\"\"\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom utils.retrieve_query_parameter import retrieve_query_parameter\n\n\ndef resource_image(request, resource):\n \"\"\"Create a image for Binary to Alphabet resource.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n A Pillow image object.\n \"\"\"\n # Retrieve relevant image\n parameter_options = valid_options()\n worksheet_version = retrieve_query_parameter(request, \"worksheet_version\", parameter_options[\"worksheet_version\"])\n if worksheet_version == \"student\":\n image_path = \"static/img/resources/binary-to-alphabet/table.png\"\n else:\n image_path = \"static/img/resources/binary-to-alphabet/table-teacher.png\"\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n font_size = 30\n font_path = \"static/fonts/PatrickHand-Regular.ttf\"\n font = ImageFont.truetype(font_path, font_size)\n\n # Draw headings\n column_headings = [\"Base 10\", \"Binary\", \"Letter\"]\n heading_coord_x = 18\n heading_coord_y = 6\n\n i = 0\n while i < 9: # 9 = number of columns\n\n if i % 3 == 0:\n text = str(column_headings[0])\n elif i % 3 == 1:\n text = str(column_headings[1])\n else:\n text = str(column_headings[2])\n\n draw.text(\n (heading_coord_x, heading_coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n heading_coord_x += 113\n\n i += 1\n\n # Draw numbers\n # Column data: (min number, max number), x coord\n columns_data = [((0, 9), 58), ((9, 18), 397), ((18, 27), 736)]\n\n for column_set in columns_data:\n start, end = column_set[0]\n base_coord_x = column_set[1]\n base_coord_y = 75\n\n for number in range(start, end):\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n base_coord_y += 54\n\n return image\n\n\ndef subtitle(request, resource):\n \"\"\"Return the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n text for subtitle (string)\n \"\"\"\n text = \"{} - {}\".format(\n retrieve_query_parameter(request, \"worksheet_version\"),\n retrieve_query_parameter(request, \"paper_size\")\n )\n return text\n\n\ndef valid_options():\n \"\"\"Provide dictionary of all valid parameters.\n\n This excludes the header text parameter.\n\n Returns:\n All valid options (dict).\n \"\"\"\n return {\n \"worksheet_version\": [\"student\", \"teacher\"],\n \"paper_size\": [\"a4\", \"letter\"]\n }\n", "path": "csunplugged/resources/views/binary_to_alphabet.py"}], "after_files": [{"content": "\"\"\"Module for generating Binary to Alphabet resource.\"\"\"\n\nfrom PIL import Image, ImageDraw, ImageFont\nfrom 
utils.retrieve_query_parameter import retrieve_query_parameter\n\n\ndef resource_image(request, resource):\n \"\"\"Create a image for Binary to Alphabet resource.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n A Pillow image object.\n \"\"\"\n # Retrieve relevant image\n parameter_options = valid_options()\n worksheet_version = retrieve_query_parameter(request, \"worksheet_version\", parameter_options[\"worksheet_version\"])\n if worksheet_version == \"student\":\n image_path = \"static/img/resources/binary-to-alphabet/table.png\"\n else:\n image_path = \"static/img/resources/binary-to-alphabet/table-teacher.png\"\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n font_size = 30\n font_path = \"static/fonts/PatrickHand-Regular.ttf\"\n font = ImageFont.truetype(font_path, font_size)\n\n # Draw headings\n column_headings = [\"Base 10\", \"Binary\", \"Letter\"]\n heading_coord_x = 18\n heading_coord_y = 6\n\n i = 0\n while i < 9: # 9 = number of columns\n\n if i % 3 == 0:\n text = str(column_headings[0])\n elif i % 3 == 1:\n text = str(column_headings[1])\n else:\n text = str(column_headings[2])\n\n draw.text(\n (heading_coord_x, heading_coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n heading_coord_x += 113\n\n i += 1\n\n # Draw numbers\n # Column data: (min number, max number), x coord\n columns_data = [((0, 9), 58), ((9, 18), 397), ((18, 27), 736)]\n\n for column_set in columns_data:\n start, end = column_set[0]\n base_coord_x = column_set[1]\n base_coord_y = 75\n\n for number in range(start, end):\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill=\"#000\"\n )\n\n base_coord_y += 54\n\n image = image.rotate(90, expand=True)\n return image\n\n\ndef subtitle(request, resource):\n \"\"\"Return the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n\n Returns:\n text for subtitle (string)\n \"\"\"\n text = \"{} - {}\".format(\n retrieve_query_parameter(request, \"worksheet_version\"),\n retrieve_query_parameter(request, \"paper_size\")\n )\n return text\n\n\ndef valid_options():\n \"\"\"Provide dictionary of all valid parameters.\n\n This excludes the header text parameter.\n\n Returns:\n All valid options (dict).\n \"\"\"\n return {\n \"worksheet_version\": [\"student\", \"teacher\"],\n \"paper_size\": [\"a4\", \"letter\"]\n }\n", "path": "csunplugged/resources/views/binary_to_alphabet.py"}]} | 1,296 | 99 |
gh_patches_debug_25899 | rasdani/github-patches | git_diff | ContinualAI__avalanche-138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add MAC metric
It would also be nice to have a [MAC](https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation) metric. It's difficult to compute in native PyTorch for every possible layer, but it would be a nice, hardware-independent metric.
Any idea on how to do this easily in PyTorch?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avalanche/evaluation/metrics.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 ################################################################################
5 # Copyright (c) 2020 ContinualAI Research #
6 # Copyrights licensed under the CC BY 4.0 License. #
7 # See the accompanying LICENSE file for terms. #
8 # #
9 # Date: 1-05-2020 #
10 # Author(s): Vincenzo Lomonaco #
11 # E-mail: [email protected] #
12 # Website: clair.continualai.org #
13 ################################################################################
14
15 """ Common metrics for CL. """
16
17 # Python 2-3 compatible
18 from __future__ import print_function
19 from __future__ import division
20 from __future__ import absolute_import
21
22 import torch
23 import numpy as np
24 import os
25 import psutil
26 from .utils import bytes2human
27 import matplotlib
28 matplotlib.use('Agg')
29 import matplotlib.pyplot as plt
30 from sklearn.metrics import confusion_matrix
31 import PIL.Image
32 from torchvision.transforms import ToTensor
33 import io
34 import queue
35 import subprocess
36 import threading
37 import time
38
39
40 class GPUUsage:
41 """
42 GPU usage metric measured as average usage percentage over time.
43
44 :param gpu_id: GPU device ID
45 :param every: time delay (in seconds) between measurements
46 """
47
48 def __init__(self, gpu_id, every=10):
49 # 'nvidia-smi --loop=1 --query-gpu=utilization.gpu --format=csv'
50 cmd = ['nvidia-smi', f'--loop={every}', '--query-gpu=utilization.gpu',
51 '--format=csv', f'--id={gpu_id}']
52 # something long running
53 try:
54 self.p = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE)
55 except NotADirectoryError:
56 raise ValueError('No GPU available: nvidia-smi command not found.')
57
58 self.lines_queue = queue.Queue()
59 self.read_thread = threading.Thread(target=GPUUsage.push_lines,
60 args=(self,), daemon=True)
61 self.read_thread.start()
62
63 self.n_measurements = 0
64 self.avg_usage = 0
65
66 def compute(self, t):
67 """
68 Compute CPU usage measured in seconds.
69
70 :param t: task id
71 :return: float: average GPU usage
72 """
73 while not self.lines_queue.empty():
74 line = self.lines_queue.get()
75 if line[0] == 'u': # skip first line 'utilization.gpu [%]'
76 continue
77 usage = int(line.strip()[:-1])
78 self.n_measurements += 1
79 self.avg_usage += usage
80
81 if self.n_measurements > 0:
82 self.avg_usage /= float(self.n_measurements)
83 print(f"Train Task {t} - average GPU usage: {self.avg_usage}%")
84
85 return self.avg_usage
86
87 def push_lines(self):
88 while True:
89 line = self.p.stdout.readline()
90 self.lines_queue.put(line.decode('ascii'))
91
92 def close(self):
93 self.p.terminate()
94
95
96 class CPUUsage:
97 """
98 CPU usage metric measured in seconds.
99 """
100
101 def compute(self, t):
102 """
103 Compute CPU usage measured in seconds.
104
105 :param t: task id
106 :return: tuple (float, float): (user CPU time, system CPU time)
107 """
108 p = psutil.Process(os.getpid())
109 times = p.cpu_times()
110 user, sys = times.user, times.system
111 print("Train Task {:} - CPU usage: user {} system {}"
112 .format(t, user, sys))
113 return user, sys
114
115
116 class ACC(object):
117
118 def __init__(self, num_class=None):
119 """
120 Accuracy metrics should be called for each test set
121
122 :param num_class (int, optional): number of classes in the test_set
123 (useful in case the test_set does not cover all the classes
124 in the train_set).
125 """
126
127 self.num_class = num_class
128
129 def compute(self, y, y_hat):
130 """
131 :param y (tensor list or tensor): true labels for each mini-batch
132 :param y_hat (tensor list or tensor): predicted labels for each
133 mini-batch
134
135 :return acc (float): average accuracy for the test set
136 :return accs (float list): accuracy for each class in the training set
137 """
138
139 assert type(y) == type(y_hat), "Predicted and target labels must be \
140 both list (of tensors) or tensors"
141
142 # manage list of tensors by default
143 if not (isinstance(y, list) or isinstance(y, tuple)):
144 y = [y]
145 y_hat = [y_hat]
146
147 if self.num_class is None:
148 num_class = int(max([torch.max(el).item() + 1 for el in y]))
149 else:
150 num_class = self.num_class
151
152 hits_per_class = [0] * num_class
153 pattern_per_class = [0] * num_class
154
155 correct_cnt = 0.
156
157 for true_y, y_pred in zip(y, y_hat):
158
159 correct_cnt += (true_y == y_pred).sum().float()
160
161 for label in true_y:
162 pattern_per_class[int(label)] += 1
163
164 for i, pred in enumerate(y_pred):
165 if pred == true_y[i]:
166 hits_per_class[int(pred)] += 1
167
168 accs = np.zeros(len(hits_per_class), dtype=np.float)
169 hits_per_class = np.asarray(hits_per_class)
170 pattern_per_class = np.asarray(pattern_per_class).astype(float)
171
172 # np.divide prevents the true divide warning from showing up
173 # when one or more elements of pattern_per_class are zero
174 # Also, those elements will be 0 instead of NaN
175 np.divide(hits_per_class, pattern_per_class,
176 where=pattern_per_class != 0, out=accs)
177 accs = torch.from_numpy(accs)
178
179 acc = correct_cnt / float(y_hat[0].size(0) * len(y_hat))
180
181 return acc, accs
182
183
184 class CF(object):
185
186 def __init__(self, num_class=None):
187 """
188 Catastrophic Forgetting metric.
189 """
190
191 self.best_acc = {}
192 self.acc_metric = ACC(num_class=num_class)
193
194 def compute(self, y, y_hat, train_t, test_t):
195 """
196 :param y (tensor list or tensor): true labels for each mini-batch
197 :param y_hat (tensor list or tensor): predicted labels for each
198 mini-batch
199 """
200
201 acc, accs = self.acc_metric.compute(y, y_hat)
202 if train_t not in self.best_acc.keys() and train_t == test_t:
203 self.best_acc[train_t] = acc
204
205 if test_t not in self.best_acc.keys():
206 cf = np.NAN
207 else:
208 cf = self.best_acc[test_t] - acc
209
210 print("Task {:} - CF: {:.4f}"
211 .format(test_t, cf))
212
213 return cf
214
215
216 class RAMU(object):
217
218 def __init__(self):
219 """
220 RAM Usage metric.
221 """
222
223 def compute(self, t):
224
225 process = psutil.Process(os.getpid())
226 mem = process.memory_info().rss # in bytes
227
228 print("Train Task {:} - MU: {:.3f} GB"
229 .format(t, mem / (1024 * 1024 * 1024)))
230
231 return mem / (1024 * 1024 * 1024)
232
233
234 class DiskUsage(object):
235
236 def __init__(self, path_to_monitor=None, disk_io=False):
237 """
238 :param path_to_monitor (string): a valid path to folder.
239 If None, the current working directory is used.
240 :param disk_io: True to enable monitoring of I/O operations on disk.
241 WARNING: Reports are system-wide, grouping all disks.
242 """
243
244 if path_to_monitor is not None:
245 self.path_to_monitor = path_to_monitor
246 else:
247 self.path_to_monitor = os.getcwd()
248
249 self.disk_io = disk_io
250
251 def compute(self, t):
252 """
253 :param t: task id
254
255 :return usage, io (tuple): io is None if disk_io is False
256 """
257
258 usage = psutil.disk_usage(self.path_to_monitor)
259
260 total, used, free, percent = \
261 bytes2human(usage.total), \
262 bytes2human(usage.used), \
263 bytes2human(usage.free), \
264 usage.percent
265
266 print("Disk usage for {:}".format(self.path_to_monitor))
267 print("Task {:} - disk percent: {:}%, \
268 disk total: {:}, \
269 disk used: {:}, \
270 disk free: {:}"
271 .format(t, percent, total, used, free))
272
273 if self.disk_io:
274 io = psutil.disk_io_counters()
275 read_count, write_count = \
276 io.read_count, \
277 io.write_count
278 read_bytes, write_bytes = \
279 bytes2human(io.read_bytes), \
280 bytes2human(io.write_bytes)
281
282 print("Task {:} - read count: {:}, \
283 write count: {:}, \
284 bytes read: {:}, \
285 bytes written: {:}"
286 .format(t, read_count, write_count, read_bytes, write_bytes))
287 else:
288 io = None
289
290 return usage, io
291
292
293 class CM(object):
294
295 def __init__(self, num_class=None):
296 """
297 Confusion Matrix computation
298 """
299 self.num_class = num_class
300
301 def compute(self, y, y_hat, normalize=False):
302 """
303 :param y (tensor or tensors list): true labels for each minibatch
304 :param y_hat (tensor or tensors list): predicted labels for each
305 minibatch
306 """
307
308 assert type(y) == type(y_hat), "Predicted and target labels must be \
309 both list (of tensors) or tensors"
310
311 # manage list of tensors by default
312 if not (isinstance(y, list) or isinstance(y, tuple)):
313 y = [y]
314 y_hat = [y_hat]
315
316 if self.num_class is None:
317 num_class = int(max([torch.max(el).item() + 1 for el in y]))
318 else:
319 num_class = self.num_class
320
321 cmap = plt.cm.Blues
322
323 cm = np.zeros((num_class, num_class))
324 for i, (el, el_hat) in enumerate(zip(y, y_hat)):
325 # Compute confusion matrix
326 cm += confusion_matrix(
327 el.numpy(), el_hat.numpy(),
328 labels=list(range(num_class)))
329
330 # Only use the labels that appear in the data
331 classes = [str(i) for i in range(num_class)]
332 if normalize:
333 cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
334 print("Normalized confusion matrix")
335 else:
336 print('Confusion matrix, without normalization')
337
338 fig, ax = plt.subplots()
339 im = ax.matshow(cm, interpolation='nearest', cmap=cmap)
340 ax.figure.colorbar(im, ax=ax)
341 # We want to show all ticks...
342 ax.set(xticks=np.arange(cm.shape[1]),
343 yticks=np.arange(cm.shape[0]),
344 # ... and label them with the respective list entries
345 xticklabels=classes, yticklabels=classes,
346 title=None,
347 ylabel='True label',
348 xlabel='Predicted label')
349
350 # Rotate the tick labels and set their alignment.
351 plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
352 rotation_mode="anchor")
353
354 fig.tight_layout()
355
356 buf = io.BytesIO()
357 fig.savefig(buf, format='jpg', dpi=50)
358 plt.close(fig)
359
360 buf.seek(0)
361 image = PIL.Image.open(buf)
362 image = ToTensor()(image)
363 return image
364
365
366 class TimeUsage:
367
368 """
369 Time usage metric measured in seconds.
370 """
371
372 def __init__(self):
373 self._start_time = time.perf_counter()
374
375 def compute(self, t):
376 elapsed_time = time.perf_counter() - self._start_time
377 print(f"Elapsed time: {elapsed_time:0.4f} seconds")
378
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/avalanche/evaluation/metrics.py b/avalanche/evaluation/metrics.py
--- a/avalanche/evaluation/metrics.py
+++ b/avalanche/evaluation/metrics.py
@@ -37,6 +37,48 @@
import time
+class MAC:
+ """
+ Multiply-and-accumulate metric. Approximately measure the computational
+ cost of a model in a hardware-independent way by computing the number
+ of multiplications. Currently supports only Linear or Conv2d modules.
+ Other operations are ignored.
+ """
+ def __init__(self):
+ self.hooks = []
+ self._compute_cost = 0
+
+ def compute(self, model, dummy_input):
+ for mod in model.modules():
+ if self.is_recognized_module(mod):
+ def foo(a, b, c):
+ return self.update_compute_cost(a, b, c)
+ handle = mod.register_forward_hook(foo)
+ self.hooks.append(handle)
+
+ self._compute_cost = 0
+ model(dummy_input) # trigger forward hooks
+
+ for handle in self.hooks:
+ handle.remove()
+ self.hooks = []
+ return self._compute_cost
+
+ def update_compute_cost(self, module, input, output):
+ modname = module.__class__.__name__
+ if modname == 'Linear':
+ self._compute_cost += input[0].shape[1] * output.shape[1]
+ elif modname == 'Conv2d':
+ n, cout, hout, wout = output.shape # Batch, Channels, Height, Width
+ ksize = module.kernel_size[0] * module.kernel_size[1]
+ self._compute_cost += cout * hout * wout * (ksize)
+ print(self._compute_cost)
+
+ def is_recognized_module(self, mod):
+ modname = mod.__class__.__name__
+ return modname == 'Linear' or modname == 'Conv2d'
+
+
class GPUUsage:
"""
GPU usage metric measured as average usage percentage over time.
| {"golden_diff": "diff --git a/avalanche/evaluation/metrics.py b/avalanche/evaluation/metrics.py\n--- a/avalanche/evaluation/metrics.py\n+++ b/avalanche/evaluation/metrics.py\n@@ -37,6 +37,48 @@\n import time\n \n \n+class MAC:\n+ \"\"\"\n+ Multiply-and-accumulate metric. Approximately measure the computational\n+ cost of a model in a hardware-independent way by computing the number\n+ of multiplications. Currently supports only Linear or Conv2d modules.\n+ Other operations are ignored.\n+ \"\"\"\n+ def __init__(self):\n+ self.hooks = []\n+ self._compute_cost = 0\n+\n+ def compute(self, model, dummy_input):\n+ for mod in model.modules():\n+ if self.is_recognized_module(mod):\n+ def foo(a, b, c):\n+ return self.update_compute_cost(a, b, c)\n+ handle = mod.register_forward_hook(foo)\n+ self.hooks.append(handle)\n+\n+ self._compute_cost = 0\n+ model(dummy_input) # trigger forward hooks\n+\n+ for handle in self.hooks:\n+ handle.remove()\n+ self.hooks = []\n+ return self._compute_cost\n+\n+ def update_compute_cost(self, module, input, output):\n+ modname = module.__class__.__name__\n+ if modname == 'Linear':\n+ self._compute_cost += input[0].shape[1] * output.shape[1]\n+ elif modname == 'Conv2d':\n+ n, cout, hout, wout = output.shape # Batch, Channels, Height, Width\n+ ksize = module.kernel_size[0] * module.kernel_size[1]\n+ self._compute_cost += cout * hout * wout * (ksize)\n+ print(self._compute_cost)\n+\n+ def is_recognized_module(self, mod):\n+ modname = mod.__class__.__name__\n+ return modname == 'Linear' or modname == 'Conv2d'\n+\n+\n class GPUUsage:\n \"\"\"\n GPU usage metric measured as average usage percentage over time.\n", "issue": "Add MAC metric\nIt would be nice to have also a [MAC ](https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation) metric. It's difficult to compute it in native pytorch for every possible layer, but it would be a nice, hardware independed metric.\r\n\r\nAny idea on how to do this easily in PyTorch?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n################################################################################\n# Copyright (c) 2020 ContinualAI Research #\n# Copyrights licensed under the CC BY 4.0 License. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 1-05-2020 #\n# Author(s): Vincenzo Lomonaco #\n# E-mail: [email protected] #\n# Website: clair.continualai.org #\n################################################################################\n\n\"\"\" Common metrics for CL. 
\"\"\"\n\n# Python 2-3 compatible\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport torch\nimport numpy as np\nimport os\nimport psutil\nfrom .utils import bytes2human\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport PIL.Image\nfrom torchvision.transforms import ToTensor\nimport io\nimport queue\nimport subprocess\nimport threading\nimport time\n\n\nclass GPUUsage:\n \"\"\"\n GPU usage metric measured as average usage percentage over time.\n\n :param gpu_id: GPU device ID\n :param every: time delay (in seconds) between measurements\n \"\"\"\n\n def __init__(self, gpu_id, every=10):\n # 'nvidia-smi --loop=1 --query-gpu=utilization.gpu --format=csv'\n cmd = ['nvidia-smi', f'--loop={every}', '--query-gpu=utilization.gpu',\n '--format=csv', f'--id={gpu_id}']\n # something long running\n try:\n self.p = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE)\n except NotADirectoryError:\n raise ValueError('No GPU available: nvidia-smi command not found.')\n\n self.lines_queue = queue.Queue()\n self.read_thread = threading.Thread(target=GPUUsage.push_lines,\n args=(self,), daemon=True)\n self.read_thread.start()\n\n self.n_measurements = 0\n self.avg_usage = 0\n\n def compute(self, t):\n \"\"\"\n Compute CPU usage measured in seconds.\n\n :param t: task id\n :return: float: average GPU usage\n \"\"\"\n while not self.lines_queue.empty():\n line = self.lines_queue.get()\n if line[0] == 'u': # skip first line 'utilization.gpu [%]'\n continue\n usage = int(line.strip()[:-1])\n self.n_measurements += 1\n self.avg_usage += usage\n\n if self.n_measurements > 0:\n self.avg_usage /= float(self.n_measurements)\n print(f\"Train Task {t} - average GPU usage: {self.avg_usage}%\")\n\n return self.avg_usage\n\n def push_lines(self):\n while True:\n line = self.p.stdout.readline()\n self.lines_queue.put(line.decode('ascii'))\n\n def close(self):\n self.p.terminate()\n\n\nclass CPUUsage:\n \"\"\"\n CPU usage metric measured in seconds.\n \"\"\"\n\n def compute(self, t):\n \"\"\"\n Compute CPU usage measured in seconds.\n\n :param t: task id\n :return: tuple (float, float): (user CPU time, system CPU time)\n \"\"\"\n p = psutil.Process(os.getpid())\n times = p.cpu_times()\n user, sys = times.user, times.system\n print(\"Train Task {:} - CPU usage: user {} system {}\"\n .format(t, user, sys))\n return user, sys\n\n\nclass ACC(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Accuracy metrics should be called for each test set\n\n :param num_class (int, optional): number of classes in the test_set\n (useful in case the test_set does not cover all the classes\n in the train_set).\n \"\"\"\n\n self.num_class = num_class\n\n def compute(self, y, y_hat):\n \"\"\"\n :param y (tensor list or tensor): true labels for each mini-batch\n :param y_hat (tensor list or tensor): predicted labels for each\n mini-batch\n\n :return acc (float): average accuracy for the test set\n :return accs (float list): accuracy for each class in the training set \n \"\"\"\n \n assert type(y) == type(y_hat), \"Predicted and target labels must be \\\n both list (of tensors) or tensors\"\n \n # manage list of tensors by default\n if not (isinstance(y, list) or isinstance(y, tuple)):\n y = [y]\n y_hat = [y_hat]\n\n if self.num_class is None:\n num_class = int(max([torch.max(el).item() + 1 for el in y]))\n else:\n num_class = self.num_class\n\n hits_per_class = [0] * num_class\n 
pattern_per_class = [0] * num_class\n\n correct_cnt = 0.\n\n for true_y, y_pred in zip(y, y_hat):\n \n correct_cnt += (true_y == y_pred).sum().float()\n\n for label in true_y:\n pattern_per_class[int(label)] += 1\n\n for i, pred in enumerate(y_pred):\n if pred == true_y[i]:\n hits_per_class[int(pred)] += 1\n\n accs = np.zeros(len(hits_per_class), dtype=np.float)\n hits_per_class = np.asarray(hits_per_class)\n pattern_per_class = np.asarray(pattern_per_class).astype(float)\n\n # np.divide prevents the true divide warning from showing up\n # when one or more elements of pattern_per_class are zero\n # Also, those elements will be 0 instead of NaN\n np.divide(hits_per_class, pattern_per_class,\n where=pattern_per_class != 0, out=accs)\n accs = torch.from_numpy(accs)\n\n acc = correct_cnt / float(y_hat[0].size(0) * len(y_hat))\n\n return acc, accs\n\n\nclass CF(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Catastrophic Forgetting metric.\n \"\"\"\n\n self.best_acc = {}\n self.acc_metric = ACC(num_class=num_class)\n\n def compute(self, y, y_hat, train_t, test_t):\n \"\"\"\n :param y (tensor list or tensor): true labels for each mini-batch\n :param y_hat (tensor list or tensor): predicted labels for each\n mini-batch\n \"\"\"\n\n acc, accs = self.acc_metric.compute(y, y_hat)\n if train_t not in self.best_acc.keys() and train_t == test_t:\n self.best_acc[train_t] = acc\n\n if test_t not in self.best_acc.keys():\n cf = np.NAN\n else:\n cf = self.best_acc[test_t] - acc\n\n print(\"Task {:} - CF: {:.4f}\"\n .format(test_t, cf))\n\n return cf\n\n\nclass RAMU(object):\n\n def __init__(self):\n \"\"\"\n RAM Usage metric.\n \"\"\"\n\n def compute(self, t):\n\n process = psutil.Process(os.getpid())\n mem = process.memory_info().rss # in bytes\n\n print(\"Train Task {:} - MU: {:.3f} GB\"\n .format(t, mem / (1024 * 1024 * 1024)))\n\n return mem / (1024 * 1024 * 1024)\n\n\nclass DiskUsage(object):\n\n def __init__(self, path_to_monitor=None, disk_io=False):\n \"\"\"\n :param path_to_monitor (string): a valid path to folder.\n If None, the current working directory is used.\n :param disk_io: True to enable monitoring of I/O operations on disk.\n WARNING: Reports are system-wide, grouping all disks.\n \"\"\"\n\n if path_to_monitor is not None:\n self.path_to_monitor = path_to_monitor\n else:\n self.path_to_monitor = os.getcwd()\n\n self.disk_io = disk_io\n\n def compute(self, t):\n \"\"\"\n :param t: task id\n\n :return usage, io (tuple): io is None if disk_io is False\n \"\"\"\n\n usage = psutil.disk_usage(self.path_to_monitor)\n\n total, used, free, percent = \\\n bytes2human(usage.total), \\\n bytes2human(usage.used), \\\n bytes2human(usage.free), \\\n usage.percent\n\n print(\"Disk usage for {:}\".format(self.path_to_monitor))\n print(\"Task {:} - disk percent: {:}%, \\\n disk total: {:}, \\\n disk used: {:}, \\\n disk free: {:}\"\n .format(t, percent, total, used, free))\n\n if self.disk_io:\n io = psutil.disk_io_counters()\n read_count, write_count = \\\n io.read_count, \\\n io.write_count\n read_bytes, write_bytes = \\\n bytes2human(io.read_bytes), \\\n bytes2human(io.write_bytes)\n\n print(\"Task {:} - read count: {:}, \\\n write count: {:}, \\\n bytes read: {:}, \\\n bytes written: {:}\"\n .format(t, read_count, write_count, read_bytes, write_bytes))\n else:\n io = None\n\n return usage, io\n\n\nclass CM(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Confusion Matrix computation\n \"\"\"\n self.num_class = num_class\n\n def compute(self, y, y_hat, normalize=False):\n 
\"\"\"\n :param y (tensor or tensors list): true labels for each minibatch\n :param y_hat (tensor or tensors list): predicted labels for each\n minibatch\n \"\"\"\n\n assert type(y) == type(y_hat), \"Predicted and target labels must be \\\n both list (of tensors) or tensors\"\n \n # manage list of tensors by default\n if not (isinstance(y, list) or isinstance(y, tuple)):\n y = [y]\n y_hat = [y_hat]\n\n if self.num_class is None:\n num_class = int(max([torch.max(el).item() + 1 for el in y])) \n else:\n num_class = self.num_class\n\n cmap = plt.cm.Blues\n\n cm = np.zeros((num_class, num_class))\n for i, (el, el_hat) in enumerate(zip(y, y_hat)):\n # Compute confusion matrix\n cm += confusion_matrix(\n el.numpy(), el_hat.numpy(), \n labels=list(range(num_class))) \n\n # Only use the labels that appear in the data\n classes = [str(i) for i in range(num_class)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n fig, ax = plt.subplots()\n im = ax.matshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=None,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n fig.tight_layout()\n\n buf = io.BytesIO()\n fig.savefig(buf, format='jpg', dpi=50)\n plt.close(fig)\n\n buf.seek(0)\n image = PIL.Image.open(buf)\n image = ToTensor()(image)\n return image\n\n\nclass TimeUsage:\n\n \"\"\"\n Time usage metric measured in seconds.\n \"\"\"\n\n def __init__(self):\n self._start_time = time.perf_counter()\n\n def compute(self, t):\n elapsed_time = time.perf_counter() - self._start_time\n print(f\"Elapsed time: {elapsed_time:0.4f} seconds\")\n", "path": "avalanche/evaluation/metrics.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n################################################################################\n# Copyright (c) 2020 ContinualAI Research #\n# Copyrights licensed under the CC BY 4.0 License. #\n# See the accompanying LICENSE file for terms. #\n# #\n# Date: 1-05-2020 #\n# Author(s): Vincenzo Lomonaco #\n# E-mail: [email protected] #\n# Website: clair.continualai.org #\n################################################################################\n\n\"\"\" Common metrics for CL. \"\"\"\n\n# Python 2-3 compatible\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport torch\nimport numpy as np\nimport os\nimport psutil\nfrom .utils import bytes2human\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport PIL.Image\nfrom torchvision.transforms import ToTensor\nimport io\nimport queue\nimport subprocess\nimport threading\nimport time\n\n\nclass MAC:\n \"\"\"\n Multiply-and-accumulate metric. Approximately measure the computational\n cost of a model in a hardware-independent way by computing the number\n of multiplications. 
Currently supports only Linear or Conv2d modules.\n Other operations are ignored.\n \"\"\"\n def __init__(self):\n self.hooks = []\n self._compute_cost = 0\n\n def compute(self, model, dummy_input):\n for mod in model.modules():\n if self.is_recognized_module(mod):\n def foo(a, b, c):\n return self.update_compute_cost(a, b, c)\n handle = mod.register_forward_hook(foo)\n self.hooks.append(handle)\n\n self._compute_cost = 0\n model(dummy_input) # trigger forward hooks\n\n for handle in self.hooks:\n handle.remove()\n self.hooks = []\n return self._compute_cost\n\n def update_compute_cost(self, module, input, output):\n modname = module.__class__.__name__\n if modname == 'Linear':\n self._compute_cost += input[0].shape[1] * output.shape[1]\n elif modname == 'Conv2d':\n n, cout, hout, wout = output.shape # Batch, Channels, Height, Width\n ksize = module.kernel_size[0] * module.kernel_size[1]\n self._compute_cost += cout * hout * wout * (ksize)\n print(self._compute_cost)\n\n def is_recognized_module(self, mod):\n modname = mod.__class__.__name__\n return modname == 'Linear' or modname == 'Conv2d'\n\n\nclass GPUUsage:\n \"\"\"\n GPU usage metric measured as average usage percentage over time.\n\n :param gpu_id: GPU device ID\n :param every: time delay (in seconds) between measurements\n \"\"\"\n\n def __init__(self, gpu_id, every=10):\n # 'nvidia-smi --loop=1 --query-gpu=utilization.gpu --format=csv'\n cmd = ['nvidia-smi', f'--loop={every}', '--query-gpu=utilization.gpu',\n '--format=csv', f'--id={gpu_id}']\n # something long running\n try:\n self.p = subprocess.Popen(cmd, bufsize=1, stdout=subprocess.PIPE)\n except NotADirectoryError:\n raise ValueError('No GPU available: nvidia-smi command not found.')\n\n self.lines_queue = queue.Queue()\n self.read_thread = threading.Thread(target=GPUUsage.push_lines,\n args=(self,), daemon=True)\n self.read_thread.start()\n\n self.n_measurements = 0\n self.avg_usage = 0\n\n def compute(self, t):\n \"\"\"\n Compute CPU usage measured in seconds.\n\n :param t: task id\n :return: float: average GPU usage\n \"\"\"\n while not self.lines_queue.empty():\n line = self.lines_queue.get()\n if line[0] == 'u': # skip first line 'utilization.gpu [%]'\n continue\n usage = int(line.strip()[:-1])\n self.n_measurements += 1\n self.avg_usage += usage\n\n if self.n_measurements > 0:\n self.avg_usage /= float(self.n_measurements)\n print(f\"Train Task {t} - average GPU usage: {self.avg_usage}%\")\n\n return self.avg_usage\n\n def push_lines(self):\n while True:\n line = self.p.stdout.readline()\n self.lines_queue.put(line.decode('ascii'))\n\n def close(self):\n self.p.terminate()\n\n\nclass CPUUsage:\n \"\"\"\n CPU usage metric measured in seconds.\n \"\"\"\n\n def compute(self, t):\n \"\"\"\n Compute CPU usage measured in seconds.\n\n :param t: task id\n :return: tuple (float, float): (user CPU time, system CPU time)\n \"\"\"\n p = psutil.Process(os.getpid())\n times = p.cpu_times()\n user, sys = times.user, times.system\n print(\"Train Task {:} - CPU usage: user {} system {}\"\n .format(t, user, sys))\n return user, sys\n\n\nclass ACC(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Accuracy metrics should be called for each test set\n\n :param num_class (int, optional): number of classes in the test_set\n (useful in case the test_set does not cover all the classes\n in the train_set).\n \"\"\"\n\n self.num_class = num_class\n\n def compute(self, y, y_hat):\n \"\"\"\n :param y (tensor list or tensor): true labels for each mini-batch\n :param y_hat (tensor list 
or tensor): predicted labels for each\n mini-batch\n\n :return acc (float): average accuracy for the test set\n :return accs (float list): accuracy for each class in the training set \n \"\"\"\n \n assert type(y) == type(y_hat), \"Predicted and target labels must be \\\n both list (of tensors) or tensors\"\n \n # manage list of tensors by default\n if not (isinstance(y, list) or isinstance(y, tuple)):\n y = [y]\n y_hat = [y_hat]\n\n if self.num_class is None:\n num_class = int(max([torch.max(el).item() + 1 for el in y]))\n else:\n num_class = self.num_class\n\n hits_per_class = [0] * num_class\n pattern_per_class = [0] * num_class\n\n correct_cnt = 0.\n\n for true_y, y_pred in zip(y, y_hat):\n \n correct_cnt += (true_y == y_pred).sum().float()\n\n for label in true_y:\n pattern_per_class[int(label)] += 1\n\n for i, pred in enumerate(y_pred):\n if pred == true_y[i]:\n hits_per_class[int(pred)] += 1\n\n accs = np.zeros(len(hits_per_class), dtype=np.float)\n hits_per_class = np.asarray(hits_per_class)\n pattern_per_class = np.asarray(pattern_per_class).astype(float)\n\n # np.divide prevents the true divide warning from showing up\n # when one or more elements of pattern_per_class are zero\n # Also, those elements will be 0 instead of NaN\n np.divide(hits_per_class, pattern_per_class,\n where=pattern_per_class != 0, out=accs)\n accs = torch.from_numpy(accs)\n\n acc = correct_cnt / float(y_hat[0].size(0) * len(y_hat))\n\n return acc, accs\n\n\nclass CF(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Catastrophic Forgetting metric.\n \"\"\"\n\n self.best_acc = {}\n self.acc_metric = ACC(num_class=num_class)\n\n def compute(self, y, y_hat, train_t, test_t):\n \"\"\"\n :param y (tensor list or tensor): true labels for each mini-batch\n :param y_hat (tensor list or tensor): predicted labels for each\n mini-batch\n \"\"\"\n\n acc, accs = self.acc_metric.compute(y, y_hat)\n if train_t not in self.best_acc.keys() and train_t == test_t:\n self.best_acc[train_t] = acc\n\n if test_t not in self.best_acc.keys():\n cf = np.NAN\n else:\n cf = self.best_acc[test_t] - acc\n\n print(\"Task {:} - CF: {:.4f}\"\n .format(test_t, cf))\n\n return cf\n\n\nclass RAMU(object):\n\n def __init__(self):\n \"\"\"\n RAM Usage metric.\n \"\"\"\n\n def compute(self, t):\n\n process = psutil.Process(os.getpid())\n mem = process.memory_info().rss # in bytes\n\n print(\"Train Task {:} - MU: {:.3f} GB\"\n .format(t, mem / (1024 * 1024 * 1024)))\n\n return mem / (1024 * 1024 * 1024)\n\n\nclass DiskUsage(object):\n\n def __init__(self, path_to_monitor=None, disk_io=False):\n \"\"\"\n :param path_to_monitor (string): a valid path to folder.\n If None, the current working directory is used.\n :param disk_io: True to enable monitoring of I/O operations on disk.\n WARNING: Reports are system-wide, grouping all disks.\n \"\"\"\n\n if path_to_monitor is not None:\n self.path_to_monitor = path_to_monitor\n else:\n self.path_to_monitor = os.getcwd()\n\n self.disk_io = disk_io\n\n def compute(self, t):\n \"\"\"\n :param t: task id\n\n :return usage, io (tuple): io is None if disk_io is False\n \"\"\"\n\n usage = psutil.disk_usage(self.path_to_monitor)\n\n total, used, free, percent = \\\n bytes2human(usage.total), \\\n bytes2human(usage.used), \\\n bytes2human(usage.free), \\\n usage.percent\n\n print(\"Disk usage for {:}\".format(self.path_to_monitor))\n print(\"Task {:} - disk percent: {:}%, \\\n disk total: {:}, \\\n disk used: {:}, \\\n disk free: {:}\"\n .format(t, percent, total, used, free))\n\n if self.disk_io:\n io = 
psutil.disk_io_counters()\n read_count, write_count = \\\n io.read_count, \\\n io.write_count\n read_bytes, write_bytes = \\\n bytes2human(io.read_bytes), \\\n bytes2human(io.write_bytes)\n\n print(\"Task {:} - read count: {:}, \\\n write count: {:}, \\\n bytes read: {:}, \\\n bytes written: {:}\"\n .format(t, read_count, write_count, read_bytes, write_bytes))\n else:\n io = None\n\n return usage, io\n\n\nclass CM(object):\n\n def __init__(self, num_class=None):\n \"\"\"\n Confusion Matrix computation\n \"\"\"\n self.num_class = num_class\n\n def compute(self, y, y_hat, normalize=False):\n \"\"\"\n :param y (tensor or tensors list): true labels for each minibatch\n :param y_hat (tensor or tensors list): predicted labels for each\n minibatch\n \"\"\"\n\n assert type(y) == type(y_hat), \"Predicted and target labels must be \\\n both list (of tensors) or tensors\"\n \n # manage list of tensors by default\n if not (isinstance(y, list) or isinstance(y, tuple)):\n y = [y]\n y_hat = [y_hat]\n\n if self.num_class is None:\n num_class = int(max([torch.max(el).item() + 1 for el in y])) \n else:\n num_class = self.num_class\n\n cmap = plt.cm.Blues\n\n cm = np.zeros((num_class, num_class))\n for i, (el, el_hat) in enumerate(zip(y, y_hat)):\n # Compute confusion matrix\n cm += confusion_matrix(\n el.numpy(), el_hat.numpy(), \n labels=list(range(num_class))) \n\n # Only use the labels that appear in the data\n classes = [str(i) for i in range(num_class)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n fig, ax = plt.subplots()\n im = ax.matshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=None,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n fig.tight_layout()\n\n buf = io.BytesIO()\n fig.savefig(buf, format='jpg', dpi=50)\n plt.close(fig)\n\n buf.seek(0)\n image = PIL.Image.open(buf)\n image = ToTensor()(image)\n return image\n\n\nclass TimeUsage:\n\n \"\"\"\n Time usage metric measured in seconds.\n \"\"\"\n\n def __init__(self):\n self._start_time = time.perf_counter()\n\n def compute(self, t):\n elapsed_time = time.perf_counter() - self._start_time\n print(f\"Elapsed time: {elapsed_time:0.4f} seconds\")\n", "path": "avalanche/evaluation/metrics.py"}]} | 4,055 | 471 |
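The patch above adds a `MAC` class that counts multiplications via `register_forward_hook`. A rough standalone sketch of the same hook-based idea follows; the toy model and input shape are assumptions for illustration, and unlike the merged patch this variant also multiplies by the number of input channels for `Conv2d`:

```python
import torch
import torch.nn as nn

mac_counts = {}

def count_macs(module, inputs, output):
    # Linear layer: in_features * out_features multiplications per sample.
    if isinstance(module, nn.Linear):
        macs = inputs[0].shape[-1] * output.shape[-1]
    # Conv2d: kernel area * input channels * size of the output feature map.
    elif isinstance(module, nn.Conv2d):
        _, c_out, h_out, w_out = output.shape
        k_h, k_w = module.kernel_size
        macs = c_out * h_out * w_out * k_h * k_w * module.in_channels
    else:
        return
    mac_counts[module] = mac_counts.get(module, 0) + macs

model = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3),
    nn.Flatten(),
    nn.Linear(16 * 30 * 30, 10),
)
handles = [m.register_forward_hook(count_macs) for m in model.modules()]

model(torch.randn(1, 3, 32, 32))  # one forward pass triggers the hooks
print(sum(mac_counts.values()))

for handle in handles:
    handle.remove()
```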
gh_patches_debug_12983 | rasdani/github-patches | git_diff | scikit-image__scikit-image-7211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consistently use lazy loading for all `skimage.*` submodules
### Description:
With `lazy_loader` successfully being used for `skimage`, `skimage.data`, and `skimage.filters`, why not use it for all of our public submodules? I see no significant disadvantage here (when using the approach with PYI files), and it is what is proposed in [SPEC 1](https://scientific-python.org/specs/spec-0001/).
Feel free to remove the good first issue label if there are concerns. Otherwise, I would suggest tackling this with separate PRs for each module, copying the examples mentioned above.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/metrics/__init__.py`
Content:
```
1 from ._adapted_rand_error import adapted_rand_error
2 from ._contingency_table import contingency_table
3 from ._structural_similarity import structural_similarity
4 from ._variation_of_information import variation_of_information
5 from .set_metrics import hausdorff_distance, hausdorff_pair
6 from .simple_metrics import (
7 mean_squared_error,
8 normalized_mutual_information,
9 normalized_root_mse,
10 peak_signal_noise_ratio,
11 )
12
13 __all__ = [
14 "adapted_rand_error",
15 "variation_of_information",
16 "contingency_table",
17 "mean_squared_error",
18 "normalized_mutual_information",
19 "normalized_root_mse",
20 "peak_signal_noise_ratio",
21 "structural_similarity",
22 "hausdorff_distance",
23 "hausdorff_pair",
24 ]
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/metrics/__init__.py b/skimage/metrics/__init__.py
--- a/skimage/metrics/__init__.py
+++ b/skimage/metrics/__init__.py
@@ -1,24 +1,3 @@
-from ._adapted_rand_error import adapted_rand_error
-from ._contingency_table import contingency_table
-from ._structural_similarity import structural_similarity
-from ._variation_of_information import variation_of_information
-from .set_metrics import hausdorff_distance, hausdorff_pair
-from .simple_metrics import (
- mean_squared_error,
- normalized_mutual_information,
- normalized_root_mse,
- peak_signal_noise_ratio,
-)
+import lazy_loader as lazy
-__all__ = [
- "adapted_rand_error",
- "variation_of_information",
- "contingency_table",
- "mean_squared_error",
- "normalized_mutual_information",
- "normalized_root_mse",
- "peak_signal_noise_ratio",
- "structural_similarity",
- "hausdorff_distance",
- "hausdorff_pair",
-]
+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
| {"golden_diff": "diff --git a/skimage/metrics/__init__.py b/skimage/metrics/__init__.py\n--- a/skimage/metrics/__init__.py\n+++ b/skimage/metrics/__init__.py\n@@ -1,24 +1,3 @@\n-from ._adapted_rand_error import adapted_rand_error\n-from ._contingency_table import contingency_table\n-from ._structural_similarity import structural_similarity\n-from ._variation_of_information import variation_of_information\n-from .set_metrics import hausdorff_distance, hausdorff_pair\n-from .simple_metrics import (\n- mean_squared_error,\n- normalized_mutual_information,\n- normalized_root_mse,\n- peak_signal_noise_ratio,\n-)\n+import lazy_loader as lazy\n \n-__all__ = [\n- \"adapted_rand_error\",\n- \"variation_of_information\",\n- \"contingency_table\",\n- \"mean_squared_error\",\n- \"normalized_mutual_information\",\n- \"normalized_root_mse\",\n- \"peak_signal_noise_ratio\",\n- \"structural_similarity\",\n- \"hausdorff_distance\",\n- \"hausdorff_pair\",\n-]\n+__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)\n", "issue": "Consistently use lazy loading for all `skimage.*` submodules\n### Description:\r\n\r\nWith `lazy_loader` successfully being used for `skimage`, `skimage.data` and `skimage.filters` why not use it for every of our public submodules? I see no significant disadvantage here (when using the approach with PYI files) and it is what is proposed in [SPEC 1](https://scientific-python.org/specs/spec-0001/).\r\n\r\nFeel free to remove the good first issue label if there are concerns. Otherwise I would suggest to tackle this with separate PRs for each module that copy the examples mentioned above.\n", "before_files": [{"content": "from ._adapted_rand_error import adapted_rand_error\nfrom ._contingency_table import contingency_table\nfrom ._structural_similarity import structural_similarity\nfrom ._variation_of_information import variation_of_information\nfrom .set_metrics import hausdorff_distance, hausdorff_pair\nfrom .simple_metrics import (\n mean_squared_error,\n normalized_mutual_information,\n normalized_root_mse,\n peak_signal_noise_ratio,\n)\n\n__all__ = [\n \"adapted_rand_error\",\n \"variation_of_information\",\n \"contingency_table\",\n \"mean_squared_error\",\n \"normalized_mutual_information\",\n \"normalized_root_mse\",\n \"peak_signal_noise_ratio\",\n \"structural_similarity\",\n \"hausdorff_distance\",\n \"hausdorff_pair\",\n]\n", "path": "skimage/metrics/__init__.py"}], "after_files": [{"content": "import lazy_loader as lazy\n\n__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)\n", "path": "skimage/metrics/__init__.py"}]} | 598 | 263 |
gh_patches_debug_12923 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1939 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using Generic class without type parameter in `strawberry.Private` causes ValueError
When using generic types with `strawberry.Private`, if a type parameter is not used within the annotation, then `StrawberryAnnotation` will incorrectly treat it as a generic class and try to resolve it as such.
```python
from __future__ import annotations
from typing import Generic, Sequence, TypeVar
import strawberry
T = TypeVar("T")
@strawberry.type
class Foo(Generic[T]):
private_field: strawberry.Private[Sequence]
@strawberry.type
class Query:
@strawberry.field
def foo(self) -> Foo[str]:
return Foo(private_field=[1, 2, 3])
```
This results in the following traceback:
<details>
<summary>Traceback</summary>
```bash
Traceback (most recent call last):
File "~/strawberry/annotation.py", line 21, in <module>
@strawberry.type
File "~\strawberry\object_type.py", line 222, in type
return wrap(cls)
File "~\strawberry\object_type.py", line 208, in wrap
wrapped = _wrap_dataclass(cls)
File "~\strawberry\object_type.py", line 91, in _wrap_dataclass
return dataclasses.dataclass(cls)
File "~\Python37\lib\dataclasses.py", line 1010, in dataclass
return wrap(_cls)
File "~\Python37\lib\dataclasses.py", line 1002, in wrap
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)
File "~\Python37\lib\dataclasses.py", line 850, in _process_class
for name, type in cls_annotations.items()]
File "~\Python37\lib\dataclasses.py", line 850, in <listcomp>
for name, type in cls_annotations.items()]
File "~\Python37\lib\dataclasses.py", line 699, in _get_field
or (isinstance(f.type, str)
File "~\strawberry\field.py", line 207, in type
if self.base_resolver.type is not None:
File "~\strawberry\types\fields\resolver.py", line 218, in type
return self.type_annotation.resolve()
File "~\strawberry\annotation.py", line 84, in resolve
return self.create_concrete_type(evaled_type)
File "~\strawberry\annotation.py", line 111, in create_concrete_type
return type_definition.resolve_generic(evaled_type)
File "~\strawberry\types\types.py", line 62, in resolve_generic
return self.copy_with(type_var_map)
File "~\strawberry\types\types.py", line 71, in copy_with
field_type = field.type
File "~\strawberry\field.py", line 221, in type
return self.type_annotation.resolve()
File "~\strawberry\annotation.py", line 84, in resolve
return self.create_concrete_type(evaled_type)
File "~\strawberry\annotation.py", line 113, in create_concrete_type
raise ValueError(f"Not supported {evaled_type}")
ValueError: Not supported typing_extensions.Annotated[__main__.PrivateGenericType, <strawberry.private.StrawberryPrivate object at 0x000001C3D3E109C8>]
```
</details>
Passing `Any` to the Generic type is currently a workaround for this bug.
```python
private_field: strawberry.Private[Sequence[Any]]
```
As per CPython [docs](https://docs.python.org/3/library/typing.html#user-defined-generic-types):
>Using a generic class without specifying type parameters assumes [Any](https://docs.python.org/3/library/typing.html#typing.Any) for each position. In the following example, MyIterable is not generic but implicitly inherits from Iterable[Any]:
Therefore, defining Generic type annotations without an explicit `Any` should be supported.
## System Information
- Python 3.7.9
- Operating system: Windows 10
- Strawberry version (if applicable): 0.114.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/annotation.py`
Content:
```
1 import sys
2 import typing
3 from collections import abc
4 from enum import Enum
5 from typing import ( # type: ignore[attr-defined]
6 TYPE_CHECKING,
7 Any,
8 Dict,
9 Optional,
10 TypeVar,
11 Union,
12 _eval_type,
13 )
14
15
16 try:
17 from typing import ForwardRef
18 except ImportError: # pragma: no cover
19 # ForwardRef is private in python 3.6 and 3.7
20 from typing import _ForwardRef as ForwardRef # type: ignore
21
22 from strawberry.custom_scalar import ScalarDefinition
23 from strawberry.enum import EnumDefinition
24 from strawberry.lazy_type import LazyType
25 from strawberry.type import (
26 StrawberryList,
27 StrawberryOptional,
28 StrawberryType,
29 StrawberryTypeVar,
30 )
31 from strawberry.types.types import TypeDefinition
32 from strawberry.unset import UNSET
33 from strawberry.utils.typing import is_generic, is_type_var
34
35
36 if TYPE_CHECKING:
37 from strawberry.union import StrawberryUnion
38
39
40 ASYNC_TYPES = (
41 abc.AsyncGenerator,
42 abc.AsyncIterable,
43 abc.AsyncIterator,
44 typing.AsyncContextManager,
45 typing.AsyncGenerator,
46 typing.AsyncIterable,
47 typing.AsyncIterator,
48 )
49
50
51 class StrawberryAnnotation:
52 def __init__(
53 self, annotation: Union[object, str], *, namespace: Optional[Dict] = None
54 ):
55 self.annotation = annotation
56 self.namespace = namespace
57
58 def __eq__(self, other: object) -> bool:
59 if not isinstance(other, StrawberryAnnotation):
60 return NotImplemented
61
62 return self.resolve() == other.resolve()
63
64 def resolve(self) -> Union[StrawberryType, type]:
65 annotation: object
66 if isinstance(self.annotation, str):
67 annotation = ForwardRef(self.annotation)
68 else:
69 annotation = self.annotation
70
71 evaled_type = _eval_type(annotation, self.namespace, None)
72 if self._is_async_type(evaled_type):
73 evaled_type = self._strip_async_type(evaled_type)
74 if self._is_lazy_type(evaled_type):
75 return evaled_type
76
77 if self._is_generic(evaled_type):
78 if any(is_type_var(type_) for type_ in evaled_type.__args__):
79 return evaled_type
80 return self.create_concrete_type(evaled_type)
81
82 # Simply return objects that are already StrawberryTypes
83 if self._is_strawberry_type(evaled_type):
84 return evaled_type
85
86 # Everything remaining should be a raw annotation that needs to be turned into
87 # a StrawberryType
88 if self._is_enum(evaled_type):
89 return self.create_enum(evaled_type)
90 if self._is_list(evaled_type):
91 return self.create_list(evaled_type)
92 elif self._is_optional(evaled_type):
93 return self.create_optional(evaled_type)
94 elif self._is_union(evaled_type):
95 return self.create_union(evaled_type)
96 elif is_type_var(evaled_type):
97 return self.create_type_var(evaled_type)
98
99 # TODO: Raise exception now, or later?
100 # ... raise NotImplementedError(f"Unknown type {evaled_type}")
101 return evaled_type
102
103 def create_concrete_type(self, evaled_type: type) -> type:
104 if _is_object_type(evaled_type):
105 type_definition: TypeDefinition
106 type_definition = evaled_type._type_definition # type: ignore
107 return type_definition.resolve_generic(evaled_type)
108
109 raise ValueError(f"Not supported {evaled_type}")
110
111 def create_enum(self, evaled_type: Any) -> EnumDefinition:
112 return evaled_type._enum_definition
113
114 def create_list(self, evaled_type: Any) -> StrawberryList:
115 of_type = StrawberryAnnotation(
116 annotation=evaled_type.__args__[0],
117 namespace=self.namespace,
118 ).resolve()
119
120 return StrawberryList(of_type)
121
122 def create_optional(self, evaled_type: Any) -> StrawberryOptional:
123 types = evaled_type.__args__
124 non_optional_types = tuple(
125 filter(
126 lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721
127 types,
128 )
129 )
130
131 # Note that passing a single type to `Union` is equivalent to not using `Union`
132 # at all. This allows us to not di any checks for how many types have been
133 # passed as we can safely use `Union` for both optional types
134 # (e.g. `Optional[str]`) and optional unions (e.g.
135 # `Optional[Union[TypeA, TypeB]]`)
136 child_type = Union[non_optional_types] # type: ignore
137
138 of_type = StrawberryAnnotation(
139 annotation=child_type,
140 namespace=self.namespace,
141 ).resolve()
142
143 return StrawberryOptional(of_type)
144
145 def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:
146 return StrawberryTypeVar(evaled_type)
147
148 def create_union(self, evaled_type) -> "StrawberryUnion":
149 # Prevent import cycles
150 from strawberry.union import StrawberryUnion
151
152 # TODO: Deal with Forward References/origin
153 if isinstance(evaled_type, StrawberryUnion):
154 return evaled_type
155
156 types = evaled_type.__args__
157 union = StrawberryUnion(
158 type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),
159 )
160 return union
161
162 @classmethod
163 def _is_async_type(cls, annotation: type) -> bool:
164 origin = getattr(annotation, "__origin__", None)
165 return origin in ASYNC_TYPES
166
167 @classmethod
168 def _is_enum(cls, annotation: Any) -> bool:
169 # Type aliases are not types so we need to make sure annotation can go into
170 # issubclass
171 if not isinstance(annotation, type):
172 return False
173 return issubclass(annotation, Enum)
174
175 @classmethod
176 def _is_generic(cls, annotation: Any) -> bool:
177 if hasattr(annotation, "__origin__"):
178 return is_generic(annotation.__origin__)
179
180 return False
181
182 @classmethod
183 def _is_lazy_type(cls, annotation: Any) -> bool:
184 return isinstance(annotation, LazyType)
185
186 @classmethod
187 def _is_optional(cls, annotation: Any) -> bool:
188 """Returns True if the annotation is Optional[SomeType]"""
189
190 # Optionals are represented as unions
191 if not cls._is_union(annotation):
192 return False
193
194 types = annotation.__args__
195
196 # A Union to be optional needs to have at least one None type
197 return any(x is type(None) for x in types) # noqa: E721
198
199 @classmethod
200 def _is_list(cls, annotation: Any) -> bool:
201 """Returns True if annotation is a List"""
202
203 annotation_origin = getattr(annotation, "__origin__", None)
204
205 return annotation_origin == list
206
207 @classmethod
208 def _is_strawberry_type(cls, evaled_type: Any) -> bool:
209 # Prevent import cycles
210 from strawberry.union import StrawberryUnion
211
212 if isinstance(evaled_type, EnumDefinition):
213 return True
214 elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject
215 return True
216 # TODO: add support for StrawberryInterface when implemented
217 elif isinstance(evaled_type, StrawberryList):
218 return True
219 elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject
220 return True
221 elif isinstance(evaled_type, TypeDefinition):
222 return True
223 elif isinstance(evaled_type, StrawberryOptional):
224 return True
225 elif isinstance(
226 evaled_type, ScalarDefinition
227 ): # TODO: Replace with StrawberryScalar
228 return True
229 elif isinstance(evaled_type, StrawberryUnion):
230 return True
231
232 return False
233
234 @classmethod
235 def _is_union(cls, annotation: Any) -> bool:
236 """Returns True if annotation is a Union"""
237
238 # this check is needed because unions declared with the new syntax `A | B`
239 # don't have a `__origin__` property on them, but they are instances of
240 # `UnionType`, which is only available in Python 3.10+
241 if sys.version_info >= (3, 10):
242 from types import UnionType
243
244 if isinstance(annotation, UnionType):
245 return True
246
247 # unions declared as Union[A, B] fall through to this check, even on python 3.10+
248
249 annotation_origin = getattr(annotation, "__origin__", None)
250
251 return annotation_origin is typing.Union
252
253 @classmethod
254 def _strip_async_type(cls, annotation) -> type:
255 return annotation.__args__[0]
256
257 @classmethod
258 def _strip_lazy_type(cls, annotation: LazyType) -> type:
259 return annotation.resolve_type()
260
261
262 ################################################################################
263 # Temporary functions to be removed with new types
264 ################################################################################
265
266
267 def _is_input_type(type_: Any) -> bool:
268 if not _is_object_type(type_):
269 return False
270
271 return type_._type_definition.is_input
272
273
274 def _is_object_type(type_: Any) -> bool:
275 # isinstance(type_, StrawberryObjectType) # noqa: E800
276 return hasattr(type_, "_type_definition")
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/annotation.py b/strawberry/annotation.py
--- a/strawberry/annotation.py
+++ b/strawberry/annotation.py
@@ -12,6 +12,8 @@
_eval_type,
)
+from strawberry.private import is_private
+
try:
from typing import ForwardRef
@@ -69,6 +71,8 @@
annotation = self.annotation
evaled_type = _eval_type(annotation, self.namespace, None)
+ if is_private(evaled_type):
+ return evaled_type
if self._is_async_type(evaled_type):
evaled_type = self._strip_async_type(evaled_type)
if self._is_lazy_type(evaled_type):
| {"golden_diff": "diff --git a/strawberry/annotation.py b/strawberry/annotation.py\n--- a/strawberry/annotation.py\n+++ b/strawberry/annotation.py\n@@ -12,6 +12,8 @@\n _eval_type,\n )\n \n+from strawberry.private import is_private\n+\n \n try:\n from typing import ForwardRef\n@@ -69,6 +71,8 @@\n annotation = self.annotation\n \n evaled_type = _eval_type(annotation, self.namespace, None)\n+ if is_private(evaled_type):\n+ return evaled_type\n if self._is_async_type(evaled_type):\n evaled_type = self._strip_async_type(evaled_type)\n if self._is_lazy_type(evaled_type):\n", "issue": "Using Generic class without type parameter in `strawberry.Private` causes ValueError\nWhen using generic types with `strawberry.Private` if a type-parameter is not used within the annotation then `StrawberryAnnotation` will incorrectly treat it as a generic class and try to resolve it as such.\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nfrom typing import Generic, Sequence, TypeVar\r\n\r\nimport strawberry\r\n\r\n\r\nT = TypeVar(\"T\")\r\n\r\n\r\[email protected]\r\nclass Foo(Generic[T]):\r\n\r\n private_field: strawberry.Private[Sequence]\r\n\r\n\r\[email protected]\r\nclass Query:\r\n @strawberry.field\r\n def foo(self) -> Foo[str]:\r\n return Foo(private_field=[1, 2, 3])\r\n\r\n```\r\n\r\nThis results in the following traceback:\r\n\r\n<details>\r\n <summary>Traceback</summary>\r\n \r\n```bash\r\nTraceback (most recent call last):\r\n File \"~/strawberry/annotation.py\", line 21, in <module>\r\n @strawberry.type\r\n File \"~\\strawberry\\object_type.py\", line 222, in type\r\n return wrap(cls)\r\n File \"~\\strawberry\\object_type.py\", line 208, in wrap\r\n wrapped = _wrap_dataclass(cls)\r\n File \"~\\strawberry\\object_type.py\", line 91, in _wrap_dataclass\r\n return dataclasses.dataclass(cls)\r\n File \"~\\Python37\\lib\\dataclasses.py\", line 1010, in dataclass\r\n return wrap(_cls)\r\n File \"~\\Python37\\lib\\dataclasses.py\", line 1002, in wrap\r\n return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen)\r\n File \"~\\Python37\\lib\\dataclasses.py\", line 850, in _process_class\r\n for name, type in cls_annotations.items()]\r\n File \"~\\Python37\\lib\\dataclasses.py\", line 850, in <listcomp>\r\n for name, type in cls_annotations.items()]\r\n File \"~\\Python37\\lib\\dataclasses.py\", line 699, in _get_field\r\n or (isinstance(f.type, str)\r\n File \"~\\strawberry\\field.py\", line 207, in type\r\n if self.base_resolver.type is not None:\r\n File \"~\\strawberry\\types\\fields\\resolver.py\", line 218, in type\r\n return self.type_annotation.resolve()\r\n File \"~\\strawberry\\annotation.py\", line 84, in resolve\r\n return self.create_concrete_type(evaled_type)\r\n File \"~\\strawberry\\annotation.py\", line 111, in create_concrete_type\r\n return type_definition.resolve_generic(evaled_type)\r\n File \"~\\strawberry\\types\\types.py\", line 62, in resolve_generic\r\n return self.copy_with(type_var_map)\r\n File \"~\\strawberry\\types\\types.py\", line 71, in copy_with\r\n field_type = field.type\r\n File \"~\\strawberry\\field.py\", line 221, in type\r\n return self.type_annotation.resolve()\r\n File \"~\\strawberry\\annotation.py\", line 84, in resolve\r\n return self.create_concrete_type(evaled_type)\r\n File \"~\\strawberry\\annotation.py\", line 113, in create_concrete_type\r\n raise ValueError(f\"Not supported {evaled_type}\")\r\nValueError: Not supported typing_extensions.Annotated[__main__.PrivateGenericType, <strawberry.private.StrawberryPrivate object at 
0x000001C3D3E109C8>]\r\n```\r\n\r\n</details>\r\n\r\nPassing `Any` to the Generic type is currently a workaround for this bug.\r\n```python\r\n private_field: strawberry.Private[Sequence[Any]]\r\n```\r\nAs per CPython [docs](https://docs.python.org/3/library/typing.html#user-defined-generic-types):\r\n\r\n>Using a generic class without specifying type parameters assumes [Any](https://docs.python.org/3/library/typing.html#typing.Any) for each position. In the following example, MyIterable is not generic but implicitly inherits from Iterable[Any]:\r\n\r\nTherefore, defining Generic type annotations without an explicit `Any` should be supported.\r\n## System Information\r\n\r\n - Python 3.7.9\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.114.0\n", "before_files": [{"content": "import sys\nimport typing\nfrom collections import abc\nfrom enum import Enum\nfrom typing import ( # type: ignore[attr-defined]\n TYPE_CHECKING,\n Any,\n Dict,\n Optional,\n TypeVar,\n Union,\n _eval_type,\n)\n\n\ntry:\n from typing import ForwardRef\nexcept ImportError: # pragma: no cover\n # ForwardRef is private in python 3.6 and 3.7\n from typing import _ForwardRef as ForwardRef # type: ignore\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.lazy_type import LazyType\nfrom strawberry.type import (\n StrawberryList,\n StrawberryOptional,\n StrawberryType,\n StrawberryTypeVar,\n)\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.unset import UNSET\nfrom strawberry.utils.typing import is_generic, is_type_var\n\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n\nASYNC_TYPES = (\n abc.AsyncGenerator,\n abc.AsyncIterable,\n abc.AsyncIterator,\n typing.AsyncContextManager,\n typing.AsyncGenerator,\n typing.AsyncIterable,\n typing.AsyncIterator,\n)\n\n\nclass StrawberryAnnotation:\n def __init__(\n self, annotation: Union[object, str], *, namespace: Optional[Dict] = None\n ):\n self.annotation = annotation\n self.namespace = namespace\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, StrawberryAnnotation):\n return NotImplemented\n\n return self.resolve() == other.resolve()\n\n def resolve(self) -> Union[StrawberryType, type]:\n annotation: object\n if isinstance(self.annotation, str):\n annotation = ForwardRef(self.annotation)\n else:\n annotation = self.annotation\n\n evaled_type = _eval_type(annotation, self.namespace, None)\n if self._is_async_type(evaled_type):\n evaled_type = self._strip_async_type(evaled_type)\n if self._is_lazy_type(evaled_type):\n return evaled_type\n\n if self._is_generic(evaled_type):\n if any(is_type_var(type_) for type_ in evaled_type.__args__):\n return evaled_type\n return self.create_concrete_type(evaled_type)\n\n # Simply return objects that are already StrawberryTypes\n if self._is_strawberry_type(evaled_type):\n return evaled_type\n\n # Everything remaining should be a raw annotation that needs to be turned into\n # a StrawberryType\n if self._is_enum(evaled_type):\n return self.create_enum(evaled_type)\n if self._is_list(evaled_type):\n return self.create_list(evaled_type)\n elif self._is_optional(evaled_type):\n return self.create_optional(evaled_type)\n elif self._is_union(evaled_type):\n return self.create_union(evaled_type)\n elif is_type_var(evaled_type):\n return self.create_type_var(evaled_type)\n\n # TODO: Raise exception now, or later?\n # ... 
raise NotImplementedError(f\"Unknown type {evaled_type}\")\n return evaled_type\n\n def create_concrete_type(self, evaled_type: type) -> type:\n if _is_object_type(evaled_type):\n type_definition: TypeDefinition\n type_definition = evaled_type._type_definition # type: ignore\n return type_definition.resolve_generic(evaled_type)\n\n raise ValueError(f\"Not supported {evaled_type}\")\n\n def create_enum(self, evaled_type: Any) -> EnumDefinition:\n return evaled_type._enum_definition\n\n def create_list(self, evaled_type: Any) -> StrawberryList:\n of_type = StrawberryAnnotation(\n annotation=evaled_type.__args__[0],\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryList(of_type)\n\n def create_optional(self, evaled_type: Any) -> StrawberryOptional:\n types = evaled_type.__args__\n non_optional_types = tuple(\n filter(\n lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721\n types,\n )\n )\n\n # Note that passing a single type to `Union` is equivalent to not using `Union`\n # at all. This allows us to not di any checks for how many types have been\n # passed as we can safely use `Union` for both optional types\n # (e.g. `Optional[str]`) and optional unions (e.g.\n # `Optional[Union[TypeA, TypeB]]`)\n child_type = Union[non_optional_types] # type: ignore\n\n of_type = StrawberryAnnotation(\n annotation=child_type,\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryOptional(of_type)\n\n def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:\n return StrawberryTypeVar(evaled_type)\n\n def create_union(self, evaled_type) -> \"StrawberryUnion\":\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n # TODO: Deal with Forward References/origin\n if isinstance(evaled_type, StrawberryUnion):\n return evaled_type\n\n types = evaled_type.__args__\n union = StrawberryUnion(\n type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),\n )\n return union\n\n @classmethod\n def _is_async_type(cls, annotation: type) -> bool:\n origin = getattr(annotation, \"__origin__\", None)\n return origin in ASYNC_TYPES\n\n @classmethod\n def _is_enum(cls, annotation: Any) -> bool:\n # Type aliases are not types so we need to make sure annotation can go into\n # issubclass\n if not isinstance(annotation, type):\n return False\n return issubclass(annotation, Enum)\n\n @classmethod\n def _is_generic(cls, annotation: Any) -> bool:\n if hasattr(annotation, \"__origin__\"):\n return is_generic(annotation.__origin__)\n\n return False\n\n @classmethod\n def _is_lazy_type(cls, annotation: Any) -> bool:\n return isinstance(annotation, LazyType)\n\n @classmethod\n def _is_optional(cls, annotation: Any) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n if not cls._is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x is type(None) for x in types) # noqa: E721\n\n @classmethod\n def _is_list(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n @classmethod\n def _is_strawberry_type(cls, evaled_type: Any) -> bool:\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n if isinstance(evaled_type, EnumDefinition):\n return True\n elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject\n return True\n # TODO: 
add support for StrawberryInterface when implemented\n elif isinstance(evaled_type, StrawberryList):\n return True\n elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject\n return True\n elif isinstance(evaled_type, TypeDefinition):\n return True\n elif isinstance(evaled_type, StrawberryOptional):\n return True\n elif isinstance(\n evaled_type, ScalarDefinition\n ): # TODO: Replace with StrawberryScalar\n return True\n elif isinstance(evaled_type, StrawberryUnion):\n return True\n\n return False\n\n @classmethod\n def _is_union(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin is typing.Union\n\n @classmethod\n def _strip_async_type(cls, annotation) -> type:\n return annotation.__args__[0]\n\n @classmethod\n def _strip_lazy_type(cls, annotation: LazyType) -> type:\n return annotation.resolve_type()\n\n\n################################################################################\n# Temporary functions to be removed with new types\n################################################################################\n\n\ndef _is_input_type(type_: Any) -> bool:\n if not _is_object_type(type_):\n return False\n\n return type_._type_definition.is_input\n\n\ndef _is_object_type(type_: Any) -> bool:\n # isinstance(type_, StrawberryObjectType) # noqa: E800\n return hasattr(type_, \"_type_definition\")\n", "path": "strawberry/annotation.py"}], "after_files": [{"content": "import sys\nimport typing\nfrom collections import abc\nfrom enum import Enum\nfrom typing import ( # type: ignore[attr-defined]\n TYPE_CHECKING,\n Any,\n Dict,\n Optional,\n TypeVar,\n Union,\n _eval_type,\n)\n\nfrom strawberry.private import is_private\n\n\ntry:\n from typing import ForwardRef\nexcept ImportError: # pragma: no cover\n # ForwardRef is private in python 3.6 and 3.7\n from typing import _ForwardRef as ForwardRef # type: ignore\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.lazy_type import LazyType\nfrom strawberry.type import (\n StrawberryList,\n StrawberryOptional,\n StrawberryType,\n StrawberryTypeVar,\n)\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.unset import UNSET\nfrom strawberry.utils.typing import is_generic, is_type_var\n\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n\nASYNC_TYPES = (\n abc.AsyncGenerator,\n abc.AsyncIterable,\n abc.AsyncIterator,\n typing.AsyncContextManager,\n typing.AsyncGenerator,\n typing.AsyncIterable,\n typing.AsyncIterator,\n)\n\n\nclass StrawberryAnnotation:\n def __init__(\n self, annotation: Union[object, str], *, namespace: Optional[Dict] = None\n ):\n self.annotation = annotation\n self.namespace = namespace\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, StrawberryAnnotation):\n return NotImplemented\n\n return self.resolve() == other.resolve()\n\n def resolve(self) -> Union[StrawberryType, type]:\n annotation: object\n if isinstance(self.annotation, str):\n annotation = 
ForwardRef(self.annotation)\n else:\n annotation = self.annotation\n\n evaled_type = _eval_type(annotation, self.namespace, None)\n if is_private(evaled_type):\n return evaled_type\n if self._is_async_type(evaled_type):\n evaled_type = self._strip_async_type(evaled_type)\n if self._is_lazy_type(evaled_type):\n return evaled_type\n\n if self._is_generic(evaled_type):\n if any(is_type_var(type_) for type_ in evaled_type.__args__):\n return evaled_type\n return self.create_concrete_type(evaled_type)\n\n # Simply return objects that are already StrawberryTypes\n if self._is_strawberry_type(evaled_type):\n return evaled_type\n\n # Everything remaining should be a raw annotation that needs to be turned into\n # a StrawberryType\n if self._is_enum(evaled_type):\n return self.create_enum(evaled_type)\n if self._is_list(evaled_type):\n return self.create_list(evaled_type)\n elif self._is_optional(evaled_type):\n return self.create_optional(evaled_type)\n elif self._is_union(evaled_type):\n return self.create_union(evaled_type)\n elif is_type_var(evaled_type):\n return self.create_type_var(evaled_type)\n\n # TODO: Raise exception now, or later?\n # ... raise NotImplementedError(f\"Unknown type {evaled_type}\")\n return evaled_type\n\n def create_concrete_type(self, evaled_type: type) -> type:\n if _is_object_type(evaled_type):\n type_definition: TypeDefinition\n type_definition = evaled_type._type_definition # type: ignore\n return type_definition.resolve_generic(evaled_type)\n\n raise ValueError(f\"Not supported {evaled_type}\")\n\n def create_enum(self, evaled_type: Any) -> EnumDefinition:\n return evaled_type._enum_definition\n\n def create_list(self, evaled_type: Any) -> StrawberryList:\n of_type = StrawberryAnnotation(\n annotation=evaled_type.__args__[0],\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryList(of_type)\n\n def create_optional(self, evaled_type: Any) -> StrawberryOptional:\n types = evaled_type.__args__\n non_optional_types = tuple(\n filter(\n lambda x: x is not type(None) and x is not type(UNSET), # noqa: E721\n types,\n )\n )\n\n # Note that passing a single type to `Union` is equivalent to not using `Union`\n # at all. This allows us to not di any checks for how many types have been\n # passed as we can safely use `Union` for both optional types\n # (e.g. 
`Optional[str]`) and optional unions (e.g.\n # `Optional[Union[TypeA, TypeB]]`)\n child_type = Union[non_optional_types] # type: ignore\n\n of_type = StrawberryAnnotation(\n annotation=child_type,\n namespace=self.namespace,\n ).resolve()\n\n return StrawberryOptional(of_type)\n\n def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:\n return StrawberryTypeVar(evaled_type)\n\n def create_union(self, evaled_type) -> \"StrawberryUnion\":\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n # TODO: Deal with Forward References/origin\n if isinstance(evaled_type, StrawberryUnion):\n return evaled_type\n\n types = evaled_type.__args__\n union = StrawberryUnion(\n type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),\n )\n return union\n\n @classmethod\n def _is_async_type(cls, annotation: type) -> bool:\n origin = getattr(annotation, \"__origin__\", None)\n return origin in ASYNC_TYPES\n\n @classmethod\n def _is_enum(cls, annotation: Any) -> bool:\n # Type aliases are not types so we need to make sure annotation can go into\n # issubclass\n if not isinstance(annotation, type):\n return False\n return issubclass(annotation, Enum)\n\n @classmethod\n def _is_generic(cls, annotation: Any) -> bool:\n if hasattr(annotation, \"__origin__\"):\n return is_generic(annotation.__origin__)\n\n return False\n\n @classmethod\n def _is_lazy_type(cls, annotation: Any) -> bool:\n return isinstance(annotation, LazyType)\n\n @classmethod\n def _is_optional(cls, annotation: Any) -> bool:\n \"\"\"Returns True if the annotation is Optional[SomeType]\"\"\"\n\n # Optionals are represented as unions\n if not cls._is_union(annotation):\n return False\n\n types = annotation.__args__\n\n # A Union to be optional needs to have at least one None type\n return any(x is type(None) for x in types) # noqa: E721\n\n @classmethod\n def _is_list(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a List\"\"\"\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list\n\n @classmethod\n def _is_strawberry_type(cls, evaled_type: Any) -> bool:\n # Prevent import cycles\n from strawberry.union import StrawberryUnion\n\n if isinstance(evaled_type, EnumDefinition):\n return True\n elif _is_input_type(evaled_type): # TODO: Replace with StrawberryInputObject\n return True\n # TODO: add support for StrawberryInterface when implemented\n elif isinstance(evaled_type, StrawberryList):\n return True\n elif _is_object_type(evaled_type): # TODO: Replace with StrawberryObject\n return True\n elif isinstance(evaled_type, TypeDefinition):\n return True\n elif isinstance(evaled_type, StrawberryOptional):\n return True\n elif isinstance(\n evaled_type, ScalarDefinition\n ): # TODO: Replace with StrawberryScalar\n return True\n elif isinstance(evaled_type, StrawberryUnion):\n return True\n\n return False\n\n @classmethod\n def _is_union(cls, annotation: Any) -> bool:\n \"\"\"Returns True if annotation is a Union\"\"\"\n\n # this check is needed because unions declared with the new syntax `A | B`\n # don't have a `__origin__` property on them, but they are instances of\n # `UnionType`, which is only available in Python 3.10+\n if sys.version_info >= (3, 10):\n from types import UnionType\n\n if isinstance(annotation, UnionType):\n return True\n\n # unions declared as Union[A, B] fall through to this check, even on python 3.10+\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin is 
typing.Union\n\n @classmethod\n def _strip_async_type(cls, annotation) -> type:\n return annotation.__args__[0]\n\n @classmethod\n def _strip_lazy_type(cls, annotation: LazyType) -> type:\n return annotation.resolve_type()\n\n\n################################################################################\n# Temporary functions to be removed with new types\n################################################################################\n\n\ndef _is_input_type(type_: Any) -> bool:\n if not _is_object_type(type_):\n return False\n\n return type_._type_definition.is_input\n\n\ndef _is_object_type(type_: Any) -> bool:\n # isinstance(type_, StrawberryObjectType) # noqa: E800\n return hasattr(type_, \"_type_definition\")\n", "path": "strawberry/annotation.py"}]} | 3,936 | 163 |
gh_patches_debug_26007 | rasdani/github-patches | git_diff | pytorch__vision-6756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add support for multiple dtypes in prototype ToDtype
Right now, the transform takes a single dtype and one or multiple types to apply the dtype to:
https://github.com/pytorch/vision/blob/b482d896f448cc44fdadb030391ac12723a81546/torchvision/prototype/transforms/_misc.py#L143-L144
In case one needs multiple different dtypes, you need to use multiple transforms, e.g.
```py
transform = transforms.Compose(
[
transforms.ToDtype(torch.uint8, features.Image),
transforms.ToDtype(torch.float32, features.BoundingBox),
transforms.ToDtype(torch.int64, features.Label),
]
)
```
Not only has this runtime implications since we need to recurse three times through the same sample, it would also be better UI, if `ToDtype` accepted a mapping specifying the different dtypes
```py
transform = transforms.ToDtype(
{
features.Image: torch.uint8,
features.BoundingBox: torch.float32,
features.Label: torch.int64,
}
)
```
This would also align this parameter with what we did to `fill`.
cc @vfdev-5 @datumbox @bjuncek
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/prototype/transforms/_misc.py`
Content:
```
1 import functools
2 from typing import Any, Callable, Dict, Sequence, Type, Union
3
4 import PIL.Image
5
6 import torch
7 from torchvision.ops import remove_small_boxes
8 from torchvision.prototype import features
9 from torchvision.prototype.transforms import functional as F, Transform
10
11 from ._utils import _setup_float_or_seq, _setup_size, has_any, query_bounding_box
12
13
14 class Identity(Transform):
15 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
16 return inpt
17
18
19 class Lambda(Transform):
20 def __init__(self, lambd: Callable[[Any], Any], *types: Type):
21 super().__init__()
22 self.lambd = lambd
23 self.types = types or (object,)
24
25 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
26 if isinstance(inpt, self.types):
27 return self.lambd(inpt)
28 else:
29 return inpt
30
31 def extra_repr(self) -> str:
32 extras = []
33 name = getattr(self.lambd, "__name__", None)
34 if name:
35 extras.append(name)
36 extras.append(f"types={[type.__name__ for type in self.types]}")
37 return ", ".join(extras)
38
39
40 class LinearTransformation(Transform):
41 _transformed_types = (features.is_simple_tensor, features.Image, features.Video)
42
43 def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
44 super().__init__()
45 if transformation_matrix.size(0) != transformation_matrix.size(1):
46 raise ValueError(
47 "transformation_matrix should be square. Got "
48 f"{tuple(transformation_matrix.size())} rectangular matrix."
49 )
50
51 if mean_vector.size(0) != transformation_matrix.size(0):
52 raise ValueError(
53 f"mean_vector should have the same length {mean_vector.size(0)}"
54 f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
55 )
56
57 if transformation_matrix.device != mean_vector.device:
58 raise ValueError(
59 f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
60 )
61
62 self.transformation_matrix = transformation_matrix
63 self.mean_vector = mean_vector
64
65 def forward(self, *inputs: Any) -> Any:
66 if has_any(inputs, PIL.Image.Image):
67 raise TypeError("LinearTransformation does not work on PIL Images")
68
69 return super().forward(*inputs)
70
71 def _transform(
72 self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
73 ) -> torch.Tensor:
74 # Image instance after linear transformation is not Image anymore due to unknown data range
75 # Thus we will return Tensor for input Image
76
77 shape = inpt.shape
78 n = shape[-3] * shape[-2] * shape[-1]
79 if n != self.transformation_matrix.shape[0]:
80 raise ValueError(
81 "Input tensor and transformation matrix have incompatible shape."
82 + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
83 + f"{self.transformation_matrix.shape[0]}"
84 )
85
86 if inpt.device.type != self.mean_vector.device.type:
87 raise ValueError(
88 "Input tensor should be on the same device as transformation matrix and mean vector. "
89 f"Got {inpt.device} vs {self.mean_vector.device}"
90 )
91
92 flat_tensor = inpt.view(-1, n) - self.mean_vector
93 transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
94 return transformed_tensor.view(shape)
95
96
97 class Normalize(Transform):
98 _transformed_types = (features.Image, features.is_simple_tensor, features.Video)
99
100 def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):
101 super().__init__()
102 self.mean = list(mean)
103 self.std = list(std)
104 self.inplace = inplace
105
106 def _transform(
107 self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]
108 ) -> torch.Tensor:
109 return F.normalize(inpt, mean=self.mean, std=self.std, inplace=self.inplace)
110
111 def forward(self, *inpts: Any) -> Any:
112 if has_any(inpts, PIL.Image.Image):
113 raise TypeError(f"{type(self).__name__}() does not support PIL images.")
114 return super().forward(*inpts)
115
116
117 class GaussianBlur(Transform):
118 def __init__(
119 self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)
120 ) -> None:
121 super().__init__()
122 self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
123 for ks in self.kernel_size:
124 if ks <= 0 or ks % 2 == 0:
125 raise ValueError("Kernel size value should be an odd and positive number.")
126
127 if isinstance(sigma, (int, float)):
128 if sigma <= 0:
129 raise ValueError("If sigma is a single number, it must be positive.")
130 sigma = float(sigma)
131 elif isinstance(sigma, Sequence) and len(sigma) == 2:
132 if not 0.0 < sigma[0] <= sigma[1]:
133 raise ValueError("sigma values should be positive and of the form (min, max).")
134 else:
135 raise TypeError("sigma should be a single int or float or a list/tuple with length 2 floats.")
136
137 self.sigma = _setup_float_or_seq(sigma, "sigma", 2)
138
139 def _get_params(self, sample: Any) -> Dict[str, Any]:
140 sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
141 return dict(sigma=[sigma, sigma])
142
143 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
144 return F.gaussian_blur(inpt, self.kernel_size, **params)
145
146
147 # TODO: Enhance as described at https://github.com/pytorch/vision/issues/6697
148 class ToDtype(Lambda):
149 def __init__(self, dtype: torch.dtype, *types: Type) -> None:
150 self.dtype = dtype
151 super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types or (torch.Tensor,))
152
153 def extra_repr(self) -> str:
154 return ", ".join([f"dtype={self.dtype}", f"types={[type.__name__ for type in self.types]}"])
155
156
157 class RemoveSmallBoundingBoxes(Transform):
158 _transformed_types = (features.BoundingBox, features.Mask, features.Label, features.OneHotLabel)
159
160 def __init__(self, min_size: float = 1.0) -> None:
161 super().__init__()
162 self.min_size = min_size
163
164 def _get_params(self, sample: Any) -> Dict[str, Any]:
165 bounding_box = query_bounding_box(sample)
166
167 # TODO: We can improve performance here by not using the `remove_small_boxes` function. It requires the box to
168 # be in XYXY format only to calculate the width and height internally. Thus, if the box is in XYWH or CXCYWH
169 # format,we need to convert first just to afterwards compute the width and height again, although they were
170 # there in the first place for these formats.
171 bounding_box = F.convert_format_bounding_box(
172 bounding_box, old_format=bounding_box.format, new_format=features.BoundingBoxFormat.XYXY
173 )
174 valid_indices = remove_small_boxes(bounding_box, min_size=self.min_size)
175
176 return dict(valid_indices=valid_indices)
177
178 def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
179 return inpt.wrap_like(inpt, inpt[params["valid_indices"]])
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py
--- a/torchvision/prototype/transforms/_misc.py
+++ b/torchvision/prototype/transforms/_misc.py
@@ -1,4 +1,5 @@
import functools
+from collections import defaultdict
from typing import Any, Callable, Dict, Sequence, Type, Union
import PIL.Image
@@ -144,14 +145,22 @@
return F.gaussian_blur(inpt, self.kernel_size, **params)
-# TODO: Enhance as described at https://github.com/pytorch/vision/issues/6697
-class ToDtype(Lambda):
- def __init__(self, dtype: torch.dtype, *types: Type) -> None:
+class ToDtype(Transform):
+ _transformed_types = (torch.Tensor,)
+
+ def _default_dtype(self, dtype: torch.dtype) -> torch.dtype:
+ return dtype
+
+ def __init__(self, dtype: Union[torch.dtype, Dict[Type, torch.dtype]]) -> None:
+ super().__init__()
+ if not isinstance(dtype, dict):
+ # This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
+ # If it were possible, we could replace this with `defaultdict(lambda: dtype)`
+ dtype = defaultdict(functools.partial(self._default_dtype, dtype))
self.dtype = dtype
- super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types or (torch.Tensor,))
- def extra_repr(self) -> str:
- return ", ".join([f"dtype={self.dtype}", f"types={[type.__name__ for type in self.types]}"])
+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
+ return inpt.to(self.dtype[type(inpt)])
class RemoveSmallBoundingBoxes(Transform):
| {"golden_diff": "diff --git a/torchvision/prototype/transforms/_misc.py b/torchvision/prototype/transforms/_misc.py\n--- a/torchvision/prototype/transforms/_misc.py\n+++ b/torchvision/prototype/transforms/_misc.py\n@@ -1,4 +1,5 @@\n import functools\n+from collections import defaultdict\n from typing import Any, Callable, Dict, Sequence, Type, Union\n \n import PIL.Image\n@@ -144,14 +145,22 @@\n return F.gaussian_blur(inpt, self.kernel_size, **params)\n \n \n-# TODO: Enhance as described at https://github.com/pytorch/vision/issues/6697\n-class ToDtype(Lambda):\n- def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n+class ToDtype(Transform):\n+ _transformed_types = (torch.Tensor,)\n+\n+ def _default_dtype(self, dtype: torch.dtype) -> torch.dtype:\n+ return dtype\n+\n+ def __init__(self, dtype: Union[torch.dtype, Dict[Type, torch.dtype]]) -> None:\n+ super().__init__()\n+ if not isinstance(dtype, dict):\n+ # This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.\n+ # If it were possible, we could replace this with `defaultdict(lambda: dtype)`\n+ dtype = defaultdict(functools.partial(self._default_dtype, dtype))\n self.dtype = dtype\n- super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types or (torch.Tensor,))\n \n- def extra_repr(self) -> str:\n- return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n+ def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n+ return inpt.to(self.dtype[type(inpt)])\n \n \n class RemoveSmallBoundingBoxes(Transform):\n", "issue": "add support for multiple dtypes in prototype ToDtype \nRight now, the transform takes a single dtype and one or multiple types to apply the dtype to:\r\n\r\nhttps://github.com/pytorch/vision/blob/b482d896f448cc44fdadb030391ac12723a81546/torchvision/prototype/transforms/_misc.py#L143-L144\r\n\r\nIn case one needs multiple different dtypes, you need to use multiple transforms, e.g.\r\n\r\n```py\r\ntransform = transforms.Compose(\r\n [\r\n transforms.ToDtype(torch.uint8, features.Image),\r\n transforms.ToDtype(torch.float32, features.BoundingBox),\r\n transforms.ToDtype(torch.int64, features.Label),\r\n ]\r\n)\r\n```\r\n\r\nNot only has this runtime implications since we need to recurse three times through the same sample, it would also be better UI, if `ToDtype` accepted a mapping specifying the different dtypes\r\n\r\n```py\r\ntransform = transforms.ToDtype(\r\n {\r\n features.Image: torch.uint8,\r\n features.BoundingBox: torch.float32,\r\n features.Label: torch.int64,\r\n }\r\n)\r\n```\r\n\r\nThis would also align this parameter with what we did to `fill`.\n\ncc @vfdev-5 @datumbox @bjuncek\n", "before_files": [{"content": "import functools\nfrom typing import Any, Callable, Dict, Sequence, Type, Union\n\nimport PIL.Image\n\nimport torch\nfrom torchvision.ops import remove_small_boxes\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import functional as F, Transform\n\nfrom ._utils import _setup_float_or_seq, _setup_size, has_any, query_bounding_box\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, lambd: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.lambd = lambd\n self.types = types or (object,)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if isinstance(inpt, self.types):\n return self.lambd(inpt)\n else:\n return inpt\n\n def 
extra_repr(self) -> str:\n extras = []\n name = getattr(self.lambd, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass LinearTransformation(Transform):\n _transformed_types = (features.is_simple_tensor, features.Image, features.Video)\n\n def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\n \"transformation_matrix should be square. Got \"\n f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n )\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\n f\"mean_vector should have the same length {mean_vector.size(0)}\"\n f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n )\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\n f\"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}\"\n )\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def forward(self, *inputs: Any) -> Any:\n if has_any(inputs, PIL.Image.Image):\n raise TypeError(\"LinearTransformation does not work on PIL Images\")\n\n return super().forward(*inputs)\n\n def _transform(\n self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]\n ) -> torch.Tensor:\n # Image instance after linear transformation is not Image anymore due to unknown data range\n # Thus we will return Tensor for input Image\n\n shape = inpt.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\n \"Input tensor and transformation matrix have incompatible shape.\"\n + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n + f\"{self.transformation_matrix.shape[0]}\"\n )\n\n if inpt.device.type != self.mean_vector.device.type:\n raise ValueError(\n \"Input tensor should be on the same device as transformation matrix and mean vector. 
\"\n f\"Got {inpt.device} vs {self.mean_vector.device}\"\n )\n\n flat_tensor = inpt.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n return transformed_tensor.view(shape)\n\n\nclass Normalize(Transform):\n _transformed_types = (features.Image, features.is_simple_tensor, features.Video)\n\n def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):\n super().__init__()\n self.mean = list(mean)\n self.std = list(std)\n self.inplace = inplace\n\n def _transform(\n self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]\n ) -> torch.Tensor:\n return F.normalize(inpt, mean=self.mean, std=self.std, inplace=self.inplace)\n\n def forward(self, *inpts: Any) -> Any:\n if has_any(inpts, PIL.Image.Image):\n raise TypeError(f\"{type(self).__name__}() does not support PIL images.\")\n return super().forward(*inpts)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, (int, float)):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = float(sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single int or float or a list/tuple with length 2 floats.\")\n\n self.sigma = _setup_float_or_seq(sigma, \"sigma\", 2)\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, self.kernel_size, **params)\n\n\n# TODO: Enhance as described at https://github.com/pytorch/vision/issues/6697\nclass ToDtype(Lambda):\n def __init__(self, dtype: torch.dtype, *types: Type) -> None:\n self.dtype = dtype\n super().__init__(functools.partial(torch.Tensor.to, dtype=dtype), *types or (torch.Tensor,))\n\n def extra_repr(self) -> str:\n return \", \".join([f\"dtype={self.dtype}\", f\"types={[type.__name__ for type in self.types]}\"])\n\n\nclass RemoveSmallBoundingBoxes(Transform):\n _transformed_types = (features.BoundingBox, features.Mask, features.Label, features.OneHotLabel)\n\n def __init__(self, min_size: float = 1.0) -> None:\n super().__init__()\n self.min_size = min_size\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n bounding_box = query_bounding_box(sample)\n\n # TODO: We can improve performance here by not using the `remove_small_boxes` function. It requires the box to\n # be in XYXY format only to calculate the width and height internally. 
Thus, if the box is in XYWH or CXCYWH\n # format,we need to convert first just to afterwards compute the width and height again, although they were\n # there in the first place for these formats.\n bounding_box = F.convert_format_bounding_box(\n bounding_box, old_format=bounding_box.format, new_format=features.BoundingBoxFormat.XYXY\n )\n valid_indices = remove_small_boxes(bounding_box, min_size=self.min_size)\n\n return dict(valid_indices=valid_indices)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt.wrap_like(inpt, inpt[params[\"valid_indices\"]])\n", "path": "torchvision/prototype/transforms/_misc.py"}], "after_files": [{"content": "import functools\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, Sequence, Type, Union\n\nimport PIL.Image\n\nimport torch\nfrom torchvision.ops import remove_small_boxes\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import functional as F, Transform\n\nfrom ._utils import _setup_float_or_seq, _setup_size, has_any, query_bounding_box\n\n\nclass Identity(Transform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt\n\n\nclass Lambda(Transform):\n def __init__(self, lambd: Callable[[Any], Any], *types: Type):\n super().__init__()\n self.lambd = lambd\n self.types = types or (object,)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if isinstance(inpt, self.types):\n return self.lambd(inpt)\n else:\n return inpt\n\n def extra_repr(self) -> str:\n extras = []\n name = getattr(self.lambd, \"__name__\", None)\n if name:\n extras.append(name)\n extras.append(f\"types={[type.__name__ for type in self.types]}\")\n return \", \".join(extras)\n\n\nclass LinearTransformation(Transform):\n _transformed_types = (features.is_simple_tensor, features.Image, features.Video)\n\n def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):\n super().__init__()\n if transformation_matrix.size(0) != transformation_matrix.size(1):\n raise ValueError(\n \"transformation_matrix should be square. Got \"\n f\"{tuple(transformation_matrix.size())} rectangular matrix.\"\n )\n\n if mean_vector.size(0) != transformation_matrix.size(0):\n raise ValueError(\n f\"mean_vector should have the same length {mean_vector.size(0)}\"\n f\" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]\"\n )\n\n if transformation_matrix.device != mean_vector.device:\n raise ValueError(\n f\"Input tensors should be on the same device. 
Got {transformation_matrix.device} and {mean_vector.device}\"\n )\n\n self.transformation_matrix = transformation_matrix\n self.mean_vector = mean_vector\n\n def forward(self, *inputs: Any) -> Any:\n if has_any(inputs, PIL.Image.Image):\n raise TypeError(\"LinearTransformation does not work on PIL Images\")\n\n return super().forward(*inputs)\n\n def _transform(\n self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]\n ) -> torch.Tensor:\n # Image instance after linear transformation is not Image anymore due to unknown data range\n # Thus we will return Tensor for input Image\n\n shape = inpt.shape\n n = shape[-3] * shape[-2] * shape[-1]\n if n != self.transformation_matrix.shape[0]:\n raise ValueError(\n \"Input tensor and transformation matrix have incompatible shape.\"\n + f\"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != \"\n + f\"{self.transformation_matrix.shape[0]}\"\n )\n\n if inpt.device.type != self.mean_vector.device.type:\n raise ValueError(\n \"Input tensor should be on the same device as transformation matrix and mean vector. \"\n f\"Got {inpt.device} vs {self.mean_vector.device}\"\n )\n\n flat_tensor = inpt.view(-1, n) - self.mean_vector\n transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)\n return transformed_tensor.view(shape)\n\n\nclass Normalize(Transform):\n _transformed_types = (features.Image, features.is_simple_tensor, features.Video)\n\n def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):\n super().__init__()\n self.mean = list(mean)\n self.std = list(std)\n self.inplace = inplace\n\n def _transform(\n self, inpt: Union[features.TensorImageType, features.TensorVideoType], params: Dict[str, Any]\n ) -> torch.Tensor:\n return F.normalize(inpt, mean=self.mean, std=self.std, inplace=self.inplace)\n\n def forward(self, *inpts: Any) -> Any:\n if has_any(inpts, PIL.Image.Image):\n raise TypeError(f\"{type(self).__name__}() does not support PIL images.\")\n return super().forward(*inpts)\n\n\nclass GaussianBlur(Transform):\n def __init__(\n self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)\n ) -> None:\n super().__init__()\n self.kernel_size = _setup_size(kernel_size, \"Kernel size should be a tuple/list of two integers\")\n for ks in self.kernel_size:\n if ks <= 0 or ks % 2 == 0:\n raise ValueError(\"Kernel size value should be an odd and positive number.\")\n\n if isinstance(sigma, (int, float)):\n if sigma <= 0:\n raise ValueError(\"If sigma is a single number, it must be positive.\")\n sigma = float(sigma)\n elif isinstance(sigma, Sequence) and len(sigma) == 2:\n if not 0.0 < sigma[0] <= sigma[1]:\n raise ValueError(\"sigma values should be positive and of the form (min, max).\")\n else:\n raise TypeError(\"sigma should be a single int or float or a list/tuple with length 2 floats.\")\n\n self.sigma = _setup_float_or_seq(sigma, \"sigma\", 2)\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()\n return dict(sigma=[sigma, sigma])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.gaussian_blur(inpt, self.kernel_size, **params)\n\n\nclass ToDtype(Transform):\n _transformed_types = (torch.Tensor,)\n\n def _default_dtype(self, dtype: torch.dtype) -> torch.dtype:\n return dtype\n\n def __init__(self, dtype: Union[torch.dtype, Dict[Type, torch.dtype]]) -> None:\n super().__init__()\n if not isinstance(dtype, dict):\n # This weird 
looking construct only exists, since `lambda`'s cannot be serialized by pickle.\n # If it were possible, we could replace this with `defaultdict(lambda: dtype)`\n dtype = defaultdict(functools.partial(self._default_dtype, dtype))\n self.dtype = dtype\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt.to(self.dtype[type(inpt)])\n\n\nclass RemoveSmallBoundingBoxes(Transform):\n _transformed_types = (features.BoundingBox, features.Mask, features.Label, features.OneHotLabel)\n\n def __init__(self, min_size: float = 1.0) -> None:\n super().__init__()\n self.min_size = min_size\n\n def _get_params(self, sample: Any) -> Dict[str, Any]:\n bounding_box = query_bounding_box(sample)\n\n # TODO: We can improve performance here by not using the `remove_small_boxes` function. It requires the box to\n # be in XYXY format only to calculate the width and height internally. Thus, if the box is in XYWH or CXCYWH\n # format,we need to convert first just to afterwards compute the width and height again, although they were\n # there in the first place for these formats.\n bounding_box = F.convert_format_bounding_box(\n bounding_box, old_format=bounding_box.format, new_format=features.BoundingBoxFormat.XYXY\n )\n valid_indices = remove_small_boxes(bounding_box, min_size=self.min_size)\n\n return dict(valid_indices=valid_indices)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return inpt.wrap_like(inpt, inpt[params[\"valid_indices\"]])\n", "path": "torchvision/prototype/transforms/_misc.py"}]} | 2,688 | 429 |
gh_patches_debug_22004 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to set `server_replay_ignore_params` in the UI
#### Value of server_replay_ignore_params is ignored when set in the UI or through command line
#### Steps to reproduce the behavior:
1. Prepare a mock endpoint that works with 5 s delay. In this example I will use a configured mock https://hello.free.beeceptor.com/delay
2. Prepare a recording for the endpoint that contains calls to the endpoint with a parameter `param` (any value) and save it to `my.flow`.
3. Launch the software in the reverse proxy mode, using any of following:
a. `mitmweb --mode reverse:https://hello.free.beeceptor.com --server-replay my.flow --server-replay-nopop --web-port 8888`
b. `mitmproxy --mode reverse:https://hello.free.beeceptor.com --server-replay my_flow --server-replay-nopop`
4. In the UI:
a. Go to Options, Edit Options, set `server_replay_ignore_params` to `param` and close Options.
b. Press Shift-O for Options, scroll to `server_replay_ignore_params`, press `a`, enter `param`, press `Esc`, `q`, `q`.
5. Issue some requests through the proxy with value of `param` not existing in the recording. E.g.
`curl 'http://localhost:8080/delay?param=value5'`
**EDIT**: corrected repro steps for TUI.
#### Problem
The setting has no effect, you will see 5 s delay in the response.
#### System Information
Mitmproxy: 6.0.2
Python: 3.9.2
OpenSSL: OpenSSL 1.1.1i 8 Dec 2020
Platform: macOS-10.15.7-x86_64-i386-64bit
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/serverplayback.py`
Content:
```
1 import hashlib
2 import logging
3 import urllib
4 from collections.abc import Hashable
5 from collections.abc import Sequence
6 from typing import Any
7
8 import mitmproxy.types
9 from mitmproxy import command
10 from mitmproxy import ctx
11 from mitmproxy import exceptions
12 from mitmproxy import flow
13 from mitmproxy import hooks
14 from mitmproxy import http
15 from mitmproxy import io
16
17 logger = logging.getLogger(__name__)
18
19
20 class ServerPlayback:
21 flowmap: dict[Hashable, list[http.HTTPFlow]]
22 configured: bool
23
24 def __init__(self):
25 self.flowmap = {}
26 self.configured = False
27
28 def load(self, loader):
29 loader.add_option(
30 "server_replay_kill_extra",
31 bool,
32 False,
33 "Kill extra requests during replay (for which no replayable response was found)."
34 "[Deprecated, prefer to use server_replay_extra='kill']",
35 )
36 loader.add_option(
37 "server_replay_extra",
38 str,
39 "forward",
40 "Behaviour for extra requests during replay for which no replayable response was found. "
41 "Setting a numeric string value will return an empty HTTP response with the respective status code.",
42 choices=["forward", "kill", "204", "400", "404", "500"],
43 )
44 loader.add_option(
45 "server_replay_reuse",
46 bool,
47 False,
48 """
49 Don't remove flows from server replay state after use. This makes it
50 possible to replay same response multiple times.
51 """,
52 )
53 loader.add_option(
54 "server_replay_nopop",
55 bool,
56 False,
57 """
58 Deprecated alias for `server_replay_reuse`.
59 """,
60 )
61 loader.add_option(
62 "server_replay_refresh",
63 bool,
64 True,
65 """
66 Refresh server replay responses by adjusting date, expires and
67 last-modified headers, as well as adjusting cookie expiration.
68 """,
69 )
70 loader.add_option(
71 "server_replay_use_headers",
72 Sequence[str],
73 [],
74 """
75 Request headers that need to match while searching for a saved flow
76 to replay.
77 """,
78 )
79 loader.add_option(
80 "server_replay",
81 Sequence[str],
82 [],
83 "Replay server responses from a saved file.",
84 )
85 loader.add_option(
86 "server_replay_ignore_content",
87 bool,
88 False,
89 "Ignore request content while searching for a saved flow to replay.",
90 )
91 loader.add_option(
92 "server_replay_ignore_params",
93 Sequence[str],
94 [],
95 """
96 Request parameters to be ignored while searching for a saved flow
97 to replay.
98 """,
99 )
100 loader.add_option(
101 "server_replay_ignore_payload_params",
102 Sequence[str],
103 [],
104 """
105 Request payload parameters (application/x-www-form-urlencoded or
106 multipart/form-data) to be ignored while searching for a saved flow
107 to replay.
108 """,
109 )
110 loader.add_option(
111 "server_replay_ignore_host",
112 bool,
113 False,
114 """
115 Ignore request destination host while searching for a saved flow
116 to replay.
117 """,
118 )
119 loader.add_option(
120 "server_replay_ignore_port",
121 bool,
122 False,
123 """
124 Ignore request destination port while searching for a saved flow
125 to replay.
126 """,
127 )
128
129 @command.command("replay.server")
130 def load_flows(self, flows: Sequence[flow.Flow]) -> None:
131 """
132 Replay server responses from flows.
133 """
134 self.flowmap = {}
135 self.add_flows(flows)
136
137 @command.command("replay.server.add")
138 def add_flows(self, flows: Sequence[flow.Flow]) -> None:
139 """
140 Add responses from flows to server replay list.
141 """
142 for f in flows:
143 if isinstance(f, http.HTTPFlow):
144 lst = self.flowmap.setdefault(self._hash(f), [])
145 lst.append(f)
146 ctx.master.addons.trigger(hooks.UpdateHook([]))
147
148 @command.command("replay.server.file")
149 def load_file(self, path: mitmproxy.types.Path) -> None:
150 try:
151 flows = io.read_flows_from_paths([path])
152 except exceptions.FlowReadException as e:
153 raise exceptions.CommandError(str(e))
154 self.load_flows(flows)
155
156 @command.command("replay.server.stop")
157 def clear(self) -> None:
158 """
159 Stop server replay.
160 """
161 self.flowmap = {}
162 ctx.master.addons.trigger(hooks.UpdateHook([]))
163
164 @command.command("replay.server.count")
165 def count(self) -> int:
166 return sum(len(i) for i in self.flowmap.values())
167
168 def _hash(self, flow: http.HTTPFlow) -> Hashable:
169 """
170 Calculates a loose hash of the flow request.
171 """
172 r = flow.request
173 _, _, path, _, query, _ = urllib.parse.urlparse(r.url)
174 queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
175
176 key: list[Any] = [str(r.scheme), str(r.method), str(path)]
177 if not ctx.options.server_replay_ignore_content:
178 if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
179 key.extend(
180 (k, v)
181 for k, v in r.multipart_form.items(multi=True)
182 if k.decode(errors="replace")
183 not in ctx.options.server_replay_ignore_payload_params
184 )
185 elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
186 key.extend(
187 (k, v)
188 for k, v in r.urlencoded_form.items(multi=True)
189 if k not in ctx.options.server_replay_ignore_payload_params
190 )
191 else:
192 key.append(str(r.raw_content))
193
194 if not ctx.options.server_replay_ignore_host:
195 key.append(r.pretty_host)
196 if not ctx.options.server_replay_ignore_port:
197 key.append(r.port)
198
199 filtered = []
200 ignore_params = ctx.options.server_replay_ignore_params or []
201 for p in queriesArray:
202 if p[0] not in ignore_params:
203 filtered.append(p)
204 for p in filtered:
205 key.append(p[0])
206 key.append(p[1])
207
208 if ctx.options.server_replay_use_headers:
209 headers = []
210 for i in ctx.options.server_replay_use_headers:
211 v = r.headers.get(i)
212 headers.append((i, v))
213 key.append(headers)
214 return hashlib.sha256(repr(key).encode("utf8", "surrogateescape")).digest()
215
216 def next_flow(self, flow: http.HTTPFlow) -> http.HTTPFlow | None:
217 """
218 Returns the next flow object, or None if no matching flow was
219 found.
220 """
221 hash = self._hash(flow)
222 if hash in self.flowmap:
223 if ctx.options.server_replay_reuse or ctx.options.server_replay_nopop:
224 return next(
225 (flow for flow in self.flowmap[hash] if flow.response), None
226 )
227 else:
228 ret = self.flowmap[hash].pop(0)
229 while not ret.response:
230 if self.flowmap[hash]:
231 ret = self.flowmap[hash].pop(0)
232 else:
233 del self.flowmap[hash]
234 return None
235 if not self.flowmap[hash]:
236 del self.flowmap[hash]
237 return ret
238 else:
239 return None
240
241 def configure(self, updated):
242 if ctx.options.server_replay_kill_extra:
243 logger.warning(
244 "server_replay_kill_extra has been deprecated, "
245 "please update your config to use server_replay_extra='kill'."
246 )
247 if ctx.options.server_replay_nopop: # pragma: no cover
248 logger.error(
249 "server_replay_nopop has been renamed to server_replay_reuse, please update your config."
250 )
251 if not self.configured and ctx.options.server_replay:
252 self.configured = True
253 try:
254 flows = io.read_flows_from_paths(ctx.options.server_replay)
255 except exceptions.FlowReadException as e:
256 raise exceptions.OptionsError(str(e))
257 self.load_flows(flows)
258
259 def request(self, f: http.HTTPFlow) -> None:
260 if self.flowmap:
261 rflow = self.next_flow(f)
262 if rflow:
263 assert rflow.response
264 response = rflow.response.copy()
265 if ctx.options.server_replay_refresh:
266 response.refresh()
267 f.response = response
268 f.is_replay = "response"
269 elif (
270 ctx.options.server_replay_kill_extra
271 or ctx.options.server_replay_extra == "kill"
272 ):
273 logging.warning(
274 "server_playback: killed non-replay request {}".format(
275 f.request.url
276 )
277 )
278 f.kill()
279 elif ctx.options.server_replay_extra != "forward":
280 logging.warning(
281 "server_playback: returned {} non-replay request {}".format(
282 ctx.options.server_replay_extra, f.request.url
283 )
284 )
285 f.response = http.Response.make(int(ctx.options.server_replay_extra))
286 f.is_replay = "response"
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/serverplayback.py b/mitmproxy/addons/serverplayback.py
--- a/mitmproxy/addons/serverplayback.py
+++ b/mitmproxy/addons/serverplayback.py
@@ -16,6 +16,15 @@
logger = logging.getLogger(__name__)
+HASH_OPTIONS = [
+ "server_replay_ignore_content",
+ "server_replay_ignore_host",
+ "server_replay_ignore_params",
+ "server_replay_ignore_payload_params",
+ "server_replay_ignore_port",
+ "server_replay_use_headers",
+]
+
class ServerPlayback:
flowmap: dict[Hashable, list[http.HTTPFlow]]
@@ -255,6 +264,16 @@
except exceptions.FlowReadException as e:
raise exceptions.OptionsError(str(e))
self.load_flows(flows)
+ if any(option in updated for option in HASH_OPTIONS):
+ self.recompute_hashes()
+
+ def recompute_hashes(self) -> None:
+ """
+ Rebuild flowmap if the hashing method has changed during execution,
+ see https://github.com/mitmproxy/mitmproxy/issues/4506
+ """
+ flows = [flow for lst in self.flowmap.values() for flow in lst]
+ self.load_flows(flows)
def request(self, f: http.HTTPFlow) -> None:
if self.flowmap:
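A rough test-style check of the patch, assuming it is applied and using mitmproxy's own test helpers (`taddons`, `tflow`) in the way the project's test suite does — treat this as a sketch rather than an exact regression test:

```python
from mitmproxy.addons import serverplayback
from mitmproxy.test import taddons, tflow


def test_rehash_on_option_change():
    sp = serverplayback.ServerPlayback()
    with taddons.context(sp) as tctx:
        f = tflow.tflow(resp=True)
        sp.load_flows([f])
        # Changing a hash-affecting option should now rebuild the flowmap
        # instead of leaving stale keys behind.
        tctx.configure(sp, server_replay_ignore_params=["param"])
        assert sp.count() == 1
        assert sp.next_flow(tflow.tflow(resp=True)) is not None
```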
| {"golden_diff": "diff --git a/mitmproxy/addons/serverplayback.py b/mitmproxy/addons/serverplayback.py\n--- a/mitmproxy/addons/serverplayback.py\n+++ b/mitmproxy/addons/serverplayback.py\n@@ -16,6 +16,15 @@\n \n logger = logging.getLogger(__name__)\n \n+HASH_OPTIONS = [\n+ \"server_replay_ignore_content\",\n+ \"server_replay_ignore_host\",\n+ \"server_replay_ignore_params\",\n+ \"server_replay_ignore_payload_params\",\n+ \"server_replay_ignore_port\",\n+ \"server_replay_use_headers\",\n+]\n+\n \n class ServerPlayback:\n flowmap: dict[Hashable, list[http.HTTPFlow]]\n@@ -255,6 +264,16 @@\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.load_flows(flows)\n+ if any(option in updated for option in HASH_OPTIONS):\n+ self.recompute_hashes()\n+\n+ def recompute_hashes(self) -> None:\n+ \"\"\"\n+ Rebuild flowmap if the hashing method has changed during execution,\n+ see https://github.com/mitmproxy/mitmproxy/issues/4506\n+ \"\"\"\n+ flows = [flow for lst in self.flowmap.values() for flow in lst]\n+ self.load_flows(flows)\n \n def request(self, f: http.HTTPFlow) -> None:\n if self.flowmap:\n", "issue": "Unable to set `server_replay_ignore_params` in the UI\n#### Value of server_replay_ignore_params is ignored when set in the UI or through command line\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Prepare a mock endpoint that works with 5 s delay. In this example I will use a configured mock https://hello.free.beeceptor.com/delay\r\n2. Prepare a recording for the endpoint that contains calls to the endpoint with a parameter `param` (any value) and save it to `my.flow`.\r\n3. Launch the software in the reverse proxy mode, using any of following:\r\n a. `mitmweb --mode reverse:https://hello.free.beeceptor.com --server-replay my.flow --server-replay-nopop --web-port 8888`\r\n b. `mitmproxy --mode reverse:https://hello.free.beeceptor.com --server-replay my_flow --server-replay-nopop`\r\n4. In the UI:\r\n a. Go to Options, Edit Options, set `server_replay_ignore_params` to `param` and close Options.\r\n b. Press Shift-O for Options, scroll to `server_replay_ignore_params`, press `a`, enter `param`, press `Esc`, `q`, `q`.\r\n5. Issue some requests through the proxy with value of `param` not existing in the recording. E.g. 
\r\n`curl 'http://localhost:8080/delay?param=value5'`\r\n\r\n**EDIT**: corrected repro steps for TUI.\r\n\r\n#### Problem\r\nThe setting has no effect, you will see 5 s delay in the response.\r\n\r\n#### System Information\r\nMitmproxy: 6.0.2\r\nPython: 3.9.2\r\nOpenSSL: OpenSSL 1.1.1i 8 Dec 2020\r\nPlatform: macOS-10.15.7-x86_64-i386-64bit\n", "before_files": [{"content": "import hashlib\nimport logging\nimport urllib\nfrom collections.abc import Hashable\nfrom collections.abc import Sequence\nfrom typing import Any\n\nimport mitmproxy.types\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import hooks\nfrom mitmproxy import http\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServerPlayback:\n flowmap: dict[Hashable, list[http.HTTPFlow]]\n configured: bool\n\n def __init__(self):\n self.flowmap = {}\n self.configured = False\n\n def load(self, loader):\n loader.add_option(\n \"server_replay_kill_extra\",\n bool,\n False,\n \"Kill extra requests during replay (for which no replayable response was found).\"\n \"[Deprecated, prefer to use server_replay_extra='kill']\",\n )\n loader.add_option(\n \"server_replay_extra\",\n str,\n \"forward\",\n \"Behaviour for extra requests during replay for which no replayable response was found. \"\n \"Setting a numeric string value will return an empty HTTP response with the respective status code.\",\n choices=[\"forward\", \"kill\", \"204\", \"400\", \"404\", \"500\"],\n )\n loader.add_option(\n \"server_replay_reuse\",\n bool,\n False,\n \"\"\"\n Don't remove flows from server replay state after use. This makes it\n possible to replay same response multiple times.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_nopop\",\n bool,\n False,\n \"\"\"\n Deprecated alias for `server_replay_reuse`.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_refresh\",\n bool,\n True,\n \"\"\"\n Refresh server replay responses by adjusting date, expires and\n last-modified headers, as well as adjusting cookie expiration.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_use_headers\",\n Sequence[str],\n [],\n \"\"\"\n Request headers that need to match while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay\",\n Sequence[str],\n [],\n \"Replay server responses from a saved file.\",\n )\n loader.add_option(\n \"server_replay_ignore_content\",\n bool,\n False,\n \"Ignore request content while searching for a saved flow to replay.\",\n )\n loader.add_option(\n \"server_replay_ignore_params\",\n Sequence[str],\n [],\n \"\"\"\n Request parameters to be ignored while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_payload_params\",\n Sequence[str],\n [],\n \"\"\"\n Request payload parameters (application/x-www-form-urlencoded or\n multipart/form-data) to be ignored while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_host\",\n bool,\n False,\n \"\"\"\n Ignore request destination host while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_port\",\n bool,\n False,\n \"\"\"\n Ignore request destination port while searching for a saved flow\n to replay.\n \"\"\",\n )\n\n @command.command(\"replay.server\")\n def load_flows(self, flows: Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay server responses from flows.\n \"\"\"\n self.flowmap = {}\n 
self.add_flows(flows)\n\n @command.command(\"replay.server.add\")\n def add_flows(self, flows: Sequence[flow.Flow]) -> None:\n \"\"\"\n Add responses from flows to server replay list.\n \"\"\"\n for f in flows:\n if isinstance(f, http.HTTPFlow):\n lst = self.flowmap.setdefault(self._hash(f), [])\n lst.append(f)\n ctx.master.addons.trigger(hooks.UpdateHook([]))\n\n @command.command(\"replay.server.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n self.load_flows(flows)\n\n @command.command(\"replay.server.stop\")\n def clear(self) -> None:\n \"\"\"\n Stop server replay.\n \"\"\"\n self.flowmap = {}\n ctx.master.addons.trigger(hooks.UpdateHook([]))\n\n @command.command(\"replay.server.count\")\n def count(self) -> int:\n return sum(len(i) for i in self.flowmap.values())\n\n def _hash(self, flow: http.HTTPFlow) -> Hashable:\n \"\"\"\n Calculates a loose hash of the flow request.\n \"\"\"\n r = flow.request\n _, _, path, _, query, _ = urllib.parse.urlparse(r.url)\n queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)\n\n key: list[Any] = [str(r.scheme), str(r.method), str(path)]\n if not ctx.options.server_replay_ignore_content:\n if ctx.options.server_replay_ignore_payload_params and r.multipart_form:\n key.extend(\n (k, v)\n for k, v in r.multipart_form.items(multi=True)\n if k.decode(errors=\"replace\")\n not in ctx.options.server_replay_ignore_payload_params\n )\n elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:\n key.extend(\n (k, v)\n for k, v in r.urlencoded_form.items(multi=True)\n if k not in ctx.options.server_replay_ignore_payload_params\n )\n else:\n key.append(str(r.raw_content))\n\n if not ctx.options.server_replay_ignore_host:\n key.append(r.pretty_host)\n if not ctx.options.server_replay_ignore_port:\n key.append(r.port)\n\n filtered = []\n ignore_params = ctx.options.server_replay_ignore_params or []\n for p in queriesArray:\n if p[0] not in ignore_params:\n filtered.append(p)\n for p in filtered:\n key.append(p[0])\n key.append(p[1])\n\n if ctx.options.server_replay_use_headers:\n headers = []\n for i in ctx.options.server_replay_use_headers:\n v = r.headers.get(i)\n headers.append((i, v))\n key.append(headers)\n return hashlib.sha256(repr(key).encode(\"utf8\", \"surrogateescape\")).digest()\n\n def next_flow(self, flow: http.HTTPFlow) -> http.HTTPFlow | None:\n \"\"\"\n Returns the next flow object, or None if no matching flow was\n found.\n \"\"\"\n hash = self._hash(flow)\n if hash in self.flowmap:\n if ctx.options.server_replay_reuse or ctx.options.server_replay_nopop:\n return next(\n (flow for flow in self.flowmap[hash] if flow.response), None\n )\n else:\n ret = self.flowmap[hash].pop(0)\n while not ret.response:\n if self.flowmap[hash]:\n ret = self.flowmap[hash].pop(0)\n else:\n del self.flowmap[hash]\n return None\n if not self.flowmap[hash]:\n del self.flowmap[hash]\n return ret\n else:\n return None\n\n def configure(self, updated):\n if ctx.options.server_replay_kill_extra:\n logger.warning(\n \"server_replay_kill_extra has been deprecated, \"\n \"please update your config to use server_replay_extra='kill'.\"\n )\n if ctx.options.server_replay_nopop: # pragma: no cover\n logger.error(\n \"server_replay_nopop has been renamed to server_replay_reuse, please update your config.\"\n )\n if not self.configured and ctx.options.server_replay:\n self.configured = True\n try:\n 
flows = io.read_flows_from_paths(ctx.options.server_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.load_flows(flows)\n\n def request(self, f: http.HTTPFlow) -> None:\n if self.flowmap:\n rflow = self.next_flow(f)\n if rflow:\n assert rflow.response\n response = rflow.response.copy()\n if ctx.options.server_replay_refresh:\n response.refresh()\n f.response = response\n f.is_replay = \"response\"\n elif (\n ctx.options.server_replay_kill_extra\n or ctx.options.server_replay_extra == \"kill\"\n ):\n logging.warning(\n \"server_playback: killed non-replay request {}\".format(\n f.request.url\n )\n )\n f.kill()\n elif ctx.options.server_replay_extra != \"forward\":\n logging.warning(\n \"server_playback: returned {} non-replay request {}\".format(\n ctx.options.server_replay_extra, f.request.url\n )\n )\n f.response = http.Response.make(int(ctx.options.server_replay_extra))\n f.is_replay = \"response\"\n", "path": "mitmproxy/addons/serverplayback.py"}], "after_files": [{"content": "import hashlib\nimport logging\nimport urllib\nfrom collections.abc import Hashable\nfrom collections.abc import Sequence\nfrom typing import Any\n\nimport mitmproxy.types\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import hooks\nfrom mitmproxy import http\nfrom mitmproxy import io\n\nlogger = logging.getLogger(__name__)\n\nHASH_OPTIONS = [\n \"server_replay_ignore_content\",\n \"server_replay_ignore_host\",\n \"server_replay_ignore_params\",\n \"server_replay_ignore_payload_params\",\n \"server_replay_ignore_port\",\n \"server_replay_use_headers\",\n]\n\n\nclass ServerPlayback:\n flowmap: dict[Hashable, list[http.HTTPFlow]]\n configured: bool\n\n def __init__(self):\n self.flowmap = {}\n self.configured = False\n\n def load(self, loader):\n loader.add_option(\n \"server_replay_kill_extra\",\n bool,\n False,\n \"Kill extra requests during replay (for which no replayable response was found).\"\n \"[Deprecated, prefer to use server_replay_extra='kill']\",\n )\n loader.add_option(\n \"server_replay_extra\",\n str,\n \"forward\",\n \"Behaviour for extra requests during replay for which no replayable response was found. \"\n \"Setting a numeric string value will return an empty HTTP response with the respective status code.\",\n choices=[\"forward\", \"kill\", \"204\", \"400\", \"404\", \"500\"],\n )\n loader.add_option(\n \"server_replay_reuse\",\n bool,\n False,\n \"\"\"\n Don't remove flows from server replay state after use. 
This makes it\n possible to replay same response multiple times.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_nopop\",\n bool,\n False,\n \"\"\"\n Deprecated alias for `server_replay_reuse`.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_refresh\",\n bool,\n True,\n \"\"\"\n Refresh server replay responses by adjusting date, expires and\n last-modified headers, as well as adjusting cookie expiration.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_use_headers\",\n Sequence[str],\n [],\n \"\"\"\n Request headers that need to match while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay\",\n Sequence[str],\n [],\n \"Replay server responses from a saved file.\",\n )\n loader.add_option(\n \"server_replay_ignore_content\",\n bool,\n False,\n \"Ignore request content while searching for a saved flow to replay.\",\n )\n loader.add_option(\n \"server_replay_ignore_params\",\n Sequence[str],\n [],\n \"\"\"\n Request parameters to be ignored while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_payload_params\",\n Sequence[str],\n [],\n \"\"\"\n Request payload parameters (application/x-www-form-urlencoded or\n multipart/form-data) to be ignored while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_host\",\n bool,\n False,\n \"\"\"\n Ignore request destination host while searching for a saved flow\n to replay.\n \"\"\",\n )\n loader.add_option(\n \"server_replay_ignore_port\",\n bool,\n False,\n \"\"\"\n Ignore request destination port while searching for a saved flow\n to replay.\n \"\"\",\n )\n\n @command.command(\"replay.server\")\n def load_flows(self, flows: Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay server responses from flows.\n \"\"\"\n self.flowmap = {}\n self.add_flows(flows)\n\n @command.command(\"replay.server.add\")\n def add_flows(self, flows: Sequence[flow.Flow]) -> None:\n \"\"\"\n Add responses from flows to server replay list.\n \"\"\"\n for f in flows:\n if isinstance(f, http.HTTPFlow):\n lst = self.flowmap.setdefault(self._hash(f), [])\n lst.append(f)\n ctx.master.addons.trigger(hooks.UpdateHook([]))\n\n @command.command(\"replay.server.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n self.load_flows(flows)\n\n @command.command(\"replay.server.stop\")\n def clear(self) -> None:\n \"\"\"\n Stop server replay.\n \"\"\"\n self.flowmap = {}\n ctx.master.addons.trigger(hooks.UpdateHook([]))\n\n @command.command(\"replay.server.count\")\n def count(self) -> int:\n return sum(len(i) for i in self.flowmap.values())\n\n def _hash(self, flow: http.HTTPFlow) -> Hashable:\n \"\"\"\n Calculates a loose hash of the flow request.\n \"\"\"\n r = flow.request\n _, _, path, _, query, _ = urllib.parse.urlparse(r.url)\n queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)\n\n key: list[Any] = [str(r.scheme), str(r.method), str(path)]\n if not ctx.options.server_replay_ignore_content:\n if ctx.options.server_replay_ignore_payload_params and r.multipart_form:\n key.extend(\n (k, v)\n for k, v in r.multipart_form.items(multi=True)\n if k.decode(errors=\"replace\")\n not in ctx.options.server_replay_ignore_payload_params\n )\n elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:\n key.extend(\n (k, v)\n for k, v in 
r.urlencoded_form.items(multi=True)\n if k not in ctx.options.server_replay_ignore_payload_params\n )\n else:\n key.append(str(r.raw_content))\n\n if not ctx.options.server_replay_ignore_host:\n key.append(r.pretty_host)\n if not ctx.options.server_replay_ignore_port:\n key.append(r.port)\n\n filtered = []\n ignore_params = ctx.options.server_replay_ignore_params or []\n for p in queriesArray:\n if p[0] not in ignore_params:\n filtered.append(p)\n for p in filtered:\n key.append(p[0])\n key.append(p[1])\n\n if ctx.options.server_replay_use_headers:\n headers = []\n for i in ctx.options.server_replay_use_headers:\n v = r.headers.get(i)\n headers.append((i, v))\n key.append(headers)\n return hashlib.sha256(repr(key).encode(\"utf8\", \"surrogateescape\")).digest()\n\n def next_flow(self, flow: http.HTTPFlow) -> http.HTTPFlow | None:\n \"\"\"\n Returns the next flow object, or None if no matching flow was\n found.\n \"\"\"\n hash = self._hash(flow)\n if hash in self.flowmap:\n if ctx.options.server_replay_reuse or ctx.options.server_replay_nopop:\n return next(\n (flow for flow in self.flowmap[hash] if flow.response), None\n )\n else:\n ret = self.flowmap[hash].pop(0)\n while not ret.response:\n if self.flowmap[hash]:\n ret = self.flowmap[hash].pop(0)\n else:\n del self.flowmap[hash]\n return None\n if not self.flowmap[hash]:\n del self.flowmap[hash]\n return ret\n else:\n return None\n\n def configure(self, updated):\n if ctx.options.server_replay_kill_extra:\n logger.warning(\n \"server_replay_kill_extra has been deprecated, \"\n \"please update your config to use server_replay_extra='kill'.\"\n )\n if ctx.options.server_replay_nopop: # pragma: no cover\n logger.error(\n \"server_replay_nopop has been renamed to server_replay_reuse, please update your config.\"\n )\n if not self.configured and ctx.options.server_replay:\n self.configured = True\n try:\n flows = io.read_flows_from_paths(ctx.options.server_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.load_flows(flows)\n if any(option in updated for option in HASH_OPTIONS):\n self.recompute_hashes()\n\n def recompute_hashes(self) -> None:\n \"\"\"\n Rebuild flowmap if the hashing method has changed during execution,\n see https://github.com/mitmproxy/mitmproxy/issues/4506\n \"\"\"\n flows = [flow for lst in self.flowmap.values() for flow in lst]\n self.load_flows(flows)\n\n def request(self, f: http.HTTPFlow) -> None:\n if self.flowmap:\n rflow = self.next_flow(f)\n if rflow:\n assert rflow.response\n response = rflow.response.copy()\n if ctx.options.server_replay_refresh:\n response.refresh()\n f.response = response\n f.is_replay = \"response\"\n elif (\n ctx.options.server_replay_kill_extra\n or ctx.options.server_replay_extra == \"kill\"\n ):\n logging.warning(\n \"server_playback: killed non-replay request {}\".format(\n f.request.url\n )\n )\n f.kill()\n elif ctx.options.server_replay_extra != \"forward\":\n logging.warning(\n \"server_playback: returned {} non-replay request {}\".format(\n ctx.options.server_replay_extra, f.request.url\n )\n )\n f.response = http.Response.make(int(ctx.options.server_replay_extra))\n f.is_replay = \"response\"\n", "path": "mitmproxy/addons/serverplayback.py"}]} | 3,381 | 315 |
gh_patches_debug_21510 | rasdani/github-patches | git_diff | voxel51__fiftyone-225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pretty-print view stages when showing dataset summaries (and in general)
I recently ran this:
```py
# Sort by likelihood of mistake (most likely first)
mistake_view = (dataset.view()
    .match_tag("processed")
    .sort_by("mistakenness", reverse=True)
)
print(mistake_view)
```
and saw this:
```
Dataset: cifar10-test
Num samples: 1000
Tags: ['test', 'processed', 'mistake']
Sample fields:
filepath: fiftyone.core.fields.StringField
tags: fiftyone.core.fields.ListField(fiftyone.core.fields.StringField)
metadata: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.metadata.Metadata)
ground_truth: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)
resnet50: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)
mistakenness: fiftyone.core.fields.FloatField
Pipeline stages:
1. <fiftyone.core.stages.MatchTag object at 0x13afdaf60>
2. <fiftyone.core.stages.SortBy object at 0x13b10e128>
```
We should pretty-print the view stages so their content is human-readable.
Following existing patterns in the codebase, we should implement `ViewStage.__repr__` and `ViewStage.__str__` so that view stages are always pretty-printed
--- END ISSUE ---
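One possible shape of the requested `__repr__`/`__str__`, reconstructed only from the description above (a kwargs-based repr); the actual patch may differ in details such as list truncation:

```python
class ViewStage:
    def _kwargs(self) -> dict:
        raise NotImplementedError

    def __str__(self) -> str:
        kwargs = ", ".join("%s=%r" % (k, v) for k, v in self._kwargs().items())
        return "%s(%s)" % (type(self).__name__, kwargs)

    __repr__ = __str__


class SortBy(ViewStage):
    def __init__(self, field, reverse=False):
        self._field = field
        self._reverse = reverse

    def _kwargs(self) -> dict:
        return {"field": self._field, "reverse": self._reverse}


print(SortBy("mistakenness", reverse=True))
# SortBy(field='mistakenness', reverse=True)
```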
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/core/stages.py`
Content:
```
1 """
2 FiftyOne stage definitions.
3
4 | Copyright 2017-2020, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 # pragma pylint: disable=redefined-builtin
9 # pragma pylint: disable=unused-wildcard-import
10 # pragma pylint: disable=wildcard-import
11 from __future__ import absolute_import
12 from __future__ import division
13 from __future__ import print_function
14 from __future__ import unicode_literals
15 from builtins import *
16
17 # pragma pylint: enable=redefined-builtin
18 # pragma pylint: enable=unused-wildcard-import
19 # pragma pylint: enable=wildcard-import
20
21 from bson import ObjectId
22 from pymongo import ASCENDING, DESCENDING
23
24 import eta.core.utils as etau
25
26
27 class ViewStage(object):
28 """Abstract base class for all :class:`fiftyone.core.view.DatasetView`
29 stages.
30
31 Args:
32 **kwargs: the concrete :class:`fiftyone.core.stages.ViewStage`
33 arguments
34 """
35
36 def to_mongo(self):
37 """Returns the MongoDB version of the
38 :class:`fiftyone.core.stages.ViewStage` instance
39
40 Returns:
41 a MongoDB aggregation pipeline stage dict
42 """
43 raise NotImplementedError("subclasses must implement `to_mongo()`")
44
45 def _serialize(self):
46 return {
47 "kwargs": self._kwargs(),
48 "_cls": etau.get_class_name(self),
49 }
50
51 def _kwargs(self):
52 raise NotImplementedError("subclasses must implement `_kwargs()`")
53
54 @classmethod
55 def _from_dict(cls, d):
56 return etau.get_class(d["_cls"])(**d["kwargs"])
57
58
59 class ViewStageError(Exception):
60 """An error raise by a :class:`ViewStage`"""
61
62 pass
63
64
65 class Exclude(ViewStage):
66 """Excludes the samples with the given IDs from the view.
67
68 Args:
69 sample_ids: an iterable of sample IDs
70 """
71
72 def __init__(self, sample_ids):
73 self._sample_ids = sample_ids
74
75 def to_mongo(self):
76 """Returns the MongoDB version of the
77 :class:`fiftyone.core.stages.Exclude` instance
78
79 Returns:
80 a MongoDB aggregation pipeline stage dict
81 """
82 sample_ids = [ObjectId(id) for id in self._sample_ids]
83 return Match({"_id": {"$not": {"$in": sample_ids}}}).to_mongo()
84
85 def _kwargs(self):
86 return {"sample_ids": self._sample_ids}
87
88
89 class Exists(ViewStage):
90 """Returns a view containing the samples that have a non-``None`` value
91 for the given field.
92
93 Args:
94 field: the field
95 """
96
97 def __init__(self, field):
98 self._field = field
99
100 def to_mongo(self):
101 """Returns the MongoDB version of the
102 :class:`fiftyone.core.stages.Exists` instance
103
104 Returns:
105 a MongoDB aggregation pipeline stage dict
106 """
107 return Match({self._field: {"$exists": True, "$ne": None}}).to_mongo()
108
109 def _kwargs(self):
110 return {"field": self._field}
111
112
113 class Limit(ViewStage):
114 """Limits the view to the given number of samples.
115
116 Args:
117 num: the maximum number of samples to return. If a non-positive
118 number is provided, an empty view is returned
119 """
120
121 def __init__(self, limit):
122 self._limit = limit
123
124 def to_mongo(self):
125 """Returns the MongoDB version of the :class:`fiftyone.core.stages.Limit`
126 instance
127
128 Returns:
129 a MongoDB aggregation pipeline stage dict
130 """
131 return {"$limit": self._limit}
132
133 def _kwargs(self):
134 return {"limit": self._limit}
135
136
137 class Match(ViewStage):
138 """Filters the samples in the stage by the given filter.
139
140 Args:
141 filter: a MongoDB query dict. See
142 https://docs.mongodb.com/manual/tutorial/query-documents
143 for details
144 """
145
146 def __init__(self, filter):
147 self._filter = filter
148
149 def to_mongo(self):
150 """Returns the MongoDB version of the
151 :class:`fiftyone.core.stages.Match` instance
152
153 Returns:
154 a MongoDB aggregation pipeline stage dict
155 """
156 return {"$match": self._filter}
157
158 def _kwargs(self):
159 return {"filter": self._filter}
160
161
162 class MatchTag(ViewStage):
163 """Returns a view containing the samples that have the given tag.
164
165 Args:
166 tag: a tag
167 """
168
169 def __init__(self, tag):
170 self._tag = tag
171
172 def to_mongo(self):
173 """Returns the MongoDB version of the
174 :class:`fiftyone.core.stages.MatchTag` instance
175
176 Returns:
177 a MongoDB aggregation pipeline stage dict
178 """
179 return Match({"tags": self._tag}).to_mongo()
180
181 def _kwargs(self):
182 return {"tag": self._tag}
183
184
185 class MatchTags(ViewStage):
186 """Returns a view containing the samples that have any of the given
187 tags.
188
189 To match samples that contain a single, use :class:`MatchTag`
190
191 Args:
192 tags: an iterable of tags
193 """
194
195 def __init__(self, tags):
196 self._tags = tags
197
198 def to_mongo(self):
199 """Returns the MongoDB version of the
200 :class:`fiftyone.core.stages.MatchTags` instance
201
202 Returns:
203 a MongoDB aggregation pipeline stage dict
204 """
205 return Match({"tags": {"$in": self._tags}}).to_mongo()
206
207 def _kwargs(self):
208 return {"tags": self._tags}
209
210
211 class Select(ViewStage):
212 """Selects the samples with the given IDs from the view.
213
214 Args:
215 sample_ids: an iterable of sample IDs
216 """
217
218 def __init__(self, sample_ids):
219 self._sample_ids = sample_ids
220
221 def to_mongo(self):
222 """Returns the MongoDB version of the
223 :class:`fiftyone.core.stages.Select` instance
224
225 Returns:
226 a MongoDB aggregation pipeline stage dict
227 """
228 sample_ids = [ObjectId(id) for id in self._sample_ids]
229 return Match({"_id": {"$in": sample_ids}}).to_mongo()
230
231 def _kwargs(self):
232 return {"sample_ids": self._sample_ids}
233
234
235 class SortBy(ViewStage):
236 """Sorts the samples in the view by the given field.
237
238 Args:
239 field: the field to sort by. Example fields::
240
241 filename
242 metadata.size_bytes
243 metadata.frame_size[0]
244
245 reverse (False): whether to return the results in descending order
246 """
247
248 def __init__(self, field, reverse=False):
249 self._field = field
250 self._reverse = reverse
251
252 def to_mongo(self):
253 """Returns the MongoDB version of the
254 :class:`fiftyone.core.stages.SortBy` instance
255
256 Returns:
257 a MongoDB aggregation pipeline stage dict
258 """
259 order = DESCENDING if self._reverse else ASCENDING
260 return {"$sort": {self._field: order}}
261
262 def _kwargs(self):
263 return {"field": self._field, "reverse": self._reverse}
264
265
266 class Skip(ViewStage):
267 """Omits the given number of samples from the head of the view.
268
269 Args:
270 skip: the number of samples to skip. If a non-positive number is
271 provided, no samples are omitted
272 """
273
274 def __init__(self, skip):
275 self._skip = skip
276
277 def to_mongo(self):
278 """Returns the MongoDB version of the :class:`fiftyone.core.stages.Skip`
279 instance
280
281 Returns:
282 a MongoDB aggregation pipeline stage dict
283 """
284 return {"$skip": self._skip}
285
286 def _kwargs(self):
287 return {"skip": self._skip}
288
289
290 class Take(ViewStage):
291 """Randomly samples the given number of samples from the view.
292
293 Args:
294 size: the number of samples to return. If a non-positive number is
295 provided, an empty view is returned
296 """
297
298 def __init__(self, size):
299 self._size = size
300
301 def to_mongo(self):
302 """Returns the MongoDB version of the :class:`fiftyone.core.stages.Take`
303 instance
304
305 Returns:
306 a MongoDB aggregation pipeline stage dict
307 """
308 size = self._size
309
310 if size <= 0:
311 return Match({"_id": None}).to_mongo()
312
313 return {"$sample": {"size": size}}
314
315 def _kwargs(self):
316 return {"size": self._size}
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/core/stages.py b/fiftyone/core/stages.py
--- a/fiftyone/core/stages.py
+++ b/fiftyone/core/stages.py
@@ -18,12 +18,18 @@
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
+import reprlib
+
from bson import ObjectId
from pymongo import ASCENDING, DESCENDING
import eta.core.utils as etau
+# max number of list elements to print
+reprlib.aRepr.maxlist = 3
+
+
class ViewStage(object):
"""Abstract base class for all :class:`fiftyone.core.view.DatasetView`
stages.
@@ -33,6 +39,16 @@
arguments
"""
+ def __str__(self):
+ kwarg_str = ", ".join(
+ ["%s=%s" % (k, reprlib.repr(v)) for k, v in self._kwargs().items()]
+ )
+
+ return "%s(%s)" % (self.__class__.__name__, kwarg_str)
+
+ def __repr__(self):
+ return str(self)
+
def to_mongo(self):
"""Returns the MongoDB version of the
:class:`fiftyone.core.stages.ViewStage` instance
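A quick usage check of the patched behaviour (assuming the diff above is applied; the exact truncation of long lists comes from `reprlib` and the `maxlist = 3` setting):

```python
from fiftyone.core import stages

print(stages.MatchTag("processed"))
# MatchTag(tag='processed')

print(stages.SortBy("mistakenness", reverse=True))
# SortBy(field='mistakenness', reverse=True)

print(stages.Exclude(["5f0f1c", "5f0f1d", "5f0f1e", "5f0f1f"]))
# Exclude(sample_ids=['5f0f1c', '5f0f1d', '5f0f1e', ...])  # lists capped at 3 items
```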
| {"golden_diff": "diff --git a/fiftyone/core/stages.py b/fiftyone/core/stages.py\n--- a/fiftyone/core/stages.py\n+++ b/fiftyone/core/stages.py\n@@ -18,12 +18,18 @@\n # pragma pylint: enable=unused-wildcard-import\n # pragma pylint: enable=wildcard-import\n \n+import reprlib\n+\n from bson import ObjectId\n from pymongo import ASCENDING, DESCENDING\n \n import eta.core.utils as etau\n \n \n+# max number of list elements to print\n+reprlib.aRepr.maxlist = 3\n+\n+\n class ViewStage(object):\n \"\"\"Abstract base class for all :class:`fiftyone.core.view.DatasetView`\n stages.\n@@ -33,6 +39,16 @@\n arguments\n \"\"\"\n \n+ def __str__(self):\n+ kwarg_str = \", \".join(\n+ [\"%s=%s\" % (k, reprlib.repr(v)) for k, v in self._kwargs().items()]\n+ )\n+\n+ return \"%s(%s)\" % (self.__class__.__name__, kwarg_str)\n+\n+ def __repr__(self):\n+ return str(self)\n+\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.ViewStage` instance\n", "issue": "Pretty-print view stages when showing dataset summaries (and in general)\nI recently ran this:\r\n\r\n```py\r\n# Sort by likelihood of mistake (most likely first) \r\nmistake_view = (dataset.view() \r\n .match_tag(\"processed\") \r\n.sort_by(\"mistakenness\", reverse=True) \r\n) \r\nprint(mistake_view) \r\n```\r\n\r\nand saw this:\r\n\r\n```\r\nDataset: cifar10-test\r\nNum samples: 1000\r\nTags: ['test', 'processed', 'mistake']\r\nSample fields:\r\n filepath: fiftyone.core.fields.StringField\r\n tags: fiftyone.core.fields.ListField(fiftyone.core.fields.StringField)\r\n metadata: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.metadata.Metadata)\r\n ground_truth: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\r\n resnet50: fiftyone.core.fields.EmbeddedDocumentField(fiftyone.core.labels.Classification)\r\n mistakenness: fiftyone.core.fields.FloatField\r\nPipeline stages:\r\n 1. <fiftyone.core.stages.MatchTag object at 0x13afdaf60>\r\n 2. 
<fiftyone.core.stages.SortBy object at 0x13b10e128>\r\n```\r\n\r\nWe should pretty-print the view stages so their content is human-readable.\r\n\r\nFollowing existing patterns in the codebase, we should implement `ViewStage.__repr__` and `ViewStage.__str__` so that view stages are always pretty-printed\n", "before_files": [{"content": "\"\"\"\nFiftyOne stage definitions.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nfrom bson import ObjectId\nfrom pymongo import ASCENDING, DESCENDING\n\nimport eta.core.utils as etau\n\n\nclass ViewStage(object):\n \"\"\"Abstract base class for all :class:`fiftyone.core.view.DatasetView`\n stages.\n\n Args:\n **kwargs: the concrete :class:`fiftyone.core.stages.ViewStage`\n arguments\n \"\"\"\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.ViewStage` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n raise NotImplementedError(\"subclasses must implement `to_mongo()`\")\n\n def _serialize(self):\n return {\n \"kwargs\": self._kwargs(),\n \"_cls\": etau.get_class_name(self),\n }\n\n def _kwargs(self):\n raise NotImplementedError(\"subclasses must implement `_kwargs()`\")\n\n @classmethod\n def _from_dict(cls, d):\n return etau.get_class(d[\"_cls\"])(**d[\"kwargs\"])\n\n\nclass ViewStageError(Exception):\n \"\"\"An error raise by a :class:`ViewStage`\"\"\"\n\n pass\n\n\nclass Exclude(ViewStage):\n \"\"\"Excludes the samples with the given IDs from the view.\n\n Args:\n sample_ids: an iterable of sample IDs\n \"\"\"\n\n def __init__(self, sample_ids):\n self._sample_ids = sample_ids\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Exclude` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n sample_ids = [ObjectId(id) for id in self._sample_ids]\n return Match({\"_id\": {\"$not\": {\"$in\": sample_ids}}}).to_mongo()\n\n def _kwargs(self):\n return {\"sample_ids\": self._sample_ids}\n\n\nclass Exists(ViewStage):\n \"\"\"Returns a view containing the samples that have a non-``None`` value\n for the given field.\n\n Args:\n field: the field\n \"\"\"\n\n def __init__(self, field):\n self._field = field\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Exists` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({self._field: {\"$exists\": True, \"$ne\": None}}).to_mongo()\n\n def _kwargs(self):\n return {\"field\": self._field}\n\n\nclass Limit(ViewStage):\n \"\"\"Limits the view to the given number of samples.\n\n Args:\n num: the maximum number of samples to return. 
If a non-positive\n number is provided, an empty view is returned\n \"\"\"\n\n def __init__(self, limit):\n self._limit = limit\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Limit`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$limit\": self._limit}\n\n def _kwargs(self):\n return {\"limit\": self._limit}\n\n\nclass Match(ViewStage):\n \"\"\"Filters the samples in the stage by the given filter.\n\n Args:\n filter: a MongoDB query dict. See\n https://docs.mongodb.com/manual/tutorial/query-documents\n for details\n \"\"\"\n\n def __init__(self, filter):\n self._filter = filter\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Match` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$match\": self._filter}\n\n def _kwargs(self):\n return {\"filter\": self._filter}\n\n\nclass MatchTag(ViewStage):\n \"\"\"Returns a view containing the samples that have the given tag.\n\n Args:\n tag: a tag\n \"\"\"\n\n def __init__(self, tag):\n self._tag = tag\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.MatchTag` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({\"tags\": self._tag}).to_mongo()\n\n def _kwargs(self):\n return {\"tag\": self._tag}\n\n\nclass MatchTags(ViewStage):\n \"\"\"Returns a view containing the samples that have any of the given\n tags.\n\n To match samples that contain a single, use :class:`MatchTag`\n\n Args:\n tags: an iterable of tags\n \"\"\"\n\n def __init__(self, tags):\n self._tags = tags\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.MatchTags` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({\"tags\": {\"$in\": self._tags}}).to_mongo()\n\n def _kwargs(self):\n return {\"tags\": self._tags}\n\n\nclass Select(ViewStage):\n \"\"\"Selects the samples with the given IDs from the view.\n\n Args:\n sample_ids: an iterable of sample IDs\n \"\"\"\n\n def __init__(self, sample_ids):\n self._sample_ids = sample_ids\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Select` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n sample_ids = [ObjectId(id) for id in self._sample_ids]\n return Match({\"_id\": {\"$in\": sample_ids}}).to_mongo()\n\n def _kwargs(self):\n return {\"sample_ids\": self._sample_ids}\n\n\nclass SortBy(ViewStage):\n \"\"\"Sorts the samples in the view by the given field.\n\n Args:\n field: the field to sort by. Example fields::\n\n filename\n metadata.size_bytes\n metadata.frame_size[0]\n\n reverse (False): whether to return the results in descending order\n \"\"\"\n\n def __init__(self, field, reverse=False):\n self._field = field\n self._reverse = reverse\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.SortBy` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n order = DESCENDING if self._reverse else ASCENDING\n return {\"$sort\": {self._field: order}}\n\n def _kwargs(self):\n return {\"field\": self._field, \"reverse\": self._reverse}\n\n\nclass Skip(ViewStage):\n \"\"\"Omits the given number of samples from the head of the view.\n\n Args:\n skip: the number of samples to skip. 
If a non-positive number is\n provided, no samples are omitted\n \"\"\"\n\n def __init__(self, skip):\n self._skip = skip\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Skip`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$skip\": self._skip}\n\n def _kwargs(self):\n return {\"skip\": self._skip}\n\n\nclass Take(ViewStage):\n \"\"\"Randomly samples the given number of samples from the view.\n\n Args:\n size: the number of samples to return. If a non-positive number is\n provided, an empty view is returned\n \"\"\"\n\n def __init__(self, size):\n self._size = size\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Take`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n size = self._size\n\n if size <= 0:\n return Match({\"_id\": None}).to_mongo()\n\n return {\"$sample\": {\"size\": size}}\n\n def _kwargs(self):\n return {\"size\": self._size}\n", "path": "fiftyone/core/stages.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne stage definitions.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\n# pragma pylint: disable=redefined-builtin\n# pragma pylint: disable=unused-wildcard-import\n# pragma pylint: disable=wildcard-import\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import *\n\n# pragma pylint: enable=redefined-builtin\n# pragma pylint: enable=unused-wildcard-import\n# pragma pylint: enable=wildcard-import\n\nimport reprlib\n\nfrom bson import ObjectId\nfrom pymongo import ASCENDING, DESCENDING\n\nimport eta.core.utils as etau\n\n\n# max number of list elements to print\nreprlib.aRepr.maxlist = 3\n\n\nclass ViewStage(object):\n \"\"\"Abstract base class for all :class:`fiftyone.core.view.DatasetView`\n stages.\n\n Args:\n **kwargs: the concrete :class:`fiftyone.core.stages.ViewStage`\n arguments\n \"\"\"\n\n def __str__(self):\n kwarg_str = \", \".join(\n [\"%s=%s\" % (k, reprlib.repr(v)) for k, v in self._kwargs().items()]\n )\n\n return \"%s(%s)\" % (self.__class__.__name__, kwarg_str)\n\n def __repr__(self):\n return str(self)\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.ViewStage` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n raise NotImplementedError(\"subclasses must implement `to_mongo()`\")\n\n def _serialize(self):\n return {\n \"kwargs\": self._kwargs(),\n \"_cls\": etau.get_class_name(self),\n }\n\n def _kwargs(self):\n raise NotImplementedError(\"subclasses must implement `_kwargs()`\")\n\n @classmethod\n def _from_dict(cls, d):\n return etau.get_class(d[\"_cls\"])(**d[\"kwargs\"])\n\n\nclass ViewStageError(Exception):\n \"\"\"An error raise by a :class:`ViewStage`\"\"\"\n\n pass\n\n\nclass Exclude(ViewStage):\n \"\"\"Excludes the samples with the given IDs from the view.\n\n Args:\n sample_ids: an iterable of sample IDs\n \"\"\"\n\n def __init__(self, sample_ids):\n self._sample_ids = sample_ids\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Exclude` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n sample_ids = [ObjectId(id) for id in self._sample_ids]\n return Match({\"_id\": {\"$not\": {\"$in\": sample_ids}}}).to_mongo()\n\n def _kwargs(self):\n return {\"sample_ids\": 
self._sample_ids}\n\n\nclass Exists(ViewStage):\n \"\"\"Returns a view containing the samples that have a non-``None`` value\n for the given field.\n\n Args:\n field: the field\n \"\"\"\n\n def __init__(self, field):\n self._field = field\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Exists` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({self._field: {\"$exists\": True, \"$ne\": None}}).to_mongo()\n\n def _kwargs(self):\n return {\"field\": self._field}\n\n\nclass Limit(ViewStage):\n \"\"\"Limits the view to the given number of samples.\n\n Args:\n num: the maximum number of samples to return. If a non-positive\n number is provided, an empty view is returned\n \"\"\"\n\n def __init__(self, limit):\n self._limit = limit\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Limit`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$limit\": self._limit}\n\n def _kwargs(self):\n return {\"limit\": self._limit}\n\n\nclass Match(ViewStage):\n \"\"\"Filters the samples in the stage by the given filter.\n\n Args:\n filter: a MongoDB query dict. See\n https://docs.mongodb.com/manual/tutorial/query-documents\n for details\n \"\"\"\n\n def __init__(self, filter):\n self._filter = filter\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Match` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$match\": self._filter}\n\n def _kwargs(self):\n return {\"filter\": self._filter}\n\n\nclass MatchTag(ViewStage):\n \"\"\"Returns a view containing the samples that have the given tag.\n\n Args:\n tag: a tag\n \"\"\"\n\n def __init__(self, tag):\n self._tag = tag\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.MatchTag` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({\"tags\": self._tag}).to_mongo()\n\n def _kwargs(self):\n return {\"tag\": self._tag}\n\n\nclass MatchTags(ViewStage):\n \"\"\"Returns a view containing the samples that have any of the given\n tags.\n\n To match samples that contain a single, use :class:`MatchTag`\n\n Args:\n tags: an iterable of tags\n \"\"\"\n\n def __init__(self, tags):\n self._tags = tags\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.MatchTags` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return Match({\"tags\": {\"$in\": self._tags}}).to_mongo()\n\n def _kwargs(self):\n return {\"tags\": self._tags}\n\n\nclass Select(ViewStage):\n \"\"\"Selects the samples with the given IDs from the view.\n\n Args:\n sample_ids: an iterable of sample IDs\n \"\"\"\n\n def __init__(self, sample_ids):\n self._sample_ids = sample_ids\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.Select` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n sample_ids = [ObjectId(id) for id in self._sample_ids]\n return Match({\"_id\": {\"$in\": sample_ids}}).to_mongo()\n\n def _kwargs(self):\n return {\"sample_ids\": self._sample_ids}\n\n\nclass SortBy(ViewStage):\n \"\"\"Sorts the samples in the view by the given field.\n\n Args:\n field: the field to sort by. 
Example fields::\n\n filename\n metadata.size_bytes\n metadata.frame_size[0]\n\n reverse (False): whether to return the results in descending order\n \"\"\"\n\n def __init__(self, field, reverse=False):\n self._field = field\n self._reverse = reverse\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the\n :class:`fiftyone.core.stages.SortBy` instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n order = DESCENDING if self._reverse else ASCENDING\n return {\"$sort\": {self._field: order}}\n\n def _kwargs(self):\n return {\"field\": self._field, \"reverse\": self._reverse}\n\n\nclass Skip(ViewStage):\n \"\"\"Omits the given number of samples from the head of the view.\n\n Args:\n skip: the number of samples to skip. If a non-positive number is\n provided, no samples are omitted\n \"\"\"\n\n def __init__(self, skip):\n self._skip = skip\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Skip`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n return {\"$skip\": self._skip}\n\n def _kwargs(self):\n return {\"skip\": self._skip}\n\n\nclass Take(ViewStage):\n \"\"\"Randomly samples the given number of samples from the view.\n\n Args:\n size: the number of samples to return. If a non-positive number is\n provided, an empty view is returned\n \"\"\"\n\n def __init__(self, size):\n self._size = size\n\n def to_mongo(self):\n \"\"\"Returns the MongoDB version of the :class:`fiftyone.core.stages.Take`\n instance\n\n Returns:\n a MongoDB aggregation pipeline stage dict\n \"\"\"\n size = self._size\n\n if size <= 0:\n return Match({\"_id\": None}).to_mongo()\n\n return {\"$sample\": {\"size\": size}}\n\n def _kwargs(self):\n return {\"size\": self._size}\n", "path": "fiftyone/core/stages.py"}]} | 3,347 | 291 |
gh_patches_debug_38417 | rasdani/github-patches | git_diff | microsoft__MLOS-586 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make all parameters visible as `$vars` in paths and other applications that do not involve the OS shell
When constructing paths for file share upload and download, we require that all `$vars` in such paths should be explicitly exposed via `shell_env_params`. This makes sense for local and remote shell commands, as it prevents the leakage of (potentially sensitive) parameters to the shell but is redundant and too restrictive for the path names. We should allow using all parameters in such paths without any explicit declarations.
--- END ISSUE ---
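For intuition, the paths in question are ordinary `string.Template` strings, so making every parameter available amounts to substituting the full parameter dictionary; the sketch below illustrates that idea with made-up parameter names:

```python
from string import Template

# Hypothetical parameters -- the names here are illustrative only.
params = {"experiment_id": "exp42", "vmSize": "Standard_B2s"}

# A download path template as it might appear in a fileshare config.
path = Template("results/$experiment_id/$vmSize/output.csv")

# safe_substitute() fills in whatever $vars it can find, so path expansion
# does not need an explicit shell_env_params declaration at all.
print(path.safe_substitute(params))  # results/exp42/Standard_B2s/output.csv
```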
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_bench/mlos_bench/environments/local/local_fileshare_env.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Scheduler-side Environment to run scripts locally
7 and upload/download data to the shared storage.
8 """
9
10 import logging
11
12 from datetime import datetime
13 from string import Template
14 from typing import Any, Dict, List, Generator, Iterable, Mapping, Optional, Tuple
15
16 from mlos_bench.services.base_service import Service
17 from mlos_bench.services.types.local_exec_type import SupportsLocalExec
18 from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
19 from mlos_bench.environments.status import Status
20 from mlos_bench.environments.local.local_env import LocalEnv
21 from mlos_bench.tunables.tunable import TunableValue
22 from mlos_bench.tunables.tunable_groups import TunableGroups
23
24 _LOG = logging.getLogger(__name__)
25
26
27 class LocalFileShareEnv(LocalEnv):
28 """
29 Scheduler-side Environment that runs scripts locally
30 and uploads/downloads data to the shared file storage.
31 """
32
33 def __init__(self,
34 *,
35 name: str,
36 config: dict,
37 global_config: Optional[dict] = None,
38 tunables: Optional[TunableGroups] = None,
39 service: Optional[Service] = None):
40 """
41 Create a new application environment with a given config.
42
43 Parameters
44 ----------
45 name: str
46 Human-readable name of the environment.
47 config : dict
48 Free-format dictionary that contains the benchmark environment
49 configuration. Each config must have at least the "tunable_params"
50 and the "const_args" sections.
51 `LocalFileShareEnv` must also have at least some of the following
52 parameters: {setup, upload, run, download, teardown,
53 dump_params_file, read_results_file}
54 global_config : dict
55 Free-format dictionary of global parameters (e.g., security credentials)
56 to be mixed in into the "const_args" section of the local config.
57 tunables : TunableGroups
58 A collection of tunable parameters for *all* environments.
59 service: Service
60 An optional service object (e.g., providing methods to
61 deploy or reboot a VM, etc.).
62 """
63 super().__init__(name=name, config=config, global_config=global_config, tunables=tunables, service=service)
64
65 assert self._service is not None and isinstance(self._service, SupportsLocalExec), \
66 "LocalEnv requires a service that supports local execution"
67 self._local_exec_service: SupportsLocalExec = self._service
68
69 assert self._service is not None and isinstance(self._service, SupportsFileShareOps), \
70 "LocalEnv requires a service that supports file upload/download operations"
71 self._file_share_service: SupportsFileShareOps = self._service
72
73 self._upload = self._template_from_to("upload")
74 self._download = self._template_from_to("download")
75
76 def _template_from_to(self, config_key: str) -> List[Tuple[Template, Template]]:
77 """
78 Convert a list of {"from": "...", "to": "..."} to a list of pairs
79 of string.Template objects so that we can plug in self._params into it later.
80 """
81 return [
82 (Template(d['from']), Template(d['to']))
83 for d in self.config.get(config_key, [])
84 ]
85
86 @staticmethod
87 def _expand(from_to: Iterable[Tuple[Template, Template]],
88 params: Mapping[str, TunableValue]) -> Generator[Tuple[str, str], None, None]:
89 """
90 Substitute $var parameters in from/to path templates.
91 Return a generator of (str, str) pairs of paths.
92 """
93 return (
94 (path_from.safe_substitute(params), path_to.safe_substitute(params))
95 for (path_from, path_to) in from_to
96 )
97
98 def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:
99 """
100 Run setup scripts locally and upload the scripts and data to the shared storage.
101
102 Parameters
103 ----------
104 tunables : TunableGroups
105 A collection of tunable OS and application parameters along with their
106 values. In a local environment these could be used to prepare a config
107 file on the scheduler prior to transferring it to the remote environment,
108 for instance.
109 global_config : dict
110 Free-format dictionary of global parameters of the environment
111 that are not used in the optimization process.
112
113 Returns
114 -------
115 is_success : bool
116 True if operation is successful, false otherwise.
117 """
118 self._is_ready = super().setup(tunables, global_config)
119 if self._is_ready:
120 assert self._temp_dir is not None
121 params = self._get_env_params()
122 params["PWD"] = self._temp_dir
123 for (path_from, path_to) in self._expand(self._upload, params):
124 self._file_share_service.upload(self._params, self._config_loader_service.resolve_path(
125 path_from, extra_paths=[self._temp_dir]), path_to)
126 return self._is_ready
127
128 def _download_files(self, ignore_missing: bool = False) -> None:
129 """
130 Download files from the shared storage.
131
132 Parameters
133 ----------
134 ignore_missing : bool
135 If True, raise an exception when some file cannot be downloaded.
136 If False, proceed with downloading other files and log a warning.
137 """
138 assert self._temp_dir is not None
139 params = self._get_env_params()
140 params["PWD"] = self._temp_dir
141 for (path_from, path_to) in self._expand(self._download, params):
142 try:
143 self._file_share_service.download(self._params,
144 path_from, self._config_loader_service.resolve_path(
145 path_to, extra_paths=[self._temp_dir]))
146 except FileNotFoundError as ex:
147 _LOG.warning("Cannot download: %s", path_from)
148 if not ignore_missing:
149 raise ex
150 except Exception as ex:
151 _LOG.exception("Cannot download %s to %s", path_from, path_to)
152 raise ex
153
154 def run(self) -> Tuple[Status, Optional[Dict[str, TunableValue]]]:
155 """
156 Download benchmark results from the shared storage
157 and run post-processing scripts locally.
158
159 Returns
160 -------
161 (status, output) : (Status, dict)
162 A pair of (Status, output) values, where `output` is a dict
163 with the results or None if the status is not COMPLETED.
164 If run script is a benchmark, then the score is usually expected to
165 be in the `score` field.
166 """
167 self._download_files()
168 return super().run()
169
170 def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:
171 self._download_files(ignore_missing=True)
172 return super().status()
173
```
Path: `mlos_bench/mlos_bench/environments/script_env.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Base scriptable benchmark environment.
7 """
8
9 import abc
10 import logging
11 import re
12 from typing import Dict, Iterable, Optional
13
14 from mlos_bench.environments.base_environment import Environment
15 from mlos_bench.services.base_service import Service
16 from mlos_bench.tunables.tunable import TunableValue
17 from mlos_bench.tunables.tunable_groups import TunableGroups
18
19 from mlos_bench.util import try_parse_val
20
21 _LOG = logging.getLogger(__name__)
22
23
24 class ScriptEnv(Environment, metaclass=abc.ABCMeta):
25 """
26 Base Environment that runs scripts for setup/run/teardown.
27 """
28
29 _RE_INVALID = re.compile(r"[^a-zA-Z0-9_]")
30
31 def __init__(self,
32 *,
33 name: str,
34 config: dict,
35 global_config: Optional[dict] = None,
36 tunables: Optional[TunableGroups] = None,
37 service: Optional[Service] = None):
38 """
39 Create a new environment for script execution.
40
41 Parameters
42 ----------
43 name: str
44 Human-readable name of the environment.
45 config : dict
46 Free-format dictionary that contains the benchmark environment
47 configuration. Each config must have at least the `tunable_params`
48 and the `const_args` sections. It must also have at least one of
49 the following parameters: {`setup`, `run`, `teardown`}.
50 Additional parameters:
51 * `shell_env_params` - an array of parameters to pass to the script
52 as shell environment variables, and
53 * `shell_env_params_rename` - a dictionary of {to: from} mappings
54 of the script parameters. If not specified, replace all
55 non-alphanumeric characters with underscores.
56 If neither `shell_env_params` nor `shell_env_params_rename` are specified,
57 *no* additional shell parameters will be passed to the script.
58 global_config : dict
59 Free-format dictionary of global parameters (e.g., security credentials)
60 to be mixed in into the "const_args" section of the local config.
61 tunables : TunableGroups
62 A collection of tunable parameters for *all* environments.
63 service: Service
64 An optional service object (e.g., providing methods to
65 deploy or reboot a VM, etc.).
66 """
67 super().__init__(name=name, config=config, global_config=global_config,
68 tunables=tunables, service=service)
69
70 self._script_setup = self.config.get("setup")
71 self._script_run = self.config.get("run")
72 self._script_teardown = self.config.get("teardown")
73
74 self._shell_env_params: Iterable[str] = self.config.get("shell_env_params", [])
75 self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})
76
77 results_stdout_pattern = self.config.get("results_stdout_pattern")
78 self._results_stdout_pattern: Optional[re.Pattern[str]] = \
79 re.compile(results_stdout_pattern) if results_stdout_pattern else None
80
81 def _get_env_params(self) -> Dict[str, str]:
82 """
83 Get the *shell* environment parameters to be passed to the script.
84
85 Returns
86 -------
87 env_params : Dict[str, str]
88 Parameters to pass as *shell* environment variables into the script.
89 This is usually a subset of `_params` with some possible conversions.
90 """
91 rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
92 rename.update(self._shell_env_params_rename)
93 return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
94
95 def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:
96 """
97 Extract the results from the stdout of the script.
98
99 Parameters
100 ----------
101 stdout : str
102 The stdout of the script.
103
104 Returns
105 -------
106 results : Dict[str, TunableValue]
107 A dictionary of results extracted from the stdout.
108 """
109 if not self._results_stdout_pattern:
110 return {}
111 _LOG.debug("Extract regex: '%s' from: '%s'", self._results_stdout_pattern, stdout)
112 return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py b/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py
--- a/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py
+++ b/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py
@@ -118,7 +118,7 @@
self._is_ready = super().setup(tunables, global_config)
if self._is_ready:
assert self._temp_dir is not None
- params = self._get_env_params()
+ params = self._get_env_params(restrict=False)
params["PWD"] = self._temp_dir
for (path_from, path_to) in self._expand(self._upload, params):
self._file_share_service.upload(self._params, self._config_loader_service.resolve_path(
@@ -136,7 +136,7 @@
If False, proceed with downloading other files and log a warning.
"""
assert self._temp_dir is not None
- params = self._get_env_params()
+ params = self._get_env_params(restrict=False)
params["PWD"] = self._temp_dir
for (path_from, path_to) in self._expand(self._download, params):
try:
diff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py
--- a/mlos_bench/mlos_bench/environments/script_env.py
+++ b/mlos_bench/mlos_bench/environments/script_env.py
@@ -78,17 +78,25 @@
self._results_stdout_pattern: Optional[re.Pattern[str]] = \
re.compile(results_stdout_pattern) if results_stdout_pattern else None
- def _get_env_params(self) -> Dict[str, str]:
+ def _get_env_params(self, restrict: bool = True) -> Dict[str, str]:
"""
Get the *shell* environment parameters to be passed to the script.
+ Parameters
+ ----------
+ restrict : bool
+ If True, only return the parameters that are in the `_shell_env_params`
+ list. If False, return all parameters in `_params` with some possible
+ conversions.
+
Returns
-------
env_params : Dict[str, str]
Parameters to pass as *shell* environment variables into the script.
This is usually a subset of `_params` with some possible conversions.
"""
- rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
+ input_params = self._shell_env_params if restrict else self._params.keys()
+ rename = {self._RE_INVALID.sub("_", key): key for key in input_params}
rename.update(self._shell_env_params_rename)
return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
| {"golden_diff": "diff --git a/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py b/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py\n--- a/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py\n+++ b/mlos_bench/mlos_bench/environments/local/local_fileshare_env.py\n@@ -118,7 +118,7 @@\n self._is_ready = super().setup(tunables, global_config)\n if self._is_ready:\n assert self._temp_dir is not None\n- params = self._get_env_params()\n+ params = self._get_env_params(restrict=False)\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._upload, params):\n self._file_share_service.upload(self._params, self._config_loader_service.resolve_path(\n@@ -136,7 +136,7 @@\n If False, proceed with downloading other files and log a warning.\n \"\"\"\n assert self._temp_dir is not None\n- params = self._get_env_params()\n+ params = self._get_env_params(restrict=False)\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._download, params):\n try:\ndiff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py\n--- a/mlos_bench/mlos_bench/environments/script_env.py\n+++ b/mlos_bench/mlos_bench/environments/script_env.py\n@@ -78,17 +78,25 @@\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n \n- def _get_env_params(self) -> Dict[str, str]:\n+ def _get_env_params(self, restrict: bool = True) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n \n+ Parameters\n+ ----------\n+ restrict : bool\n+ If True, only return the parameters that are in the `_shell_env_params`\n+ list. If False, return all parameters in `_params` with some possible\n+ conversions.\n+\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n- rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n+ input_params = self._shell_env_params if restrict else self._params.keys()\n+ rename = {self._RE_INVALID.sub(\"_\", key): key for key in input_params}\n rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n", "issue": "Make all parameters visible as `$vars` in paths and other applications that does not involve the OS shell\nWhen constructing paths for file share upload and download, we require that all `$vars` in such paths should be explicitly exposed via `shell_env_params`. This makes sense for local and remote shell commands, as it prevents the leakage of (potentially sensitive) parameters to the shell but is redundant and too restrictive for the path names. 
We should allow using all parameters in such paths without any explicit declarations.\n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nScheduler-side Environment to run scripts locally\nand upload/download data to the shared storage.\n\"\"\"\n\nimport logging\n\nfrom datetime import datetime\nfrom string import Template\nfrom typing import Any, Dict, List, Generator, Iterable, Mapping, Optional, Tuple\n\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.services.types.local_exec_type import SupportsLocalExec\nfrom mlos_bench.services.types.fileshare_type import SupportsFileShareOps\nfrom mlos_bench.environments.status import Status\nfrom mlos_bench.environments.local.local_env import LocalEnv\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\n_LOG = logging.getLogger(__name__)\n\n\nclass LocalFileShareEnv(LocalEnv):\n \"\"\"\n Scheduler-side Environment that runs scripts locally\n and uploads/downloads data to the shared file storage.\n \"\"\"\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new application environment with a given config.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the \"tunable_params\"\n and the \"const_args\" sections.\n `LocalFileShareEnv` must also have at least some of the following\n parameters: {setup, upload, run, download, teardown,\n dump_params_file, read_results_file}\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config, tunables=tunables, service=service)\n\n assert self._service is not None and isinstance(self._service, SupportsLocalExec), \\\n \"LocalEnv requires a service that supports local execution\"\n self._local_exec_service: SupportsLocalExec = self._service\n\n assert self._service is not None and isinstance(self._service, SupportsFileShareOps), \\\n \"LocalEnv requires a service that supports file upload/download operations\"\n self._file_share_service: SupportsFileShareOps = self._service\n\n self._upload = self._template_from_to(\"upload\")\n self._download = self._template_from_to(\"download\")\n\n def _template_from_to(self, config_key: str) -> List[Tuple[Template, Template]]:\n \"\"\"\n Convert a list of {\"from\": \"...\", \"to\": \"...\"} to a list of pairs\n of string.Template objects so that we can plug in self._params into it later.\n \"\"\"\n return [\n (Template(d['from']), Template(d['to']))\n for d in self.config.get(config_key, [])\n ]\n\n @staticmethod\n def _expand(from_to: Iterable[Tuple[Template, Template]],\n params: Mapping[str, TunableValue]) -> Generator[Tuple[str, str], None, None]:\n \"\"\"\n Substitute $var parameters in from/to path templates.\n Return a generator of (str, str) pairs of paths.\n \"\"\"\n return (\n (path_from.safe_substitute(params), 
path_to.safe_substitute(params))\n for (path_from, path_to) in from_to\n )\n\n def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:\n \"\"\"\n Run setup scripts locally and upload the scripts and data to the shared storage.\n\n Parameters\n ----------\n tunables : TunableGroups\n A collection of tunable OS and application parameters along with their\n values. In a local environment these could be used to prepare a config\n file on the scheduler prior to transferring it to the remote environment,\n for instance.\n global_config : dict\n Free-format dictionary of global parameters of the environment\n that are not used in the optimization process.\n\n Returns\n -------\n is_success : bool\n True if operation is successful, false otherwise.\n \"\"\"\n self._is_ready = super().setup(tunables, global_config)\n if self._is_ready:\n assert self._temp_dir is not None\n params = self._get_env_params()\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._upload, params):\n self._file_share_service.upload(self._params, self._config_loader_service.resolve_path(\n path_from, extra_paths=[self._temp_dir]), path_to)\n return self._is_ready\n\n def _download_files(self, ignore_missing: bool = False) -> None:\n \"\"\"\n Download files from the shared storage.\n\n Parameters\n ----------\n ignore_missing : bool\n If True, raise an exception when some file cannot be downloaded.\n If False, proceed with downloading other files and log a warning.\n \"\"\"\n assert self._temp_dir is not None\n params = self._get_env_params()\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._download, params):\n try:\n self._file_share_service.download(self._params,\n path_from, self._config_loader_service.resolve_path(\n path_to, extra_paths=[self._temp_dir]))\n except FileNotFoundError as ex:\n _LOG.warning(\"Cannot download: %s\", path_from)\n if not ignore_missing:\n raise ex\n except Exception as ex:\n _LOG.exception(\"Cannot download %s to %s\", path_from, path_to)\n raise ex\n\n def run(self) -> Tuple[Status, Optional[Dict[str, TunableValue]]]:\n \"\"\"\n Download benchmark results from the shared storage\n and run post-processing scripts locally.\n\n Returns\n -------\n (status, output) : (Status, dict)\n A pair of (Status, output) values, where `output` is a dict\n with the results or None if the status is not COMPLETED.\n If run script is a benchmark, then the score is usually expected to\n be in the `score` field.\n \"\"\"\n self._download_files()\n return super().run()\n\n def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:\n self._download_files(ignore_missing=True)\n return super().status()\n", "path": "mlos_bench/mlos_bench/environments/local/local_fileshare_env.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nBase scriptable benchmark environment.\n\"\"\"\n\nimport abc\nimport logging\nimport re\nfrom typing import Dict, Iterable, Optional\n\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\nfrom mlos_bench.util import try_parse_val\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ScriptEnv(Environment, metaclass=abc.ABCMeta):\n \"\"\"\n Base Environment that runs scripts for setup/run/teardown.\n \"\"\"\n\n _RE_INVALID = re.compile(r\"[^a-zA-Z0-9_]\")\n\n 
def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for script execution.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the `tunable_params`\n and the `const_args` sections. It must also have at least one of\n the following parameters: {`setup`, `run`, `teardown`}.\n Additional parameters:\n * `shell_env_params` - an array of parameters to pass to the script\n as shell environment variables, and\n * `shell_env_params_rename` - a dictionary of {to: from} mappings\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n *no* additional shell parameters will be passed to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n self._script_setup = self.config.get(\"setup\")\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n\n self._shell_env_params: Iterable[str] = self.config.get(\"shell_env_params\", [])\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n\n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n\n def _get_env_params(self) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n\n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n \"\"\"\n Extract the results from the stdout of the script.\n\n Parameters\n ----------\n stdout : str\n The stdout of the script.\n\n Returns\n -------\n results : Dict[str, TunableValue]\n A dictionary of results extracted from the stdout.\n \"\"\"\n if not self._results_stdout_pattern:\n return {}\n _LOG.debug(\"Extract regex: '%s' from: '%s'\", self._results_stdout_pattern, stdout)\n return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}\n", "path": "mlos_bench/mlos_bench/environments/script_env.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nScheduler-side Environment to run scripts locally\nand upload/download data to the shared storage.\n\"\"\"\n\nimport logging\n\nfrom datetime import datetime\nfrom string import 
Template\nfrom typing import Any, Dict, List, Generator, Iterable, Mapping, Optional, Tuple\n\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.services.types.local_exec_type import SupportsLocalExec\nfrom mlos_bench.services.types.fileshare_type import SupportsFileShareOps\nfrom mlos_bench.environments.status import Status\nfrom mlos_bench.environments.local.local_env import LocalEnv\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\n_LOG = logging.getLogger(__name__)\n\n\nclass LocalFileShareEnv(LocalEnv):\n \"\"\"\n Scheduler-side Environment that runs scripts locally\n and uploads/downloads data to the shared file storage.\n \"\"\"\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new application environment with a given config.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the \"tunable_params\"\n and the \"const_args\" sections.\n `LocalFileShareEnv` must also have at least some of the following\n parameters: {setup, upload, run, download, teardown,\n dump_params_file, read_results_file}\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config, tunables=tunables, service=service)\n\n assert self._service is not None and isinstance(self._service, SupportsLocalExec), \\\n \"LocalEnv requires a service that supports local execution\"\n self._local_exec_service: SupportsLocalExec = self._service\n\n assert self._service is not None and isinstance(self._service, SupportsFileShareOps), \\\n \"LocalEnv requires a service that supports file upload/download operations\"\n self._file_share_service: SupportsFileShareOps = self._service\n\n self._upload = self._template_from_to(\"upload\")\n self._download = self._template_from_to(\"download\")\n\n def _template_from_to(self, config_key: str) -> List[Tuple[Template, Template]]:\n \"\"\"\n Convert a list of {\"from\": \"...\", \"to\": \"...\"} to a list of pairs\n of string.Template objects so that we can plug in self._params into it later.\n \"\"\"\n return [\n (Template(d['from']), Template(d['to']))\n for d in self.config.get(config_key, [])\n ]\n\n @staticmethod\n def _expand(from_to: Iterable[Tuple[Template, Template]],\n params: Mapping[str, TunableValue]) -> Generator[Tuple[str, str], None, None]:\n \"\"\"\n Substitute $var parameters in from/to path templates.\n Return a generator of (str, str) pairs of paths.\n \"\"\"\n return (\n (path_from.safe_substitute(params), path_to.safe_substitute(params))\n for (path_from, path_to) in from_to\n )\n\n def setup(self, tunables: TunableGroups, global_config: Optional[dict] = None) -> bool:\n \"\"\"\n Run setup scripts locally and upload the scripts and data to the shared storage.\n\n Parameters\n ----------\n tunables : TunableGroups\n A collection of tunable OS and application parameters along with their\n values. 
In a local environment these could be used to prepare a config\n file on the scheduler prior to transferring it to the remote environment,\n for instance.\n global_config : dict\n Free-format dictionary of global parameters of the environment\n that are not used in the optimization process.\n\n Returns\n -------\n is_success : bool\n True if operation is successful, false otherwise.\n \"\"\"\n self._is_ready = super().setup(tunables, global_config)\n if self._is_ready:\n assert self._temp_dir is not None\n params = self._get_env_params(restrict=False)\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._upload, params):\n self._file_share_service.upload(self._params, self._config_loader_service.resolve_path(\n path_from, extra_paths=[self._temp_dir]), path_to)\n return self._is_ready\n\n def _download_files(self, ignore_missing: bool = False) -> None:\n \"\"\"\n Download files from the shared storage.\n\n Parameters\n ----------\n ignore_missing : bool\n If True, raise an exception when some file cannot be downloaded.\n If False, proceed with downloading other files and log a warning.\n \"\"\"\n assert self._temp_dir is not None\n params = self._get_env_params(restrict=False)\n params[\"PWD\"] = self._temp_dir\n for (path_from, path_to) in self._expand(self._download, params):\n try:\n self._file_share_service.download(self._params,\n path_from, self._config_loader_service.resolve_path(\n path_to, extra_paths=[self._temp_dir]))\n except FileNotFoundError as ex:\n _LOG.warning(\"Cannot download: %s\", path_from)\n if not ignore_missing:\n raise ex\n except Exception as ex:\n _LOG.exception(\"Cannot download %s to %s\", path_from, path_to)\n raise ex\n\n def run(self) -> Tuple[Status, Optional[Dict[str, TunableValue]]]:\n \"\"\"\n Download benchmark results from the shared storage\n and run post-processing scripts locally.\n\n Returns\n -------\n (status, output) : (Status, dict)\n A pair of (Status, output) values, where `output` is a dict\n with the results or None if the status is not COMPLETED.\n If run script is a benchmark, then the score is usually expected to\n be in the `score` field.\n \"\"\"\n self._download_files()\n return super().run()\n\n def status(self) -> Tuple[Status, List[Tuple[datetime, str, Any]]]:\n self._download_files(ignore_missing=True)\n return super().status()\n", "path": "mlos_bench/mlos_bench/environments/local/local_fileshare_env.py"}, {"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nBase scriptable benchmark environment.\n\"\"\"\n\nimport abc\nimport logging\nimport re\nfrom typing import Dict, Iterable, Optional\n\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\nfrom mlos_bench.util import try_parse_val\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ScriptEnv(Environment, metaclass=abc.ABCMeta):\n \"\"\"\n Base Environment that runs scripts for setup/run/teardown.\n \"\"\"\n\n _RE_INVALID = re.compile(r\"[^a-zA-Z0-9_]\")\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for script execution.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains 
the benchmark environment\n configuration. Each config must have at least the `tunable_params`\n and the `const_args` sections. It must also have at least one of\n the following parameters: {`setup`, `run`, `teardown`}.\n Additional parameters:\n * `shell_env_params` - an array of parameters to pass to the script\n as shell environment variables, and\n * `shell_env_params_rename` - a dictionary of {to: from} mappings\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n *no* additional shell parameters will be passed to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n self._script_setup = self.config.get(\"setup\")\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n\n self._shell_env_params: Iterable[str] = self.config.get(\"shell_env_params\", [])\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n\n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n\n def _get_env_params(self, restrict: bool = True) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n\n Parameters\n ----------\n restrict : bool\n If True, only return the parameters that are in the `_shell_env_params`\n list. If False, return all parameters in `_params` with some possible\n conversions.\n\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n input_params = self._shell_env_params if restrict else self._params.keys()\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in input_params}\n rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n\n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n \"\"\"\n Extract the results from the stdout of the script.\n\n Parameters\n ----------\n stdout : str\n The stdout of the script.\n\n Returns\n -------\n results : Dict[str, TunableValue]\n A dictionary of results extracted from the stdout.\n \"\"\"\n if not self._results_stdout_pattern:\n return {}\n _LOG.debug(\"Extract regex: '%s' from: '%s'\", self._results_stdout_pattern, stdout)\n return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}\n", "path": "mlos_bench/mlos_bench/environments/script_env.py"}]} | 3,444 | 647 |
gh_patches_debug_41113 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-819 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add __getitem__() method for Chainer class
A `str` argument should fetch a component by its id, and an `int` argument should fetch it by its index in the pipeline.
--- END ISSUE ---
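In other words, the request is for index-style access with two key types, roughly as in this toy stand-in (the component names are invented):

```python
# Toy stand-in for the requested lookup semantics, not the real Chainer.
pipeline = ["tokenizer", "embedder", "classifier"]            # ordered components
components_by_id = {"tok": "tokenizer", "clf": "classifier"}  # components that declared an "id"

def get_component(key):
    if isinstance(key, int):        # int -> positional lookup
        return pipeline[key]
    return components_by_id[key]    # str -> lookup by id

print(get_component(0))      # tokenizer
print(get_component("clf"))  # classifier
```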
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deeppavlov/core/commands/infer.py`
Content:
```
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15 import pickle
16 import sys
17 from itertools import islice
18 from logging import getLogger
19 from pathlib import Path
20 from typing import Optional, Union
21
22 from deeppavlov.core.commands.utils import import_packages, parse_config
23 from deeppavlov.core.common.chainer import Chainer
24 from deeppavlov.core.common.params import from_params
25 from deeppavlov.download import deep_download
26
27 log = getLogger(__name__)
28
29
30 def build_model(config: Union[str, Path, dict], mode: str = 'infer',
31 load_trained: bool = False, download: bool = False,
32 serialized: Optional[bytes] = None) -> Chainer:
33 """Build and return the model described in corresponding configuration file."""
34 config = parse_config(config)
35
36 if serialized:
37 serialized: list = pickle.loads(serialized)
38
39 if download:
40 deep_download(config)
41
42 import_packages(config.get('metadata', {}).get('imports', []))
43
44 model_config = config['chainer']
45
46 model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))
47
48 for component_config in model_config['pipe']:
49 if load_trained and ('fit_on' in component_config or 'in_y' in component_config):
50 try:
51 component_config['load_path'] = component_config['save_path']
52 except KeyError:
53 log.warning('No "save_path" parameter for the {} component, so "load_path" will not be renewed'
54 .format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))
55
56 if serialized and 'in' in component_config:
57 component_serialized = serialized.pop(0)
58 else:
59 component_serialized = None
60
61 component = from_params(component_config, mode=mode, serialized=component_serialized)
62
63 if 'in' in component_config:
64 c_in = component_config['in']
65 c_out = component_config['out']
66 in_y = component_config.get('in_y', None)
67 main = component_config.get('main', False)
68 model.append(component, c_in, c_out, in_y, main)
69
70 return model
71
72
73 def interact_model(config: Union[str, Path, dict]) -> None:
74 """Start interaction with the model described in corresponding configuration file."""
75 model = build_model(config)
76
77 while True:
78 args = []
79 for in_x in model.in_x:
80 args.append((input('{}::'.format(in_x)),))
81 # check for exit command
82 if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:
83 return
84
85 pred = model(*args)
86 if len(model.out_params) > 1:
87 pred = zip(*pred)
88
89 print('>>', *pred)
90
91
92 def predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:
93 """Make a prediction with the component described in corresponding configuration file."""
94 if file_path is None or file_path == '-':
95 if sys.stdin.isatty():
96 raise RuntimeError('To process data from terminal please use interact mode')
97 f = sys.stdin
98 else:
99 f = open(file_path, encoding='utf8')
100
101 model: Chainer = build_model(config)
102
103 args_count = len(model.in_x)
104 while True:
105 batch = list((l.strip() for l in islice(f, batch_size * args_count)))
106
107 if not batch:
108 break
109
110 args = []
111 for i in range(args_count):
112 args.append(batch[i::args_count])
113
114 res = model(*args)
115 if len(model.out_params) == 1:
116 res = [res]
117 for res in zip(*res):
118 res = json.dumps(res, ensure_ascii=False)
119 print(res, flush=True)
120
121 if f is not sys.stdin:
122 f.close()
123
```
Path: `deeppavlov/core/common/chainer.py`
Content:
```
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import pickle
16 from logging import getLogger
17 from typing import Union, Tuple, List, Optional
18
19 from deeppavlov.core.common.errors import ConfigError
20 from deeppavlov.core.models.component import Component
21 from deeppavlov.core.models.nn_model import NNModel
22 from deeppavlov.core.models.serializable import Serializable
23
24 log = getLogger(__name__)
25
26
27 class Chainer(Component):
28 """
29 Builds an agent/component pipeline from heterogeneous components (Rule-based/ML/DL). It allows to train
30 and infer models in a pipeline as a whole.
31
32 Attributes:
33 pipe: list of components and their input and output variable names for inference
34 train_pipe: list of components and their input and output variable names for training and evaluation
35 in_x: names of inputs for pipeline inference mode
36 out_params: names of pipeline inference outputs
37 in_y: names of additional inputs for pipeline training and evaluation modes
38 forward_map: list of all variables in chainer's memory after running every component in ``self.pipe``
39 train_map: list of all variables in chainer's memory after running every component in ``train_pipe.pipe``
40 main: reference to the main component
41
42 Args:
43 in_x: names of inputs for pipeline inference mode
44 out_params: names of pipeline inference outputs
45 in_y: names of additional inputs for pipeline training and evaluation modes
46 """
47 def __init__(self, in_x: Union[str, list] = None, out_params: Union[str, list] = None,
48 in_y: Union[str, list] = None, *args, **kwargs) -> None:
49 self.pipe: List[Tuple[Tuple[List[str], List[str]], List[str], Component]] = []
50 self.train_pipe = []
51 if isinstance(in_x, str):
52 in_x = [in_x]
53 if isinstance(in_y, str):
54 in_y = [in_y]
55 if isinstance(out_params, str):
56 out_params = [out_params]
57 self.in_x = in_x or ['x']
58 self.in_y = in_y or ['y']
59 self.out_params = out_params or self.in_x
60
61 self.forward_map = set(self.in_x)
62 self.train_map = self.forward_map.union(self.in_y)
63
64 self.main = None
65
66 def append(self, component: Component, in_x: [str, list, dict]=None, out_params: [str, list]=None,
67 in_y: [str, list, dict]=None, main=False):
68 if isinstance(in_x, str):
69 in_x = [in_x]
70 if isinstance(in_y, str):
71 in_y = [in_y]
72 if isinstance(out_params, str):
73 out_params = [out_params]
74 in_x = in_x or self.in_x
75
76 if isinstance(in_x, dict):
77 x_keys, in_x = zip(*in_x.items())
78 else:
79 x_keys = []
80 out_params = out_params or in_x
81 if in_y is not None:
82 if isinstance(in_y, dict):
83 y_keys, in_y = zip(*in_y.items())
84 else:
85 y_keys = []
86 keys = x_keys + y_keys
87
88 if bool(x_keys) != bool(y_keys):
89 raise ConfigError('`in` and `in_y` for a component have to both be lists or dicts')
90
91 component: NNModel
92 main = True
93 assert self.train_map.issuperset(in_x+in_y), ('Arguments {} are expected but only {} are set'
94 .format(in_x+in_y, self.train_map))
95 preprocessor = Chainer(self.in_x, in_x+in_y, self.in_y)
96 for (t_in_x_keys, t_in_x), t_out, t_component in self.train_pipe:
97 if t_in_x_keys:
98 t_in_x = dict(zip(t_in_x_keys, t_in_x))
99 preprocessor.append(t_component, t_in_x, t_out)
100
101 def train_on_batch(*args, **kwargs):
102 preprocessed = preprocessor.compute(*args, **kwargs)
103 if len(in_x+in_y) == 1:
104 preprocessed = [preprocessed]
105 if keys:
106 return component.train_on_batch(**dict(zip(keys, preprocessed)))
107 else:
108 return component.train_on_batch(*preprocessed)
109
110 self.train_on_batch = train_on_batch
111 self.process_event = component.process_event
112 if main:
113 self.main = component
114 if self.forward_map.issuperset(in_x):
115 self.pipe.append(((x_keys, in_x), out_params, component))
116 self.forward_map = self.forward_map.union(out_params)
117
118 if self.train_map.issuperset(in_x):
119 self.train_pipe.append(((x_keys, in_x), out_params, component))
120 self.train_map = self.train_map.union(out_params)
121 else:
122 raise ConfigError('Arguments {} are expected but only {} are set'.format(in_x, self.train_map))
123
124 def compute(self, x, y=None, targets=None):
125 if targets is None:
126 targets = self.out_params
127 in_params = list(self.in_x)
128 if len(in_params) == 1:
129 args = [x]
130 else:
131 args = list(zip(*x))
132
133 if y is None:
134 pipe = self.pipe
135 else:
136 pipe = self.train_pipe
137 if len(self.in_y) == 1:
138 args.append(y)
139 else:
140 args += list(zip(*y))
141 in_params += self.in_y
142
143 return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)
144
145 def __call__(self, *args):
146 return self._compute(*args, param_names=self.in_x, pipe=self.pipe, targets=self.out_params)
147
148 @staticmethod
149 def _compute(*args, param_names, pipe, targets):
150 expected = set(targets)
151 final_pipe = []
152 for (in_keys, in_params), out_params, component in reversed(pipe):
153 if expected.intersection(out_params):
154 expected = expected - set(out_params) | set(in_params)
155 final_pipe.append(((in_keys, in_params), out_params, component))
156 final_pipe.reverse()
157 if not expected.issubset(param_names):
158 raise RuntimeError(f'{expected} are required to compute {targets} but were not found in memory or inputs')
159 pipe = final_pipe
160
161 mem = dict(zip(param_names, args))
162 del args
163
164 for (in_keys, in_params), out_params, component in pipe:
165 x = [mem[k] for k in in_params]
166 if in_keys:
167 res = component(**dict(zip(in_keys, x)))
168 else:
169 res = component(*x)
170 if len(out_params) == 1:
171 mem[out_params[0]] = res
172 else:
173 mem.update(zip(out_params, res))
174
175 res = [mem[k] for k in targets]
176 if len(res) == 1:
177 res = res[0]
178 return res
179
180 def get_main_component(self) -> Optional[Serializable]:
181 try:
182 return self.main or self.pipe[-1][-1]
183 except IndexError:
184 log.warning('Cannot get a main component for an empty chainer')
185 return None
186
187 def save(self) -> None:
188 main_component = self.get_main_component()
189 if isinstance(main_component, Serializable):
190 main_component.save()
191
192 def load(self) -> None:
193 for in_params, out_params, component in self.train_pipe:
194 if callable(getattr(component, 'load', None)):
195 component.load()
196
197 def reset(self) -> None:
198 for in_params, out_params, component in self.train_pipe:
199 if callable(getattr(component, 'reset', None)):
200 component.reset()
201
202 def destroy(self):
203 if hasattr(self, 'train_pipe'):
204 for in_params, out_params, component in self.train_pipe:
205 if callable(getattr(component, 'destroy', None)):
206 component.destroy()
207 self.train_pipe.clear()
208 if hasattr(self, 'pipe'):
209 self.pipe.clear()
210 super().destroy()
211
212 def serialize(self) -> bytes:
213 data = []
214 for in_params, out_params, component in self.train_pipe:
215 data.append(component.serialize())
216 return pickle.dumps(data, protocol=4)
217
218 def deserialize(self, data: bytes) -> None:
219 data = pickle.loads(data)
220 for in_params, out_params, component in self.train_pipe:
221 component.deserialize(data)
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deeppavlov/core/commands/infer.py b/deeppavlov/core/commands/infer.py
--- a/deeppavlov/core/commands/infer.py
+++ b/deeppavlov/core/commands/infer.py
@@ -60,6 +60,9 @@
component = from_params(component_config, mode=mode, serialized=component_serialized)
+ if 'id' in component_config:
+ model._components_dict[component_config['id']] = component
+
if 'in' in component_config:
c_in = component_config['in']
c_out = component_config['out']
diff --git a/deeppavlov/core/common/chainer.py b/deeppavlov/core/common/chainer.py
--- a/deeppavlov/core/common/chainer.py
+++ b/deeppavlov/core/common/chainer.py
@@ -14,7 +14,7 @@
import pickle
from logging import getLogger
-from typing import Union, Tuple, List, Optional
+from typing import Union, Tuple, List, Optional, Hashable
from deeppavlov.core.common.errors import ConfigError
from deeppavlov.core.models.component import Component
@@ -61,10 +61,68 @@
self.forward_map = set(self.in_x)
self.train_map = self.forward_map.union(self.in_y)
+ self._components_dict = {}
+
self.main = None
- def append(self, component: Component, in_x: [str, list, dict]=None, out_params: [str, list]=None,
- in_y: [str, list, dict]=None, main=False):
+ def __getitem__(self, item):
+ if isinstance(item, int):
+ in_params, out_params, component = self.train_pipe[item]
+ return component
+ return self._components_dict[item]
+
+ def _ipython_key_completions_(self):
+ return self._components_dict.keys()
+
+ def __repr__(self):
+ reversed_components_dict = {v: f'{repr(k)}: ' for k, v in self._components_dict.items()
+ if isinstance(v, Hashable)}
+
+ components_list = []
+ for in_params, out_params, component in self.train_pipe:
+ component_repr = repr(component)
+ if isinstance(component, Hashable) and component in reversed_components_dict:
+ component_repr = reversed_components_dict[component] + component_repr
+ else:
+ for k, v in self._components_dict.items():
+ if v is component:
+ component_repr = f'{k}: {component_repr}'
+ break
+ components_list.append(component_repr)
+
+ return f'Chainer[{", ".join(components_list)}]'
+
+ def _repr_pretty_(self, p, cycle):
+ """method that defines ``Struct``'s pretty printing rules for iPython
+
+ Args:
+ p (IPython.lib.pretty.RepresentationPrinter): pretty printer object
+ cycle (bool): is ``True`` if pretty detected a cycle
+ """
+ if cycle:
+ p.text('Chainer(...)')
+ else:
+ with p.group(8, 'Chainer[', ']'):
+ reversed_components_dict = {v: k for k, v in self._components_dict.items()
+ if isinstance(v, Hashable)}
+ # p.pretty(self.__prepare_repr())
+ for i, (in_params, out_params, component) in enumerate(self.train_pipe):
+ if i > 0:
+ p.text(',')
+ p.breakable()
+ if isinstance(component, Hashable) and component in reversed_components_dict:
+ p.pretty(reversed_components_dict[component])
+ p.text(': ')
+ else:
+ for k, v in self._components_dict.items():
+ if v is component:
+ p.pretty(k)
+ p.text(': ')
+ break
+ p.pretty(component)
+
+ def append(self, component: Component, in_x: [str, list, dict] = None, out_params: [str, list] = None,
+ in_y: [str, list, dict] = None, main: bool = False):
if isinstance(in_x, str):
in_x = [in_x]
if isinstance(in_y, str):
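With these changes, a built pipeline can be indexed either by the `id` a component declares in its config or by its position in the training pipe; the toy reconstruction below mirrors the patched lookup (component names are placeholders, not real deeppavlov classes):

```python
# Minimal reconstruction of the patched behaviour; not the real deeppavlov classes.
class MiniChainer:
    def __init__(self):
        self.train_pipe = []        # [(in_params, out_params, component), ...]
        self._components_dict = {}  # filled from each component's "id" in build_model()

    def append(self, component, component_id=None):
        self.train_pipe.append(((), (), component))
        if component_id is not None:
            self._components_dict[component_id] = component

    def __getitem__(self, item):
        if isinstance(item, int):
            _, _, component = self.train_pipe[item]
            return component
        return self._components_dict[item]

chain = MiniChainer()
chain.append(str.lower, "lowercaser")
chain.append(str.split)
print(chain["lowercaser"]("ABC"))  # abc
print(chain[1]("a b"))             # ['a', 'b']
```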
| {"golden_diff": "diff --git a/deeppavlov/core/commands/infer.py b/deeppavlov/core/commands/infer.py\n--- a/deeppavlov/core/commands/infer.py\n+++ b/deeppavlov/core/commands/infer.py\n@@ -60,6 +60,9 @@\n \n component = from_params(component_config, mode=mode, serialized=component_serialized)\n \n+ if 'id' in component_config:\n+ model._components_dict[component_config['id']] = component\n+\n if 'in' in component_config:\n c_in = component_config['in']\n c_out = component_config['out']\ndiff --git a/deeppavlov/core/common/chainer.py b/deeppavlov/core/common/chainer.py\n--- a/deeppavlov/core/common/chainer.py\n+++ b/deeppavlov/core/common/chainer.py\n@@ -14,7 +14,7 @@\n \n import pickle\n from logging import getLogger\n-from typing import Union, Tuple, List, Optional\n+from typing import Union, Tuple, List, Optional, Hashable\n \n from deeppavlov.core.common.errors import ConfigError\n from deeppavlov.core.models.component import Component\n@@ -61,10 +61,68 @@\n self.forward_map = set(self.in_x)\n self.train_map = self.forward_map.union(self.in_y)\n \n+ self._components_dict = {}\n+\n self.main = None\n \n- def append(self, component: Component, in_x: [str, list, dict]=None, out_params: [str, list]=None,\n- in_y: [str, list, dict]=None, main=False):\n+ def __getitem__(self, item):\n+ if isinstance(item, int):\n+ in_params, out_params, component = self.train_pipe[item]\n+ return component\n+ return self._components_dict[item]\n+\n+ def _ipython_key_completions_(self):\n+ return self._components_dict.keys()\n+\n+ def __repr__(self):\n+ reversed_components_dict = {v: f'{repr(k)}: ' for k, v in self._components_dict.items()\n+ if isinstance(v, Hashable)}\n+\n+ components_list = []\n+ for in_params, out_params, component in self.train_pipe:\n+ component_repr = repr(component)\n+ if isinstance(component, Hashable) and component in reversed_components_dict:\n+ component_repr = reversed_components_dict[component] + component_repr\n+ else:\n+ for k, v in self._components_dict.items():\n+ if v is component:\n+ component_repr = f'{k}: {component_repr}'\n+ break\n+ components_list.append(component_repr)\n+\n+ return f'Chainer[{\", \".join(components_list)}]'\n+\n+ def _repr_pretty_(self, p, cycle):\n+ \"\"\"method that defines ``Struct``'s pretty printing rules for iPython\n+\n+ Args:\n+ p (IPython.lib.pretty.RepresentationPrinter): pretty printer object\n+ cycle (bool): is ``True`` if pretty detected a cycle\n+ \"\"\"\n+ if cycle:\n+ p.text('Chainer(...)')\n+ else:\n+ with p.group(8, 'Chainer[', ']'):\n+ reversed_components_dict = {v: k for k, v in self._components_dict.items()\n+ if isinstance(v, Hashable)}\n+ # p.pretty(self.__prepare_repr())\n+ for i, (in_params, out_params, component) in enumerate(self.train_pipe):\n+ if i > 0:\n+ p.text(',')\n+ p.breakable()\n+ if isinstance(component, Hashable) and component in reversed_components_dict:\n+ p.pretty(reversed_components_dict[component])\n+ p.text(': ')\n+ else:\n+ for k, v in self._components_dict.items():\n+ if v is component:\n+ p.pretty(k)\n+ p.text(': ')\n+ break\n+ p.pretty(component)\n+\n+ def append(self, component: Component, in_x: [str, list, dict] = None, out_params: [str, list] = None,\n+ in_y: [str, list, dict] = None, main: bool = False):\n if isinstance(in_x, str):\n in_x = [in_x]\n if isinstance(in_y, str):\n", "issue": "Add __getitem__() method for Chainer class\n`str` argument for getting by id and `int` for getting by index\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# 
Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport pickle\nimport sys\nfrom itertools import islice\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom deeppavlov.core.commands.utils import import_packages, parse_config\nfrom deeppavlov.core.common.chainer import Chainer\nfrom deeppavlov.core.common.params import from_params\nfrom deeppavlov.download import deep_download\n\nlog = getLogger(__name__)\n\n\ndef build_model(config: Union[str, Path, dict], mode: str = 'infer',\n load_trained: bool = False, download: bool = False,\n serialized: Optional[bytes] = None) -> Chainer:\n \"\"\"Build and return the model described in corresponding configuration file.\"\"\"\n config = parse_config(config)\n\n if serialized:\n serialized: list = pickle.loads(serialized)\n\n if download:\n deep_download(config)\n\n import_packages(config.get('metadata', {}).get('imports', []))\n\n model_config = config['chainer']\n\n model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))\n\n for component_config in model_config['pipe']:\n if load_trained and ('fit_on' in component_config or 'in_y' in component_config):\n try:\n component_config['load_path'] = component_config['save_path']\n except KeyError:\n log.warning('No \"save_path\" parameter for the {} component, so \"load_path\" will not be renewed'\n .format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))\n\n if serialized and 'in' in component_config:\n component_serialized = serialized.pop(0)\n else:\n component_serialized = None\n\n component = from_params(component_config, mode=mode, serialized=component_serialized)\n\n if 'in' in component_config:\n c_in = component_config['in']\n c_out = component_config['out']\n in_y = component_config.get('in_y', None)\n main = component_config.get('main', False)\n model.append(component, c_in, c_out, in_y, main)\n\n return model\n\n\ndef interact_model(config: Union[str, Path, dict]) -> None:\n \"\"\"Start interaction with the model described in corresponding configuration file.\"\"\"\n model = build_model(config)\n\n while True:\n args = []\n for in_x in model.in_x:\n args.append((input('{}::'.format(in_x)),))\n # check for exit command\n if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:\n return\n\n pred = model(*args)\n if len(model.out_params) > 1:\n pred = zip(*pred)\n\n print('>>', *pred)\n\n\ndef predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:\n \"\"\"Make a prediction with the component described in corresponding configuration file.\"\"\"\n if file_path is None or file_path == '-':\n if sys.stdin.isatty():\n raise RuntimeError('To process data from terminal please use interact mode')\n f = sys.stdin\n else:\n f = open(file_path, encoding='utf8')\n\n model: Chainer = build_model(config)\n\n args_count = len(model.in_x)\n while True:\n batch = list((l.strip() for l in islice(f, batch_size * args_count)))\n\n if not batch:\n break\n\n 
args = []\n for i in range(args_count):\n args.append(batch[i::args_count])\n\n res = model(*args)\n if len(model.out_params) == 1:\n res = [res]\n for res in zip(*res):\n res = json.dumps(res, ensure_ascii=False)\n print(res, flush=True)\n\n if f is not sys.stdin:\n f.close()\n", "path": "deeppavlov/core/commands/infer.py"}, {"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pickle\nfrom logging import getLogger\nfrom typing import Union, Tuple, List, Optional\n\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.models.component import Component\nfrom deeppavlov.core.models.nn_model import NNModel\nfrom deeppavlov.core.models.serializable import Serializable\n\nlog = getLogger(__name__)\n\n\nclass Chainer(Component):\n \"\"\"\n Builds an agent/component pipeline from heterogeneous components (Rule-based/ML/DL). It allows to train\n and infer models in a pipeline as a whole.\n\n Attributes:\n pipe: list of components and their input and output variable names for inference\n train_pipe: list of components and their input and output variable names for training and evaluation\n in_x: names of inputs for pipeline inference mode\n out_params: names of pipeline inference outputs\n in_y: names of additional inputs for pipeline training and evaluation modes\n forward_map: list of all variables in chainer's memory after running every component in ``self.pipe``\n train_map: list of all variables in chainer's memory after running every component in ``train_pipe.pipe``\n main: reference to the main component\n\n Args:\n in_x: names of inputs for pipeline inference mode\n out_params: names of pipeline inference outputs\n in_y: names of additional inputs for pipeline training and evaluation modes\n \"\"\"\n def __init__(self, in_x: Union[str, list] = None, out_params: Union[str, list] = None,\n in_y: Union[str, list] = None, *args, **kwargs) -> None:\n self.pipe: List[Tuple[Tuple[List[str], List[str]], List[str], Component]] = []\n self.train_pipe = []\n if isinstance(in_x, str):\n in_x = [in_x]\n if isinstance(in_y, str):\n in_y = [in_y]\n if isinstance(out_params, str):\n out_params = [out_params]\n self.in_x = in_x or ['x']\n self.in_y = in_y or ['y']\n self.out_params = out_params or self.in_x\n\n self.forward_map = set(self.in_x)\n self.train_map = self.forward_map.union(self.in_y)\n\n self.main = None\n\n def append(self, component: Component, in_x: [str, list, dict]=None, out_params: [str, list]=None,\n in_y: [str, list, dict]=None, main=False):\n if isinstance(in_x, str):\n in_x = [in_x]\n if isinstance(in_y, str):\n in_y = [in_y]\n if isinstance(out_params, str):\n out_params = [out_params]\n in_x = in_x or self.in_x\n\n if isinstance(in_x, dict):\n x_keys, in_x = zip(*in_x.items())\n else:\n x_keys = []\n out_params = out_params or in_x\n if in_y is not None:\n if isinstance(in_y, dict):\n y_keys, in_y = zip(*in_y.items())\n else:\n y_keys = []\n keys = x_keys + y_keys\n\n if bool(x_keys) 
!= bool(y_keys):\n raise ConfigError('`in` and `in_y` for a component have to both be lists or dicts')\n\n component: NNModel\n main = True\n assert self.train_map.issuperset(in_x+in_y), ('Arguments {} are expected but only {} are set'\n .format(in_x+in_y, self.train_map))\n preprocessor = Chainer(self.in_x, in_x+in_y, self.in_y)\n for (t_in_x_keys, t_in_x), t_out, t_component in self.train_pipe:\n if t_in_x_keys:\n t_in_x = dict(zip(t_in_x_keys, t_in_x))\n preprocessor.append(t_component, t_in_x, t_out)\n\n def train_on_batch(*args, **kwargs):\n preprocessed = preprocessor.compute(*args, **kwargs)\n if len(in_x+in_y) == 1:\n preprocessed = [preprocessed]\n if keys:\n return component.train_on_batch(**dict(zip(keys, preprocessed)))\n else:\n return component.train_on_batch(*preprocessed)\n\n self.train_on_batch = train_on_batch\n self.process_event = component.process_event\n if main:\n self.main = component\n if self.forward_map.issuperset(in_x):\n self.pipe.append(((x_keys, in_x), out_params, component))\n self.forward_map = self.forward_map.union(out_params)\n\n if self.train_map.issuperset(in_x):\n self.train_pipe.append(((x_keys, in_x), out_params, component))\n self.train_map = self.train_map.union(out_params)\n else:\n raise ConfigError('Arguments {} are expected but only {} are set'.format(in_x, self.train_map))\n\n def compute(self, x, y=None, targets=None):\n if targets is None:\n targets = self.out_params\n in_params = list(self.in_x)\n if len(in_params) == 1:\n args = [x]\n else:\n args = list(zip(*x))\n\n if y is None:\n pipe = self.pipe\n else:\n pipe = self.train_pipe\n if len(self.in_y) == 1:\n args.append(y)\n else:\n args += list(zip(*y))\n in_params += self.in_y\n\n return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)\n\n def __call__(self, *args):\n return self._compute(*args, param_names=self.in_x, pipe=self.pipe, targets=self.out_params)\n\n @staticmethod\n def _compute(*args, param_names, pipe, targets):\n expected = set(targets)\n final_pipe = []\n for (in_keys, in_params), out_params, component in reversed(pipe):\n if expected.intersection(out_params):\n expected = expected - set(out_params) | set(in_params)\n final_pipe.append(((in_keys, in_params), out_params, component))\n final_pipe.reverse()\n if not expected.issubset(param_names):\n raise RuntimeError(f'{expected} are required to compute {targets} but were not found in memory or inputs')\n pipe = final_pipe\n\n mem = dict(zip(param_names, args))\n del args\n\n for (in_keys, in_params), out_params, component in pipe:\n x = [mem[k] for k in in_params]\n if in_keys:\n res = component(**dict(zip(in_keys, x)))\n else:\n res = component(*x)\n if len(out_params) == 1:\n mem[out_params[0]] = res\n else:\n mem.update(zip(out_params, res))\n\n res = [mem[k] for k in targets]\n if len(res) == 1:\n res = res[0]\n return res\n\n def get_main_component(self) -> Optional[Serializable]:\n try:\n return self.main or self.pipe[-1][-1]\n except IndexError:\n log.warning('Cannot get a main component for an empty chainer')\n return None\n\n def save(self) -> None:\n main_component = self.get_main_component()\n if isinstance(main_component, Serializable):\n main_component.save()\n\n def load(self) -> None:\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'load', None)):\n component.load()\n\n def reset(self) -> None:\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'reset', None)):\n component.reset()\n\n def 
destroy(self):\n if hasattr(self, 'train_pipe'):\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'destroy', None)):\n component.destroy()\n self.train_pipe.clear()\n if hasattr(self, 'pipe'):\n self.pipe.clear()\n super().destroy()\n\n def serialize(self) -> bytes:\n data = []\n for in_params, out_params, component in self.train_pipe:\n data.append(component.serialize())\n return pickle.dumps(data, protocol=4)\n\n def deserialize(self, data: bytes) -> None:\n data = pickle.loads(data)\n for in_params, out_params, component in self.train_pipe:\n component.deserialize(data)\n", "path": "deeppavlov/core/common/chainer.py"}], "after_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport pickle\nimport sys\nfrom itertools import islice\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom deeppavlov.core.commands.utils import import_packages, parse_config\nfrom deeppavlov.core.common.chainer import Chainer\nfrom deeppavlov.core.common.params import from_params\nfrom deeppavlov.download import deep_download\n\nlog = getLogger(__name__)\n\n\ndef build_model(config: Union[str, Path, dict], mode: str = 'infer',\n load_trained: bool = False, download: bool = False,\n serialized: Optional[bytes] = None) -> Chainer:\n \"\"\"Build and return the model described in corresponding configuration file.\"\"\"\n config = parse_config(config)\n\n if serialized:\n serialized: list = pickle.loads(serialized)\n\n if download:\n deep_download(config)\n\n import_packages(config.get('metadata', {}).get('imports', []))\n\n model_config = config['chainer']\n\n model = Chainer(model_config['in'], model_config['out'], model_config.get('in_y'))\n\n for component_config in model_config['pipe']:\n if load_trained and ('fit_on' in component_config or 'in_y' in component_config):\n try:\n component_config['load_path'] = component_config['save_path']\n except KeyError:\n log.warning('No \"save_path\" parameter for the {} component, so \"load_path\" will not be renewed'\n .format(component_config.get('class_name', component_config.get('ref', 'UNKNOWN'))))\n\n if serialized and 'in' in component_config:\n component_serialized = serialized.pop(0)\n else:\n component_serialized = None\n\n component = from_params(component_config, mode=mode, serialized=component_serialized)\n\n if 'id' in component_config:\n model._components_dict[component_config['id']] = component\n\n if 'in' in component_config:\n c_in = component_config['in']\n c_out = component_config['out']\n in_y = component_config.get('in_y', None)\n main = component_config.get('main', False)\n model.append(component, c_in, c_out, in_y, main)\n\n return model\n\n\ndef interact_model(config: Union[str, Path, dict]) -> None:\n \"\"\"Start interaction with the model described in corresponding configuration file.\"\"\"\n model = build_model(config)\n\n while True:\n args = []\n for in_x in 
model.in_x:\n args.append((input('{}::'.format(in_x)),))\n # check for exit command\n if args[-1][0] in {'exit', 'stop', 'quit', 'q'}:\n return\n\n pred = model(*args)\n if len(model.out_params) > 1:\n pred = zip(*pred)\n\n print('>>', *pred)\n\n\ndef predict_on_stream(config: Union[str, Path, dict], batch_size: int = 1, file_path: Optional[str] = None) -> None:\n \"\"\"Make a prediction with the component described in corresponding configuration file.\"\"\"\n if file_path is None or file_path == '-':\n if sys.stdin.isatty():\n raise RuntimeError('To process data from terminal please use interact mode')\n f = sys.stdin\n else:\n f = open(file_path, encoding='utf8')\n\n model: Chainer = build_model(config)\n\n args_count = len(model.in_x)\n while True:\n batch = list((l.strip() for l in islice(f, batch_size * args_count)))\n\n if not batch:\n break\n\n args = []\n for i in range(args_count):\n args.append(batch[i::args_count])\n\n res = model(*args)\n if len(model.out_params) == 1:\n res = [res]\n for res in zip(*res):\n res = json.dumps(res, ensure_ascii=False)\n print(res, flush=True)\n\n if f is not sys.stdin:\n f.close()\n", "path": "deeppavlov/core/commands/infer.py"}, {"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pickle\nfrom logging import getLogger\nfrom typing import Union, Tuple, List, Optional, Hashable\n\nfrom deeppavlov.core.common.errors import ConfigError\nfrom deeppavlov.core.models.component import Component\nfrom deeppavlov.core.models.nn_model import NNModel\nfrom deeppavlov.core.models.serializable import Serializable\n\nlog = getLogger(__name__)\n\n\nclass Chainer(Component):\n \"\"\"\n Builds an agent/component pipeline from heterogeneous components (Rule-based/ML/DL). 
It allows to train\n and infer models in a pipeline as a whole.\n\n Attributes:\n pipe: list of components and their input and output variable names for inference\n train_pipe: list of components and their input and output variable names for training and evaluation\n in_x: names of inputs for pipeline inference mode\n out_params: names of pipeline inference outputs\n in_y: names of additional inputs for pipeline training and evaluation modes\n forward_map: list of all variables in chainer's memory after running every component in ``self.pipe``\n train_map: list of all variables in chainer's memory after running every component in ``train_pipe.pipe``\n main: reference to the main component\n\n Args:\n in_x: names of inputs for pipeline inference mode\n out_params: names of pipeline inference outputs\n in_y: names of additional inputs for pipeline training and evaluation modes\n \"\"\"\n def __init__(self, in_x: Union[str, list] = None, out_params: Union[str, list] = None,\n in_y: Union[str, list] = None, *args, **kwargs) -> None:\n self.pipe: List[Tuple[Tuple[List[str], List[str]], List[str], Component]] = []\n self.train_pipe = []\n if isinstance(in_x, str):\n in_x = [in_x]\n if isinstance(in_y, str):\n in_y = [in_y]\n if isinstance(out_params, str):\n out_params = [out_params]\n self.in_x = in_x or ['x']\n self.in_y = in_y or ['y']\n self.out_params = out_params or self.in_x\n\n self.forward_map = set(self.in_x)\n self.train_map = self.forward_map.union(self.in_y)\n\n self._components_dict = {}\n\n self.main = None\n\n def __getitem__(self, item):\n if isinstance(item, int):\n in_params, out_params, component = self.train_pipe[item]\n return component\n return self._components_dict[item]\n\n def _ipython_key_completions_(self):\n return self._components_dict.keys()\n\n def __repr__(self):\n reversed_components_dict = {v: f'{repr(k)}: ' for k, v in self._components_dict.items()\n if isinstance(v, Hashable)}\n\n components_list = []\n for in_params, out_params, component in self.train_pipe:\n component_repr = repr(component)\n if isinstance(component, Hashable) and component in reversed_components_dict:\n component_repr = reversed_components_dict[component] + component_repr\n else:\n for k, v in self._components_dict.items():\n if v is component:\n component_repr = f'{k}: {component_repr}'\n break\n components_list.append(component_repr)\n\n return f'Chainer[{\", \".join(components_list)}]'\n\n def _repr_pretty_(self, p, cycle):\n \"\"\"method that defines ``Struct``'s pretty printing rules for iPython\n\n Args:\n p (IPython.lib.pretty.RepresentationPrinter): pretty printer object\n cycle (bool): is ``True`` if pretty detected a cycle\n \"\"\"\n if cycle:\n p.text('Chainer(...)')\n else:\n with p.group(8, 'Chainer[', ']'):\n reversed_components_dict = {v: k for k, v in self._components_dict.items()\n if isinstance(v, Hashable)}\n # p.pretty(self.__prepare_repr())\n for i, (in_params, out_params, component) in enumerate(self.train_pipe):\n if i > 0:\n p.text(',')\n p.breakable()\n if isinstance(component, Hashable) and component in reversed_components_dict:\n p.pretty(reversed_components_dict[component])\n p.text(': ')\n else:\n for k, v in self._components_dict.items():\n if v is component:\n p.pretty(k)\n p.text(': ')\n break\n p.pretty(component)\n\n def append(self, component: Component, in_x: [str, list, dict] = None, out_params: [str, list] = None,\n in_y: [str, list, dict] = None, main: bool = False):\n if isinstance(in_x, str):\n in_x = [in_x]\n if isinstance(in_y, str):\n in_y = 
[in_y]\n if isinstance(out_params, str):\n out_params = [out_params]\n in_x = in_x or self.in_x\n\n if isinstance(in_x, dict):\n x_keys, in_x = zip(*in_x.items())\n else:\n x_keys = []\n out_params = out_params or in_x\n if in_y is not None:\n if isinstance(in_y, dict):\n y_keys, in_y = zip(*in_y.items())\n else:\n y_keys = []\n keys = x_keys + y_keys\n\n if bool(x_keys) != bool(y_keys):\n raise ConfigError('`in` and `in_y` for a component have to both be lists or dicts')\n\n component: NNModel\n main = True\n assert self.train_map.issuperset(in_x+in_y), ('Arguments {} are expected but only {} are set'\n .format(in_x+in_y, self.train_map))\n preprocessor = Chainer(self.in_x, in_x+in_y, self.in_y)\n for (t_in_x_keys, t_in_x), t_out, t_component in self.train_pipe:\n if t_in_x_keys:\n t_in_x = dict(zip(t_in_x_keys, t_in_x))\n preprocessor.append(t_component, t_in_x, t_out)\n\n def train_on_batch(*args, **kwargs):\n preprocessed = preprocessor.compute(*args, **kwargs)\n if len(in_x+in_y) == 1:\n preprocessed = [preprocessed]\n if keys:\n return component.train_on_batch(**dict(zip(keys, preprocessed)))\n else:\n return component.train_on_batch(*preprocessed)\n\n self.train_on_batch = train_on_batch\n self.process_event = component.process_event\n if main:\n self.main = component\n if self.forward_map.issuperset(in_x):\n self.pipe.append(((x_keys, in_x), out_params, component))\n self.forward_map = self.forward_map.union(out_params)\n\n if self.train_map.issuperset(in_x):\n self.train_pipe.append(((x_keys, in_x), out_params, component))\n self.train_map = self.train_map.union(out_params)\n else:\n raise ConfigError('Arguments {} are expected but only {} are set'.format(in_x, self.train_map))\n\n def compute(self, x, y=None, targets=None):\n if targets is None:\n targets = self.out_params\n in_params = list(self.in_x)\n if len(in_params) == 1:\n args = [x]\n else:\n args = list(zip(*x))\n\n if y is None:\n pipe = self.pipe\n else:\n pipe = self.train_pipe\n if len(self.in_y) == 1:\n args.append(y)\n else:\n args += list(zip(*y))\n in_params += self.in_y\n\n return self._compute(*args, pipe=pipe, param_names=in_params, targets=targets)\n\n def __call__(self, *args):\n return self._compute(*args, param_names=self.in_x, pipe=self.pipe, targets=self.out_params)\n\n @staticmethod\n def _compute(*args, param_names, pipe, targets):\n expected = set(targets)\n final_pipe = []\n for (in_keys, in_params), out_params, component in reversed(pipe):\n if expected.intersection(out_params):\n expected = expected - set(out_params) | set(in_params)\n final_pipe.append(((in_keys, in_params), out_params, component))\n final_pipe.reverse()\n if not expected.issubset(param_names):\n raise RuntimeError(f'{expected} are required to compute {targets} but were not found in memory or inputs')\n pipe = final_pipe\n\n mem = dict(zip(param_names, args))\n del args\n\n for (in_keys, in_params), out_params, component in pipe:\n x = [mem[k] for k in in_params]\n if in_keys:\n res = component(**dict(zip(in_keys, x)))\n else:\n res = component(*x)\n if len(out_params) == 1:\n mem[out_params[0]] = res\n else:\n mem.update(zip(out_params, res))\n\n res = [mem[k] for k in targets]\n if len(res) == 1:\n res = res[0]\n return res\n\n def get_main_component(self) -> Optional[Serializable]:\n try:\n return self.main or self.pipe[-1][-1]\n except IndexError:\n log.warning('Cannot get a main component for an empty chainer')\n return None\n\n def save(self) -> None:\n main_component = self.get_main_component()\n if 
isinstance(main_component, Serializable):\n main_component.save()\n\n def load(self) -> None:\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'load', None)):\n component.load()\n\n def reset(self) -> None:\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'reset', None)):\n component.reset()\n\n def destroy(self):\n if hasattr(self, 'train_pipe'):\n for in_params, out_params, component in self.train_pipe:\n if callable(getattr(component, 'destroy', None)):\n component.destroy()\n self.train_pipe.clear()\n if hasattr(self, 'pipe'):\n self.pipe.clear()\n super().destroy()\n\n def serialize(self) -> bytes:\n data = []\n for in_params, out_params, component in self.train_pipe:\n data.append(component.serialize())\n return pickle.dumps(data, protocol=4)\n\n def deserialize(self, data: bytes) -> None:\n data = pickle.loads(data)\n for in_params, out_params, component in self.train_pipe:\n component.deserialize(data)\n", "path": "deeppavlov/core/common/chainer.py"}]} | 4,052 | 950 |
gh_patches_debug_4682 | rasdani/github-patches | git_diff | ethereum__web3.py-709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use EthereumTesterProvider backed by eth-tester in default import
`from web3 import EthereumTesterProvider` should use the eth-tester one, not the testrpc one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/__init__.py`
Content:
```
1 import pkg_resources
2 import sys
3
4 if sys.version_info < (3, 5):
5 raise EnvironmentError("Python 3.5 or above is required")
6
7 from eth_account import Account # noqa: E402
8 from web3.main import Web3 # noqa: E402
9 from web3.providers.rpc import ( # noqa: E402
10 HTTPProvider,
11 )
12 from web3.providers.tester import ( # noqa: E402
13 TestRPCProvider,
14 EthereumTesterProvider,
15 )
16 from web3.providers.ipc import ( # noqa: E402
17 IPCProvider,
18 )
19
20 __version__ = pkg_resources.get_distribution("web3").version
21
22 __all__ = [
23 "__version__",
24 "Web3",
25 "HTTPProvider",
26 "IPCProvider",
27 "TestRPCProvider",
28 "EthereumTesterProvider",
29 "Account",
30 ]
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/__init__.py b/web3/__init__.py
--- a/web3/__init__.py
+++ b/web3/__init__.py
@@ -9,9 +9,11 @@
from web3.providers.rpc import ( # noqa: E402
HTTPProvider,
)
+from web3.providers.eth_tester import ( # noqa: E402
+ EthereumTesterProvider,
+)
from web3.providers.tester import ( # noqa: E402
TestRPCProvider,
- EthereumTesterProvider,
)
from web3.providers.ipc import ( # noqa: E402
IPCProvider,
| {"golden_diff": "diff --git a/web3/__init__.py b/web3/__init__.py\n--- a/web3/__init__.py\n+++ b/web3/__init__.py\n@@ -9,9 +9,11 @@\n from web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n )\n+from web3.providers.eth_tester import ( # noqa: E402\n+ EthereumTesterProvider,\n+)\n from web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n- EthereumTesterProvider,\n )\n from web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n", "issue": "Use EthereumTesterProvider backed by eth-tester in default import\n`from web3 import EthereumTesterProvider` should use the eth-tester one, not the testrpc one.\n", "before_files": [{"content": "import pkg_resources\nimport sys\n\nif sys.version_info < (3, 5):\n raise EnvironmentError(\"Python 3.5 or above is required\")\n\nfrom eth_account import Account # noqa: E402\nfrom web3.main import Web3 # noqa: E402\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n EthereumTesterProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"TestRPCProvider\",\n \"EthereumTesterProvider\",\n \"Account\",\n]\n", "path": "web3/__init__.py"}], "after_files": [{"content": "import pkg_resources\nimport sys\n\nif sys.version_info < (3, 5):\n raise EnvironmentError(\"Python 3.5 or above is required\")\n\nfrom eth_account import Account # noqa: E402\nfrom web3.main import Web3 # noqa: E402\nfrom web3.providers.rpc import ( # noqa: E402\n HTTPProvider,\n)\nfrom web3.providers.eth_tester import ( # noqa: E402\n EthereumTesterProvider,\n)\nfrom web3.providers.tester import ( # noqa: E402\n TestRPCProvider,\n)\nfrom web3.providers.ipc import ( # noqa: E402\n IPCProvider,\n)\n\n__version__ = pkg_resources.get_distribution(\"web3\").version\n\n__all__ = [\n \"__version__\",\n \"Web3\",\n \"HTTPProvider\",\n \"IPCProvider\",\n \"TestRPCProvider\",\n \"EthereumTesterProvider\",\n \"Account\",\n]\n", "path": "web3/__init__.py"}]} | 545 | 145 |
gh_patches_debug_6065 | rasdani/github-patches | git_diff | mozilla__telemetry-analysis-service-228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'SparkJobProvisioner' object has no attribute 'stop'
While testing locally in a branch I bumped into this. I didn't test it on master but looking at the code this seems like it may also be a problem.
The `SparkJob.terminate` calls `self.provisioner.stop(self.current_run_jobflow_id)`, where `self.provisioner` is a `SparkJobProvisioner`. But currently the `SparkJobProvisioner` has no `stop` method and neither does the parent class, it is only defined on the `ClusterProvisioner`. In my branch I changed the `terminate` method to call `self.cluster_provisioner.stop` instead and it fixed my issue locally.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `atmo/jobs/models.py`
Content:
```
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, you can obtain one at http://mozilla.org/MPL/2.0/.
4 from datetime import timedelta
5
6 from django.core.urlresolvers import reverse
7 from django.db import models
8 from django.utils import timezone
9 from django.utils.encoding import python_2_unicode_compatible
10 from django.utils.functional import cached_property
11
12 from atmo.clusters.provisioners import ClusterProvisioner
13 from .provisioners import SparkJobProvisioner
14 from ..clusters.models import Cluster
15 from ..models import CreatedByModel, EMRReleaseModel
16
17
18 @python_2_unicode_compatible
19 class SparkJob(EMRReleaseModel, CreatedByModel):
20 INTERVAL_DAILY = 24
21 INTERVAL_WEEKLY = INTERVAL_DAILY * 7
22 INTERVAL_MONTHLY = INTERVAL_DAILY * 30
23 INTERVAL_CHOICES = [
24 (INTERVAL_DAILY, 'Daily'),
25 (INTERVAL_WEEKLY, 'Weekly'),
26 (INTERVAL_MONTHLY, 'Monthly'),
27 ]
28 RESULT_PRIVATE = 'private'
29 RESULT_PUBLIC = 'public'
30 RESULT_VISIBILITY_CHOICES = [
31 (RESULT_PRIVATE, 'Private'),
32 (RESULT_PUBLIC, 'Public'),
33 ]
34 FINAL_STATUS_LIST = Cluster.TERMINATED_STATUS_LIST + Cluster.FAILED_STATUS_LIST
35 DEFAULT_STATUS = ''
36
37 identifier = models.CharField(
38 max_length=100,
39 help_text="Job name, used to uniqely identify individual jobs.",
40 unique=True,
41 )
42 description = models.TextField(
43 help_text='Job description.',
44 default='',
45 )
46 notebook_s3_key = models.CharField(
47 max_length=800,
48 help_text="S3 key of the notebook after uploading it to the Spark code bucket."
49 )
50 result_visibility = models.CharField( # can currently be "public" or "private"
51 max_length=50,
52 help_text="Whether notebook results are uploaded to a public or private bucket",
53 choices=RESULT_VISIBILITY_CHOICES,
54 default=RESULT_PRIVATE,
55 )
56 size = models.IntegerField(
57 help_text="Number of computers to use to run the job."
58 )
59 interval_in_hours = models.IntegerField(
60 help_text="Interval at which the job should run, in hours.",
61 choices=INTERVAL_CHOICES,
62 default=INTERVAL_DAILY,
63 )
64 job_timeout = models.IntegerField(
65 help_text="Number of hours before the job times out.",
66 )
67 start_date = models.DateTimeField(
68 help_text="Date/time that the job should start being scheduled to run."
69 )
70 end_date = models.DateTimeField(
71 blank=True,
72 null=True,
73 help_text="Date/time that the job should stop being scheduled to run, null if no end date."
74 )
75 is_enabled = models.BooleanField(
76 default=True,
77 help_text="Whether the job should run or not."
78 )
79 last_run_date = models.DateTimeField(
80 blank=True,
81 null=True,
82 help_text="Date/time that the job was last started, null if never."
83 )
84 current_run_jobflow_id = models.CharField(
85 max_length=50,
86 blank=True,
87 null=True,
88 )
89 most_recent_status = models.CharField(
90 max_length=50,
91 blank=True,
92 default=DEFAULT_STATUS,
93 )
94
95 class Meta:
96 permissions = [
97 ('view_sparkjob', 'Can view Spark job'),
98 ]
99
100 def __str__(self):
101 return self.identifier
102
103 def __repr__(self):
104 return "<SparkJob {} with {} nodes>".format(self.identifier, self.size)
105
106 @property
107 def provisioner(self):
108 return SparkJobProvisioner()
109
110 # TEMPORARY till we have 1:1 relationship to cluster object
111 # and we can then ask for spark_job.cluster.provisioner
112 @property
113 def cluster_provisioner(self):
114 return ClusterProvisioner()
115
116 @property
117 def has_never_run(self):
118 """
119 Whether the job has run before.
120 Looks at both the cluster status and our own record when
121 we asked it to run.
122 """
123 return (self.most_recent_status == self.DEFAULT_STATUS or
124 self.last_run_date is None)
125
126 @property
127 def has_finished(self):
128 """Whether the job's cluster is terminated or failed"""
129 return self.most_recent_status in self.FINAL_STATUS_LIST
130
131 @property
132 def is_runnable(self):
133 """
134 Either the job has never run before or was never finished
135 """
136 return self.has_never_run or self.has_finished
137
138 @property
139 def is_expired(self):
140 """Whether the current job run has run out of time"""
141 if self.has_never_run:
142 # Job isn't even running at the moment and never ran before
143 return False
144 max_run_time = self.last_run_date + timedelta(hours=self.job_timeout)
145 return not self.is_runnable and timezone.now() >= max_run_time
146
147 @property
148 def is_public(self):
149 return self.result_visibility == self.RESULT_PUBLIC
150
151 @property
152 def notebook_name(self):
153 return self.notebook_s3_key.rsplit('/', 1)[-1]
154
155 @cached_property
156 def notebook_s3_object(self):
157 return self.provisioner.get(self.notebook_s3_key)
158
159 def get_absolute_url(self):
160 return reverse('jobs-detail', kwargs={'id': self.id})
161
162 def get_info(self):
163 if self.current_run_jobflow_id is None:
164 return None
165 return self.cluster_provisioner.info(self.current_run_jobflow_id)
166
167 def update_status(self):
168 """
169 Should be called to update latest cluster status
170 in `most_recent_status`.
171 """
172 info = self.get_info()
173 if info is not None:
174 self.most_recent_status = info['state']
175 return self.most_recent_status
176
177 def should_run(self):
178 """Whether the scheduled Spark job should run."""
179 if not self.is_runnable:
180 return False # the job is still running, don't start it again
181 now = timezone.now()
182 active = self.start_date <= now
183 if self.end_date is not None:
184 active = active and self.end_date >= now
185 if self.last_run_date is None:
186 # job has never run before
187 hours_since_last_run = float('inf')
188 else:
189 hours_since_last_run = (now - self.last_run_date).total_seconds() / 3600
190 can_run_now = hours_since_last_run >= self.interval_in_hours
191 return self.is_enabled and active and can_run_now
192
193 def run(self):
194 """Actually run the scheduled Spark job."""
195 # if the job ran before and is still running, don't start it again
196 if not self.is_runnable:
197 return
198 self.current_run_jobflow_id = self.provisioner.run(
199 user_email=self.created_by.email,
200 identifier=self.identifier,
201 emr_release=self.emr_release,
202 size=self.size,
203 notebook_key=self.notebook_s3_key,
204 is_public=self.is_public,
205 job_timeout=self.job_timeout,
206 )
207 self.last_run_date = timezone.now()
208 self.update_status()
209 self.save()
210
211 def terminate(self):
212 """Stop the currently running scheduled Spark job."""
213 if self.current_run_jobflow_id:
214 self.provisioner.stop(self.current_run_jobflow_id)
215
216 def cleanup(self):
217 """Remove the Spark job notebook file from S3"""
218 self.provisioner.remove(self.notebook_s3_key)
219
220 def delete(self, *args, **kwargs):
221 # make sure to shut down the cluster if it's currently running
222 self.terminate()
223 # make sure to clean up the job notebook from storage
224 self.cleanup()
225 super(SparkJob, self).delete(*args, **kwargs)
226
227 def get_results(self):
228 return self.provisioner.results(self.identifier, self.is_public)
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/atmo/jobs/models.py b/atmo/jobs/models.py
--- a/atmo/jobs/models.py
+++ b/atmo/jobs/models.py
@@ -210,8 +210,8 @@
def terminate(self):
"""Stop the currently running scheduled Spark job."""
- if self.current_run_jobflow_id:
- self.provisioner.stop(self.current_run_jobflow_id)
+ if self.is_expired and self.current_run_jobflow_id:
+ self.cluster_provisioner.stop(self.current_run_jobflow_id)
def cleanup(self):
"""Remove the Spark job notebook file from S3"""
| {"golden_diff": "diff --git a/atmo/jobs/models.py b/atmo/jobs/models.py\n--- a/atmo/jobs/models.py\n+++ b/atmo/jobs/models.py\n@@ -210,8 +210,8 @@\n \n def terminate(self):\n \"\"\"Stop the currently running scheduled Spark job.\"\"\"\n- if self.current_run_jobflow_id:\n- self.provisioner.stop(self.current_run_jobflow_id)\n+ if self.is_expired and self.current_run_jobflow_id:\n+ self.cluster_provisioner.stop(self.current_run_jobflow_id)\n \n def cleanup(self):\n \"\"\"Remove the Spark job notebook file from S3\"\"\"\n", "issue": "AttributeError: 'SparkJobProvisioner' object has no attribute 'stop'\nWhile testing locally in a branch I bumped into this. I didn't test it on master but looking at the code this seems like it may also be a problem.\r\n\r\nThe `SparkJob.terminate` calls `self.provisioner.stop(self.current_run_jobflow_id)`, where `self.provisioner` is a `SparkJobProvisioner`. But currently the `SparkJobProvisioner` has no `stop` method and neither does the parent class, it is only defined on the `ClusterProvisioner`. In my branch I changed the `terminate` method to call `self.cluster_provisioner.stop` instead and it fixed my issue locally.\r\n\n", "before_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom datetime import timedelta\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.functional import cached_property\n\nfrom atmo.clusters.provisioners import ClusterProvisioner\nfrom .provisioners import SparkJobProvisioner\nfrom ..clusters.models import Cluster\nfrom ..models import CreatedByModel, EMRReleaseModel\n\n\n@python_2_unicode_compatible\nclass SparkJob(EMRReleaseModel, CreatedByModel):\n INTERVAL_DAILY = 24\n INTERVAL_WEEKLY = INTERVAL_DAILY * 7\n INTERVAL_MONTHLY = INTERVAL_DAILY * 30\n INTERVAL_CHOICES = [\n (INTERVAL_DAILY, 'Daily'),\n (INTERVAL_WEEKLY, 'Weekly'),\n (INTERVAL_MONTHLY, 'Monthly'),\n ]\n RESULT_PRIVATE = 'private'\n RESULT_PUBLIC = 'public'\n RESULT_VISIBILITY_CHOICES = [\n (RESULT_PRIVATE, 'Private'),\n (RESULT_PUBLIC, 'Public'),\n ]\n FINAL_STATUS_LIST = Cluster.TERMINATED_STATUS_LIST + Cluster.FAILED_STATUS_LIST\n DEFAULT_STATUS = ''\n\n identifier = models.CharField(\n max_length=100,\n help_text=\"Job name, used to uniqely identify individual jobs.\",\n unique=True,\n )\n description = models.TextField(\n help_text='Job description.',\n default='',\n )\n notebook_s3_key = models.CharField(\n max_length=800,\n help_text=\"S3 key of the notebook after uploading it to the Spark code bucket.\"\n )\n result_visibility = models.CharField( # can currently be \"public\" or \"private\"\n max_length=50,\n help_text=\"Whether notebook results are uploaded to a public or private bucket\",\n choices=RESULT_VISIBILITY_CHOICES,\n default=RESULT_PRIVATE,\n )\n size = models.IntegerField(\n help_text=\"Number of computers to use to run the job.\"\n )\n interval_in_hours = models.IntegerField(\n help_text=\"Interval at which the job should run, in hours.\",\n choices=INTERVAL_CHOICES,\n default=INTERVAL_DAILY,\n )\n job_timeout = models.IntegerField(\n help_text=\"Number of hours before the job times out.\",\n )\n start_date = models.DateTimeField(\n help_text=\"Date/time that the job should start being scheduled to run.\"\n )\n end_date = 
models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job should stop being scheduled to run, null if no end date.\"\n )\n is_enabled = models.BooleanField(\n default=True,\n help_text=\"Whether the job should run or not.\"\n )\n last_run_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was last started, null if never.\"\n )\n current_run_jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n most_recent_status = models.CharField(\n max_length=50,\n blank=True,\n default=DEFAULT_STATUS,\n )\n\n class Meta:\n permissions = [\n ('view_sparkjob', 'Can view Spark job'),\n ]\n\n def __str__(self):\n return self.identifier\n\n def __repr__(self):\n return \"<SparkJob {} with {} nodes>\".format(self.identifier, self.size)\n\n @property\n def provisioner(self):\n return SparkJobProvisioner()\n\n # TEMPORARY till we have 1:1 relationship to cluster object\n # and we can then ask for spark_job.cluster.provisioner\n @property\n def cluster_provisioner(self):\n return ClusterProvisioner()\n\n @property\n def has_never_run(self):\n \"\"\"\n Whether the job has run before.\n Looks at both the cluster status and our own record when\n we asked it to run.\n \"\"\"\n return (self.most_recent_status == self.DEFAULT_STATUS or\n self.last_run_date is None)\n\n @property\n def has_finished(self):\n \"\"\"Whether the job's cluster is terminated or failed\"\"\"\n return self.most_recent_status in self.FINAL_STATUS_LIST\n\n @property\n def is_runnable(self):\n \"\"\"\n Either the job has never run before or was never finished\n \"\"\"\n return self.has_never_run or self.has_finished\n\n @property\n def is_expired(self):\n \"\"\"Whether the current job run has run out of time\"\"\"\n if self.has_never_run:\n # Job isn't even running at the moment and never ran before\n return False\n max_run_time = self.last_run_date + timedelta(hours=self.job_timeout)\n return not self.is_runnable and timezone.now() >= max_run_time\n\n @property\n def is_public(self):\n return self.result_visibility == self.RESULT_PUBLIC\n\n @property\n def notebook_name(self):\n return self.notebook_s3_key.rsplit('/', 1)[-1]\n\n @cached_property\n def notebook_s3_object(self):\n return self.provisioner.get(self.notebook_s3_key)\n\n def get_absolute_url(self):\n return reverse('jobs-detail', kwargs={'id': self.id})\n\n def get_info(self):\n if self.current_run_jobflow_id is None:\n return None\n return self.cluster_provisioner.info(self.current_run_jobflow_id)\n\n def update_status(self):\n \"\"\"\n Should be called to update latest cluster status\n in `most_recent_status`.\n \"\"\"\n info = self.get_info()\n if info is not None:\n self.most_recent_status = info['state']\n return self.most_recent_status\n\n def should_run(self):\n \"\"\"Whether the scheduled Spark job should run.\"\"\"\n if not self.is_runnable:\n return False # the job is still running, don't start it again\n now = timezone.now()\n active = self.start_date <= now\n if self.end_date is not None:\n active = active and self.end_date >= now\n if self.last_run_date is None:\n # job has never run before\n hours_since_last_run = float('inf')\n else:\n hours_since_last_run = (now - self.last_run_date).total_seconds() / 3600\n can_run_now = hours_since_last_run >= self.interval_in_hours\n return self.is_enabled and active and can_run_now\n\n def run(self):\n \"\"\"Actually run the scheduled Spark job.\"\"\"\n # if the job ran before and is still running, don't start it again\n if not 
self.is_runnable:\n return\n self.current_run_jobflow_id = self.provisioner.run(\n user_email=self.created_by.email,\n identifier=self.identifier,\n emr_release=self.emr_release,\n size=self.size,\n notebook_key=self.notebook_s3_key,\n is_public=self.is_public,\n job_timeout=self.job_timeout,\n )\n self.last_run_date = timezone.now()\n self.update_status()\n self.save()\n\n def terminate(self):\n \"\"\"Stop the currently running scheduled Spark job.\"\"\"\n if self.current_run_jobflow_id:\n self.provisioner.stop(self.current_run_jobflow_id)\n\n def cleanup(self):\n \"\"\"Remove the Spark job notebook file from S3\"\"\"\n self.provisioner.remove(self.notebook_s3_key)\n\n def delete(self, *args, **kwargs):\n # make sure to shut down the cluster if it's currently running\n self.terminate()\n # make sure to clean up the job notebook from storage\n self.cleanup()\n super(SparkJob, self).delete(*args, **kwargs)\n\n def get_results(self):\n return self.provisioner.results(self.identifier, self.is_public)\n", "path": "atmo/jobs/models.py"}], "after_files": [{"content": "# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\nfrom datetime import timedelta\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils.functional import cached_property\n\nfrom atmo.clusters.provisioners import ClusterProvisioner\nfrom .provisioners import SparkJobProvisioner\nfrom ..clusters.models import Cluster\nfrom ..models import CreatedByModel, EMRReleaseModel\n\n\n@python_2_unicode_compatible\nclass SparkJob(EMRReleaseModel, CreatedByModel):\n INTERVAL_DAILY = 24\n INTERVAL_WEEKLY = INTERVAL_DAILY * 7\n INTERVAL_MONTHLY = INTERVAL_DAILY * 30\n INTERVAL_CHOICES = [\n (INTERVAL_DAILY, 'Daily'),\n (INTERVAL_WEEKLY, 'Weekly'),\n (INTERVAL_MONTHLY, 'Monthly'),\n ]\n RESULT_PRIVATE = 'private'\n RESULT_PUBLIC = 'public'\n RESULT_VISIBILITY_CHOICES = [\n (RESULT_PRIVATE, 'Private'),\n (RESULT_PUBLIC, 'Public'),\n ]\n FINAL_STATUS_LIST = Cluster.TERMINATED_STATUS_LIST + Cluster.FAILED_STATUS_LIST\n DEFAULT_STATUS = ''\n\n identifier = models.CharField(\n max_length=100,\n help_text=\"Job name, used to uniqely identify individual jobs.\",\n unique=True,\n )\n description = models.TextField(\n help_text='Job description.',\n default='',\n )\n notebook_s3_key = models.CharField(\n max_length=800,\n help_text=\"S3 key of the notebook after uploading it to the Spark code bucket.\"\n )\n result_visibility = models.CharField( # can currently be \"public\" or \"private\"\n max_length=50,\n help_text=\"Whether notebook results are uploaded to a public or private bucket\",\n choices=RESULT_VISIBILITY_CHOICES,\n default=RESULT_PRIVATE,\n )\n size = models.IntegerField(\n help_text=\"Number of computers to use to run the job.\"\n )\n interval_in_hours = models.IntegerField(\n help_text=\"Interval at which the job should run, in hours.\",\n choices=INTERVAL_CHOICES,\n default=INTERVAL_DAILY,\n )\n job_timeout = models.IntegerField(\n help_text=\"Number of hours before the job times out.\",\n )\n start_date = models.DateTimeField(\n help_text=\"Date/time that the job should start being scheduled to run.\"\n )\n end_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job should stop being scheduled to run, null if no end 
date.\"\n )\n is_enabled = models.BooleanField(\n default=True,\n help_text=\"Whether the job should run or not.\"\n )\n last_run_date = models.DateTimeField(\n blank=True,\n null=True,\n help_text=\"Date/time that the job was last started, null if never.\"\n )\n current_run_jobflow_id = models.CharField(\n max_length=50,\n blank=True,\n null=True,\n )\n most_recent_status = models.CharField(\n max_length=50,\n blank=True,\n default=DEFAULT_STATUS,\n )\n\n class Meta:\n permissions = [\n ('view_sparkjob', 'Can view Spark job'),\n ]\n\n def __str__(self):\n return self.identifier\n\n def __repr__(self):\n return \"<SparkJob {} with {} nodes>\".format(self.identifier, self.size)\n\n @property\n def provisioner(self):\n return SparkJobProvisioner()\n\n # TEMPORARY till we have 1:1 relationship to cluster object\n # and we can then ask for spark_job.cluster.provisioner\n @property\n def cluster_provisioner(self):\n return ClusterProvisioner()\n\n @property\n def has_never_run(self):\n \"\"\"\n Whether the job has run before.\n Looks at both the cluster status and our own record when\n we asked it to run.\n \"\"\"\n return (self.most_recent_status == self.DEFAULT_STATUS or\n self.last_run_date is None)\n\n @property\n def has_finished(self):\n \"\"\"Whether the job's cluster is terminated or failed\"\"\"\n return self.most_recent_status in self.FINAL_STATUS_LIST\n\n @property\n def is_runnable(self):\n \"\"\"\n Either the job has never run before or was never finished\n \"\"\"\n return self.has_never_run or self.has_finished\n\n @property\n def is_expired(self):\n \"\"\"Whether the current job run has run out of time\"\"\"\n if self.has_never_run:\n # Job isn't even running at the moment and never ran before\n return False\n max_run_time = self.last_run_date + timedelta(hours=self.job_timeout)\n return not self.is_runnable and timezone.now() >= max_run_time\n\n @property\n def is_public(self):\n return self.result_visibility == self.RESULT_PUBLIC\n\n @property\n def notebook_name(self):\n return self.notebook_s3_key.rsplit('/', 1)[-1]\n\n @cached_property\n def notebook_s3_object(self):\n return self.provisioner.get(self.notebook_s3_key)\n\n def get_absolute_url(self):\n return reverse('jobs-detail', kwargs={'id': self.id})\n\n def get_info(self):\n if self.current_run_jobflow_id is None:\n return None\n return self.cluster_provisioner.info(self.current_run_jobflow_id)\n\n def update_status(self):\n \"\"\"\n Should be called to update latest cluster status\n in `most_recent_status`.\n \"\"\"\n info = self.get_info()\n if info is not None:\n self.most_recent_status = info['state']\n return self.most_recent_status\n\n def should_run(self):\n \"\"\"Whether the scheduled Spark job should run.\"\"\"\n if not self.is_runnable:\n return False # the job is still running, don't start it again\n now = timezone.now()\n active = self.start_date <= now\n if self.end_date is not None:\n active = active and self.end_date >= now\n if self.last_run_date is None:\n # job has never run before\n hours_since_last_run = float('inf')\n else:\n hours_since_last_run = (now - self.last_run_date).total_seconds() / 3600\n can_run_now = hours_since_last_run >= self.interval_in_hours\n return self.is_enabled and active and can_run_now\n\n def run(self):\n \"\"\"Actually run the scheduled Spark job.\"\"\"\n # if the job ran before and is still running, don't start it again\n if not self.is_runnable:\n return\n self.current_run_jobflow_id = self.provisioner.run(\n user_email=self.created_by.email,\n 
identifier=self.identifier,\n emr_release=self.emr_release,\n size=self.size,\n notebook_key=self.notebook_s3_key,\n is_public=self.is_public,\n job_timeout=self.job_timeout,\n )\n self.last_run_date = timezone.now()\n self.update_status()\n self.save()\n\n def terminate(self):\n \"\"\"Stop the currently running scheduled Spark job.\"\"\"\n if self.is_expired and self.current_run_jobflow_id:\n self.cluster_provisioner.stop(self.current_run_jobflow_id)\n\n def cleanup(self):\n \"\"\"Remove the Spark job notebook file from S3\"\"\"\n self.provisioner.remove(self.notebook_s3_key)\n\n def delete(self, *args, **kwargs):\n # make sure to shut down the cluster if it's currently running\n self.terminate()\n # make sure to clean up the job notebook from storage\n self.cleanup()\n super(SparkJob, self).delete(*args, **kwargs)\n\n def get_results(self):\n return self.provisioner.results(self.identifier, self.is_public)\n", "path": "atmo/jobs/models.py"}]} | 2,707 | 137 |
gh_patches_debug_48804 | rasdani/github-patches | git_diff | nonebot__nonebot2-61 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix the issue where the signal to stop event propagation has no effect
The exception raised should be an instance of StopPropagation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/message.py`
Content:
```
1 """
2 事件处理
3 ========
4
5 NoneBot 内部处理并按优先级分发事件给所有事件响应器,提供了多个插槽以进行事件的预处理等。
6 """
7
8 import asyncio
9 from datetime import datetime
10
11 from nonebot.log import logger
12 from nonebot.rule import TrieRule
13 from nonebot.utils import escape_tag
14 from nonebot.matcher import matchers, Matcher
15 from nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event
16 from nonebot.exception import IgnoredException, StopPropagation
17 from nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor
18
19 _event_preprocessors: Set[EventPreProcessor] = set()
20 _event_postprocessors: Set[EventPostProcessor] = set()
21 _run_preprocessors: Set[RunPreProcessor] = set()
22 _run_postprocessors: Set[RunPostProcessor] = set()
23
24
25 def event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:
26 """
27 :说明:
28 事件预处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之前执行。
29 :参数:
30 事件预处理函数接收三个参数。
31
32 * ``bot: Bot``: Bot 对象
33 * ``event: Event``: Event 对象
34 * ``state: dict``: 当前 State
35 """
36 _event_preprocessors.add(func)
37 return func
38
39
40 def event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:
41 """
42 :说明:
43 事件后处理。装饰一个函数,使它在每次接收到事件并分发给各响应器之后执行。
44 :参数:
45 事件后处理函数接收三个参数。
46
47 * ``bot: Bot``: Bot 对象
48 * ``event: Event``: Event 对象
49 * ``state: dict``: 当前事件运行前 State
50 """
51 _event_postprocessors.add(func)
52 return func
53
54
55 def run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:
56 """
57 :说明:
58 运行预处理。装饰一个函数,使它在每次事件响应器运行前执行。
59 :参数:
60 运行预处理函数接收四个参数。
61
62 * ``matcher: Matcher``: 当前要运行的事件响应器
63 * ``bot: Bot``: Bot 对象
64 * ``event: Event``: Event 对象
65 * ``state: dict``: 当前 State
66 """
67 _run_preprocessors.add(func)
68 return func
69
70
71 def run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:
72 """
73 :说明:
74 运行后处理。装饰一个函数,使它在每次事件响应器运行后执行。
75 :参数:
76 运行后处理函数接收五个参数。
77
78 * ``matcher: Matcher``: 运行完毕的事件响应器
79 * ``exception: Optional[Exception]``: 事件响应器运行错误(如果存在)
80 * ``bot: Bot``: Bot 对象
81 * ``event: Event``: Event 对象
82 * ``state: dict``: 当前 State
83 """
84 _run_postprocessors.add(func)
85 return func
86
87
88 async def _check_matcher(priority: int, bot: Bot, event: Event,
89 state: dict) -> Iterable[Type[Matcher]]:
90 current_matchers = matchers[priority].copy()
91
92 async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,
93 state: dict) -> Optional[Type[Matcher]]:
94 try:
95 if await Matcher.check_perm(
96 bot, event) and await Matcher.check_rule(bot, event, state):
97 return Matcher
98 except Exception as e:
99 logger.opt(colors=True, exception=e).error(
100 f"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>"
101 )
102 return None
103
104 async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:
105 if Matcher.temp or (Matcher.expire_time and
106 datetime.now() > Matcher.expire_time):
107 return Matcher
108 return None
109
110 checking_tasks = [
111 _check(Matcher, bot, event, state) for Matcher in current_matchers
112 ]
113 checking_expire_tasks = [
114 _check_expire(Matcher) for Matcher in current_matchers
115 ]
116 results = await asyncio.gather(*checking_tasks, return_exceptions=True)
117 expired = await asyncio.gather(*checking_expire_tasks)
118 for expired_matcher in filter(lambda x: x and x in results, expired):
119 try:
120 matchers[priority].remove(expired_matcher)
121 except Exception:
122 pass
123 return filter(lambda x: x, results)
124
125
126 async def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,
127 state: dict) -> Union[None, NoReturn]:
128 logger.info(f"Event will be handled by {Matcher}")
129
130 matcher = Matcher()
131
132 coros = list(
133 map(lambda x: x(matcher, bot, event, state), _run_preprocessors))
134 if coros:
135 try:
136 await asyncio.gather(*coros)
137 except IgnoredException:
138 logger.opt(colors=True).info(
139 f"Matcher {matcher} running is <b>cancelled</b>")
140 return
141 except Exception as e:
142 logger.opt(colors=True, exception=e).error(
143 "<r><bg #f8bbd0>Error when running RunPreProcessors. "
144 "Running cancelled!</bg #f8bbd0></r>")
145 return
146
147 exception = None
148
149 try:
150 logger.debug(f"Running matcher {matcher}")
151 await matcher.run(bot, event, state)
152 except Exception as e:
153 logger.opt(colors=True, exception=e).error(
154 f"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>"
155 )
156 exception = e
157
158 coros = list(
159 map(lambda x: x(matcher, exception, bot, event, state),
160 _run_postprocessors))
161 if coros:
162 try:
163 await asyncio.gather(*coros)
164 except Exception as e:
165 logger.opt(colors=True, exception=e).error(
166 "<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>"
167 )
168
169 if matcher.block:
170 raise StopPropagation
171
172
173 async def handle_event(bot: Bot, event: Event):
174 """
175 :说明:
176 处理一个事件。调用该函数以实现分发事件。
177 :参数:
178 * ``bot: Bot``: Bot 对象
179 * ``event: Event``: Event 对象
180 :示例:
181
182 .. code-block:: python
183
184 import asyncio
185 asyncio.create_task(handle_event(bot, event))
186 """
187 show_log = True
188 log_msg = f"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: "
189 if event.type == "message":
190 log_msg += f"Message {event.id} from "
191 log_msg += str(event.user_id)
192 if event.detail_type == "group":
193 log_msg += f"@[群:{event.group_id}]:"
194
195 log_msg += ' "' + "".join(
196 map(
197 lambda x: escape_tag(str(x))
198 if x.type == "text" else f"<le>{escape_tag(str(x))}</le>",
199 event.message)) + '"' # type: ignore
200 elif event.type == "notice":
201 log_msg += f"Notice {event.raw_event}"
202 elif event.type == "request":
203 log_msg += f"Request {event.raw_event}"
204 elif event.type == "meta_event":
205 # log_msg += f"MetaEvent {event.detail_type}"
206 show_log = False
207 if show_log:
208 logger.opt(colors=True).info(log_msg)
209
210 state = {}
211 coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))
212 if coros:
213 try:
214 logger.debug("Running PreProcessors...")
215 await asyncio.gather(*coros)
216 except IgnoredException:
217 logger.opt(
218 colors=True).info(f"Event {event.name} is <b>ignored</b>")
219 return
220 except Exception as e:
221 logger.opt(colors=True, exception=e).error(
222 "<r><bg #f8bbd0>Error when running EventPreProcessors. "
223 "Event ignored!</bg #f8bbd0></r>")
224 return
225
226 # Trie Match
227 _, _ = TrieRule.get_value(bot, event, state)
228
229 break_flag = False
230 for priority in sorted(matchers.keys()):
231 if break_flag:
232 break
233
234 if show_log:
235 logger.debug(f"Checking for matchers in priority {priority}...")
236
237 run_matchers = await _check_matcher(priority, bot, event, state)
238
239 pending_tasks = [
240 _run_matcher(matcher, bot, event, state.copy())
241 for matcher in run_matchers
242 ]
243
244 results = await asyncio.gather(*pending_tasks, return_exceptions=True)
245
246 for result in results:
247 if result is StopPropagation:
248 if not break_flag:
249 break_flag = True
250 logger.debug("Stop event propagation")
251
252 coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))
253 if coros:
254 try:
255 logger.debug("Running PostProcessors...")
256 await asyncio.gather(*coros)
257 except Exception as e:
258 logger.opt(colors=True, exception=e).error(
259 "<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>"
260 )
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nonebot/message.py b/nonebot/message.py
--- a/nonebot/message.py
+++ b/nonebot/message.py
@@ -244,7 +244,7 @@
results = await asyncio.gather(*pending_tasks, return_exceptions=True)
for result in results:
- if result is StopPropagation:
+ if isinstance(result, StopPropagation):
if not break_flag:
break_flag = True
logger.debug("Stop event propagation")
| {"golden_diff": "diff --git a/nonebot/message.py b/nonebot/message.py\n--- a/nonebot/message.py\n+++ b/nonebot/message.py\n@@ -244,7 +244,7 @@\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n \n for result in results:\n- if result is StopPropagation:\n+ if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n", "issue": "\u4fee\u590d\u963b\u6b62\u4e8b\u4ef6\u4f20\u9012\u4fe1\u53f7\u65e0\u6548\u7684\u95ee\u9898\n\u629b\u51fa\u7684\u5f02\u5e38\u5e94\u4e3a StopPropagation \u7684\u4e00\u4e2a\u5b9e\u4f8b\n", "before_files": [{"content": "\"\"\"\n\u4e8b\u4ef6\u5904\u7406\n========\n\nNoneBot \u5185\u90e8\u5904\u7406\u5e76\u6309\u4f18\u5148\u7ea7\u5206\u53d1\u4e8b\u4ef6\u7ed9\u6240\u6709\u4e8b\u4ef6\u54cd\u5e94\u5668\uff0c\u63d0\u4f9b\u4e86\u591a\u4e2a\u63d2\u69fd\u4ee5\u8fdb\u884c\u4e8b\u4ef6\u7684\u9884\u5904\u7406\u7b49\u3002\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d\u4e8b\u4ef6\u8fd0\u884c\u524d State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u56db\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u5f53\u524d\u8981\u8fd0\u884c\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n 
\"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e94\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u8fd0\u884c\u5b8c\u6bd5\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``exception: Optional[Exception]``: \u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u9519\u8bef\uff08\u5982\u679c\u5b58\u5728\uff09\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired = await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :\u8bf4\u660e:\n \u5904\u7406\u4e00\u4e2a\u4e8b\u4ef6\u3002\u8c03\u7528\u8be5\u51fd\u6570\u4ee5\u5b9e\u73b0\u5206\u53d1\u4e8b\u4ef6\u3002\n :\u53c2\u6570:\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n :\u793a\u4f8b:\n\n .. 
code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[\u7fa4:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. \"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if result is StopPropagation:\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py"}], "after_files": [{"content": "\"\"\"\n\u4e8b\u4ef6\u5904\u7406\n========\n\nNoneBot \u5185\u90e8\u5904\u7406\u5e76\u6309\u4f18\u5148\u7ea7\u5206\u53d1\u4e8b\u4ef6\u7ed9\u6240\u6709\u4e8b\u4ef6\u54cd\u5e94\u5668\uff0c\u63d0\u4f9b\u4e86\u591a\u4e2a\u63d2\u69fd\u4ee5\u8fdb\u884c\u4e8b\u4ef6\u7684\u9884\u5904\u7406\u7b49\u3002\n\"\"\"\n\nimport asyncio\nfrom datetime import datetime\n\nfrom nonebot.log import logger\nfrom nonebot.rule import TrieRule\nfrom nonebot.utils import escape_tag\nfrom nonebot.matcher import matchers, Matcher\nfrom nonebot.typing import Set, Type, Union, Optional, Iterable, NoReturn, Bot, Event\nfrom nonebot.exception import IgnoredException, StopPropagation\nfrom nonebot.typing import EventPreProcessor, RunPreProcessor, EventPostProcessor, RunPostProcessor\n\n_event_preprocessors: Set[EventPreProcessor] = set()\n_event_postprocessors: Set[EventPostProcessor] = set()\n_run_preprocessors: Set[RunPreProcessor] = set()\n_run_postprocessors: Set[RunPostProcessor] = set()\n\n\ndef event_preprocessor(func: EventPreProcessor) -> EventPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n 
\u4e8b\u4ef6\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _event_preprocessors.add(func)\n return func\n\n\ndef event_postprocessor(func: EventPostProcessor) -> EventPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u4e8b\u4ef6\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u63a5\u6536\u5230\u4e8b\u4ef6\u5e76\u5206\u53d1\u7ed9\u5404\u54cd\u5e94\u5668\u4e4b\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u4e8b\u4ef6\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e09\u4e2a\u53c2\u6570\u3002\n\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d\u4e8b\u4ef6\u8fd0\u884c\u524d State\n \"\"\"\n _event_postprocessors.add(func)\n return func\n\n\ndef run_preprocessor(func: RunPreProcessor) -> RunPreProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u9884\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u524d\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u9884\u5904\u7406\u51fd\u6570\u63a5\u6536\u56db\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u5f53\u524d\u8981\u8fd0\u884c\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_preprocessors.add(func)\n return func\n\n\ndef run_postprocessor(func: RunPostProcessor) -> RunPostProcessor:\n \"\"\"\n :\u8bf4\u660e:\n \u8fd0\u884c\u540e\u5904\u7406\u3002\u88c5\u9970\u4e00\u4e2a\u51fd\u6570\uff0c\u4f7f\u5b83\u5728\u6bcf\u6b21\u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u540e\u6267\u884c\u3002\n :\u53c2\u6570:\n \u8fd0\u884c\u540e\u5904\u7406\u51fd\u6570\u63a5\u6536\u4e94\u4e2a\u53c2\u6570\u3002\n\n * ``matcher: Matcher``: \u8fd0\u884c\u5b8c\u6bd5\u7684\u4e8b\u4ef6\u54cd\u5e94\u5668\n * ``exception: Optional[Exception]``: \u4e8b\u4ef6\u54cd\u5e94\u5668\u8fd0\u884c\u9519\u8bef\uff08\u5982\u679c\u5b58\u5728\uff09\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n * ``state: dict``: \u5f53\u524d State\n \"\"\"\n _run_postprocessors.add(func)\n return func\n\n\nasync def _check_matcher(priority: int, bot: Bot, event: Event,\n state: dict) -> Iterable[Type[Matcher]]:\n current_matchers = matchers[priority].copy()\n\n async def _check(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Optional[Type[Matcher]]:\n try:\n if await Matcher.check_perm(\n bot, event) and await Matcher.check_rule(bot, event, state):\n return Matcher\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Rule check failed for {Matcher}.</bg #f8bbd0></r>\"\n )\n return None\n\n async def _check_expire(Matcher: Type[Matcher]) -> Optional[Type[Matcher]]:\n if Matcher.temp or (Matcher.expire_time and\n datetime.now() > Matcher.expire_time):\n return Matcher\n return None\n\n checking_tasks = [\n _check(Matcher, bot, event, state) for Matcher in current_matchers\n ]\n checking_expire_tasks = [\n _check_expire(Matcher) for Matcher in current_matchers\n ]\n results = await asyncio.gather(*checking_tasks, return_exceptions=True)\n expired 
= await asyncio.gather(*checking_expire_tasks)\n for expired_matcher in filter(lambda x: x and x in results, expired):\n try:\n matchers[priority].remove(expired_matcher)\n except Exception:\n pass\n return filter(lambda x: x, results)\n\n\nasync def _run_matcher(Matcher: Type[Matcher], bot: Bot, event: Event,\n state: dict) -> Union[None, NoReturn]:\n logger.info(f\"Event will be handled by {Matcher}\")\n\n matcher = Matcher()\n\n coros = list(\n map(lambda x: x(matcher, bot, event, state), _run_preprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(colors=True).info(\n f\"Matcher {matcher} running is <b>cancelled</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPreProcessors. \"\n \"Running cancelled!</bg #f8bbd0></r>\")\n return\n\n exception = None\n\n try:\n logger.debug(f\"Running matcher {matcher}\")\n await matcher.run(bot, event, state)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f\"<r><bg #f8bbd0>Running matcher {matcher} failed.</bg #f8bbd0></r>\"\n )\n exception = e\n\n coros = list(\n map(lambda x: x(matcher, exception, bot, event, state),\n _run_postprocessors))\n if coros:\n try:\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running RunPostProcessors</bg #f8bbd0></r>\"\n )\n\n if matcher.block:\n raise StopPropagation\n\n\nasync def handle_event(bot: Bot, event: Event):\n \"\"\"\n :\u8bf4\u660e:\n \u5904\u7406\u4e00\u4e2a\u4e8b\u4ef6\u3002\u8c03\u7528\u8be5\u51fd\u6570\u4ee5\u5b9e\u73b0\u5206\u53d1\u4e8b\u4ef6\u3002\n :\u53c2\u6570:\n * ``bot: Bot``: Bot \u5bf9\u8c61\n * ``event: Event``: Event \u5bf9\u8c61\n :\u793a\u4f8b:\n\n .. code-block:: python\n\n import asyncio\n asyncio.create_task(handle_event(bot, event))\n \"\"\"\n show_log = True\n log_msg = f\"<m>{bot.type.upper()} </m>| {event.self_id} [{event.name}]: \"\n if event.type == \"message\":\n log_msg += f\"Message {event.id} from \"\n log_msg += str(event.user_id)\n if event.detail_type == \"group\":\n log_msg += f\"@[\u7fa4:{event.group_id}]:\"\n\n log_msg += ' \"' + \"\".join(\n map(\n lambda x: escape_tag(str(x))\n if x.type == \"text\" else f\"<le>{escape_tag(str(x))}</le>\",\n event.message)) + '\"' # type: ignore\n elif event.type == \"notice\":\n log_msg += f\"Notice {event.raw_event}\"\n elif event.type == \"request\":\n log_msg += f\"Request {event.raw_event}\"\n elif event.type == \"meta_event\":\n # log_msg += f\"MetaEvent {event.detail_type}\"\n show_log = False\n if show_log:\n logger.opt(colors=True).info(log_msg)\n\n state = {}\n coros = list(map(lambda x: x(bot, event, state), _event_preprocessors))\n if coros:\n try:\n logger.debug(\"Running PreProcessors...\")\n await asyncio.gather(*coros)\n except IgnoredException:\n logger.opt(\n colors=True).info(f\"Event {event.name} is <b>ignored</b>\")\n return\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPreProcessors. 
\"\n \"Event ignored!</bg #f8bbd0></r>\")\n return\n\n # Trie Match\n _, _ = TrieRule.get_value(bot, event, state)\n\n break_flag = False\n for priority in sorted(matchers.keys()):\n if break_flag:\n break\n\n if show_log:\n logger.debug(f\"Checking for matchers in priority {priority}...\")\n\n run_matchers = await _check_matcher(priority, bot, event, state)\n\n pending_tasks = [\n _run_matcher(matcher, bot, event, state.copy())\n for matcher in run_matchers\n ]\n\n results = await asyncio.gather(*pending_tasks, return_exceptions=True)\n\n for result in results:\n if isinstance(result, StopPropagation):\n if not break_flag:\n break_flag = True\n logger.debug(\"Stop event propagation\")\n\n coros = list(map(lambda x: x(bot, event, state), _event_postprocessors))\n if coros:\n try:\n logger.debug(\"Running PostProcessors...\")\n await asyncio.gather(*coros)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n \"<r><bg #f8bbd0>Error when running EventPostProcessors</bg #f8bbd0></r>\"\n )\n", "path": "nonebot/message.py"}]} | 3,097 | 105 |
gh_patches_debug_19778 | rasdani/github-patches | git_diff | Mailu__Mailu-1198 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make access logging of admin server dependent on log level
Currently, admin logs all access logs to the console, regardless of which log level is set.
This is caused by starting gunicorn with `--access-logfile -`. This flag should be switched depending on the log level that is set, e.g. on for levels of INFO and more verbose (DEBUG/INFO) and off for WARNING and less verbose.
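A rough sketch of what such a switch could look like (illustrative only; it assumes the desired verbosity is exposed through a `LOG_LEVEL` environment variable):
```
import os

# Assumption: the desired verbosity is exposed through a LOG_LEVEL environment variable.
log_level = os.environ.get("LOG_LEVEL", "WARNING").upper()
access_log = "--access-logfile - " if log_level in ("DEBUG", "INFO") else ""

start_command = (
    "gunicorn -w 4 -b :80 "
    + access_log
    + "--error-logfile - --preload 'mailu:create_app()'"
)
os.system(start_command)
```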
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/admin/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4
5 os.system("flask mailu advertise")
6 os.system("flask db upgrade")
7
8 account = os.environ.get("INITIAL_ADMIN_ACCOUNT")
9 domain = os.environ.get("INITIAL_ADMIN_DOMAIN")
10 password = os.environ.get("INITIAL_ADMIN_PW")
11
12 if account is not None and domain is not None and password is not None:
13 mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
14 os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
15
16 os.system("gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'")
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/admin/start.py b/core/admin/start.py
--- a/core/admin/start.py
+++ b/core/admin/start.py
@@ -1,6 +1,10 @@
#!/usr/bin/python3
import os
+import logging as log
+import sys
+
+log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "INFO"))
os.system("flask mailu advertise")
os.system("flask db upgrade")
@@ -11,6 +15,14 @@
if account is not None and domain is not None and password is not None:
mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing")
+ log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode)
os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode))
-os.system("gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'")
+start_command="".join([
+ "gunicorn -w 4 -b :80 ",
+ "--access-logfile - " if (log.root.level<=log.INFO) else "",
+ "--error-logfile - ",
+ "--preload ",
+ "'mailu:create_app()'"])
+
+os.system(start_command)
| {"golden_diff": "diff --git a/core/admin/start.py b/core/admin/start.py\n--- a/core/admin/start.py\n+++ b/core/admin/start.py\n@@ -1,6 +1,10 @@\n #!/usr/bin/python3\n \n import os\n+import logging as log\n+import sys\n+\n+log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n \n os.system(\"flask mailu advertise\")\n os.system(\"flask db upgrade\")\n@@ -11,6 +15,14 @@\n \n if account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n+ log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n \n-os.system(\"gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'\")\n+start_command=\"\".join([\n+ \"gunicorn -w 4 -b :80 \",\n+ \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n+ \"--error-logfile - \",\n+ \"--preload \",\n+ \"'mailu:create_app()'\"])\n+\n+os.system(start_command)\n", "issue": "Make access logging of admin server dependent on log level\nCurrently, admin logs all access logs to the console, regardless of which log level is set.\r\nThis is caused by starting gunicorn with `--access-logfile -`. This should be switched dependent on the log level set, e.g. on for levels >= INFO and off for <=WARNING\r\n \n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\nos.system(\"gunicorn -w 4 -b :80 --access-logfile - --error-logfile - --preload 'mailu:create_app()'\")\n", "path": "core/admin/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\nstart_command=\"\".join([\n \"gunicorn -w 4 -b :80 \",\n \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n \"--error-logfile - \",\n \"--preload \",\n \"'mailu:create_app()'\"])\n\nos.system(start_command)\n", "path": "core/admin/start.py"}]} | 516 | 304 |
gh_patches_debug_36656 | rasdani/github-patches | git_diff | Pyomo__pyomo-2904 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Request for an `identify_inconsistent_units` method
## Summary
The Pyomo unit consistency checks currently have an `assert_units_consistent` function which will iterate over the constraints in a model and raise a `UnitsError` if a constraint is encountered with inconsistent units. Whilst this is useful for testing, in most cases a model developer would like to get a list of all constraints with unit consistency issues, not just an exception for the first issue encountered.
Additionally, the `assert_units_consistent` function has some `print` statements embedded in it which clutter the output somewhat and could probably be removed (the proposed `identify_inconsistent_units` method would provide a means for getting the full list of problematic constraints in its place).
### Rationale
I am writing a tool as part of https://github.com/IDAES/idaes-pse/issues/1208 which aims to provide users with a summary of all (or at least most) of the issues in their model, one of which is unit consistency.
### Description
For the time being, I am using:
```
constraints_with_inconsistent_units = ComponentSet()
for c in self.model.component_data_objects(Constraint, descend_into=True):
try:
assert_units_consistent(c)
except UnitsError:
constraints_with_inconsistent_units.add(c)
return constraints_with_inconsistent_units
```
This generates the desired list, but produces a line of text for each failing constraint; e.g. `Error in units when checking b.c4`.
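Purely as an illustration of the requested interface (the function name, its placement, and the exact set of component types covered are assumptions), the helper could look roughly like this:
```
from pyomo.common.collections import ComponentSet
from pyomo.core.base import Constraint, Expression, Objective
from pyomo.core.base.units_container import UnitsError
from pyomo.util.check_units import assert_units_consistent


def identify_inconsistent_units(block):
    """Return a ComponentSet of components on `block` whose units do not agree."""
    inconsistent = ComponentSet()
    for obj in block.component_data_objects(
        [Constraint, Expression, Objective], descend_into=True
    ):
        try:
            assert_units_consistent(obj)
        except UnitsError:
            inconsistent.add(obj)
    return inconsistent
```
Until the `print` calls inside `assert_units_consistent` are removed or demoted to a logger, a helper built this way would still emit one line per failing component, which is the second part of the request.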
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/util/check_units.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # __________________________________________________________________________
11 #
12 #
13 """ Pyomo Units Checking Module
14 This module has some helpful methods to support checking units on Pyomo
15 module objects.
16 """
17 from pyomo.core.base.units_container import units, UnitsError
18 from pyomo.core.base import (
19 Objective,
20 Constraint,
21 Var,
22 Param,
23 Suffix,
24 Set,
25 SetOf,
26 RangeSet,
27 Block,
28 ExternalFunction,
29 Expression,
30 value,
31 BooleanVar,
32 BuildAction,
33 BuildCheck,
34 )
35 from pyomo.dae import ContinuousSet, DerivativeVar
36 from pyomo.network import Port, Arc
37 from pyomo.mpec import Complementarity
38 from pyomo.gdp import Disjunct, Disjunction
39 from pyomo.core.expr.template_expr import IndexTemplate
40 from pyomo.core.expr.numvalue import native_types
41 from pyomo.util.components import iter_component
42
43
44 def check_units_equivalent(*args):
45 """
46 Returns True if the units associated with each of the
47 expressions passed as arguments are all equivalent (and False
48 otherwise).
49
50 Note that this method will raise an exception if the units are
51 inconsistent within an expression (since the units for that
52 expression are not valid).
53
54 Parameters
55 ----------
56 args : an argument list of Pyomo expressions
57
58 Returns
59 -------
60 bool : True if all the expressions passed as arguments have the same units
61 """
62 try:
63 assert_units_equivalent(*args)
64 return True
65 except UnitsError:
66 return False
67
68
69 def assert_units_equivalent(*args):
70 """
71 Raise an exception if the units are inconsistent within an
72 expression, or not equivalent across all the passed
73 expressions.
74
75 Parameters
76 ----------
77 args : an argument list of Pyomo expressions
78 The Pyomo expressions to test
79
80 Raises
81 ------
82 :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`
83 """
84 # this call will raise an exception if an inconsistency is found
85 pint_units = [units._get_pint_units(arg) for arg in args]
86 pint_unit_compare = pint_units[0]
87 for pint_unit in pint_units:
88 if not units._equivalent_pint_units(pint_unit_compare, pint_unit):
89 raise UnitsError(
90 "Units between {} and {} are not consistent.".format(
91 str(pint_unit_compare), str(pint_unit)
92 )
93 )
94
95
96 def _assert_units_consistent_constraint_data(condata):
97 """
98 Raise an exception if the any units in lower, body, upper on a
99 ConstraintData object are not consistent or are not equivalent
100 with each other.
101 """
102 # Pyomo can rearrange expressions, resulting in a value
103 # of 0 for upper or lower that does not have units associated
104 # Therefore, if the lower and/or upper is 0, we allow it to be unitless
105 # and check the consistency of the body only
106 args = list()
107 if condata.lower is not None and value(condata.lower) != 0.0:
108 args.append(condata.lower)
109
110 args.append(condata.body)
111
112 if condata.upper is not None and value(condata.upper) != 0.0:
113 args.append(condata.upper)
114
115 if len(args) == 1:
116 assert_units_consistent(*args)
117 else:
118 assert_units_equivalent(*args)
119
120
121 def _assert_units_consistent_arc_data(arcdata):
122 """
123 Raise an exception if the any units do not match for the connected ports
124 """
125 sport = arcdata.source
126 dport = arcdata.destination
127 if sport is None or dport is None:
128 # nothing to check
129 return
130
131 # both sport and dport are not None
132 # iterate over the vars in one and check against the other
133 for key in sport.vars:
134 svar = sport.vars[key]
135 dvar = dport.vars[key]
136
137 if svar.is_indexed():
138 for k in svar:
139 svardata = svar[k]
140 dvardata = dvar[k]
141 assert_units_equivalent(svardata, dvardata)
142 else:
143 assert_units_equivalent(svar, dvar)
144
145
146 def _assert_units_consistent_property_expr(obj):
147 """
148 Check the .expr property of the object and raise
149 an exception if the units are not consistent
150 """
151 _assert_units_consistent_expression(obj.expr)
152
153
154 def _assert_units_consistent_expression(expr):
155 """
156 Raise an exception if any units in expr are inconsistent.
157 """
158 # this will raise an exception if the units are not consistent
159 # in the expression
160 pint_unit = units._get_pint_units(expr)
161 # pyomo_unit = units.get_units(expr)
162
163
164 # Complementarities that are not in standard form do not
165 # current work with the checking code. The Units container
166 # should be modified to allow sum and relationals with zero
167 # terms (e.g., unitless). Then this code can be enabled.
168 # def _assert_units_complementarity(cdata):
169 # """
170 # Raise an exception if any units in either of the complementarity
171 # expressions are inconsistent, and also check the standard block
172 # methods.
173 # """
174 # if cdata._args[0] is not None:
175 # pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[0])
176 # if cdata._args[1] is not None:
177 # pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[1])
178 # _assert_units_consistent_block(cdata)
179
180
181 def _assert_units_consistent_block(obj):
182 """
183 This method gets all the components from the block
184 and checks if the units are consistent on each of them
185 """
186 # check all the component objects
187 for component in obj.component_objects(descend_into=False, active=True):
188 assert_units_consistent(component)
189
190
191 _component_data_handlers = {
192 Objective: _assert_units_consistent_property_expr,
193 Constraint: _assert_units_consistent_constraint_data,
194 Var: _assert_units_consistent_expression,
195 DerivativeVar: _assert_units_consistent_expression,
196 Port: None,
197 Arc: _assert_units_consistent_arc_data,
198 Expression: _assert_units_consistent_property_expr,
199 Suffix: None,
200 Param: _assert_units_consistent_expression,
201 Disjunct: _assert_units_consistent_block,
202 Disjunction: None,
203 BooleanVar: None,
204 Block: _assert_units_consistent_block,
205 ExternalFunction: None,
206 # TODO: change this when Sets / ContinuousSets sets have units:
207 ContinuousSet: None,
208 Set: None,
209 SetOf: None,
210 RangeSet: None,
211 # TODO: Piecewise: _assert_units_consistent_piecewise,
212 # TODO: SOSConstraint: _assert_units_consistent_sos,
213 # TODO: LogicalConstraint: _assert_units_consistent_logical,
214 BuildAction: None,
215 BuildCheck: None,
216 # complementarities that are not in normal form are not working yet
217 # see comment in test_check_units
218 # TODO: Complementarity: _assert_units_complementarity
219 }
220
221
222 def assert_units_consistent(obj):
223 """
224 This method raises an exception if the units are not
225 consistent on the passed in object. Argument obj can be one
226 of the following components: Pyomo Block (or Model),
227 Constraint, Objective, Expression, or it can be a Pyomo
228 expression object
229
230 Parameters
231 ----------
232 obj : Pyomo component (e.g., Block, Model, Constraint, Objective, or Expression) or Pyomo expression
233 The object or expression to test
234
235 Raises
236 ------
237 :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`
238 """
239 objtype = type(obj)
240 if objtype in native_types:
241 return
242 elif obj.is_expression_type() or objtype is IndexTemplate:
243 try:
244 _assert_units_consistent_expression(obj)
245 except UnitsError:
246 print('Units problem with expression {}'.format(obj))
247 raise
248 return
249
250 # if object is not in our component handler, raise an exception
251 if obj.ctype not in _component_data_handlers:
252 raise TypeError(
253 "Units checking not supported for object of type {}.".format(obj.ctype)
254 )
255
256 # get the function form the list of handlers
257 handler = _component_data_handlers[obj.ctype]
258 if handler is None:
259 return
260
261 if obj.is_indexed():
262 # check all the component data objects
263 for cdata in obj.values():
264 try:
265 handler(cdata)
266 except UnitsError:
267 print('Error in units when checking {}'.format(cdata))
268 raise
269 else:
270 try:
271 handler(obj)
272 except UnitsError:
273 print('Error in units when checking {}'.format(obj))
274 raise
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/util/check_units.py b/pyomo/util/check_units.py
--- a/pyomo/util/check_units.py
+++ b/pyomo/util/check_units.py
@@ -14,6 +14,8 @@
This module has some helpful methods to support checking units on Pyomo
module objects.
"""
+import logging
+
from pyomo.core.base.units_container import units, UnitsError
from pyomo.core.base import (
Objective,
@@ -39,6 +41,9 @@
from pyomo.core.expr.template_expr import IndexTemplate
from pyomo.core.expr.numvalue import native_types
from pyomo.util.components import iter_component
+from pyomo.common.collections import ComponentSet
+
+logger = logging.getLogger(__name__)
def check_units_equivalent(*args):
@@ -243,7 +248,7 @@
try:
_assert_units_consistent_expression(obj)
except UnitsError:
- print('Units problem with expression {}'.format(obj))
+ logger.error('Units problem with expression {}'.format(obj))
raise
return
@@ -264,11 +269,37 @@
try:
handler(cdata)
except UnitsError:
- print('Error in units when checking {}'.format(cdata))
+ logger.error('Error in units when checking {}'.format(cdata))
raise
else:
+ handler(obj)
+
+
+def identify_inconsistent_units(block):
+ """
+ This function generates a ComponentSet of all Constraints, Expressions, and Objectives
+ in a Block or model which have inconsistent units.
+
+ Parameters
+ ----------
+ block : Pyomo Block or Model to test
+
+ Returns
+ ------
+ ComponentSet : contains all Constraints, Expressions or Objectives which were
+ identified as having unit consistency issues
+ """
+ # It would be nice (and more efficient) if there were a method that would check
+ # unit consistency and return a bool for success or failure.
+ # However, the underlying methods (at least as deep as I looked) all raise exceptions
+ # so we need to iterate over the block here and do a try/except for each component
+
+ inconsistent_units = ComponentSet()
+ for obj in block.component_data_objects(
+ [Constraint, Expression, Objective], descend_into=True
+ ):
try:
- handler(obj)
+ assert_units_consistent(obj)
except UnitsError:
- print('Error in units when checking {}'.format(obj))
- raise
+ inconsistent_units.add(obj)
+ return inconsistent_units
| {"golden_diff": "diff --git a/pyomo/util/check_units.py b/pyomo/util/check_units.py\n--- a/pyomo/util/check_units.py\n+++ b/pyomo/util/check_units.py\n@@ -14,6 +14,8 @@\n This module has some helpful methods to support checking units on Pyomo\n module objects.\n \"\"\"\n+import logging\n+\n from pyomo.core.base.units_container import units, UnitsError\n from pyomo.core.base import (\n Objective,\n@@ -39,6 +41,9 @@\n from pyomo.core.expr.template_expr import IndexTemplate\n from pyomo.core.expr.numvalue import native_types\n from pyomo.util.components import iter_component\n+from pyomo.common.collections import ComponentSet\n+\n+logger = logging.getLogger(__name__)\n \n \n def check_units_equivalent(*args):\n@@ -243,7 +248,7 @@\n try:\n _assert_units_consistent_expression(obj)\n except UnitsError:\n- print('Units problem with expression {}'.format(obj))\n+ logger.error('Units problem with expression {}'.format(obj))\n raise\n return\n \n@@ -264,11 +269,37 @@\n try:\n handler(cdata)\n except UnitsError:\n- print('Error in units when checking {}'.format(cdata))\n+ logger.error('Error in units when checking {}'.format(cdata))\n raise\n else:\n+ handler(obj)\n+\n+\n+def identify_inconsistent_units(block):\n+ \"\"\"\n+ This function generates a ComponentSet of all Constraints, Expressions, and Objectives\n+ in a Block or model which have inconsistent units.\n+\n+ Parameters\n+ ----------\n+ block : Pyomo Block or Model to test\n+\n+ Returns\n+ ------\n+ ComponentSet : contains all Constraints, Expressions or Objectives which were\n+ identified as having unit consistency issues\n+ \"\"\"\n+ # It would be nice (and more efficient) if there were a method that would check\n+ # unit consistency and return a bool for success or failure.\n+ # However, the underlying methods (at least as deep as I looked) all raise exceptions\n+ # so we need to iterate over the block here and do a try/except for each component\n+\n+ inconsistent_units = ComponentSet()\n+ for obj in block.component_data_objects(\n+ [Constraint, Expression, Objective], descend_into=True\n+ ):\n try:\n- handler(obj)\n+ assert_units_consistent(obj)\n except UnitsError:\n- print('Error in units when checking {}'.format(obj))\n- raise\n+ inconsistent_units.add(obj)\n+ return inconsistent_units\n", "issue": "Request for an `identify_inconsistent_units` method\n## Summary\r\n\r\nThe Pyomo unit consistency checks current have an `assert_units_consistent` function which will iterate over the constraints in a model and raise a `UnitsError` if a constraint is encountered with inconsistent units. Whilst this is useful for testing, in most cases a model developer would like to get a list of all constraints with unit consistency issues, not just an exception for the first issue encountered.\r\n\r\nAdditionally, the `assert_units_consistent` function has some `print` statements embedded in it which clutter the output somewhat which could probably be removed (the proposed `identify_inconsistent_units` method would provide a means for getting the full list of problematic constraints in its place).\r\n\r\n### Rationale\r\n\r\nI am writing a tool as part of https://github.com/IDAES/idaes-pse/issues/1208 which aims to provide users with a summary of all (or at least most) the issues in their model, one of which is unit consistency. 
As such I would like to generate the list of problematic constraints for the user to reference and to do so without any extraneous text output.\r\n\r\n### Description\r\n\r\nFor the time being, I am using:\r\n\r\n```\r\nconstraints_with_inconsistent_units = ComponentSet()\r\nfor c in self.model.component_data_objects(Constraint, descend_into=True):\r\n try:\r\n assert_units_consistent(c)\r\n except UnitsError:\r\n constraints_with_inconsistent_units.add(c)\r\nreturn constraints_with_inconsistent_units\r\n```\r\n\r\nThis generates the desired list, but produces a line of text for each failing constraint; e.g. `Error in units when checking b.c4`.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# __________________________________________________________________________\n#\n#\n\"\"\" Pyomo Units Checking Module\nThis module has some helpful methods to support checking units on Pyomo\nmodule objects.\n\"\"\"\nfrom pyomo.core.base.units_container import units, UnitsError\nfrom pyomo.core.base import (\n Objective,\n Constraint,\n Var,\n Param,\n Suffix,\n Set,\n SetOf,\n RangeSet,\n Block,\n ExternalFunction,\n Expression,\n value,\n BooleanVar,\n BuildAction,\n BuildCheck,\n)\nfrom pyomo.dae import ContinuousSet, DerivativeVar\nfrom pyomo.network import Port, Arc\nfrom pyomo.mpec import Complementarity\nfrom pyomo.gdp import Disjunct, Disjunction\nfrom pyomo.core.expr.template_expr import IndexTemplate\nfrom pyomo.core.expr.numvalue import native_types\nfrom pyomo.util.components import iter_component\n\n\ndef check_units_equivalent(*args):\n \"\"\"\n Returns True if the units associated with each of the\n expressions passed as arguments are all equivalent (and False\n otherwise).\n\n Note that this method will raise an exception if the units are\n inconsistent within an expression (since the units for that\n expression are not valid).\n\n Parameters\n ----------\n args : an argument list of Pyomo expressions\n\n Returns\n -------\n bool : True if all the expressions passed as arguments have the same units\n \"\"\"\n try:\n assert_units_equivalent(*args)\n return True\n except UnitsError:\n return False\n\n\ndef assert_units_equivalent(*args):\n \"\"\"\n Raise an exception if the units are inconsistent within an\n expression, or not equivalent across all the passed\n expressions.\n\n Parameters\n ----------\n args : an argument list of Pyomo expressions\n The Pyomo expressions to test\n\n Raises\n ------\n :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`\n \"\"\"\n # this call will raise an exception if an inconsistency is found\n pint_units = [units._get_pint_units(arg) for arg in args]\n pint_unit_compare = pint_units[0]\n for pint_unit in pint_units:\n if not units._equivalent_pint_units(pint_unit_compare, pint_unit):\n raise UnitsError(\n \"Units between {} and {} are not consistent.\".format(\n str(pint_unit_compare), str(pint_unit)\n )\n )\n\n\ndef _assert_units_consistent_constraint_data(condata):\n \"\"\"\n Raise an exception if the any units in lower, body, upper on a\n 
ConstraintData object are not consistent or are not equivalent\n with each other.\n \"\"\"\n # Pyomo can rearrange expressions, resulting in a value\n # of 0 for upper or lower that does not have units associated\n # Therefore, if the lower and/or upper is 0, we allow it to be unitless\n # and check the consistency of the body only\n args = list()\n if condata.lower is not None and value(condata.lower) != 0.0:\n args.append(condata.lower)\n\n args.append(condata.body)\n\n if condata.upper is not None and value(condata.upper) != 0.0:\n args.append(condata.upper)\n\n if len(args) == 1:\n assert_units_consistent(*args)\n else:\n assert_units_equivalent(*args)\n\n\ndef _assert_units_consistent_arc_data(arcdata):\n \"\"\"\n Raise an exception if the any units do not match for the connected ports\n \"\"\"\n sport = arcdata.source\n dport = arcdata.destination\n if sport is None or dport is None:\n # nothing to check\n return\n\n # both sport and dport are not None\n # iterate over the vars in one and check against the other\n for key in sport.vars:\n svar = sport.vars[key]\n dvar = dport.vars[key]\n\n if svar.is_indexed():\n for k in svar:\n svardata = svar[k]\n dvardata = dvar[k]\n assert_units_equivalent(svardata, dvardata)\n else:\n assert_units_equivalent(svar, dvar)\n\n\ndef _assert_units_consistent_property_expr(obj):\n \"\"\"\n Check the .expr property of the object and raise\n an exception if the units are not consistent\n \"\"\"\n _assert_units_consistent_expression(obj.expr)\n\n\ndef _assert_units_consistent_expression(expr):\n \"\"\"\n Raise an exception if any units in expr are inconsistent.\n \"\"\"\n # this will raise an exception if the units are not consistent\n # in the expression\n pint_unit = units._get_pint_units(expr)\n # pyomo_unit = units.get_units(expr)\n\n\n# Complementarities that are not in standard form do not\n# current work with the checking code. The Units container\n# should be modified to allow sum and relationals with zero\n# terms (e.g., unitless). 
Then this code can be enabled.\n# def _assert_units_complementarity(cdata):\n# \"\"\"\n# Raise an exception if any units in either of the complementarity\n# expressions are inconsistent, and also check the standard block\n# methods.\n# \"\"\"\n# if cdata._args[0] is not None:\n# pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[0])\n# if cdata._args[1] is not None:\n# pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[1])\n# _assert_units_consistent_block(cdata)\n\n\ndef _assert_units_consistent_block(obj):\n \"\"\"\n This method gets all the components from the block\n and checks if the units are consistent on each of them\n \"\"\"\n # check all the component objects\n for component in obj.component_objects(descend_into=False, active=True):\n assert_units_consistent(component)\n\n\n_component_data_handlers = {\n Objective: _assert_units_consistent_property_expr,\n Constraint: _assert_units_consistent_constraint_data,\n Var: _assert_units_consistent_expression,\n DerivativeVar: _assert_units_consistent_expression,\n Port: None,\n Arc: _assert_units_consistent_arc_data,\n Expression: _assert_units_consistent_property_expr,\n Suffix: None,\n Param: _assert_units_consistent_expression,\n Disjunct: _assert_units_consistent_block,\n Disjunction: None,\n BooleanVar: None,\n Block: _assert_units_consistent_block,\n ExternalFunction: None,\n # TODO: change this when Sets / ContinuousSets sets have units:\n ContinuousSet: None,\n Set: None,\n SetOf: None,\n RangeSet: None,\n # TODO: Piecewise: _assert_units_consistent_piecewise,\n # TODO: SOSConstraint: _assert_units_consistent_sos,\n # TODO: LogicalConstraint: _assert_units_consistent_logical,\n BuildAction: None,\n BuildCheck: None,\n # complementarities that are not in normal form are not working yet\n # see comment in test_check_units\n # TODO: Complementarity: _assert_units_complementarity\n}\n\n\ndef assert_units_consistent(obj):\n \"\"\"\n This method raises an exception if the units are not\n consistent on the passed in object. 
Argument obj can be one\n of the following components: Pyomo Block (or Model),\n Constraint, Objective, Expression, or it can be a Pyomo\n expression object\n\n Parameters\n ----------\n obj : Pyomo component (e.g., Block, Model, Constraint, Objective, or Expression) or Pyomo expression\n The object or expression to test\n\n Raises\n ------\n :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`\n \"\"\"\n objtype = type(obj)\n if objtype in native_types:\n return\n elif obj.is_expression_type() or objtype is IndexTemplate:\n try:\n _assert_units_consistent_expression(obj)\n except UnitsError:\n print('Units problem with expression {}'.format(obj))\n raise\n return\n\n # if object is not in our component handler, raise an exception\n if obj.ctype not in _component_data_handlers:\n raise TypeError(\n \"Units checking not supported for object of type {}.\".format(obj.ctype)\n )\n\n # get the function form the list of handlers\n handler = _component_data_handlers[obj.ctype]\n if handler is None:\n return\n\n if obj.is_indexed():\n # check all the component data objects\n for cdata in obj.values():\n try:\n handler(cdata)\n except UnitsError:\n print('Error in units when checking {}'.format(cdata))\n raise\n else:\n try:\n handler(obj)\n except UnitsError:\n print('Error in units when checking {}'.format(obj))\n raise\n", "path": "pyomo/util/check_units.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# __________________________________________________________________________\n#\n#\n\"\"\" Pyomo Units Checking Module\nThis module has some helpful methods to support checking units on Pyomo\nmodule objects.\n\"\"\"\nimport logging\n\nfrom pyomo.core.base.units_container import units, UnitsError\nfrom pyomo.core.base import (\n Objective,\n Constraint,\n Var,\n Param,\n Suffix,\n Set,\n SetOf,\n RangeSet,\n Block,\n ExternalFunction,\n Expression,\n value,\n BooleanVar,\n BuildAction,\n BuildCheck,\n)\nfrom pyomo.dae import ContinuousSet, DerivativeVar\nfrom pyomo.network import Port, Arc\nfrom pyomo.mpec import Complementarity\nfrom pyomo.gdp import Disjunct, Disjunction\nfrom pyomo.core.expr.template_expr import IndexTemplate\nfrom pyomo.core.expr.numvalue import native_types\nfrom pyomo.util.components import iter_component\nfrom pyomo.common.collections import ComponentSet\n\nlogger = logging.getLogger(__name__)\n\n\ndef check_units_equivalent(*args):\n \"\"\"\n Returns True if the units associated with each of the\n expressions passed as arguments are all equivalent (and False\n otherwise).\n\n Note that this method will raise an exception if the units are\n inconsistent within an expression (since the units for that\n expression are not valid).\n\n Parameters\n ----------\n args : an argument list of Pyomo expressions\n\n Returns\n -------\n bool : True if all the expressions passed as arguments have the same units\n \"\"\"\n try:\n assert_units_equivalent(*args)\n return True\n except UnitsError:\n return False\n\n\ndef assert_units_equivalent(*args):\n \"\"\"\n Raise an exception if the units are inconsistent within an\n expression, or not equivalent across all the passed\n expressions.\n\n Parameters\n ----------\n args : an argument list of Pyomo expressions\n The Pyomo expressions to test\n\n Raises\n ------\n :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`\n \"\"\"\n # this call will raise an exception if an inconsistency is found\n pint_units = [units._get_pint_units(arg) for arg in args]\n pint_unit_compare = pint_units[0]\n for pint_unit in pint_units:\n if not units._equivalent_pint_units(pint_unit_compare, pint_unit):\n raise UnitsError(\n \"Units between {} and {} are not consistent.\".format(\n str(pint_unit_compare), str(pint_unit)\n )\n )\n\n\ndef _assert_units_consistent_constraint_data(condata):\n \"\"\"\n Raise an exception if the any units in lower, body, upper on a\n ConstraintData object are not consistent or are not equivalent\n with each other.\n \"\"\"\n # Pyomo can rearrange expressions, resulting in a value\n # of 0 for upper or lower that does not have units associated\n # Therefore, if the lower and/or upper is 0, we allow it to be unitless\n # and check the consistency of the body only\n args = list()\n if condata.lower is not None and value(condata.lower) != 0.0:\n args.append(condata.lower)\n\n args.append(condata.body)\n\n if condata.upper is not None and value(condata.upper) != 0.0:\n args.append(condata.upper)\n\n if len(args) == 1:\n assert_units_consistent(*args)\n else:\n assert_units_equivalent(*args)\n\n\ndef _assert_units_consistent_arc_data(arcdata):\n \"\"\"\n Raise an exception if the any units do not match for the connected ports\n \"\"\"\n sport = arcdata.source\n dport = arcdata.destination\n if sport is None or dport is None:\n # nothing to 
check\n return\n\n # both sport and dport are not None\n # iterate over the vars in one and check against the other\n for key in sport.vars:\n svar = sport.vars[key]\n dvar = dport.vars[key]\n\n if svar.is_indexed():\n for k in svar:\n svardata = svar[k]\n dvardata = dvar[k]\n assert_units_equivalent(svardata, dvardata)\n else:\n assert_units_equivalent(svar, dvar)\n\n\ndef _assert_units_consistent_property_expr(obj):\n \"\"\"\n Check the .expr property of the object and raise\n an exception if the units are not consistent\n \"\"\"\n _assert_units_consistent_expression(obj.expr)\n\n\ndef _assert_units_consistent_expression(expr):\n \"\"\"\n Raise an exception if any units in expr are inconsistent.\n \"\"\"\n # this will raise an exception if the units are not consistent\n # in the expression\n pint_unit = units._get_pint_units(expr)\n # pyomo_unit = units.get_units(expr)\n\n\n# Complementarities that are not in standard form do not\n# current work with the checking code. The Units container\n# should be modified to allow sum and relationals with zero\n# terms (e.g., unitless). Then this code can be enabled.\n# def _assert_units_complementarity(cdata):\n# \"\"\"\n# Raise an exception if any units in either of the complementarity\n# expressions are inconsistent, and also check the standard block\n# methods.\n# \"\"\"\n# if cdata._args[0] is not None:\n# pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[0])\n# if cdata._args[1] is not None:\n# pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[1])\n# _assert_units_consistent_block(cdata)\n\n\ndef _assert_units_consistent_block(obj):\n \"\"\"\n This method gets all the components from the block\n and checks if the units are consistent on each of them\n \"\"\"\n # check all the component objects\n for component in obj.component_objects(descend_into=False, active=True):\n assert_units_consistent(component)\n\n\n_component_data_handlers = {\n Objective: _assert_units_consistent_property_expr,\n Constraint: _assert_units_consistent_constraint_data,\n Var: _assert_units_consistent_expression,\n DerivativeVar: _assert_units_consistent_expression,\n Port: None,\n Arc: _assert_units_consistent_arc_data,\n Expression: _assert_units_consistent_property_expr,\n Suffix: None,\n Param: _assert_units_consistent_expression,\n Disjunct: _assert_units_consistent_block,\n Disjunction: None,\n BooleanVar: None,\n Block: _assert_units_consistent_block,\n ExternalFunction: None,\n # TODO: change this when Sets / ContinuousSets sets have units:\n ContinuousSet: None,\n Set: None,\n SetOf: None,\n RangeSet: None,\n # TODO: Piecewise: _assert_units_consistent_piecewise,\n # TODO: SOSConstraint: _assert_units_consistent_sos,\n # TODO: LogicalConstraint: _assert_units_consistent_logical,\n BuildAction: None,\n BuildCheck: None,\n # complementarities that are not in normal form are not working yet\n # see comment in test_check_units\n # TODO: Complementarity: _assert_units_complementarity\n}\n\n\ndef assert_units_consistent(obj):\n \"\"\"\n This method raises an exception if the units are not\n consistent on the passed in object. 
Argument obj can be one\n of the following components: Pyomo Block (or Model),\n Constraint, Objective, Expression, or it can be a Pyomo\n expression object\n\n Parameters\n ----------\n obj : Pyomo component (e.g., Block, Model, Constraint, Objective, or Expression) or Pyomo expression\n The object or expression to test\n\n Raises\n ------\n :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`\n \"\"\"\n objtype = type(obj)\n if objtype in native_types:\n return\n elif obj.is_expression_type() or objtype is IndexTemplate:\n try:\n _assert_units_consistent_expression(obj)\n except UnitsError:\n logger.error('Units problem with expression {}'.format(obj))\n raise\n return\n\n # if object is not in our component handler, raise an exception\n if obj.ctype not in _component_data_handlers:\n raise TypeError(\n \"Units checking not supported for object of type {}.\".format(obj.ctype)\n )\n\n # get the function form the list of handlers\n handler = _component_data_handlers[obj.ctype]\n if handler is None:\n return\n\n if obj.is_indexed():\n # check all the component data objects\n for cdata in obj.values():\n try:\n handler(cdata)\n except UnitsError:\n logger.error('Error in units when checking {}'.format(cdata))\n raise\n else:\n handler(obj)\n\n\ndef identify_inconsistent_units(block):\n \"\"\"\n This function generates a ComponentSet of all Constraints, Expressions, and Objectives\n in a Block or model which have inconsistent units.\n\n Parameters\n ----------\n block : Pyomo Block or Model to test\n\n Returns\n ------\n ComponentSet : contains all Constraints, Expressions or Objectives which were\n identified as having unit consistency issues\n \"\"\"\n # It would be nice (and more efficient) if there were a method that would check\n # unit consistency and return a bool for success or failure.\n # However, the underlying methods (at least as deep as I looked) all raise exceptions\n # so we need to iterate over the block here and do a try/except for each component\n\n inconsistent_units = ComponentSet()\n for obj in block.component_data_objects(\n [Constraint, Expression, Objective], descend_into=True\n ):\n try:\n assert_units_consistent(obj)\n except UnitsError:\n inconsistent_units.add(obj)\n return inconsistent_units\n", "path": "pyomo/util/check_units.py"}]} | 3,361 | 562 |
gh_patches_debug_18950 | rasdani/github-patches | git_diff | flairNLP__flair-154 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Text Classifier: Clear embeddings in predict()
In the `predict()` method of the text classification model the sentence embeddings are not cleared after calculation. This might lead to memory issues.
TODO: Clear embeddings at the end of the predict method.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/models/text_classification_model.py`
Content:
```
1 import warnings
2 from typing import List, Union
3
4 import torch
5 import torch.nn as nn
6
7 import flair.embeddings
8 from flair.data import Dictionary, Sentence, Label
9 from flair.training_utils import convert_labels_to_one_hot, clear_embeddings
10
11
12 class TextClassifier(nn.Module):
13 """
14 Text Classification Model
15 The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the
16 text representation in the end into a linear layer to get the actual class label.
17 The model can handle single and multi class data sets.
18 """
19
20 def __init__(self,
21 document_embeddings: flair.embeddings.DocumentEmbeddings,
22 label_dictionary: Dictionary,
23 multi_label: bool):
24
25 super(TextClassifier, self).__init__()
26
27 self.document_embeddings = document_embeddings
28 self.label_dictionary: Dictionary = label_dictionary
29 self.multi_label = multi_label
30
31 self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings
32
33 self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))
34
35 self._init_weights()
36
37 if multi_label:
38 self.loss_function = nn.BCELoss()
39 else:
40 self.loss_function = nn.CrossEntropyLoss()
41
42 # auto-spawn on GPU if available
43 if torch.cuda.is_available():
44 self.cuda()
45
46 def _init_weights(self):
47 nn.init.xavier_uniform_(self.decoder.weight)
48
49 def forward(self, sentences) -> List[List[float]]:
50 self.document_embeddings.embed(sentences)
51
52 text_embedding_list = [sentence.get_embedding() for sentence in sentences]
53 text_embedding_tensor = torch.cat(text_embedding_list, 0)
54
55 if torch.cuda.is_available():
56 text_embedding_tensor = text_embedding_tensor.cuda()
57
58 label_scores = self.decoder(text_embedding_tensor)
59
60 return label_scores
61
62 def save(self, model_file: str):
63 """
64 Saves the current model to the provided file.
65 :param model_file: the model file
66 """
67 model_state = {
68 'state_dict': self.state_dict(),
69 'document_embeddings': self.document_embeddings,
70 'label_dictionary': self.label_dictionary,
71 'multi_label': self.multi_label,
72 }
73 torch.save(model_state, model_file, pickle_protocol=4)
74
75 @classmethod
76 def load_from_file(cls, model_file):
77 """
78 Loads the model from the given file.
79 :param model_file: the model file
80 :return: the loaded text classifier model
81 """
82
83 # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive
84 # serialization of torch objects
85 warnings.filterwarnings("ignore")
86 if torch.cuda.is_available():
87 state = torch.load(model_file)
88 else:
89 state = torch.load(model_file, map_location={'cuda:0': 'cpu'})
90 warnings.filterwarnings("default")
91
92 model = TextClassifier(
93 document_embeddings=state['document_embeddings'],
94 label_dictionary=state['label_dictionary'],
95 multi_label=state['multi_label']
96 )
97
98 model.load_state_dict(state['state_dict'])
99 model.eval()
100 return model
101
102 def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32, embeddings_in_memory: bool = True) -> List[Sentence]:
103 """
104 Predicts the class labels for the given sentences. The labels are directly added to the sentences.
105 :param sentences: list of sentences
106 :param mini_batch_size: mini batch size to use
107 :return: the list of sentences containing the labels
108 """
109 if type(sentences) is Sentence:
110 sentences = [sentences]
111
112 batches = [sentences[x:x + mini_batch_size] for x in range(0, len(sentences), mini_batch_size)]
113
114 for batch in batches:
115 scores = self.forward(batch)
116 predicted_labels = self.obtain_labels(scores)
117
118 for (sentence, labels) in zip(batch, predicted_labels):
119 sentence.labels = labels
120
121 if not embeddings_in_memory:
122 clear_embeddings(batch)
123
124 return sentences
125
126 def calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:
127 """
128 Calculates the loss.
129 :param scores: the prediction scores from the model
130 :param sentences: list of sentences
131 :return: loss value
132 """
133 if self.multi_label:
134 return self._calculate_multi_label_loss(scores, sentences)
135
136 return self._calculate_single_label_loss(scores, sentences)
137
138 def obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:
139 """
140 Predicts the labels of sentences.
141 :param scores: the prediction scores from the model
142 :return: list of predicted labels
143 """
144
145 if self.multi_label:
146 return [self._get_multi_label(s) for s in scores]
147
148 return [self._get_single_label(s) for s in scores]
149
150 def _get_multi_label(self, label_scores) -> List[Label]:
151 labels = []
152
153 sigmoid = torch.nn.Sigmoid()
154
155 results = list(map(lambda x: sigmoid(x), label_scores))
156 for idx, conf in enumerate(results):
157 if conf > 0.5:
158 label = self.label_dictionary.get_item_for_index(idx)
159 labels.append(Label(label, conf.item()))
160
161 return labels
162
163 def _get_single_label(self, label_scores) -> List[Label]:
164 conf, idx = torch.max(label_scores, 0)
165 label = self.label_dictionary.get_item_for_index(idx.item())
166
167 return [Label(label, conf.item())]
168
169 def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
170 sigmoid = nn.Sigmoid()
171 return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))
172
173 def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:
174 return self.loss_function(label_scores, self._labels_to_indices(sentences))
175
176 def _labels_to_one_hot(self, sentences: List[Sentence]):
177 label_list = [sentence.get_label_names() for sentence in sentences]
178 one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)
179 one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]
180 one_hot = torch.cat(one_hot, 0)
181 if torch.cuda.is_available():
182 one_hot = one_hot.cuda()
183 return one_hot
184
185 def _labels_to_indices(self, sentences: List[Sentence]):
186 indices = [
187 torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])
188 for sentence in sentences
189 ]
190
191 vec = torch.cat(indices, 0)
192 if torch.cuda.is_available():
193 vec = vec.cuda()
194
195 return vec
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py
--- a/flair/models/text_classification_model.py
+++ b/flair/models/text_classification_model.py
@@ -99,7 +99,7 @@
model.eval()
return model
- def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32, embeddings_in_memory: bool = True) -> List[Sentence]:
+ def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:
"""
Predicts the class labels for the given sentences. The labels are directly added to the sentences.
:param sentences: list of sentences
@@ -118,8 +118,7 @@
for (sentence, labels) in zip(batch, predicted_labels):
sentence.labels = labels
- if not embeddings_in_memory:
- clear_embeddings(batch)
+ clear_embeddings(batch)
return sentences
| {"golden_diff": "diff --git a/flair/models/text_classification_model.py b/flair/models/text_classification_model.py\n--- a/flair/models/text_classification_model.py\n+++ b/flair/models/text_classification_model.py\n@@ -99,7 +99,7 @@\n model.eval()\n return model\n \n- def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32, embeddings_in_memory: bool = True) -> List[Sentence]:\n+ def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. The labels are directly added to the sentences.\n :param sentences: list of sentences\n@@ -118,8 +118,7 @@\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n \n- if not embeddings_in_memory:\n- clear_embeddings(batch)\n+ clear_embeddings(batch)\n \n return sentences\n", "issue": "Text Classifier: Clear embeddings in predict()\nIn the `predict()` method of the text classification model the sentence embeddings are not cleared after calculation. This might lead to memory issues. \r\n\r\nTODO: Clear embeddings at the end of the predict method.\n", "before_files": [{"content": "import warnings\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\nimport flair.embeddings\nfrom flair.data import Dictionary, Sentence, Label\nfrom flair.training_utils import convert_labels_to_one_hot, clear_embeddings\n\n\nclass TextClassifier(nn.Module):\n \"\"\"\n Text Classification Model\n The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the\n text representation in the end into a linear layer to get the actual class label.\n The model can handle single and multi class data sets.\n \"\"\"\n\n def __init__(self,\n document_embeddings: flair.embeddings.DocumentEmbeddings,\n label_dictionary: Dictionary,\n multi_label: bool):\n\n super(TextClassifier, self).__init__()\n\n self.document_embeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n\n self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n\n self._init_weights()\n\n if multi_label:\n self.loss_function = nn.BCELoss()\n else:\n self.loss_function = nn.CrossEntropyLoss()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def _init_weights(self):\n nn.init.xavier_uniform_(self.decoder.weight)\n\n def forward(self, sentences) -> List[List[float]]:\n self.document_embeddings.embed(sentences)\n\n text_embedding_list = [sentence.get_embedding() for sentence in sentences]\n text_embedding_tensor = torch.cat(text_embedding_list, 0)\n\n if torch.cuda.is_available():\n text_embedding_tensor = text_embedding_tensor.cuda()\n\n label_scores = self.decoder(text_embedding_tensor)\n\n return label_scores\n\n def save(self, model_file: str):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n }\n torch.save(model_state, model_file, pickle_protocol=4)\n\n @classmethod\n def load_from_file(cls, model_file):\n \"\"\"\n Loads the model from the given file.\n :param model_file: the model file\n :return: the loaded text classifier 
model\n \"\"\"\n\n # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive\n # serialization of torch objects\n warnings.filterwarnings(\"ignore\")\n if torch.cuda.is_available():\n state = torch.load(model_file)\n else:\n state = torch.load(model_file, map_location={'cuda:0': 'cpu'})\n warnings.filterwarnings(\"default\")\n\n model = TextClassifier(\n document_embeddings=state['document_embeddings'],\n label_dictionary=state['label_dictionary'],\n multi_label=state['multi_label']\n )\n\n model.load_state_dict(state['state_dict'])\n model.eval()\n return model\n\n def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32, embeddings_in_memory: bool = True) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :return: the list of sentences containing the labels\n \"\"\"\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n batches = [sentences[x:x + mini_batch_size] for x in range(0, len(sentences), mini_batch_size)]\n\n for batch in batches:\n scores = self.forward(batch)\n predicted_labels = self.obtain_labels(scores)\n\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n\n if not embeddings_in_memory:\n clear_embeddings(batch)\n\n return sentences\n\n def calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:\n \"\"\"\n Calculates the loss.\n :param scores: the prediction scores from the model\n :param sentences: list of sentences\n :return: loss value\n \"\"\"\n if self.multi_label:\n return self._calculate_multi_label_loss(scores, sentences)\n\n return self._calculate_single_label_loss(scores, sentences)\n\n def obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:\n \"\"\"\n Predicts the labels of sentences.\n :param scores: the prediction scores from the model\n :return: list of predicted labels\n \"\"\"\n\n if self.multi_label:\n return [self._get_multi_label(s) for s in scores]\n\n return [self._get_single_label(s) for s in scores]\n\n def _get_multi_label(self, label_scores) -> List[Label]:\n labels = []\n\n sigmoid = torch.nn.Sigmoid()\n\n results = list(map(lambda x: sigmoid(x), label_scores))\n for idx, conf in enumerate(results):\n if conf > 0.5:\n label = self.label_dictionary.get_item_for_index(idx)\n labels.append(Label(label, conf.item()))\n\n return labels\n\n def _get_single_label(self, label_scores) -> List[Label]:\n conf, idx = torch.max(label_scores, 0)\n label = self.label_dictionary.get_item_for_index(idx.item())\n\n return [Label(label, conf.item())]\n\n def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n sigmoid = nn.Sigmoid()\n return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))\n\n def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n return self.loss_function(label_scores, self._labels_to_indices(sentences))\n\n def _labels_to_one_hot(self, sentences: List[Sentence]):\n label_list = [sentence.get_label_names() for sentence in sentences]\n one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)\n one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]\n one_hot = torch.cat(one_hot, 0)\n if torch.cuda.is_available():\n one_hot = one_hot.cuda()\n return one_hot\n\n def _labels_to_indices(self, sentences: 
List[Sentence]):\n indices = [\n torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])\n for sentence in sentences\n ]\n\n vec = torch.cat(indices, 0)\n if torch.cuda.is_available():\n vec = vec.cuda()\n\n return vec", "path": "flair/models/text_classification_model.py"}], "after_files": [{"content": "import warnings\nfrom typing import List, Union\n\nimport torch\nimport torch.nn as nn\n\nimport flair.embeddings\nfrom flair.data import Dictionary, Sentence, Label\nfrom flair.training_utils import convert_labels_to_one_hot, clear_embeddings\n\n\nclass TextClassifier(nn.Module):\n \"\"\"\n Text Classification Model\n The model takes word embeddings, puts them into an LSTM to obtain a text representation, and puts the\n text representation in the end into a linear layer to get the actual class label.\n The model can handle single and multi class data sets.\n \"\"\"\n\n def __init__(self,\n document_embeddings: flair.embeddings.DocumentEmbeddings,\n label_dictionary: Dictionary,\n multi_label: bool):\n\n super(TextClassifier, self).__init__()\n\n self.document_embeddings = document_embeddings\n self.label_dictionary: Dictionary = label_dictionary\n self.multi_label = multi_label\n\n self.document_embeddings: flair.embeddings.DocumentLSTMEmbeddings = document_embeddings\n\n self.decoder = nn.Linear(self.document_embeddings.embedding_length, len(self.label_dictionary))\n\n self._init_weights()\n\n if multi_label:\n self.loss_function = nn.BCELoss()\n else:\n self.loss_function = nn.CrossEntropyLoss()\n\n # auto-spawn on GPU if available\n if torch.cuda.is_available():\n self.cuda()\n\n def _init_weights(self):\n nn.init.xavier_uniform_(self.decoder.weight)\n\n def forward(self, sentences) -> List[List[float]]:\n self.document_embeddings.embed(sentences)\n\n text_embedding_list = [sentence.get_embedding() for sentence in sentences]\n text_embedding_tensor = torch.cat(text_embedding_list, 0)\n\n if torch.cuda.is_available():\n text_embedding_tensor = text_embedding_tensor.cuda()\n\n label_scores = self.decoder(text_embedding_tensor)\n\n return label_scores\n\n def save(self, model_file: str):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = {\n 'state_dict': self.state_dict(),\n 'document_embeddings': self.document_embeddings,\n 'label_dictionary': self.label_dictionary,\n 'multi_label': self.multi_label,\n }\n torch.save(model_state, model_file, pickle_protocol=4)\n\n @classmethod\n def load_from_file(cls, model_file):\n \"\"\"\n Loads the model from the given file.\n :param model_file: the model file\n :return: the loaded text classifier model\n \"\"\"\n\n # ATTENTION: suppressing torch serialization warnings. This needs to be taken out once we sort out recursive\n # serialization of torch objects\n warnings.filterwarnings(\"ignore\")\n if torch.cuda.is_available():\n state = torch.load(model_file)\n else:\n state = torch.load(model_file, map_location={'cuda:0': 'cpu'})\n warnings.filterwarnings(\"default\")\n\n model = TextClassifier(\n document_embeddings=state['document_embeddings'],\n label_dictionary=state['label_dictionary'],\n multi_label=state['multi_label']\n )\n\n model.load_state_dict(state['state_dict'])\n model.eval()\n return model\n\n def predict(self, sentences: Union[Sentence, List[Sentence]], mini_batch_size: int = 32) -> List[Sentence]:\n \"\"\"\n Predicts the class labels for the given sentences. 
The labels are directly added to the sentences.\n :param sentences: list of sentences\n :param mini_batch_size: mini batch size to use\n :return: the list of sentences containing the labels\n \"\"\"\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n batches = [sentences[x:x + mini_batch_size] for x in range(0, len(sentences), mini_batch_size)]\n\n for batch in batches:\n scores = self.forward(batch)\n predicted_labels = self.obtain_labels(scores)\n\n for (sentence, labels) in zip(batch, predicted_labels):\n sentence.labels = labels\n\n clear_embeddings(batch)\n\n return sentences\n\n def calculate_loss(self, scores: List[List[float]], sentences: List[Sentence]) -> float:\n \"\"\"\n Calculates the loss.\n :param scores: the prediction scores from the model\n :param sentences: list of sentences\n :return: loss value\n \"\"\"\n if self.multi_label:\n return self._calculate_multi_label_loss(scores, sentences)\n\n return self._calculate_single_label_loss(scores, sentences)\n\n def obtain_labels(self, scores: List[List[float]]) -> List[List[Label]]:\n \"\"\"\n Predicts the labels of sentences.\n :param scores: the prediction scores from the model\n :return: list of predicted labels\n \"\"\"\n\n if self.multi_label:\n return [self._get_multi_label(s) for s in scores]\n\n return [self._get_single_label(s) for s in scores]\n\n def _get_multi_label(self, label_scores) -> List[Label]:\n labels = []\n\n sigmoid = torch.nn.Sigmoid()\n\n results = list(map(lambda x: sigmoid(x), label_scores))\n for idx, conf in enumerate(results):\n if conf > 0.5:\n label = self.label_dictionary.get_item_for_index(idx)\n labels.append(Label(label, conf.item()))\n\n return labels\n\n def _get_single_label(self, label_scores) -> List[Label]:\n conf, idx = torch.max(label_scores, 0)\n label = self.label_dictionary.get_item_for_index(idx.item())\n\n return [Label(label, conf.item())]\n\n def _calculate_multi_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n sigmoid = nn.Sigmoid()\n return self.loss_function(sigmoid(label_scores), self._labels_to_one_hot(sentences))\n\n def _calculate_single_label_loss(self, label_scores, sentences: List[Sentence]) -> float:\n return self.loss_function(label_scores, self._labels_to_indices(sentences))\n\n def _labels_to_one_hot(self, sentences: List[Sentence]):\n label_list = [sentence.get_label_names() for sentence in sentences]\n one_hot = convert_labels_to_one_hot(label_list, self.label_dictionary)\n one_hot = [torch.FloatTensor(l).unsqueeze(0) for l in one_hot]\n one_hot = torch.cat(one_hot, 0)\n if torch.cuda.is_available():\n one_hot = one_hot.cuda()\n return one_hot\n\n def _labels_to_indices(self, sentences: List[Sentence]):\n indices = [\n torch.LongTensor([self.label_dictionary.get_idx_for_item(label.value) for label in sentence.labels])\n for sentence in sentences\n ]\n\n vec = torch.cat(indices, 0)\n if torch.cuda.is_available():\n vec = vec.cuda()\n\n return vec", "path": "flair/models/text_classification_model.py"}]} | 2,233 | 220 |
gh_patches_debug_4918 | rasdani/github-patches | git_diff | rlworkgroup__garage-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs page "Ensure your experiments are reproducible"
See #1426
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/garage/envs/point_env.py`
Content:
```
1 """Simple 2D environment containing a point and a goal location."""
2 import gym
3 import numpy as np
4
5 from garage.envs.step import Step
6
7
8 class PointEnv(gym.Env):
9 """A simple 2D point environment.
10
11 Attributes:
12 observation_space (gym.spaces.Box): The observation space
13 action_space (gym.spaces.Box): The action space
14
15 Args:
16 goal (np.ndarray): A 2D array representing the goal position
17 arena_size (float): The size of arena where the point is constrained
18 within (-arena_size, arena_size) in each dimension
19 done_bonus (float): A numerical bonus added to the reward
20 once the point as reached the goal
21 never_done (bool): Never send a `done` signal, even if the
22 agent achieves the goal
23
24 """
25
26 def __init__(
27 self,
28 goal=np.array((1., 1.), dtype=np.float32),
29 arena_size=5.,
30 done_bonus=0.,
31 never_done=False,
32 ):
33 goal = np.array(goal, dtype=np.float32)
34 self._goal = goal
35 self._done_bonus = done_bonus
36 self._never_done = never_done
37 self._arena_size = arena_size
38
39 assert ((goal >= -arena_size) & (goal <= arena_size)).all()
40
41 self._point = np.zeros_like(self._goal)
42 self._task = {'goal': self._goal}
43 self._observation_space = gym.spaces.Box(low=-np.inf,
44 high=np.inf,
45 shape=(3, ),
46 dtype=np.float32)
47 self._action_space = gym.spaces.Box(low=-0.1,
48 high=0.1,
49 shape=(2, ),
50 dtype=np.float32)
51
52 @property
53 def observation_space(self):
54 """gym.spaces.Box: The observation space."""
55 return self._observation_space
56
57 @property
58 def action_space(self):
59 """gym.spaces.Box: The action space."""
60 return self._action_space
61
62 def reset(self):
63 """Reset the environment.
64
65 Returns:
66 np.ndarray: Observation of the environment.
67
68 """
69 self._point = np.zeros_like(self._goal)
70 dist = np.linalg.norm(self._point - self._goal)
71 return np.concatenate([self._point, (dist, )])
72
73 def step(self, action):
74 """Step the environment state.
75
76 Args:
77 action (np.ndarray): The action to take in the environment.
78
79 Returns:
80 np.ndarray: Observation. The observation of the environment.
81 float: Reward. The reward acquired at this time step.
82 boolean: Done. Whether the environment was completed at this
83 time step. Always False for this environment.
84
85 """
86 # enforce action space
87 a = action.copy() # NOTE: we MUST copy the action before modifying it
88 a = np.clip(a, self.action_space.low, self.action_space.high)
89
90 self._point = np.clip(self._point + a, -self._arena_size,
91 self._arena_size)
92 dist = np.linalg.norm(self._point - self._goal)
93 succ = dist < np.linalg.norm(self.action_space.low)
94
95 # dense reward
96 reward = -dist
97 # done bonus
98 if succ:
99 reward += self._done_bonus
100
101 # sometimes we don't want to terminate
102 done = succ and not self._never_done
103
104 obs = np.concatenate([self._point, (dist, )])
105
106 return Step(obs, reward, done, task=self._task, success=succ)
107
108 def render(self, mode='human'):
109 """Draw the environment.
110
111 Not implemented.
112
113 Args:
114 mode (str): Ignored.
115
116 """
117 # pylint: disable=no-self-use
118
119 def sample_tasks(self, num_tasks):
120 """Sample a list of `num_tasks` tasks.
121
122 Args:
123 num_tasks (int): Number of tasks to sample.
124
125 Returns:
126 list[dict[str, np.ndarray]]: A list of "tasks", where each task is
127 a dictionary containing a single key, "goal", mapping to a
128 point in 2D space.
129
130 """
131 goals = np.random.uniform(-2, 2, size=(num_tasks, 2))
132 tasks = [{'goal': goal} for goal in goals]
133 return tasks
134
135 def set_task(self, task):
136 """Reset with a task.
137
138 Args:
139 task (dict[str, np.ndarray]): A task (a dictionary containing a
140 single key, "goal", which should be a point in 2D space).
141
142 """
143 self._task = task
144 self._goal = task['goal']
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/garage/envs/point_env.py b/src/garage/envs/point_env.py
--- a/src/garage/envs/point_env.py
+++ b/src/garage/envs/point_env.py
@@ -24,11 +24,11 @@
"""
def __init__(
- self,
- goal=np.array((1., 1.), dtype=np.float32),
- arena_size=5.,
- done_bonus=0.,
- never_done=False,
+ self,
+ goal=np.array((1., 1.), dtype=np.float32),
+ arena_size=5.,
+ done_bonus=0.,
+ never_done=False,
):
goal = np.array(goal, dtype=np.float32)
self._goal = goal
| {"golden_diff": "diff --git a/src/garage/envs/point_env.py b/src/garage/envs/point_env.py\n--- a/src/garage/envs/point_env.py\n+++ b/src/garage/envs/point_env.py\n@@ -24,11 +24,11 @@\n \"\"\"\n \n def __init__(\n- self,\n- goal=np.array((1., 1.), dtype=np.float32),\n- arena_size=5.,\n- done_bonus=0.,\n- never_done=False,\n+ self,\n+ goal=np.array((1., 1.), dtype=np.float32),\n+ arena_size=5.,\n+ done_bonus=0.,\n+ never_done=False,\n ):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n", "issue": "Docs page \"Ensure your experiments are reproducible\"\nSee #1426 \n", "before_files": [{"content": "\"\"\"Simple 2D environment containing a point and a goal location.\"\"\"\nimport gym\nimport numpy as np\n\nfrom garage.envs.step import Step\n\n\nclass PointEnv(gym.Env):\n \"\"\"A simple 2D point environment.\n\n Attributes:\n observation_space (gym.spaces.Box): The observation space\n action_space (gym.spaces.Box): The action space\n\n Args:\n goal (np.ndarray): A 2D array representing the goal position\n arena_size (float): The size of arena where the point is constrained\n within (-arena_size, arena_size) in each dimension\n done_bonus (float): A numerical bonus added to the reward\n once the point as reached the goal\n never_done (bool): Never send a `done` signal, even if the\n agent achieves the goal\n\n \"\"\"\n\n def __init__(\n self,\n goal=np.array((1., 1.), dtype=np.float32),\n arena_size=5.,\n done_bonus=0.,\n never_done=False,\n ):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n self._done_bonus = done_bonus\n self._never_done = never_done\n self._arena_size = arena_size\n\n assert ((goal >= -arena_size) & (goal <= arena_size)).all()\n\n self._point = np.zeros_like(self._goal)\n self._task = {'goal': self._goal}\n self._observation_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(3, ),\n dtype=np.float32)\n self._action_space = gym.spaces.Box(low=-0.1,\n high=0.1,\n shape=(2, ),\n dtype=np.float32)\n\n @property\n def observation_space(self):\n \"\"\"gym.spaces.Box: The observation space.\"\"\"\n return self._observation_space\n\n @property\n def action_space(self):\n \"\"\"gym.spaces.Box: The action space.\"\"\"\n return self._action_space\n\n def reset(self):\n \"\"\"Reset the environment.\n\n Returns:\n np.ndarray: Observation of the environment.\n\n \"\"\"\n self._point = np.zeros_like(self._goal)\n dist = np.linalg.norm(self._point - self._goal)\n return np.concatenate([self._point, (dist, )])\n\n def step(self, action):\n \"\"\"Step the environment state.\n\n Args:\n action (np.ndarray): The action to take in the environment.\n\n Returns:\n np.ndarray: Observation. The observation of the environment.\n float: Reward. The reward acquired at this time step.\n boolean: Done. Whether the environment was completed at this\n time step. 
Always False for this environment.\n\n \"\"\"\n # enforce action space\n a = action.copy() # NOTE: we MUST copy the action before modifying it\n a = np.clip(a, self.action_space.low, self.action_space.high)\n\n self._point = np.clip(self._point + a, -self._arena_size,\n self._arena_size)\n dist = np.linalg.norm(self._point - self._goal)\n succ = dist < np.linalg.norm(self.action_space.low)\n\n # dense reward\n reward = -dist\n # done bonus\n if succ:\n reward += self._done_bonus\n\n # sometimes we don't want to terminate\n done = succ and not self._never_done\n\n obs = np.concatenate([self._point, (dist, )])\n\n return Step(obs, reward, done, task=self._task, success=succ)\n\n def render(self, mode='human'):\n \"\"\"Draw the environment.\n\n Not implemented.\n\n Args:\n mode (str): Ignored.\n\n \"\"\"\n # pylint: disable=no-self-use\n\n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n tasks = [{'goal': goal} for goal in goals]\n return tasks\n\n def set_task(self, task):\n \"\"\"Reset with a task.\n\n Args:\n task (dict[str, np.ndarray]): A task (a dictionary containing a\n single key, \"goal\", which should be a point in 2D space).\n\n \"\"\"\n self._task = task\n self._goal = task['goal']\n", "path": "src/garage/envs/point_env.py"}], "after_files": [{"content": "\"\"\"Simple 2D environment containing a point and a goal location.\"\"\"\nimport gym\nimport numpy as np\n\nfrom garage.envs.step import Step\n\n\nclass PointEnv(gym.Env):\n \"\"\"A simple 2D point environment.\n\n Attributes:\n observation_space (gym.spaces.Box): The observation space\n action_space (gym.spaces.Box): The action space\n\n Args:\n goal (np.ndarray): A 2D array representing the goal position\n arena_size (float): The size of arena where the point is constrained\n within (-arena_size, arena_size) in each dimension\n done_bonus (float): A numerical bonus added to the reward\n once the point as reached the goal\n never_done (bool): Never send a `done` signal, even if the\n agent achieves the goal\n\n \"\"\"\n\n def __init__(\n self,\n goal=np.array((1., 1.), dtype=np.float32),\n arena_size=5.,\n done_bonus=0.,\n never_done=False,\n ):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n self._done_bonus = done_bonus\n self._never_done = never_done\n self._arena_size = arena_size\n\n assert ((goal >= -arena_size) & (goal <= arena_size)).all()\n\n self._point = np.zeros_like(self._goal)\n self._task = {'goal': self._goal}\n self._observation_space = gym.spaces.Box(low=-np.inf,\n high=np.inf,\n shape=(3, ),\n dtype=np.float32)\n self._action_space = gym.spaces.Box(low=-0.1,\n high=0.1,\n shape=(2, ),\n dtype=np.float32)\n\n @property\n def observation_space(self):\n \"\"\"gym.spaces.Box: The observation space.\"\"\"\n return self._observation_space\n\n @property\n def action_space(self):\n \"\"\"gym.spaces.Box: The action space.\"\"\"\n return self._action_space\n\n def reset(self):\n \"\"\"Reset the environment.\n\n Returns:\n np.ndarray: Observation of the environment.\n\n \"\"\"\n self._point = np.zeros_like(self._goal)\n dist = np.linalg.norm(self._point - self._goal)\n return np.concatenate([self._point, (dist, )])\n\n def step(self, action):\n \"\"\"Step the environment state.\n\n 
Args:\n action (np.ndarray): The action to take in the environment.\n\n Returns:\n np.ndarray: Observation. The observation of the environment.\n float: Reward. The reward acquired at this time step.\n boolean: Done. Whether the environment was completed at this\n time step. Always False for this environment.\n\n \"\"\"\n # enforce action space\n a = action.copy() # NOTE: we MUST copy the action before modifying it\n a = np.clip(a, self.action_space.low, self.action_space.high)\n\n self._point = np.clip(self._point + a, -self._arena_size,\n self._arena_size)\n dist = np.linalg.norm(self._point - self._goal)\n succ = dist < np.linalg.norm(self.action_space.low)\n\n # dense reward\n reward = -dist\n # done bonus\n if succ:\n reward += self._done_bonus\n\n # sometimes we don't want to terminate\n done = succ and not self._never_done\n\n obs = np.concatenate([self._point, (dist, )])\n\n return Step(obs, reward, done, task=self._task, success=succ)\n\n def render(self, mode='human'):\n \"\"\"Draw the environment.\n\n Not implemented.\n\n Args:\n mode (str): Ignored.\n\n \"\"\"\n # pylint: disable=no-self-use\n\n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n tasks = [{'goal': goal} for goal in goals]\n return tasks\n\n def set_task(self, task):\n \"\"\"Reset with a task.\n\n Args:\n task (dict[str, np.ndarray]): A task (a dictionary containing a\n single key, \"goal\", which should be a point in 2D space).\n\n \"\"\"\n self._task = task\n self._goal = task['goal']\n", "path": "src/garage/envs/point_env.py"}]} | 1,635 | 178 |
gh_patches_debug_1666 | rasdani/github-patches | git_diff | OpenMined__PySyft-2278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Splitting up serde broke serialization for multipointers
**Describe the bug**
```
@staticmethod
def simplify(tensor: "MultiPointerTensor") -> tuple:
"""
This function takes the attributes of a MultiPointerTensor and saves them in a tuple
Args:
tensor (MultiPointerTensor): a MultiPointerTensor
Returns:
tuple: a tuple holding the unique attributes of the additive shared tensor
Examples:
data = simplify(tensor)
"""
chain = None
if hasattr(tensor, "child"):
> chain = sy.serde.simplify(tensor.child)
E AttributeError: module 'syft.serde' has no attribute 'simplify'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/frameworks/torch/tensors/interpreters/multi_pointer.py`
Content:
```
1 import torch
2 from typing import List
3 from typing import Union
4
5 import syft as sy
6 from syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor
7 from syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor
8 from syft.workers import BaseWorker
9 from syft.frameworks.torch.overload_torch import overloaded
10
11 from syft.workers import AbstractWorker
12
13
14 class MultiPointerTensor(AbstractTensor):
15 ""
16
17 def __init__(
18 self,
19 location: BaseWorker = None,
20 id_at_location: Union[str, int] = None,
21 register: bool = False,
22 owner: BaseWorker = None,
23 id: Union[str, int] = None,
24 garbage_collect_data: bool = True,
25 point_to_attr: str = None,
26 tags: List[str] = None,
27 description: str = None,
28 children: List[AbstractTensor] = [],
29 ):
30
31 super().__init__(tags, description)
32
33 self.location = location
34 self.id_at_location = id_at_location
35 self.owner = owner
36 self.id = id
37 self.garbage_collect_data = garbage_collect_data
38 self.point_to_attr = point_to_attr
39
40 self.child = {}
41 for c in children:
42 assert c.shape == children[0].shape
43 self.child[c.location.id] = c
44
45 def __str__(self):
46 type_name = type(self).__name__
47 out = f"[" f"{type_name}]"
48 for v in self.child.values():
49 out += "\n\t-> " + str(v)
50 return out
51
52 def __eq__(self, other):
53 return torch.eq(self, other)
54
55 def __add__(self, other):
56 """
57 Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an
58 AdditiveShared Tensor, so if we have this configuration, we permute self and
59 other to use the fact that other.__add__(...) return an object of type other
60
61 Else, we just redirect to .add which works well
62 """
63 if isinstance(other, AdditiveSharingTensor):
64 return other.__add__(self)
65 else:
66 return self.add(other)
67
68 def __mul__(self, other):
69 """
70 See __add__ for details but, MPT * AST should return AST
71 """
72 if isinstance(other, AdditiveSharingTensor):
73 return other.__mul__(self)
74 else:
75 return self.mul(other)
76
77 @property
78 def shape(self) -> torch.Size:
79 """This method returns the shape of the data being pointed to.
80 This shape information SHOULD be cached on self._shape, but
81 occasionally this information may not be present. If this is the
82 case, then it requests the shape information from the remote object
83 directly (which is inefficient and should be avoided)."""
84
85 return list(self.child.values())[0].shape
86
87 def dim(self) -> int:
88 """This method fixes the error that the result of dim was a list of ints
89 stored inside a multipointer tensor"""
90
91 return len(self.shape)
92
93 def get(self, sum_results: bool = False) -> torch.Tensor:
94
95 results = list()
96 for v in self.child.values():
97 results.append(v.get())
98
99 if sum_results:
100 return sum(results)
101
102 return results
103
104 def virtual_get(self, sum_results: bool = False):
105 """Get the value of the tensor without calling get - Only for VirtualWorkers"""
106
107 results = list()
108 for v in self.child.values():
109 value = v.location._objects[v.id_at_location]
110 results.append(value)
111
112 if sum_results:
113 return sum(results)
114
115 return results
116
117 @staticmethod
118 def dispatch(args, worker):
119 """
120 utility function for handle_func_command which help to select
121 shares (seen as elements of dict) in an argument set. It could
122 perhaps be put elsewhere
123
124 Args:
125 args: arguments to give to a functions
126 worker: owner of the shares to select
127
128 Return:
129 args where the MultiPointerTensor are replaced by
130 the appropriate share
131 """
132 return map(lambda x: x[worker] if isinstance(x, dict) else x, args)
133
134 @classmethod
135 def handle_func_command(cls, command):
136 """
137 Receive an instruction for a function to be applied on a Syft Tensor,
138 Replace in the args all the LogTensors with
139 their child attribute, forward the command instruction to the
140 handle_function_command of the type of the child attributes, get the
141 response and replace a Syft Tensor on top of all tensors found in
142 the response.
143
144 Args:
145 command: instruction of a function command: (command name,
146 <no self>, arguments[, kwargs])
147
148 Returns:
149 the response of the function command
150 """
151
152 cmd, _, args, kwargs = command
153
154 tensor = args[0]
155
156 # Check that the function has not been overwritten
157 try:
158 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
159 cmd = cls.rgetattr(cls, cmd)
160 return cmd(*args, **kwargs)
161 except AttributeError:
162 pass
163
164 # TODO: I can't manage the import issue, can you?
165 # Replace all LoggingTensor with their child attribute
166 new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(
167 cmd, args, kwargs
168 )
169
170 results = {}
171 for worker, share in new_args[0].items():
172 new_type = type(share)
173 new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))
174
175 # build the new command
176 new_command = (cmd, None, new_args_worker, new_kwargs)
177
178 # Send it to the appropriate class and get the response
179 results[worker] = new_type.handle_func_command(new_command)
180
181 # Put back MultiPointerTensor on the tensors found in the response
182 response = sy.frameworks.torch.hook_args.hook_response(
183 cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()
184 )
185
186 return response
187
188 def set_garbage_collect_data(self, value):
189 shares = self.child
190 for _, share in shares.items():
191 share.child.garbage_collect_data = value
192
193 @staticmethod
194 def simplify(tensor: "MultiPointerTensor") -> tuple:
195 """
196 This function takes the attributes of a MultiPointerTensor and saves them in a tuple
197 Args:
198 tensor (MultiPointerTensor): a MultiPointerTensor
199 Returns:
200 tuple: a tuple holding the unique attributes of the additive shared tensor
201 Examples:
202 data = simplify(tensor)
203 """
204
205 chain = None
206 if hasattr(tensor, "child"):
207 chain = sy.serde.simplify(tensor.child)
208 return (tensor.id, chain)
209
210 @staticmethod
211 def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "MultiPointerTensor":
212 """
213 This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.
214 Args:
215 worker: the worker doing the deserialization
216 tensor_tuple: a tuple holding the attributes of the MultiPointerTensor
217 Returns:
218 MultiPointerTensor: a MultiPointerTensor
219 Examples:
220 multi_pointer_tensor = detail(data)
221 """
222
223 tensor_id, chain = tensor_tuple
224
225 tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)
226
227 if chain is not None:
228 chain = sy.serde._detail(worker, chain)
229 tensor.child = chain
230
231 return tensor
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
--- a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
+++ b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py
@@ -204,7 +204,7 @@
chain = None
if hasattr(tensor, "child"):
- chain = sy.serde.simplify(tensor.child)
+ chain = sy.serde._simplify(tensor.child)
return (tensor.id, chain)
@staticmethod
| {"golden_diff": "diff --git a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n--- a/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n+++ b/syft/frameworks/torch/tensors/interpreters/multi_pointer.py\n@@ -204,7 +204,7 @@\n \n chain = None\n if hasattr(tensor, \"child\"):\n- chain = sy.serde.simplify(tensor.child)\n+ chain = sy.serde._simplify(tensor.child)\n return (tensor.id, chain)\n \n @staticmethod\n", "issue": "Splitting up serde broke serialization for multipointers\n**Describe the bug**\r\n```\r\n @staticmethod\r\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\r\n \"\"\"\r\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\r\n Args:\r\n tensor (MultiPointerTensor): a MultiPointerTensor\r\n Returns:\r\n tuple: a tuple holding the unique attributes of the additive shared tensor\r\n Examples:\r\n data = simplify(tensor)\r\n \"\"\"\r\n\r\n chain = None\r\n if hasattr(tensor, \"child\"):\r\n> chain = sy.serde.simplify(tensor.child)\r\nE AttributeError: module 'syft.serde' has no attribute 'simplify'\r\n```\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. 
If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def dim(self) -> int:\n \"\"\"This method fixes the error that the result of dim was a list of ints\n stored inside a multipointer tensor\"\"\"\n\n return len(self.shape)\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n 
\"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde.simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py"}], "after_files": [{"content": "import torch\nfrom typing import List\nfrom typing import Union\n\nimport syft as sy\nfrom syft.frameworks.torch.tensors.interpreters.abstract import AbstractTensor\nfrom syft.frameworks.torch.tensors.interpreters import AdditiveSharingTensor\nfrom syft.workers import BaseWorker\nfrom syft.frameworks.torch.overload_torch import overloaded\n\nfrom syft.workers import AbstractWorker\n\n\nclass MultiPointerTensor(AbstractTensor):\n \"\"\n\n def __init__(\n self,\n location: BaseWorker = None,\n id_at_location: Union[str, int] = None,\n register: bool = False,\n owner: BaseWorker = None,\n id: Union[str, int] = None,\n garbage_collect_data: bool = True,\n point_to_attr: str = None,\n tags: List[str] = None,\n description: str = None,\n children: List[AbstractTensor] = [],\n ):\n\n super().__init__(tags, description)\n\n self.location = location\n self.id_at_location = id_at_location\n self.owner = owner\n self.id = id\n self.garbage_collect_data = garbage_collect_data\n self.point_to_attr = point_to_attr\n\n self.child = {}\n for c in children:\n assert c.shape == children[0].shape\n self.child[c.location.id] = c\n\n def __str__(self):\n type_name = type(self).__name__\n out = f\"[\" f\"{type_name}]\"\n for v in self.child.values():\n out += \"\\n\\t-> \" + str(v)\n return out\n\n def __eq__(self, other):\n return torch.eq(self, other)\n\n def __add__(self, other):\n \"\"\"\n Adding a MultiPointer (MPT) and an AdditiveShared Tensor (AST) should return an\n AdditiveShared Tensor, so if we have this configuration, we permute self and\n other to use the fact that other.__add__(...) return an object of type other\n\n Else, we just redirect to .add which works well\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__add__(self)\n else:\n return self.add(other)\n\n def __mul__(self, other):\n \"\"\"\n See __add__ for details but, MPT * AST should return AST\n \"\"\"\n if isinstance(other, AdditiveSharingTensor):\n return other.__mul__(self)\n else:\n return self.mul(other)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"This method returns the shape of the data being pointed to.\n This shape information SHOULD be cached on self._shape, but\n occasionally this information may not be present. 
If this is the\n case, then it requests the shape information from the remote object\n directly (which is inefficient and should be avoided).\"\"\"\n\n return list(self.child.values())[0].shape\n\n def dim(self) -> int:\n \"\"\"This method fixes the error that the result of dim was a list of ints\n stored inside a multipointer tensor\"\"\"\n\n return len(self.shape)\n\n def get(self, sum_results: bool = False) -> torch.Tensor:\n\n results = list()\n for v in self.child.values():\n results.append(v.get())\n\n if sum_results:\n return sum(results)\n\n return results\n\n def virtual_get(self, sum_results: bool = False):\n \"\"\"Get the value of the tensor without calling get - Only for VirtualWorkers\"\"\"\n\n results = list()\n for v in self.child.values():\n value = v.location._objects[v.id_at_location]\n results.append(value)\n\n if sum_results:\n return sum(results)\n\n return results\n\n @staticmethod\n def dispatch(args, worker):\n \"\"\"\n utility function for handle_func_command which help to select\n shares (seen as elements of dict) in an argument set. It could\n perhaps be put elsewhere\n\n Args:\n args: arguments to give to a functions\n worker: owner of the shares to select\n\n Return:\n args where the MultiPointerTensor are replaced by\n the appropriate share\n \"\"\"\n return map(lambda x: x[worker] if isinstance(x, dict) else x, args)\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a Syft Tensor,\n Replace in the args all the LogTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a Syft Tensor on top of all tensors found in\n the response.\n\n Args:\n command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs])\n\n Returns:\n the response of the function command\n \"\"\"\n\n cmd, _, args, kwargs = command\n\n tensor = args[0]\n\n # Check that the function has not been overwritten\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd)\n return cmd(*args, **kwargs)\n except AttributeError:\n pass\n\n # TODO: I can't manage the import issue, can you?\n # Replace all LoggingTensor with their child attribute\n new_args, new_kwargs, new_type = sy.frameworks.torch.hook_args.hook_function_args(\n cmd, args, kwargs\n )\n\n results = {}\n for worker, share in new_args[0].items():\n new_type = type(share)\n new_args_worker = tuple(MultiPointerTensor.dispatch(new_args, worker))\n\n # build the new command\n new_command = (cmd, None, new_args_worker, new_kwargs)\n\n # Send it to the appropriate class and get the response\n results[worker] = new_type.handle_func_command(new_command)\n\n # Put back MultiPointerTensor on the tensors found in the response\n response = sy.frameworks.torch.hook_args.hook_response(\n cmd, results, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n return response\n\n def set_garbage_collect_data(self, value):\n shares = self.child\n for _, share in shares.items():\n share.child.garbage_collect_data = value\n\n @staticmethod\n def simplify(tensor: \"MultiPointerTensor\") -> tuple:\n \"\"\"\n This function takes the attributes of a MultiPointerTensor and saves them in a tuple\n Args:\n tensor (MultiPointerTensor): a MultiPointerTensor\n Returns:\n tuple: a tuple holding the unique attributes of the additive shared tensor\n Examples:\n data = simplify(tensor)\n 
\"\"\"\n\n chain = None\n if hasattr(tensor, \"child\"):\n chain = sy.serde._simplify(tensor.child)\n return (tensor.id, chain)\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"MultiPointerTensor\":\n \"\"\"\n This function reconstructs a MultiPointerTensor given it's attributes in form of a tuple.\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the MultiPointerTensor\n Returns:\n MultiPointerTensor: a MultiPointerTensor\n Examples:\n multi_pointer_tensor = detail(data)\n \"\"\"\n\n tensor_id, chain = tensor_tuple\n\n tensor = sy.MultiPointerTensor(owner=worker, id=tensor_id)\n\n if chain is not None:\n chain = sy.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n", "path": "syft/frameworks/torch/tensors/interpreters/multi_pointer.py"}]} | 2,660 | 142 |
gh_patches_debug_7303 | rasdani/github-patches | git_diff | sanic-org__sanic-1137 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
restrictive call to handlers from sanic.handlers.ErrorHandler.response
`sanic.handlers.ErrorHandler.response` use keywords arguments on handler call and so force the signature of the handler
Consider this very basic application:
```python
from sanic import Sanic
from sanic.exceptions import SanicException
from sanic.response import text
app = Sanic()
@app.exception(SanicException)
def http_error_handler(request, exception):
return text(":(")
app.run()
```
it works as intended:
$ python server.py
[2018-02-21 00:35:04 +0100] [23560] [INFO] Goin' Fast @ http://127.0.0.1:8000
[2018-02-21 00:35:04 +0100] [23560] [INFO] Starting worker [23560]
[2018-02-21 00:35:16 +0100] - (sanic.access)[INFO][1:2]: GET http://127.0.0.1:8000/ 200 2
[2018-02-21 00:36:01 +0100] [23560] [INFO] Stopping worker [23560]
[2018-02-21 00:36:01 +0100] [23560] [INFO] Server Stopped
But you cannot change the argument names:
```python
@app.exception(SanicException)
def http_error_handler(req, exc):
return text(":(")
```
because `ErrorHandler.response` uses keyword arguments:
$ python server.py
[2018-02-21 00:36:02 +0100] [23608] [INFO] Goin' Fast @ http://127.0.0.1:8000
[2018-02-21 00:36:02 +0100] [23608] [INFO] Starting worker [23608]
[2018-02-21 00:36:05 +0100] [23608] [ERROR] Traceback (most recent call last):
File "[..]/sanic/app.py", line 546, in handle_request
handler, args, kwargs, uri = self.router.get(request)
File "[..]/sanic/router.py", line 344, in get
return self._get(request.path, request.method, '')
File "[..]/sanic/router.py", line 393, in _get
raise NotFound('Requested URL {} not found'.format(url))
sanic.exceptions.NotFound: Requested URL / not found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "[..]/sanic/handlers.py", line 82, in response
response = handler(request=request, exception=exception)
TypeError: http_error_handler() got an unexpected keyword argument 'request'
[2018-02-21 00:36:05 +0100] - (sanic.access)[INFO][1:2]: GET http://127.0.0.1:8000/ 500 41
[2018-02-21 00:36:10 +0100] [23608] [INFO] KeepAlive Timeout. Closing connection.
[2018-02-21 00:38:27 +0100] [23608] [INFO] Stopping worker [23608]
[2018-02-21 00:38:27 +0100] [23608] [INFO] Server Stopped
A pull request is coming shortly to use positional arguments instead of keywords in `ErrorHandler.response`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/handlers.py`
Content:
```
1 import sys
2 from traceback import format_exc, extract_tb
3
4 from sanic.exceptions import (
5 ContentRangeError,
6 HeaderNotFound,
7 INTERNAL_SERVER_ERROR_HTML,
8 InvalidRangeType,
9 SanicException,
10 TRACEBACK_LINE_HTML,
11 TRACEBACK_STYLE,
12 TRACEBACK_WRAPPER_HTML,
13 TRACEBACK_WRAPPER_INNER_HTML,
14 TRACEBACK_BORDER)
15 from sanic.log import logger
16 from sanic.response import text, html
17
18
19 class ErrorHandler:
20 handlers = None
21 cached_handlers = None
22 _missing = object()
23
24 def __init__(self):
25 self.handlers = []
26 self.cached_handlers = {}
27 self.debug = False
28
29 def _render_exception(self, exception):
30 frames = extract_tb(exception.__traceback__)
31
32 frame_html = []
33 for frame in frames:
34 frame_html.append(TRACEBACK_LINE_HTML.format(frame))
35
36 return TRACEBACK_WRAPPER_INNER_HTML.format(
37 exc_name=exception.__class__.__name__,
38 exc_value=exception,
39 frame_html=''.join(frame_html))
40
41 def _render_traceback_html(self, exception, request):
42 exc_type, exc_value, tb = sys.exc_info()
43 exceptions = []
44
45 while exc_value:
46 exceptions.append(self._render_exception(exc_value))
47 exc_value = exc_value.__cause__
48
49 return TRACEBACK_WRAPPER_HTML.format(
50 style=TRACEBACK_STYLE,
51 exc_name=exception.__class__.__name__,
52 exc_value=exception,
53 inner_html=TRACEBACK_BORDER.join(reversed(exceptions)),
54 path=request.path)
55
56 def add(self, exception, handler):
57 self.handlers.append((exception, handler))
58
59 def lookup(self, exception):
60 handler = self.cached_handlers.get(exception, self._missing)
61 if handler is self._missing:
62 for exception_class, handler in self.handlers:
63 if isinstance(exception, exception_class):
64 self.cached_handlers[type(exception)] = handler
65 return handler
66 self.cached_handlers[type(exception)] = None
67 handler = None
68 return handler
69
70 def response(self, request, exception):
71 """Fetches and executes an exception handler and returns a response
72 object
73
74 :param request: Request
75 :param exception: Exception to handle
76 :return: Response object
77 """
78 handler = self.lookup(exception)
79 response = None
80 try:
81 if handler:
82 response = handler(request=request, exception=exception)
83 if response is None:
84 response = self.default(request=request, exception=exception)
85 except Exception:
86 self.log(format_exc())
87 if self.debug:
88 url = getattr(request, 'url', 'unknown')
89 response_message = ('Exception raised in exception handler '
90 '"%s" for uri: "%s"\n%s')
91 logger.error(response_message,
92 handler.__name__, url, format_exc())
93
94 return text(response_message % (
95 handler.__name__, url, format_exc()), 500)
96 else:
97 return text('An error occurred while handling an error', 500)
98 return response
99
100 def log(self, message, level='error'):
101 """
102 Override this method in an ErrorHandler subclass to prevent
103 logging exceptions.
104 """
105 getattr(logger, level)(message)
106
107 def default(self, request, exception):
108 self.log(format_exc())
109 if issubclass(type(exception), SanicException):
110 return text(
111 'Error: {}'.format(exception),
112 status=getattr(exception, 'status_code', 500),
113 headers=getattr(exception, 'headers', dict())
114 )
115 elif self.debug:
116 html_output = self._render_traceback_html(exception, request)
117
118 response_message = ('Exception occurred while handling uri: '
119 '"%s"\n%s')
120 logger.error(response_message, request.url, format_exc())
121 return html(html_output, status=500)
122 else:
123 return html(INTERNAL_SERVER_ERROR_HTML, status=500)
124
125
126 class ContentRangeHandler:
127 """Class responsible for parsing request header"""
128 __slots__ = ('start', 'end', 'size', 'total', 'headers')
129
130 def __init__(self, request, stats):
131 self.total = stats.st_size
132 _range = request.headers.get('Range')
133 if _range is None:
134 raise HeaderNotFound('Range Header Not Found')
135 unit, _, value = tuple(map(str.strip, _range.partition('=')))
136 if unit != 'bytes':
137 raise InvalidRangeType(
138 '%s is not a valid Range Type' % (unit,), self)
139 start_b, _, end_b = tuple(map(str.strip, value.partition('-')))
140 try:
141 self.start = int(start_b) if start_b else None
142 except ValueError:
143 raise ContentRangeError(
144 '\'%s\' is invalid for Content Range' % (start_b,), self)
145 try:
146 self.end = int(end_b) if end_b else None
147 except ValueError:
148 raise ContentRangeError(
149 '\'%s\' is invalid for Content Range' % (end_b,), self)
150 if self.end is None:
151 if self.start is None:
152 raise ContentRangeError(
153 'Invalid for Content Range parameters', self)
154 else:
155 # this case represents `Content-Range: bytes 5-`
156 self.end = self.total
157 else:
158 if self.start is None:
159 # this case represents `Content-Range: bytes -5`
160 self.start = self.total - self.end
161 self.end = self.total
162 if self.start >= self.end:
163 raise ContentRangeError(
164 'Invalid for Content Range parameters', self)
165 self.size = self.end - self.start
166 self.headers = {
167 'Content-Range': "bytes %s-%s/%s" % (
168 self.start, self.end, self.total)}
169
170 def __bool__(self):
171 return self.size > 0
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/handlers.py b/sanic/handlers.py
--- a/sanic/handlers.py
+++ b/sanic/handlers.py
@@ -79,9 +79,9 @@
response = None
try:
if handler:
- response = handler(request=request, exception=exception)
+ response = handler(request, exception)
if response is None:
- response = self.default(request=request, exception=exception)
+ response = self.default(request, exception)
except Exception:
self.log(format_exc())
if self.debug:
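
Editor's note on the record above: a minimal sketch of what the accepted fix enables. Once `ErrorHandler.response` passes the request and exception positionally, an exception handler may use any parameter names. The snippet mirrors the application shown in the issue and assumes the same Sanic release with this patch applied; it is an illustration, not code from the Sanic repository.

```python
# Hedged sketch: handler with renamed parameters, valid once ErrorHandler.response
# calls handler(request, exception) positionally instead of by keyword.
from sanic import Sanic
from sanic.exceptions import SanicException
from sanic.response import text

app = Sanic()


@app.exception(SanicException)
def http_error_handler(req, exc):  # names no longer have to be request/exception
    return text(":(")


if __name__ == "__main__":
    app.run()
```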
| {"golden_diff": "diff --git a/sanic/handlers.py b/sanic/handlers.py\n--- a/sanic/handlers.py\n+++ b/sanic/handlers.py\n@@ -79,9 +79,9 @@\n response = None\n try:\n if handler:\n- response = handler(request=request, exception=exception)\n+ response = handler(request, exception)\n if response is None:\n- response = self.default(request=request, exception=exception)\n+ response = self.default(request, exception)\n except Exception:\n self.log(format_exc())\n if self.debug:\n", "issue": "restrictive call to handlers from sanic.handlers.ErrorHandler.response\n`sanic.handlers.ErrorHandler.response` use keywords arguments on handler call and so force the signature of the handler\r\n\r\nConsider this very basic application:\r\n\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.exceptions import SanicException\r\nfrom sanic.response import text\r\n\r\napp = Sanic()\r\n\r\[email protected](SanicException)\r\ndef http_error_handler(request, exception):\r\n\treturn text(\":(\")\r\n\r\napp.run()\r\n```\r\n\r\nit works as intended:\r\n\r\n\t$ python server.py \r\n\t[2018-02-21 00:35:04 +0100] [23560] [INFO] Goin' Fast @ http://127.0.0.1:8000\r\n\t[2018-02-21 00:35:04 +0100] [23560] [INFO] Starting worker [23560]\r\n\t[2018-02-21 00:35:16 +0100] - (sanic.access)[INFO][1:2]: GET http://127.0.0.1:8000/ 200 2\r\n\t[2018-02-21 00:36:01 +0100] [23560] [INFO] Stopping worker [23560]\r\n\t[2018-02-21 00:36:01 +0100] [23560] [INFO] Server Stopped\r\n\r\nBut you cannot change the argument names:\r\n\r\n```python\r\[email protected](SanicException)\r\ndef http_error_handler(req, exc):\r\n\treturn text(\":(\")\r\n```\r\n\r\nbecause `ErrorHandler.response` use keyword arguments:\r\n\r\n\t$ python server.py \r\n\t[2018-02-21 00:36:02 +0100] [23608] [INFO] Goin' Fast @ http://127.0.0.1:8000\r\n\t[2018-02-21 00:36:02 +0100] [23608] [INFO] Starting worker [23608]\r\n\t[2018-02-21 00:36:05 +0100] [23608] [ERROR] Traceback (most recent call last):\r\n\t File \"[..]/sanic/app.py\", line 546, in handle_request\r\n\t handler, args, kwargs, uri = self.router.get(request)\r\n\t File \"[..]/sanic/router.py\", line 344, in get\r\n\t return self._get(request.path, request.method, '')\r\n\t File \"[..]/sanic/router.py\", line 393, in _get\r\n\t raise NotFound('Requested URL {} not found'.format(url))\r\n\tsanic.exceptions.NotFound: Requested URL / not found\r\n\r\n\tDuring handling of the above exception, another exception occurred:\r\n\r\n\tTraceback (most recent call last):\r\n\t File \"[..]/sanic/handlers.py\", line 82, in response\r\n\t response = handler(request=request, exception=exception)\r\n\tTypeError: http_error_handler() got an unexpected keyword argument 'request'\r\n\r\n\t[2018-02-21 00:36:05 +0100] - (sanic.access)[INFO][1:2]: GET http://127.0.0.1:8000/ 500 41\r\n\t[2018-02-21 00:36:10 +0100] [23608] [INFO] KeepAlive Timeout. 
Closing connection.\r\n\t[2018-02-21 00:38:27 +0100] [23608] [INFO] Stopping worker [23608]\r\n\t[2018-02-21 00:38:27 +0100] [23608] [INFO] Server Stopped\r\n\r\n\r\nA pull request is coming shortly to use regular arguments instead of keywords in the `ErrorHandler.response`.\n", "before_files": [{"content": "import sys\nfrom traceback import format_exc, extract_tb\n\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n INTERNAL_SERVER_ERROR_HTML,\n InvalidRangeType,\n SanicException,\n TRACEBACK_LINE_HTML,\n TRACEBACK_STYLE,\n TRACEBACK_WRAPPER_HTML,\n TRACEBACK_WRAPPER_INNER_HTML,\n TRACEBACK_BORDER)\nfrom sanic.log import logger\nfrom sanic.response import text, html\n\n\nclass ErrorHandler:\n handlers = None\n cached_handlers = None\n _missing = object()\n\n def __init__(self):\n self.handlers = []\n self.cached_handlers = {}\n self.debug = False\n\n def _render_exception(self, exception):\n frames = extract_tb(exception.__traceback__)\n\n frame_html = []\n for frame in frames:\n frame_html.append(TRACEBACK_LINE_HTML.format(frame))\n\n return TRACEBACK_WRAPPER_INNER_HTML.format(\n exc_name=exception.__class__.__name__,\n exc_value=exception,\n frame_html=''.join(frame_html))\n\n def _render_traceback_html(self, exception, request):\n exc_type, exc_value, tb = sys.exc_info()\n exceptions = []\n\n while exc_value:\n exceptions.append(self._render_exception(exc_value))\n exc_value = exc_value.__cause__\n\n return TRACEBACK_WRAPPER_HTML.format(\n style=TRACEBACK_STYLE,\n exc_name=exception.__class__.__name__,\n exc_value=exception,\n inner_html=TRACEBACK_BORDER.join(reversed(exceptions)),\n path=request.path)\n\n def add(self, exception, handler):\n self.handlers.append((exception, handler))\n\n def lookup(self, exception):\n handler = self.cached_handlers.get(exception, self._missing)\n if handler is self._missing:\n for exception_class, handler in self.handlers:\n if isinstance(exception, exception_class):\n self.cached_handlers[type(exception)] = handler\n return handler\n self.cached_handlers[type(exception)] = None\n handler = None\n return handler\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Request\n :param exception: Exception to handle\n :return: Response object\n \"\"\"\n handler = self.lookup(exception)\n response = None\n try:\n if handler:\n response = handler(request=request, exception=exception)\n if response is None:\n response = self.default(request=request, exception=exception)\n except Exception:\n self.log(format_exc())\n if self.debug:\n url = getattr(request, 'url', 'unknown')\n response_message = ('Exception raised in exception handler '\n '\"%s\" for uri: \"%s\"\\n%s')\n logger.error(response_message,\n handler.__name__, url, format_exc())\n\n return text(response_message % (\n handler.__name__, url, format_exc()), 500)\n else:\n return text('An error occurred while handling an error', 500)\n return response\n\n def log(self, message, level='error'):\n \"\"\"\n Override this method in an ErrorHandler subclass to prevent\n logging exceptions.\n \"\"\"\n getattr(logger, level)(message)\n\n def default(self, request, exception):\n self.log(format_exc())\n if issubclass(type(exception), SanicException):\n return text(\n 'Error: {}'.format(exception),\n status=getattr(exception, 'status_code', 500),\n headers=getattr(exception, 'headers', dict())\n )\n elif self.debug:\n html_output = self._render_traceback_html(exception, request)\n\n response_message = 
('Exception occurred while handling uri: '\n '\"%s\"\\n%s')\n logger.error(response_message, request.url, format_exc())\n return html(html_output, status=500)\n else:\n return html(INTERNAL_SERVER_ERROR_HTML, status=500)\n\n\nclass ContentRangeHandler:\n \"\"\"Class responsible for parsing request header\"\"\"\n __slots__ = ('start', 'end', 'size', 'total', 'headers')\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.get('Range')\n if _range is None:\n raise HeaderNotFound('Range Header Not Found')\n unit, _, value = tuple(map(str.strip, _range.partition('=')))\n if unit != 'bytes':\n raise InvalidRangeType(\n '%s is not a valid Range Type' % (unit,), self)\n start_b, _, end_b = tuple(map(str.strip, value.partition('-')))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n '\\'%s\\' is invalid for Content Range' % (start_b,), self)\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n '\\'%s\\' is invalid for Content Range' % (end_b,), self)\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n 'Invalid for Content Range parameters', self)\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total\n if self.start >= self.end:\n raise ContentRangeError(\n 'Invalid for Content Range parameters', self)\n self.size = self.end - self.start\n self.headers = {\n 'Content-Range': \"bytes %s-%s/%s\" % (\n self.start, self.end, self.total)}\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}], "after_files": [{"content": "import sys\nfrom traceback import format_exc, extract_tb\n\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n INTERNAL_SERVER_ERROR_HTML,\n InvalidRangeType,\n SanicException,\n TRACEBACK_LINE_HTML,\n TRACEBACK_STYLE,\n TRACEBACK_WRAPPER_HTML,\n TRACEBACK_WRAPPER_INNER_HTML,\n TRACEBACK_BORDER)\nfrom sanic.log import logger\nfrom sanic.response import text, html\n\n\nclass ErrorHandler:\n handlers = None\n cached_handlers = None\n _missing = object()\n\n def __init__(self):\n self.handlers = []\n self.cached_handlers = {}\n self.debug = False\n\n def _render_exception(self, exception):\n frames = extract_tb(exception.__traceback__)\n\n frame_html = []\n for frame in frames:\n frame_html.append(TRACEBACK_LINE_HTML.format(frame))\n\n return TRACEBACK_WRAPPER_INNER_HTML.format(\n exc_name=exception.__class__.__name__,\n exc_value=exception,\n frame_html=''.join(frame_html))\n\n def _render_traceback_html(self, exception, request):\n exc_type, exc_value, tb = sys.exc_info()\n exceptions = []\n\n while exc_value:\n exceptions.append(self._render_exception(exc_value))\n exc_value = exc_value.__cause__\n\n return TRACEBACK_WRAPPER_HTML.format(\n style=TRACEBACK_STYLE,\n exc_name=exception.__class__.__name__,\n exc_value=exception,\n inner_html=TRACEBACK_BORDER.join(reversed(exceptions)),\n path=request.path)\n\n def add(self, exception, handler):\n self.handlers.append((exception, handler))\n\n def lookup(self, exception):\n handler = self.cached_handlers.get(exception, self._missing)\n if handler is self._missing:\n for exception_class, handler in self.handlers:\n if isinstance(exception, exception_class):\n self.cached_handlers[type(exception)] = handler\n return handler\n 
self.cached_handlers[type(exception)] = None\n handler = None\n return handler\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Request\n :param exception: Exception to handle\n :return: Response object\n \"\"\"\n handler = self.lookup(exception)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n self.log(format_exc())\n if self.debug:\n url = getattr(request, 'url', 'unknown')\n response_message = ('Exception raised in exception handler '\n '\"%s\" for uri: \"%s\"\\n%s')\n logger.error(response_message,\n handler.__name__, url, format_exc())\n\n return text(response_message % (\n handler.__name__, url, format_exc()), 500)\n else:\n return text('An error occurred while handling an error', 500)\n return response\n\n def log(self, message, level='error'):\n \"\"\"\n Override this method in an ErrorHandler subclass to prevent\n logging exceptions.\n \"\"\"\n getattr(logger, level)(message)\n\n def default(self, request, exception):\n self.log(format_exc())\n if issubclass(type(exception), SanicException):\n return text(\n 'Error: {}'.format(exception),\n status=getattr(exception, 'status_code', 500),\n headers=getattr(exception, 'headers', dict())\n )\n elif self.debug:\n html_output = self._render_traceback_html(exception, request)\n\n response_message = ('Exception occurred while handling uri: '\n '\"%s\"\\n%s')\n logger.error(response_message, request.url, format_exc())\n return html(html_output, status=500)\n else:\n return html(INTERNAL_SERVER_ERROR_HTML, status=500)\n\n\nclass ContentRangeHandler:\n \"\"\"Class responsible for parsing request header\"\"\"\n __slots__ = ('start', 'end', 'size', 'total', 'headers')\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.get('Range')\n if _range is None:\n raise HeaderNotFound('Range Header Not Found')\n unit, _, value = tuple(map(str.strip, _range.partition('=')))\n if unit != 'bytes':\n raise InvalidRangeType(\n '%s is not a valid Range Type' % (unit,), self)\n start_b, _, end_b = tuple(map(str.strip, value.partition('-')))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n '\\'%s\\' is invalid for Content Range' % (start_b,), self)\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n '\\'%s\\' is invalid for Content Range' % (end_b,), self)\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n 'Invalid for Content Range parameters', self)\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total\n if self.start >= self.end:\n raise ContentRangeError(\n 'Invalid for Content Range parameters', self)\n self.size = self.end - self.start\n self.headers = {\n 'Content-Range': \"bytes %s-%s/%s\" % (\n self.start, self.end, self.total)}\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}]} | 2,868 | 125 |
gh_patches_debug_25231 | rasdani/github-patches | git_diff | litestar-org__litestar-1286 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: `SessionCookieConfig` import DeprecationWarning - suggested import path does not exist
**Describe the bug**
In Starlite >= 1.47, the statement `from starlite.middleware.session.cookie_backend import CookieBackendConfig` raises a DeprecationWarning:
```
DeprecationWarning: Import of deprecated import 'SessionCookieConfig from starlite.middleware.session'.
Deprecated in starlite 1.47.0. This import will be removed in the next major version.
Use "'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'" instead
```
The suggested import path does not exist and contains a typo.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/middleware/session/__init__.py`
Content:
```
1 from typing import Any
2
3 from starlite.utils import warn_deprecation
4
5 from .base import SessionMiddleware
6
7
8 def __getattr__(name: str) -> Any:
9 """Provide lazy importing as per https://peps.python.org/pep-0562/"""
10
11 if name != "SessionCookieConfig":
12 raise AttributeError(f"Module {__package__} has no attribute {name}")
13
14 from .cookie_backend import CookieBackendConfig
15
16 warn_deprecation(
17 deprecated_name=f"{name} from {__package__}",
18 kind="import",
19 alternative="'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'",
20 version="1.47.0",
21 )
22
23 globals()[name] = CookieBackendConfig
24 return CookieBackendConfig
25
26
27 __all__ = ["SessionMiddleware"]
28
```
Path: `starlite/template/__init__.py`
Content:
```
1 from typing import Any
2
3 from .base import TemplateEngineProtocol, TemplateProtocol
4
5 __all__ = ("TemplateEngineProtocol", "TemplateProtocol")
6
7 from ..utils import warn_deprecation
8
9
10 def __getattr__(name: str) -> Any:
11 """Provide lazy importing as per https://peps.python.org/pep-0562/"""
12
13 if name not in {"JinjaTemplateEngine", "MakoTemplateEngine", "MakoTemplate"}:
14 raise AttributeError(f"Module {__package__} has no attribute {name}")
15
16 if name == "JinjaTemplateEngine":
17 from starlite.contrib.jinja import JinjaTemplateEngine
18
19 export: Any = JinjaTemplateEngine
20 module = "jinja"
21 elif name == "MakoTemplateEngine":
22 from starlite.contrib.mako import MakoTemplateEngine
23
24 export = MakoTemplateEngine
25 module = "mako"
26 else:
27 from starlite.contrib.mako import MakoTemplate
28
29 export = MakoTemplate
30 module = "mako"
31
32 warn_deprecation(
33 deprecated_name=f"{name} from {__package__}",
34 kind="import",
35 alternative=f"'from startlite.contrib.{module} import {name}'",
36 version="1.46.0",
37 )
38
39 globals()[name] = export
40 return export
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py
--- a/starlite/middleware/session/__init__.py
+++ b/starlite/middleware/session/__init__.py
@@ -8,7 +8,7 @@
def __getattr__(name: str) -> Any:
"""Provide lazy importing as per https://peps.python.org/pep-0562/"""
- if name != "SessionCookieConfig":
+ if name != "CookieBackendConfig":
raise AttributeError(f"Module {__package__} has no attribute {name}")
from .cookie_backend import CookieBackendConfig
@@ -16,7 +16,7 @@
warn_deprecation(
deprecated_name=f"{name} from {__package__}",
kind="import",
- alternative="'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'",
+ alternative="'from starlite.middleware.session.cookie_backend import CookieBackendConfig'",
version="1.47.0",
)
diff --git a/starlite/template/__init__.py b/starlite/template/__init__.py
--- a/starlite/template/__init__.py
+++ b/starlite/template/__init__.py
@@ -32,7 +32,7 @@
warn_deprecation(
deprecated_name=f"{name} from {__package__}",
kind="import",
- alternative=f"'from startlite.contrib.{module} import {name}'",
+ alternative=f"'from starlite.contrib.{module} import {name}'",
version="1.46.0",
)
| {"golden_diff": "diff --git a/starlite/middleware/session/__init__.py b/starlite/middleware/session/__init__.py\n--- a/starlite/middleware/session/__init__.py\n+++ b/starlite/middleware/session/__init__.py\n@@ -8,7 +8,7 @@\n def __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n \n- if name != \"SessionCookieConfig\":\n+ if name != \"CookieBackendConfig\":\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n \n from .cookie_backend import CookieBackendConfig\n@@ -16,7 +16,7 @@\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n- alternative=\"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\",\n+ alternative=\"'from starlite.middleware.session.cookie_backend import CookieBackendConfig'\",\n version=\"1.47.0\",\n )\n \ndiff --git a/starlite/template/__init__.py b/starlite/template/__init__.py\n--- a/starlite/template/__init__.py\n+++ b/starlite/template/__init__.py\n@@ -32,7 +32,7 @@\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n- alternative=f\"'from startlite.contrib.{module} import {name}'\",\n+ alternative=f\"'from starlite.contrib.{module} import {name}'\",\n version=\"1.46.0\",\n )\n", "issue": "Bug: `SessionCookieConfig` import DeprecationWarning - suggested import path does not exist\n**Describe the bug**\r\nIn Starlite >= 1.47, the statement `from starlite.middleware.session.cookie_backend import CookieBackendConfig` raises a DeprecationWarning:\r\n```\r\nDeprecationWarning: Import of deprecated import 'SessionCookieConfig from starlite.middleware.session'.\r\nDeprecated in starlite 1.47.0. This import will be removed in the next major version.\r\nUse \"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\" instead\r\n```\r\nThe suggested import path does not exist and contains a typo.\r\n\r\n\n", "before_files": [{"content": "from typing import Any\n\nfrom starlite.utils import warn_deprecation\n\nfrom .base import SessionMiddleware\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name != \"SessionCookieConfig\":\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n from .cookie_backend import CookieBackendConfig\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=\"'from startlite.middleware.sessions.cookie_backend import CookieBackendConfig'\",\n version=\"1.47.0\",\n )\n\n globals()[name] = CookieBackendConfig\n return CookieBackendConfig\n\n\n__all__ = [\"SessionMiddleware\"]\n", "path": "starlite/middleware/session/__init__.py"}, {"content": "from typing import Any\n\nfrom .base import TemplateEngineProtocol, TemplateProtocol\n\n__all__ = (\"TemplateEngineProtocol\", \"TemplateProtocol\")\n\nfrom ..utils import warn_deprecation\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name not in {\"JinjaTemplateEngine\", \"MakoTemplateEngine\", \"MakoTemplate\"}:\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n if name == \"JinjaTemplateEngine\":\n from starlite.contrib.jinja import JinjaTemplateEngine\n\n export: Any = JinjaTemplateEngine\n module = \"jinja\"\n elif name == \"MakoTemplateEngine\":\n from starlite.contrib.mako import MakoTemplateEngine\n\n export = MakoTemplateEngine\n module = \"mako\"\n 
else:\n from starlite.contrib.mako import MakoTemplate\n\n export = MakoTemplate\n module = \"mako\"\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=f\"'from startlite.contrib.{module} import {name}'\",\n version=\"1.46.0\",\n )\n\n globals()[name] = export\n return export\n", "path": "starlite/template/__init__.py"}], "after_files": [{"content": "from typing import Any\n\nfrom starlite.utils import warn_deprecation\n\nfrom .base import SessionMiddleware\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name != \"CookieBackendConfig\":\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n from .cookie_backend import CookieBackendConfig\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=\"'from starlite.middleware.session.cookie_backend import CookieBackendConfig'\",\n version=\"1.47.0\",\n )\n\n globals()[name] = CookieBackendConfig\n return CookieBackendConfig\n\n\n__all__ = [\"SessionMiddleware\"]\n", "path": "starlite/middleware/session/__init__.py"}, {"content": "from typing import Any\n\nfrom .base import TemplateEngineProtocol, TemplateProtocol\n\n__all__ = (\"TemplateEngineProtocol\", \"TemplateProtocol\")\n\nfrom ..utils import warn_deprecation\n\n\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n\n if name not in {\"JinjaTemplateEngine\", \"MakoTemplateEngine\", \"MakoTemplate\"}:\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n if name == \"JinjaTemplateEngine\":\n from starlite.contrib.jinja import JinjaTemplateEngine\n\n export: Any = JinjaTemplateEngine\n module = \"jinja\"\n elif name == \"MakoTemplateEngine\":\n from starlite.contrib.mako import MakoTemplateEngine\n\n export = MakoTemplateEngine\n module = \"mako\"\n else:\n from starlite.contrib.mako import MakoTemplate\n\n export = MakoTemplate\n module = \"mako\"\n\n warn_deprecation(\n deprecated_name=f\"{name} from {__package__}\",\n kind=\"import\",\n alternative=f\"'from starlite.contrib.{module} import {name}'\",\n version=\"1.46.0\",\n )\n\n globals()[name] = export\n return export\n", "path": "starlite/template/__init__.py"}]} | 1,003 | 342 |
gh_patches_debug_7119 | rasdani/github-patches | git_diff | Netflix__lemur-148 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error messages not displaying long enough
Currently, error messages are displayed only for a short period of time. They should be displayed until the user acknowledges the error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/common/utils.py`
Content:
```
1 """
2 .. module: lemur.common.utils
3 :platform: Unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6
7 .. moduleauthor:: Kevin Glisson <[email protected]>
8 """
9 import string
10 import random
11 from functools import wraps
12
13 from flask import current_app
14
15 from flask.ext.restful import marshal
16 from flask.ext.restful.reqparse import RequestParser
17 from flask.ext.sqlalchemy import Pagination
18
19
20 def get_psuedo_random_string():
21 """
22 Create a random and strongish challenge.
23 """
24 challenge = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) # noqa
25 challenge += ''.join(random.choice("~!@#$%^&*()_+") for x in range(6)) # noqa
26 challenge += ''.join(random.choice(string.ascii_lowercase) for x in range(6))
27 challenge += ''.join(random.choice(string.digits) for x in range(6)) # noqa
28 return challenge
29
30
31 class marshal_items(object):
32 def __init__(self, fields, envelope=None):
33 self.fields = fields
34 self.envelop = envelope
35
36 def __call__(self, f):
37 def _filter_items(items):
38 filtered_items = []
39 for item in items:
40 filtered_items.append(marshal(item, self.fields))
41 return filtered_items
42
43 @wraps(f)
44 def wrapper(*args, **kwargs):
45 try:
46 resp = f(*args, **kwargs)
47
48 # this is a bit weird way to handle non standard error codes returned from the marshaled function
49 if isinstance(resp, tuple):
50 return resp[0], resp[1]
51
52 if isinstance(resp, Pagination):
53 return {'items': _filter_items(resp.items), 'total': resp.total}
54
55 if isinstance(resp, list):
56 return {'items': _filter_items(resp), 'total': len(resp)}
57
58 return marshal(resp, self.fields)
59 except Exception as e:
60 current_app.logger.exception(e)
61 # this is a little weird hack to respect flask restful parsing errors on marshaled functions
62 if hasattr(e, 'code'):
63 if hasattr(e, 'data'):
64 return {'message': e.data['message']}, 400
65 else:
66 return {'message': 'unknown'}, 400
67 else:
68 return {'message': str(e)}, 400
69 return wrapper
70
71
72 paginated_parser = RequestParser()
73
74 paginated_parser.add_argument('count', type=int, default=10, location='args')
75 paginated_parser.add_argument('page', type=int, default=1, location='args')
76 paginated_parser.add_argument('sortDir', type=str, dest='sort_dir', location='args')
77 paginated_parser.add_argument('sortBy', type=str, dest='sort_by', location='args')
78 paginated_parser.add_argument('filter', type=str, location='args')
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/common/utils.py b/lemur/common/utils.py
--- a/lemur/common/utils.py
+++ b/lemur/common/utils.py
@@ -63,9 +63,9 @@
if hasattr(e, 'data'):
return {'message': e.data['message']}, 400
else:
- return {'message': 'unknown'}, 400
+ return {'message': {'exception': 'unknown'}}, 400
else:
- return {'message': str(e)}, 400
+ return {'message': {'exception': str(e)}}, 400
return wrapper
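
Editor's note on the record above: the patch nests handler failures under an `exception` key, which, on an editorial reading of the diff, lets callers distinguish unexpected exceptions from the flask-restful validation payloads that keep the plain `message` shape; the issue itself does not spell this out. Below is a framework-free sketch of the changed wrapper behaviour, with everything except the exception path stubbed out.

```python
# Hedged sketch: only the exception-wrapping behaviour of the patched
# lemur.common.utils.marshal_items decorator is reproduced here.
from functools import wraps


def marshal_items_sketch(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # patched shape: {'message': {'exception': <text>}} rather than {'message': <text>}
            return {"message": {"exception": str(e)}}, 400
    return wrapper


@marshal_items_sketch
def failing_endpoint():
    raise ValueError("certificate not found")


assert failing_endpoint() == ({"message": {"exception": "certificate not found"}}, 400)
```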
| {"golden_diff": "diff --git a/lemur/common/utils.py b/lemur/common/utils.py\n--- a/lemur/common/utils.py\n+++ b/lemur/common/utils.py\n@@ -63,9 +63,9 @@\n if hasattr(e, 'data'):\n return {'message': e.data['message']}, 400\n else:\n- return {'message': 'unknown'}, 400\n+ return {'message': {'exception': 'unknown'}}, 400\n else:\n- return {'message': str(e)}, 400\n+ return {'message': {'exception': str(e)}}, 400\n return wrapper\n", "issue": "Error messages not displaying long enough\nCurrently error messages are displayed for only a period of time. They should be displayed until the user acknowledges the error. \n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.common.utils\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport string\nimport random\nfrom functools import wraps\n\nfrom flask import current_app\n\nfrom flask.ext.restful import marshal\nfrom flask.ext.restful.reqparse import RequestParser\nfrom flask.ext.sqlalchemy import Pagination\n\n\ndef get_psuedo_random_string():\n \"\"\"\n Create a random and strongish challenge.\n \"\"\"\n challenge = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) # noqa\n challenge += ''.join(random.choice(\"~!@#$%^&*()_+\") for x in range(6)) # noqa\n challenge += ''.join(random.choice(string.ascii_lowercase) for x in range(6))\n challenge += ''.join(random.choice(string.digits) for x in range(6)) # noqa\n return challenge\n\n\nclass marshal_items(object):\n def __init__(self, fields, envelope=None):\n self.fields = fields\n self.envelop = envelope\n\n def __call__(self, f):\n def _filter_items(items):\n filtered_items = []\n for item in items:\n filtered_items.append(marshal(item, self.fields))\n return filtered_items\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n resp = f(*args, **kwargs)\n\n # this is a bit weird way to handle non standard error codes returned from the marshaled function\n if isinstance(resp, tuple):\n return resp[0], resp[1]\n\n if isinstance(resp, Pagination):\n return {'items': _filter_items(resp.items), 'total': resp.total}\n\n if isinstance(resp, list):\n return {'items': _filter_items(resp), 'total': len(resp)}\n\n return marshal(resp, self.fields)\n except Exception as e:\n current_app.logger.exception(e)\n # this is a little weird hack to respect flask restful parsing errors on marshaled functions\n if hasattr(e, 'code'):\n if hasattr(e, 'data'):\n return {'message': e.data['message']}, 400\n else:\n return {'message': 'unknown'}, 400\n else:\n return {'message': str(e)}, 400\n return wrapper\n\n\npaginated_parser = RequestParser()\n\npaginated_parser.add_argument('count', type=int, default=10, location='args')\npaginated_parser.add_argument('page', type=int, default=1, location='args')\npaginated_parser.add_argument('sortDir', type=str, dest='sort_dir', location='args')\npaginated_parser.add_argument('sortBy', type=str, dest='sort_by', location='args')\npaginated_parser.add_argument('filter', type=str, location='args')\n", "path": "lemur/common/utils.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.common.utils\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport string\nimport random\nfrom functools import wraps\n\nfrom flask import current_app\n\nfrom flask.ext.restful import marshal\nfrom flask.ext.restful.reqparse import RequestParser\nfrom flask.ext.sqlalchemy import Pagination\n\n\ndef get_psuedo_random_string():\n \"\"\"\n Create a random and strongish challenge.\n \"\"\"\n challenge = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) # noqa\n challenge += ''.join(random.choice(\"~!@#$%^&*()_+\") for x in range(6)) # noqa\n challenge += ''.join(random.choice(string.ascii_lowercase) for x in range(6))\n challenge += ''.join(random.choice(string.digits) for x in range(6)) # noqa\n return challenge\n\n\nclass marshal_items(object):\n def __init__(self, fields, envelope=None):\n self.fields = fields\n self.envelop = envelope\n\n def __call__(self, f):\n def _filter_items(items):\n filtered_items = []\n for item in items:\n filtered_items.append(marshal(item, self.fields))\n return filtered_items\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n resp = f(*args, **kwargs)\n\n # this is a bit weird way to handle non standard error codes returned from the marshaled function\n if isinstance(resp, tuple):\n return resp[0], resp[1]\n\n if isinstance(resp, Pagination):\n return {'items': _filter_items(resp.items), 'total': resp.total}\n\n if isinstance(resp, list):\n return {'items': _filter_items(resp), 'total': len(resp)}\n\n return marshal(resp, self.fields)\n except Exception as e:\n current_app.logger.exception(e)\n # this is a little weird hack to respect flask restful parsing errors on marshaled functions\n if hasattr(e, 'code'):\n if hasattr(e, 'data'):\n return {'message': e.data['message']}, 400\n else:\n return {'message': {'exception': 'unknown'}}, 400\n else:\n return {'message': {'exception': str(e)}}, 400\n return wrapper\n\n\npaginated_parser = RequestParser()\n\npaginated_parser.add_argument('count', type=int, default=10, location='args')\npaginated_parser.add_argument('page', type=int, default=1, location='args')\npaginated_parser.add_argument('sortDir', type=str, dest='sort_dir', location='args')\npaginated_parser.add_argument('sortBy', type=str, dest='sort_by', location='args')\npaginated_parser.add_argument('filter', type=str, location='args')\n", "path": "lemur/common/utils.py"}]} | 1,077 | 146 |
gh_patches_debug_12561 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
InternalError while running the install script for demo mode
## Description
<!-- A clear and concise description of what the bug is. -->
While executing the `load_datasets(engine)` function of the install.py script of the demo app, the following error occurs: 
```
InternalError: (psycopg2.errors.DependentObjectsStillExist) cannot drop schema Library Management because other objects depend on it
DETAIL: table "Library Management"."Authors" depends on schema Library Management
table "Library Management"."Books" depends on schema Library Management
table "Library Management"."Checkouts" depends on schema Library Management
table "Library Management"."Items" depends on schema Library Management
table "Library Management"."Media" depends on schema Library Management
table "Library Management"."Patrons" depends on schema Library Management
table "Library Management"."Publishers" depends on schema Library Management
```
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
We should be able to execute `load_datasets(engine)` without any errors.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `demo/install.py`
Content:
```
1 import bz2
2 import os
3
4 from sqlalchemy import text
5 from sqlalchemy.exc import OperationalError
6
7 from db.engine import create_future_engine
8
9 from demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data
10 from mathesar.models.base import Table, Schema, PreviewColumnSettings
11
12 FILE_DIR = os.path.abspath(os.path.dirname(__file__))
13 RESOURCES = os.path.join(FILE_DIR, "resources")
14 LIBRARY_ONE = os.path.join(RESOURCES, "library_without_checkouts.sql")
15 LIBRARY_TWO = os.path.join(RESOURCES, "library_add_checkouts.sql")
16 DEVCON_DATASET = os.path.join(RESOURCES, "devcon_dataset.sql")
17 MOVIES_SQL_BZ2 = os.path.join(RESOURCES, "movie_collection.sql.bz2")
18
19 LIBRARY_MANAGEMENT = 'Library Management'
20 MATHESAR_CON = 'Mathesar Con'
21 MOVIE_COLLECTION = 'Movie Collection'
22 ARXIV = 'Latest Papers from arXiv'
23 MOVIES_SQL_BZ2 = os.path.join(RESOURCES, "movie_collection.sql.bz2")
24
25
26 def load_datasets(engine):
27 """Load some SQL files with demo data to DB targeted by `engine`."""
28 _load_library_dataset(engine)
29 _load_movies_dataset(engine)
30 _load_devcon_dataset(engine)
31 _load_arxiv_data_skeleton(engine)
32
33
34 def _load_library_dataset(engine):
35 """
36 Load the library dataset into a "Library Management" schema.
37
38 Uses given engine to define database to load into.
39 Destructive, and will knock out any previous "Library Management"
40 schema in the given database.
41 """
42 drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{LIBRARY_MANAGEMENT}";""")
43 create_schema_query = text(f"""CREATE SCHEMA "{LIBRARY_MANAGEMENT}";""")
44 set_search_path = text(f"""SET search_path="{LIBRARY_MANAGEMENT}";""")
45 with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:
46 conn.execute(drop_schema_query)
47 conn.execute(create_schema_query)
48 conn.execute(set_search_path)
49 conn.execute(text(f1.read()))
50 conn.execute(text(f2.read()))
51
52
53 def _load_movies_dataset(engine):
54 drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{MOVIE_COLLECTION}" CASCADE;""")
55 create_schema_query = text(f"""CREATE SCHEMA "{MOVIE_COLLECTION}";""")
56 set_search_path = text(f"""SET search_path="{MOVIE_COLLECTION}";""")
57 with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:
58 conn.execute(drop_schema_query)
59 conn.execute(create_schema_query)
60 conn.execute(set_search_path)
61 conn.execute(text(f.read()))
62
63
64 def _load_devcon_dataset(engine):
65 drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{MATHESAR_CON}" CASCADE;""")
66 create_schema_query = text(f"""CREATE SCHEMA "{MATHESAR_CON}";""")
67 set_search_path = text(f"""SET search_path="{MATHESAR_CON}";""")
68 with engine.begin() as conn, open(DEVCON_DATASET) as f:
69 conn.execute(drop_schema_query)
70 conn.execute(create_schema_query)
71 conn.execute(set_search_path)
72 conn.execute(text(f.read()))
73
74
75 def _load_arxiv_data_skeleton(engine):
76 setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)
77
78
79 def customize_settings(engine):
80 """Set preview settings so demo data looks good."""
81 _customize_library_preview_settings(engine)
82 _customize_devcon_preview_settings(engine)
83
84
85 def _customize_library_preview_settings(engine):
86 schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)
87 authors = _get_dj_table_by_name(schema, 'Authors')
88 _set_first_and_last_names_preview(authors)
89 patrons = _get_dj_table_by_name(schema, 'Patrons')
90 _set_first_and_last_names_preview(patrons)
91
92
93 def _customize_devcon_preview_settings(engine):
94 schema = _get_dj_schema_by_name(engine, MATHESAR_CON)
95 presenters = _get_dj_table_by_name(schema, 'Presenters')
96 _set_first_and_last_names_preview(presenters)
97
98
99 def _set_first_and_last_names_preview(table):
100 first_name = _get_dj_column_by_name(table, 'First Name')
101 last_name = _get_dj_column_by_name(table, 'Last Name')
102 template = f'{{{first_name.id}}} {{{last_name.id}}}'
103 new_preview_settings = PreviewColumnSettings.objects.create(
104 customized=True, template=template
105 )
106 table.settings.preview_settings = new_preview_settings
107 table.settings.save()
108
109
110 def _get_dj_schema_by_name(engine, name):
111 db_name = engine.url.database
112 schemas = Schema.objects.filter(database__name=db_name)
113 for s in schemas:
114 if s.name == name:
115 return s
116
117
118 def _get_dj_table_by_name(schema, name):
119 tables = Table.objects.filter(schema=schema)
120 for t in tables:
121 if t.name == name:
122 return t
123
124
125 def _get_dj_column_by_name(table, name):
126 columns = table.columns.all()
127 for c in columns:
128 if c.name == name:
129 return c
130
131
132 def create_demo_database(
133 user_db, username, password, hostname, root_db, port, template_db
134 ):
135 """Create database, install Mathesar on it, add demo data."""
136 user_db_engine = create_future_engine(
137 username, password, hostname, user_db, port
138 )
139 try:
140 user_db_engine.connect()
141 user_db_engine.dispose()
142 print(f"Database {user_db} already exists! Skipping...")
143 except OperationalError:
144 root_db_engine = create_future_engine(
145 username, password, hostname, root_db, port,
146 )
147 with root_db_engine.connect() as conn:
148 conn.execution_options(isolation_level="AUTOCOMMIT")
149 conn.execute(text(f"CREATE DATABASE {user_db} TEMPLATE {template_db};"))
150 root_db_engine.dispose()
151 user_db_engine.dispose()
152 print(f"Created DB is {user_db}.")
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/demo/install.py b/demo/install.py
--- a/demo/install.py
+++ b/demo/install.py
@@ -39,7 +39,7 @@
Destructive, and will knock out any previous "Library Management"
schema in the given database.
"""
- drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{LIBRARY_MANAGEMENT}";""")
+ drop_schema_query = text(f"""DROP SCHEMA IF EXISTS "{LIBRARY_MANAGEMENT}" CASCADE;""")
create_schema_query = text(f"""CREATE SCHEMA "{LIBRARY_MANAGEMENT}";""")
set_search_path = text(f"""SET search_path="{LIBRARY_MANAGEMENT}";""")
with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:
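For context on why the change above (adding `CASCADE` to the `DROP SCHEMA` statement) is sufficient: the other two loaders, `_load_movies_dataset` and `_load_devcon_dataset`, already drop their schemas with `CASCADE`, so they can be re-run even when the schema still contains tables, while the library loader could not. A minimal, self-contained sketch of the drop/re-create cycle the loader performs is shown below; the connection URL is illustrative only, and the schema name is taken from `demo/install.py`.
```
from sqlalchemy import create_engine, text

SCHEMA = "Library Management"  # same schema name used in demo/install.py

# Illustrative DSN, not part of the original repository.
engine = create_engine("postgresql://mathesar:mathesar@localhost:5432/mathesar_db")

with engine.begin() as conn:
    # Without CASCADE, PostgreSQL refuses to drop a schema that still contains
    # tables (psycopg2.errors.DependentObjectsStillExist). CASCADE drops the
    # schema together with its dependent tables, so the loader can be re-run.
    conn.execute(text(f'DROP SCHEMA IF EXISTS "{SCHEMA}" CASCADE;'))
    conn.execute(text(f'CREATE SCHEMA "{SCHEMA}";'))
```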
| {"golden_diff": "diff --git a/demo/install.py b/demo/install.py\n--- a/demo/install.py\n+++ b/demo/install.py\n@@ -39,7 +39,7 @@\n Destructive, and will knock out any previous \"Library Management\"\n schema in the given database.\n \"\"\"\n- drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\";\"\"\")\n+ drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{LIBRARY_MANAGEMENT}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{LIBRARY_MANAGEMENT}\";\"\"\")\n with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:\n", "issue": "InternalError while running the install script for demo mode\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhile executing the `load_datasets(engine)` module of the install.py script of the demo app, the following error occurs: \r\n```\r\nInternalError: (psycopg2.errors.DependentObjectsStillExist) cannot drop schema Library Management because other objects depend on it\r\nDETAIL: table \"Library Management\".\"Authors\" depends on schema Library Management\r\ntable \"Library Management\".\"Books\" depends on schema Library Management\r\ntable \"Library Management\".\"Checkouts\" depends on schema Library Management\r\ntable \"Library Management\".\"Items\" depends on schema Library Management\r\ntable \"Library Management\".\"Media\" depends on schema Library Management\r\ntable \"Library Management\".\"Patrons\" depends on schema Library Management\r\ntable \"Library Management\".\"Publishers\" depends on schema Library Management\r\n\r\n```\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\nWe should be able to execute `load_datasets(engine)` without any errors.\n", "before_files": [{"content": "import bz2\nimport os\n\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db.engine import create_future_engine\n\nfrom demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data\nfrom mathesar.models.base import Table, Schema, PreviewColumnSettings\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nRESOURCES = os.path.join(FILE_DIR, \"resources\")\nLIBRARY_ONE = os.path.join(RESOURCES, \"library_without_checkouts.sql\")\nLIBRARY_TWO = os.path.join(RESOURCES, \"library_add_checkouts.sql\")\nDEVCON_DATASET = os.path.join(RESOURCES, \"devcon_dataset.sql\")\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\nLIBRARY_MANAGEMENT = 'Library Management'\nMATHESAR_CON = 'Mathesar Con'\nMOVIE_COLLECTION = 'Movie Collection'\nARXIV = 'Latest Papers from arXiv'\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\n\ndef load_datasets(engine):\n \"\"\"Load some SQL files with demo data to DB targeted by `engine`.\"\"\"\n _load_library_dataset(engine)\n _load_movies_dataset(engine)\n _load_devcon_dataset(engine)\n _load_arxiv_data_skeleton(engine)\n\n\ndef _load_library_dataset(engine):\n \"\"\"\n Load the library dataset into a \"Library Management\" schema.\n\n Uses given engine to define database to load into.\n Destructive, and will knock out any previous \"Library Management\"\n schema in the given database.\n \"\"\"\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\";\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{LIBRARY_MANAGEMENT}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{LIBRARY_MANAGEMENT}\";\"\"\")\n with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f1.read()))\n conn.execute(text(f2.read()))\n\n\ndef _load_movies_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MOVIE_COLLECTION}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MOVIE_COLLECTION}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MOVIE_COLLECTION}\";\"\"\")\n with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_devcon_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MATHESAR_CON}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MATHESAR_CON}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MATHESAR_CON}\";\"\"\")\n with engine.begin() as conn, open(DEVCON_DATASET) as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_arxiv_data_skeleton(engine):\n setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)\n\n\ndef customize_settings(engine):\n \"\"\"Set preview settings so demo data looks good.\"\"\"\n _customize_library_preview_settings(engine)\n _customize_devcon_preview_settings(engine)\n\n\ndef _customize_library_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)\n authors = _get_dj_table_by_name(schema, 'Authors')\n _set_first_and_last_names_preview(authors)\n patrons = 
_get_dj_table_by_name(schema, 'Patrons')\n _set_first_and_last_names_preview(patrons)\n\n\ndef _customize_devcon_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, MATHESAR_CON)\n presenters = _get_dj_table_by_name(schema, 'Presenters')\n _set_first_and_last_names_preview(presenters)\n\n\ndef _set_first_and_last_names_preview(table):\n first_name = _get_dj_column_by_name(table, 'First Name')\n last_name = _get_dj_column_by_name(table, 'Last Name')\n template = f'{{{first_name.id}}} {{{last_name.id}}}'\n new_preview_settings = PreviewColumnSettings.objects.create(\n customized=True, template=template\n )\n table.settings.preview_settings = new_preview_settings\n table.settings.save()\n\n\ndef _get_dj_schema_by_name(engine, name):\n db_name = engine.url.database\n schemas = Schema.objects.filter(database__name=db_name)\n for s in schemas:\n if s.name == name:\n return s\n\n\ndef _get_dj_table_by_name(schema, name):\n tables = Table.objects.filter(schema=schema)\n for t in tables:\n if t.name == name:\n return t\n\n\ndef _get_dj_column_by_name(table, name):\n columns = table.columns.all()\n for c in columns:\n if c.name == name:\n return c\n\n\ndef create_demo_database(\n user_db, username, password, hostname, root_db, port, template_db\n):\n \"\"\"Create database, install Mathesar on it, add demo data.\"\"\"\n user_db_engine = create_future_engine(\n username, password, hostname, user_db, port\n )\n try:\n user_db_engine.connect()\n user_db_engine.dispose()\n print(f\"Database {user_db} already exists! Skipping...\")\n except OperationalError:\n root_db_engine = create_future_engine(\n username, password, hostname, root_db, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {user_db} TEMPLATE {template_db};\"))\n root_db_engine.dispose()\n user_db_engine.dispose()\n print(f\"Created DB is {user_db}.\")\n", "path": "demo/install.py"}], "after_files": [{"content": "import bz2\nimport os\n\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db.engine import create_future_engine\n\nfrom demo.arxiv_skeleton import setup_and_register_schema_for_receiving_arxiv_data\nfrom mathesar.models.base import Table, Schema, PreviewColumnSettings\n\nFILE_DIR = os.path.abspath(os.path.dirname(__file__))\nRESOURCES = os.path.join(FILE_DIR, \"resources\")\nLIBRARY_ONE = os.path.join(RESOURCES, \"library_without_checkouts.sql\")\nLIBRARY_TWO = os.path.join(RESOURCES, \"library_add_checkouts.sql\")\nDEVCON_DATASET = os.path.join(RESOURCES, \"devcon_dataset.sql\")\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\nLIBRARY_MANAGEMENT = 'Library Management'\nMATHESAR_CON = 'Mathesar Con'\nMOVIE_COLLECTION = 'Movie Collection'\nARXIV = 'Latest Papers from arXiv'\nMOVIES_SQL_BZ2 = os.path.join(RESOURCES, \"movie_collection.sql.bz2\")\n\n\ndef load_datasets(engine):\n \"\"\"Load some SQL files with demo data to DB targeted by `engine`.\"\"\"\n _load_library_dataset(engine)\n _load_movies_dataset(engine)\n _load_devcon_dataset(engine)\n _load_arxiv_data_skeleton(engine)\n\n\ndef _load_library_dataset(engine):\n \"\"\"\n Load the library dataset into a \"Library Management\" schema.\n\n Uses given engine to define database to load into.\n Destructive, and will knock out any previous \"Library Management\"\n schema in the given database.\n \"\"\"\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{LIBRARY_MANAGEMENT}\" CASCADE;\"\"\")\n 
create_schema_query = text(f\"\"\"CREATE SCHEMA \"{LIBRARY_MANAGEMENT}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{LIBRARY_MANAGEMENT}\";\"\"\")\n with engine.begin() as conn, open(LIBRARY_ONE) as f1, open(LIBRARY_TWO) as f2:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f1.read()))\n conn.execute(text(f2.read()))\n\n\ndef _load_movies_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MOVIE_COLLECTION}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MOVIE_COLLECTION}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MOVIE_COLLECTION}\";\"\"\")\n with engine.begin() as conn, bz2.open(MOVIES_SQL_BZ2, 'rt') as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_devcon_dataset(engine):\n drop_schema_query = text(f\"\"\"DROP SCHEMA IF EXISTS \"{MATHESAR_CON}\" CASCADE;\"\"\")\n create_schema_query = text(f\"\"\"CREATE SCHEMA \"{MATHESAR_CON}\";\"\"\")\n set_search_path = text(f\"\"\"SET search_path=\"{MATHESAR_CON}\";\"\"\")\n with engine.begin() as conn, open(DEVCON_DATASET) as f:\n conn.execute(drop_schema_query)\n conn.execute(create_schema_query)\n conn.execute(set_search_path)\n conn.execute(text(f.read()))\n\n\ndef _load_arxiv_data_skeleton(engine):\n setup_and_register_schema_for_receiving_arxiv_data(engine, schema_name=ARXIV)\n\n\ndef customize_settings(engine):\n \"\"\"Set preview settings so demo data looks good.\"\"\"\n _customize_library_preview_settings(engine)\n _customize_devcon_preview_settings(engine)\n\n\ndef _customize_library_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, LIBRARY_MANAGEMENT)\n authors = _get_dj_table_by_name(schema, 'Authors')\n _set_first_and_last_names_preview(authors)\n patrons = _get_dj_table_by_name(schema, 'Patrons')\n _set_first_and_last_names_preview(patrons)\n\n\ndef _customize_devcon_preview_settings(engine):\n schema = _get_dj_schema_by_name(engine, MATHESAR_CON)\n presenters = _get_dj_table_by_name(schema, 'Presenters')\n _set_first_and_last_names_preview(presenters)\n\n\ndef _set_first_and_last_names_preview(table):\n first_name = _get_dj_column_by_name(table, 'First Name')\n last_name = _get_dj_column_by_name(table, 'Last Name')\n template = f'{{{first_name.id}}} {{{last_name.id}}}'\n new_preview_settings = PreviewColumnSettings.objects.create(\n customized=True, template=template\n )\n table.settings.preview_settings = new_preview_settings\n table.settings.save()\n\n\ndef _get_dj_schema_by_name(engine, name):\n db_name = engine.url.database\n schemas = Schema.objects.filter(database__name=db_name)\n for s in schemas:\n if s.name == name:\n return s\n\n\ndef _get_dj_table_by_name(schema, name):\n tables = Table.objects.filter(schema=schema)\n for t in tables:\n if t.name == name:\n return t\n\n\ndef _get_dj_column_by_name(table, name):\n columns = table.columns.all()\n for c in columns:\n if c.name == name:\n return c\n\n\ndef create_demo_database(\n user_db, username, password, hostname, root_db, port, template_db\n):\n \"\"\"Create database, install Mathesar on it, add demo data.\"\"\"\n user_db_engine = create_future_engine(\n username, password, hostname, user_db, port\n )\n try:\n user_db_engine.connect()\n user_db_engine.dispose()\n print(f\"Database {user_db} already exists! 
Skipping...\")\n except OperationalError:\n root_db_engine = create_future_engine(\n username, password, hostname, root_db, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {user_db} TEMPLATE {template_db};\"))\n root_db_engine.dispose()\n user_db_engine.dispose()\n print(f\"Created DB is {user_db}.\")\n", "path": "demo/install.py"}]} | 2,142 | 171 |
gh_patches_debug_40864 | rasdani/github-patches | git_diff | spack__spack-1973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't uninstall all packages
I would like to be able to uninstall all packages. The following doesn't work:
```
$ spack uninstall -a
==> Error: uninstall requires at least one package argument.
```
--- END ISSUE ---
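One detail worth noting before the file listings: the exact error message in the report ("uninstall requires at least one package argument.") is raised by the guard at the top of `uninstall()` in `lib/spack/spack/cmd/uninstall.py`, which looks only at `args.packages` and therefore dies before the `-a/--all` flag is ever consulted. A small, self-contained sketch of the intended behaviour is below; the `should_reject` helper and the stripped-down parser are illustrative, not part of Spack, though the argparse flags mirror the ones defined in the listing.
```
import argparse


def should_reject(args: argparse.Namespace) -> bool:
    # Only refuse to proceed when the user supplied neither an explicit spec
    # nor -a/--all; checking args.packages alone is what breaks `spack uninstall -a`.
    return not args.packages and not args.all


parser = argparse.ArgumentParser(prog="spack uninstall")
parser.add_argument("-a", "--all", action="store_true", dest="all")
parser.add_argument("packages", nargs=argparse.REMAINDER)

print(should_reject(parser.parse_args(["-a"])))  # False -> proceed and uninstall everything
print(should_reject(parser.parse_args([])))      # True  -> die with the error shown above
```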
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/cmd/uninstall.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from __future__ import print_function
26
27 import argparse
28
29 import llnl.util.tty as tty
30 import spack
31 import spack.cmd
32 import spack.repository
33
34 description = "Remove an installed package"
35
36 error_message = """You can either:
37 a) Use a more specific spec, or
38 b) use spack uninstall -a to uninstall ALL matching specs.
39 """
40
41 # Arguments for display_specs when we find ambiguity
42 display_args = {
43 'long': True,
44 'show_flags': True,
45 'variants': True
46 }
47
48
49 def setup_parser(subparser):
50 subparser.add_argument(
51 '-f', '--force', action='store_true', dest='force',
52 help="Remove regardless of whether other packages depend on this one.")
53
54 subparser.add_argument(
55 '-a', '--all', action='store_true', dest='all',
56 help="USE CAREFULLY. Remove ALL installed packages that match each "
57 "supplied spec. i.e., if you say uninstall libelf, ALL versions "
58 "of libelf are uninstalled. This is both useful and dangerous, "
59 "like rm -r.")
60
61 subparser.add_argument(
62 '-d', '--dependents', action='store_true', dest='dependents',
63 help='Also uninstall any packages that depend on the ones given '
64 'via command line.')
65
66 subparser.add_argument(
67 '-y', '--yes-to-all', action='store_true', dest='yes_to_all',
68 help='Assume "yes" is the answer to every confirmation requested')
69
70 subparser.add_argument(
71 'packages',
72 nargs=argparse.REMAINDER,
73 help="specs of packages to uninstall")
74
75
76 def concretize_specs(specs, allow_multiple_matches=False, force=False):
77 """Returns a list of specs matching the non necessarily
78 concretized specs given from cli
79
80 Args:
81 specs: list of specs to be matched against installed packages
82 allow_multiple_matches : if True multiple matches are admitted
83
84 Return:
85 list of specs
86 """
87 # List of specs that match expressions given via command line
88 specs_from_cli = []
89 has_errors = False
90 for spec in specs:
91 matching = spack.installed_db.query(spec)
92 # For each spec provided, make sure it refers to only one package.
93 # Fail and ask user to be unambiguous if it doesn't
94 if not allow_multiple_matches and len(matching) > 1:
95 tty.error("%s matches multiple packages:" % spec)
96 print()
97 spack.cmd.display_specs(matching, **display_args)
98 print()
99 has_errors = True
100
101 # No installed package matches the query
102 if len(matching) == 0:
103 tty.error("%s does not match any installed packages." % spec)
104 has_errors = True
105
106 specs_from_cli.extend(matching)
107 if has_errors:
108 tty.die(error_message)
109
110 return specs_from_cli
111
112
113 def installed_dependents(specs):
114 """Returns a dictionary that maps a spec with a list of its
115 installed dependents
116
117 Args:
118 specs: list of specs to be checked for dependents
119
120 Returns:
121 dictionary of installed dependents
122 """
123 dependents = {}
124 for item in specs:
125 lst = [x for x in item.package.installed_dependents if x not in specs]
126 if lst:
127 lst = list(set(lst))
128 dependents[item] = lst
129 return dependents
130
131
132 def do_uninstall(specs, force):
133 """
134 Uninstalls all the specs in a list.
135
136 Args:
137 specs: list of specs to be uninstalled
138 force: force uninstallation (boolean)
139 """
140 packages = []
141 for item in specs:
142 try:
143 # should work if package is known to spack
144 packages.append(item.package)
145 except spack.repository.UnknownPackageError:
146 # The package.py file has gone away -- but still
147 # want to uninstall.
148 spack.Package(item).do_uninstall(force=True)
149
150 # Sort packages to be uninstalled by the number of installed dependents
151 # This ensures we do things in the right order
152 def num_installed_deps(pkg):
153 return len(pkg.installed_dependents)
154
155 packages.sort(key=num_installed_deps)
156 for item in packages:
157 item.do_uninstall(force=force)
158
159
160 def uninstall(parser, args):
161 if not args.packages:
162 tty.die("uninstall requires at least one package argument.")
163
164 with spack.installed_db.write_transaction():
165 specs = spack.cmd.parse_specs(args.packages)
166 # Gets the list of installed specs that match the ones give via cli
167 # takes care of '-a' is given in the cli
168 uninstall_list = concretize_specs(specs, args.all, args.force)
169 dependent_list = installed_dependents(
170 uninstall_list) # takes care of '-d'
171
172 # Process dependent_list and update uninstall_list
173 has_error = False
174 if dependent_list and not args.dependents and not args.force:
175 for spec, lst in dependent_list.items():
176 tty.error("Will not uninstall %s" %
177 spec.format("$_$@$%@$#", color=True))
178 print('')
179 print("The following packages depend on it:")
180 spack.cmd.display_specs(lst, **display_args)
181 print('')
182 has_error = True
183 elif args.dependents:
184 for key, lst in dependent_list.items():
185 uninstall_list.extend(lst)
186 uninstall_list = list(set(uninstall_list))
187
188 if has_error:
189 tty.die('You can use spack uninstall --dependents '
190 'to uninstall these dependencies as well')
191
192 if not args.yes_to_all:
193 tty.msg("The following packages will be uninstalled : ")
194 print('')
195 spack.cmd.display_specs(uninstall_list, **display_args)
196 print('')
197 spack.cmd.ask_for_confirmation('Do you want to proceed ? ')
198
199 # Uninstall everything on the list
200 do_uninstall(uninstall_list, args.force)
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -54,9 +54,10 @@
subparser.add_argument(
'-a', '--all', action='store_true', dest='all',
help="USE CAREFULLY. Remove ALL installed packages that match each "
- "supplied spec. i.e., if you say uninstall libelf, ALL versions "
- "of libelf are uninstalled. This is both useful and dangerous, "
- "like rm -r.")
+ "supplied spec. i.e., if you say uninstall `libelf`,"
+ " ALL versions of `libelf` are uninstalled. If no spec is "
+ "supplied all installed software will be uninstalled. This "
+ "is both useful and dangerous, like rm -r.")
subparser.add_argument(
'-d', '--dependents', action='store_true', dest='dependents',
@@ -157,37 +158,44 @@
item.do_uninstall(force=force)
+def get_uninstall_list(args):
+ specs = [any]
+ if args.packages:
+ specs = spack.cmd.parse_specs(args.packages)
+ # Gets the list of installed specs that match the ones give via cli
+ # takes care of '-a' is given in the cli
+ uninstall_list = concretize_specs(specs, args.all, args.force)
+ # Takes care of '-d'
+ dependent_list = installed_dependents(uninstall_list)
+ # Process dependent_list and update uninstall_list
+ has_error = False
+ if dependent_list and not args.dependents and not args.force:
+ for spec, lst in dependent_list.items():
+ tty.error("Will not uninstall %s" %
+ spec.format("$_$@$%@$#", color=True))
+ print('')
+ print("The following packages depend on it:")
+ spack.cmd.display_specs(lst, **display_args)
+ print('')
+ has_error = True
+ elif args.dependents:
+ for key, lst in dependent_list.items():
+ uninstall_list.extend(lst)
+ uninstall_list = list(set(uninstall_list))
+ if has_error:
+ tty.die('You can use spack uninstall --dependents '
+ 'to uninstall these dependencies as well')
+
+ return uninstall_list
+
+
def uninstall(parser, args):
- if not args.packages:
+ if not args.packages and not args.all:
tty.die("uninstall requires at least one package argument.")
with spack.installed_db.write_transaction():
- specs = spack.cmd.parse_specs(args.packages)
- # Gets the list of installed specs that match the ones give via cli
- # takes care of '-a' is given in the cli
- uninstall_list = concretize_specs(specs, args.all, args.force)
- dependent_list = installed_dependents(
- uninstall_list) # takes care of '-d'
-
- # Process dependent_list and update uninstall_list
- has_error = False
- if dependent_list and not args.dependents and not args.force:
- for spec, lst in dependent_list.items():
- tty.error("Will not uninstall %s" %
- spec.format("$_$@$%@$#", color=True))
- print('')
- print("The following packages depend on it:")
- spack.cmd.display_specs(lst, **display_args)
- print('')
- has_error = True
- elif args.dependents:
- for key, lst in dependent_list.items():
- uninstall_list.extend(lst)
- uninstall_list = list(set(uninstall_list))
-
- if has_error:
- tty.die('You can use spack uninstall --dependents '
- 'to uninstall these dependencies as well')
+
+ uninstall_list = get_uninstall_list(args)
if not args.yes_to_all:
tty.msg("The following packages will be uninstalled : ")
| {"golden_diff": "diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py\n--- a/lib/spack/spack/cmd/uninstall.py\n+++ b/lib/spack/spack/cmd/uninstall.py\n@@ -54,9 +54,10 @@\n subparser.add_argument(\n '-a', '--all', action='store_true', dest='all',\n help=\"USE CAREFULLY. Remove ALL installed packages that match each \"\n- \"supplied spec. i.e., if you say uninstall libelf, ALL versions \"\n- \"of libelf are uninstalled. This is both useful and dangerous, \"\n- \"like rm -r.\")\n+ \"supplied spec. i.e., if you say uninstall `libelf`,\"\n+ \" ALL versions of `libelf` are uninstalled. If no spec is \"\n+ \"supplied all installed software will be uninstalled. This \"\n+ \"is both useful and dangerous, like rm -r.\")\n \n subparser.add_argument(\n '-d', '--dependents', action='store_true', dest='dependents',\n@@ -157,37 +158,44 @@\n item.do_uninstall(force=force)\n \n \n+def get_uninstall_list(args):\n+ specs = [any]\n+ if args.packages:\n+ specs = spack.cmd.parse_specs(args.packages)\n+ # Gets the list of installed specs that match the ones give via cli\n+ # takes care of '-a' is given in the cli\n+ uninstall_list = concretize_specs(specs, args.all, args.force)\n+ # Takes care of '-d'\n+ dependent_list = installed_dependents(uninstall_list)\n+ # Process dependent_list and update uninstall_list\n+ has_error = False\n+ if dependent_list and not args.dependents and not args.force:\n+ for spec, lst in dependent_list.items():\n+ tty.error(\"Will not uninstall %s\" %\n+ spec.format(\"$_$@$%@$#\", color=True))\n+ print('')\n+ print(\"The following packages depend on it:\")\n+ spack.cmd.display_specs(lst, **display_args)\n+ print('')\n+ has_error = True\n+ elif args.dependents:\n+ for key, lst in dependent_list.items():\n+ uninstall_list.extend(lst)\n+ uninstall_list = list(set(uninstall_list))\n+ if has_error:\n+ tty.die('You can use spack uninstall --dependents '\n+ 'to uninstall these dependencies as well')\n+\n+ return uninstall_list\n+\n+\n def uninstall(parser, args):\n- if not args.packages:\n+ if not args.packages and not args.all:\n tty.die(\"uninstall requires at least one package argument.\")\n \n with spack.installed_db.write_transaction():\n- specs = spack.cmd.parse_specs(args.packages)\n- # Gets the list of installed specs that match the ones give via cli\n- # takes care of '-a' is given in the cli\n- uninstall_list = concretize_specs(specs, args.all, args.force)\n- dependent_list = installed_dependents(\n- uninstall_list) # takes care of '-d'\n-\n- # Process dependent_list and update uninstall_list\n- has_error = False\n- if dependent_list and not args.dependents and not args.force:\n- for spec, lst in dependent_list.items():\n- tty.error(\"Will not uninstall %s\" %\n- spec.format(\"$_$@$%@$#\", color=True))\n- print('')\n- print(\"The following packages depend on it:\")\n- spack.cmd.display_specs(lst, **display_args)\n- print('')\n- has_error = True\n- elif args.dependents:\n- for key, lst in dependent_list.items():\n- uninstall_list.extend(lst)\n- uninstall_list = list(set(uninstall_list))\n-\n- if has_error:\n- tty.die('You can use spack uninstall --dependents '\n- 'to uninstall these dependencies as well')\n+\n+ uninstall_list = get_uninstall_list(args)\n \n if not args.yes_to_all:\n tty.msg(\"The following packages will be uninstalled : \")\n", "issue": "Can't uninstall all packages\nI would like to be able to uninstall all packages. 
The following doesn't work:\n\n```\n$ spack uninstall -a\n==> Error: uninstall requires at least one package argument.\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom __future__ import print_function\n\nimport argparse\n\nimport llnl.util.tty as tty\nimport spack\nimport spack.cmd\nimport spack.repository\n\ndescription = \"Remove an installed package\"\n\nerror_message = \"\"\"You can either:\n a) Use a more specific spec, or\n b) use spack uninstall -a to uninstall ALL matching specs.\n\"\"\"\n\n# Arguments for display_specs when we find ambiguity\ndisplay_args = {\n 'long': True,\n 'show_flags': True,\n 'variants': True\n}\n\n\ndef setup_parser(subparser):\n subparser.add_argument(\n '-f', '--force', action='store_true', dest='force',\n help=\"Remove regardless of whether other packages depend on this one.\")\n\n subparser.add_argument(\n '-a', '--all', action='store_true', dest='all',\n help=\"USE CAREFULLY. Remove ALL installed packages that match each \"\n \"supplied spec. i.e., if you say uninstall libelf, ALL versions \"\n \"of libelf are uninstalled. 
This is both useful and dangerous, \"\n \"like rm -r.\")\n\n subparser.add_argument(\n '-d', '--dependents', action='store_true', dest='dependents',\n help='Also uninstall any packages that depend on the ones given '\n 'via command line.')\n\n subparser.add_argument(\n '-y', '--yes-to-all', action='store_true', dest='yes_to_all',\n help='Assume \"yes\" is the answer to every confirmation requested')\n\n subparser.add_argument(\n 'packages',\n nargs=argparse.REMAINDER,\n help=\"specs of packages to uninstall\")\n\n\ndef concretize_specs(specs, allow_multiple_matches=False, force=False):\n \"\"\"Returns a list of specs matching the non necessarily\n concretized specs given from cli\n\n Args:\n specs: list of specs to be matched against installed packages\n allow_multiple_matches : if True multiple matches are admitted\n\n Return:\n list of specs\n \"\"\"\n # List of specs that match expressions given via command line\n specs_from_cli = []\n has_errors = False\n for spec in specs:\n matching = spack.installed_db.query(spec)\n # For each spec provided, make sure it refers to only one package.\n # Fail and ask user to be unambiguous if it doesn't\n if not allow_multiple_matches and len(matching) > 1:\n tty.error(\"%s matches multiple packages:\" % spec)\n print()\n spack.cmd.display_specs(matching, **display_args)\n print()\n has_errors = True\n\n # No installed package matches the query\n if len(matching) == 0:\n tty.error(\"%s does not match any installed packages.\" % spec)\n has_errors = True\n\n specs_from_cli.extend(matching)\n if has_errors:\n tty.die(error_message)\n\n return specs_from_cli\n\n\ndef installed_dependents(specs):\n \"\"\"Returns a dictionary that maps a spec with a list of its\n installed dependents\n\n Args:\n specs: list of specs to be checked for dependents\n\n Returns:\n dictionary of installed dependents\n \"\"\"\n dependents = {}\n for item in specs:\n lst = [x for x in item.package.installed_dependents if x not in specs]\n if lst:\n lst = list(set(lst))\n dependents[item] = lst\n return dependents\n\n\ndef do_uninstall(specs, force):\n \"\"\"\n Uninstalls all the specs in a list.\n\n Args:\n specs: list of specs to be uninstalled\n force: force uninstallation (boolean)\n \"\"\"\n packages = []\n for item in specs:\n try:\n # should work if package is known to spack\n packages.append(item.package)\n except spack.repository.UnknownPackageError:\n # The package.py file has gone away -- but still\n # want to uninstall.\n spack.Package(item).do_uninstall(force=True)\n\n # Sort packages to be uninstalled by the number of installed dependents\n # This ensures we do things in the right order\n def num_installed_deps(pkg):\n return len(pkg.installed_dependents)\n\n packages.sort(key=num_installed_deps)\n for item in packages:\n item.do_uninstall(force=force)\n\n\ndef uninstall(parser, args):\n if not args.packages:\n tty.die(\"uninstall requires at least one package argument.\")\n\n with spack.installed_db.write_transaction():\n specs = spack.cmd.parse_specs(args.packages)\n # Gets the list of installed specs that match the ones give via cli\n # takes care of '-a' is given in the cli\n uninstall_list = concretize_specs(specs, args.all, args.force)\n dependent_list = installed_dependents(\n uninstall_list) # takes care of '-d'\n\n # Process dependent_list and update uninstall_list\n has_error = False\n if dependent_list and not args.dependents and not args.force:\n for spec, lst in dependent_list.items():\n tty.error(\"Will not uninstall %s\" %\n spec.format(\"$_$@$%@$#\", 
color=True))\n print('')\n print(\"The following packages depend on it:\")\n spack.cmd.display_specs(lst, **display_args)\n print('')\n has_error = True\n elif args.dependents:\n for key, lst in dependent_list.items():\n uninstall_list.extend(lst)\n uninstall_list = list(set(uninstall_list))\n\n if has_error:\n tty.die('You can use spack uninstall --dependents '\n 'to uninstall these dependencies as well')\n\n if not args.yes_to_all:\n tty.msg(\"The following packages will be uninstalled : \")\n print('')\n spack.cmd.display_specs(uninstall_list, **display_args)\n print('')\n spack.cmd.ask_for_confirmation('Do you want to proceed ? ')\n\n # Uninstall everything on the list\n do_uninstall(uninstall_list, args.force)\n", "path": "lib/spack/spack/cmd/uninstall.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom __future__ import print_function\n\nimport argparse\n\nimport llnl.util.tty as tty\nimport spack\nimport spack.cmd\nimport spack.repository\n\ndescription = \"Remove an installed package\"\n\nerror_message = \"\"\"You can either:\n a) Use a more specific spec, or\n b) use spack uninstall -a to uninstall ALL matching specs.\n\"\"\"\n\n# Arguments for display_specs when we find ambiguity\ndisplay_args = {\n 'long': True,\n 'show_flags': True,\n 'variants': True\n}\n\n\ndef setup_parser(subparser):\n subparser.add_argument(\n '-f', '--force', action='store_true', dest='force',\n help=\"Remove regardless of whether other packages depend on this one.\")\n\n subparser.add_argument(\n '-a', '--all', action='store_true', dest='all',\n help=\"USE CAREFULLY. Remove ALL installed packages that match each \"\n \"supplied spec. i.e., if you say uninstall `libelf`,\"\n \" ALL versions of `libelf` are uninstalled. If no spec is \"\n \"supplied all installed software will be uninstalled. 
This \"\n \"is both useful and dangerous, like rm -r.\")\n\n subparser.add_argument(\n '-d', '--dependents', action='store_true', dest='dependents',\n help='Also uninstall any packages that depend on the ones given '\n 'via command line.')\n\n subparser.add_argument(\n '-y', '--yes-to-all', action='store_true', dest='yes_to_all',\n help='Assume \"yes\" is the answer to every confirmation requested')\n\n subparser.add_argument(\n 'packages',\n nargs=argparse.REMAINDER,\n help=\"specs of packages to uninstall\")\n\n\ndef concretize_specs(specs, allow_multiple_matches=False, force=False):\n \"\"\"Returns a list of specs matching the non necessarily\n concretized specs given from cli\n\n Args:\n specs: list of specs to be matched against installed packages\n allow_multiple_matches : if True multiple matches are admitted\n\n Return:\n list of specs\n \"\"\"\n # List of specs that match expressions given via command line\n specs_from_cli = []\n has_errors = False\n for spec in specs:\n matching = spack.installed_db.query(spec)\n # For each spec provided, make sure it refers to only one package.\n # Fail and ask user to be unambiguous if it doesn't\n if not allow_multiple_matches and len(matching) > 1:\n tty.error(\"%s matches multiple packages:\" % spec)\n print()\n spack.cmd.display_specs(matching, **display_args)\n print()\n has_errors = True\n\n # No installed package matches the query\n if len(matching) == 0:\n tty.error(\"%s does not match any installed packages.\" % spec)\n has_errors = True\n\n specs_from_cli.extend(matching)\n if has_errors:\n tty.die(error_message)\n\n return specs_from_cli\n\n\ndef installed_dependents(specs):\n \"\"\"Returns a dictionary that maps a spec with a list of its\n installed dependents\n\n Args:\n specs: list of specs to be checked for dependents\n\n Returns:\n dictionary of installed dependents\n \"\"\"\n dependents = {}\n for item in specs:\n lst = [x for x in item.package.installed_dependents if x not in specs]\n if lst:\n lst = list(set(lst))\n dependents[item] = lst\n return dependents\n\n\ndef do_uninstall(specs, force):\n \"\"\"\n Uninstalls all the specs in a list.\n\n Args:\n specs: list of specs to be uninstalled\n force: force uninstallation (boolean)\n \"\"\"\n packages = []\n for item in specs:\n try:\n # should work if package is known to spack\n packages.append(item.package)\n except spack.repository.UnknownPackageError:\n # The package.py file has gone away -- but still\n # want to uninstall.\n spack.Package(item).do_uninstall(force=True)\n\n # Sort packages to be uninstalled by the number of installed dependents\n # This ensures we do things in the right order\n def num_installed_deps(pkg):\n return len(pkg.installed_dependents)\n\n packages.sort(key=num_installed_deps)\n for item in packages:\n item.do_uninstall(force=force)\n\n\ndef get_uninstall_list(args):\n specs = [any]\n if args.packages:\n specs = spack.cmd.parse_specs(args.packages)\n # Gets the list of installed specs that match the ones give via cli\n # takes care of '-a' is given in the cli\n uninstall_list = concretize_specs(specs, args.all, args.force)\n # Takes care of '-d'\n dependent_list = installed_dependents(uninstall_list)\n # Process dependent_list and update uninstall_list\n has_error = False\n if dependent_list and not args.dependents and not args.force:\n for spec, lst in dependent_list.items():\n tty.error(\"Will not uninstall %s\" %\n spec.format(\"$_$@$%@$#\", color=True))\n print('')\n print(\"The following packages depend on it:\")\n spack.cmd.display_specs(lst, 
**display_args)\n print('')\n has_error = True\n elif args.dependents:\n for key, lst in dependent_list.items():\n uninstall_list.extend(lst)\n uninstall_list = list(set(uninstall_list))\n if has_error:\n tty.die('You can use spack uninstall --dependents '\n 'to uninstall these dependencies as well')\n\n return uninstall_list\n\n\ndef uninstall(parser, args):\n if not args.packages and not args.all:\n tty.die(\"uninstall requires at least one package argument.\")\n\n with spack.installed_db.write_transaction():\n\n uninstall_list = get_uninstall_list(args)\n\n if not args.yes_to_all:\n tty.msg(\"The following packages will be uninstalled : \")\n print('')\n spack.cmd.display_specs(uninstall_list, **display_args)\n print('')\n spack.cmd.ask_for_confirmation('Do you want to proceed ? ')\n\n # Uninstall everything on the list\n do_uninstall(uninstall_list, args.force)\n", "path": "lib/spack/spack/cmd/uninstall.py"}]} | 2,383 | 905 |
gh_patches_debug_18043 | rasdani/github-patches | git_diff | microsoft__Qcodes-610 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
subplots do not work with MatPlot
A minimal example:
```
import qcodes
import qcodes.tests.data_mocks
ds=qcodes.tests.data_mocks.DataSet1D()
p = qcodes.MatPlot()
p.add(ds.default_parameter_array() )
p = qcodes.MatPlot(subplots=(2,2))
p.add(ds.default_parameter_array()) # fail
p.add(ds.default_parameter_array(), subplot=1) # fail
```
@giulioungaretti
--- END ISSUE ---
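The failure mode can be reproduced outside of qcodes: with `subplots=(2, 2)`, matplotlib's `plt.subplots` returns a 2-D NumPy array of axes, so the 1-based lookup `self.subplots[subplot - 1]` in `MatPlot._get_axes` (see the listing below) hands back a whole row of axes rather than a single `Axes` object, and the subsequent plotting call fails. A short stand-alone demonstration follows; the `Agg` backend is chosen only so the snippet runs without a display, and the `squeeze=False` plus `flatten()` step at the end shows one way to get back to simple 1-D indexing.
```
import matplotlib
matplotlib.use("Agg")  # headless backend so the snippet runs without a display
import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 2)
print(type(axes), axes.shape)   # <class 'numpy.ndarray'> (2, 2)
print(type(axes[0]))            # a 1-D ndarray (a row of axes), not a single Axes

fig2, axes2 = plt.subplots(2, 2, squeeze=False)
flat = axes2.flatten()
print(flat.shape)               # (4,), each element is a single Axes, so 1-based indexing works
```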
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/plots/qcmatplotlib.py`
Content:
```
1 """
2 Live plotting in Jupyter notebooks
3 using the nbagg backend and matplotlib
4 """
5 from collections import Mapping
6
7 import matplotlib.pyplot as plt
8 from matplotlib.transforms import Bbox
9 import numpy as np
10 from numpy.ma import masked_invalid, getmask
11
12 from .base import BasePlot
13
14
15 class MatPlot(BasePlot):
16 """
17 Plot x/y lines or x/y/z heatmap data. The first trace may be included
18 in the constructor, other traces can be added with MatPlot.add()
19
20 Args:
21 *args: shortcut to provide the x/y/z data. See BasePlot.add
22
23 figsize (Tuple[Float, Float]): (width, height) tuple in inches to pass to plt.figure
24 default (8, 5)
25
26 interval: period in seconds between update checks
27
28 subplots: either a sequence (args) or mapping (kwargs) to pass to
29 plt.subplots. default is a single simple subplot (1, 1)
30 you can use this to pass kwargs to the plt.figure constructor
31
32 num: integer or None
33 specifies the index of the matplotlib figure window to use. If None
34 then open a new window
35
36 **kwargs: passed along to MatPlot.add() to add the first data trace
37 """
38 def __init__(self, *args, figsize=None, interval=1, subplots=None, num=None,
39 **kwargs):
40
41 super().__init__(interval)
42
43 self._init_plot(subplots, figsize, num=num)
44 if args or kwargs:
45 self.add(*args, **kwargs)
46
47 def _init_plot(self, subplots=None, figsize=None, num=None):
48 if figsize is None:
49 figsize = (8, 5)
50
51 if subplots is None:
52 subplots = (1, 1)
53
54 if isinstance(subplots, Mapping):
55 self.fig, self.subplots = plt.subplots(figsize=figsize, num=num,
56 **subplots)
57 else:
58 self.fig, self.subplots = plt.subplots(*subplots, num=num,
59 figsize=figsize)
60 if not hasattr(self.subplots, '__len__'):
61 self.subplots = (self.subplots,)
62
63 self.title = self.fig.suptitle('')
64
65 def clear(self, subplots=None, figsize=None):
66 """
67 Clears the plot window and removes all subplots and traces
68 so that the window can be reused.
69 """
70 self.traces = []
71 self.fig.clf()
72 self._init_plot(subplots, figsize, num=self.fig.number)
73
74 def add_to_plot(self, **kwargs):
75 """
76 adds one trace to this MatPlot.
77
78 kwargs: with the following exceptions (mostly the data!), these are
79 passed directly to the matplotlib plotting routine.
80
81 `subplot`: the 1-based axes number to append to (default 1)
82
83 if kwargs include `z`, we will draw a heatmap (ax.pcolormesh):
84 `x`, `y`, and `z` are passed as positional args to pcolormesh
85
86 without `z` we draw a scatter/lines plot (ax.plot):
87 `x`, `y`, and `fmt` (if present) are passed as positional args
88 """
89 # TODO some way to specify overlaid axes?
90 ax = self._get_axes(kwargs)
91 if 'z' in kwargs:
92 plot_object = self._draw_pcolormesh(ax, **kwargs)
93 else:
94 plot_object = self._draw_plot(ax, **kwargs)
95
96 self._update_labels(ax, kwargs)
97 prev_default_title = self.get_default_title()
98
99 self.traces.append({
100 'config': kwargs,
101 'plot_object': plot_object
102 })
103
104 if prev_default_title == self.title.get_text():
105 # in case the user has updated title, don't change it anymore
106 self.title.set_text(self.get_default_title())
107
108 def _get_axes(self, config):
109 return self.subplots[config.get('subplot', 1) - 1]
110
111 def _update_labels(self, ax, config):
112 for axletter in ("x", "y"):
113 if axletter+'label' in config:
114 label = config[axletter+'label']
115 else:
116 label = None
117
118 # find if any kwarg from plot.add in the base class
119 # matches xunit or yunit, signaling a custom unit
120 if axletter+'unit' in config:
121 unit = config[axletter+'unit']
122 else:
123 unit = None
124
125 # find ( more hope to) unit and label from
126 # the data array inside the config
127 getter = getattr(ax, "get_{}label".format(axletter))
128 if axletter in config and not getter():
129 # now if we did not have any kwarg for label or unit
130 # fallback to the data_array
131 if unit is None:
132 _, unit = self.get_label(config[axletter])
133 if label is None:
134 label, _ = self.get_label(config[axletter])
135 elif getter():
136 # The axis already has label. Assume that is correct
137 # We should probably check consistent units and error or warn
138 # if not consistent. It's also not at all clear how to handle
139 # labels/names as these will in general not be consistent on
140 # at least one axis
141 return
142 axsetter = getattr(ax, "set_{}label".format(axletter))
143 axsetter("{} ({})".format(label, unit))
144
145 def update_plot(self):
146 """
147 update the plot. The DataSets themselves have already been updated
148 in update, here we just push the changes to the plot.
149 """
150 # matplotlib doesn't know how to autoscale to a pcolormesh after the
151 # first draw (relim ignores it...) so we have to do this ourselves
152 bboxes = dict(zip(self.subplots, [[] for p in self.subplots]))
153
154 for trace in self.traces:
155 config = trace['config']
156 plot_object = trace['plot_object']
157 if 'z' in config:
158 # pcolormesh doesn't seem to allow editing x and y data, only z
159 # so instead, we'll remove and re-add the data.
160 if plot_object:
161 plot_object.remove()
162
163 ax = self._get_axes(config)
164 plot_object = self._draw_pcolormesh(ax, **config)
165 trace['plot_object'] = plot_object
166
167 if plot_object:
168 bboxes[plot_object.axes].append(
169 plot_object.get_datalim(plot_object.axes.transData))
170 else:
171 for axletter in 'xy':
172 setter = 'set_' + axletter + 'data'
173 if axletter in config:
174 getattr(plot_object, setter)(config[axletter])
175
176 for ax in self.subplots:
177 if ax.get_autoscale_on():
178 ax.relim()
179 if bboxes[ax]:
180 bbox = Bbox.union(bboxes[ax])
181 if np.all(np.isfinite(ax.dataLim)):
182 # should take care of the case of lines + heatmaps
183 # where there's already a finite dataLim from relim
184 ax.dataLim.set(Bbox.union(ax.dataLim, bbox))
185 else:
186 # when there's only a heatmap, relim gives inf bounds
187 # so just completely overwrite it
188 ax.dataLim = bbox
189 ax.autoscale()
190
191 self.fig.canvas.draw()
192
193 def _draw_plot(self, ax, y, x=None, fmt=None, subplot=1,
194 xlabel=None,
195 ylabel=None,
196 zlabel=None,
197 xunit=None,
198 yunit=None,
199 zunit=None,
200 **kwargs):
201 # NOTE(alexj)stripping out subplot because which subplot we're in is already
202 # described by ax, and it's not a kwarg to matplotlib's ax.plot. But I
203 # didn't want to strip it out of kwargs earlier because it should stay
204 # part of trace['config'].
205 args = [arg for arg in [x, y, fmt] if arg is not None]
206 line, = ax.plot(*args, **kwargs)
207 return line
208
209 def _draw_pcolormesh(self, ax, z, x=None, y=None, subplot=1,
210 xlabel=None,
211 ylabel=None,
212 zlabel=None,
213 xunit=None,
214 yunit=None,
215 zunit=None,
216 **kwargs):
217 # NOTE(alexj)stripping out subplot because which subplot we're in is already
218 # described by ax, and it's not a kwarg to matplotlib's ax.plot. But I
219 # didn't want to strip it out of kwargs earlier because it should stay
220 # part of trace['config'].
221 args = [masked_invalid(arg) for arg in [x, y, z]
222 if arg is not None]
223
224 for arg in args:
225 if np.all(getmask(arg)):
226 # if any entire array is masked, don't draw at all
227 # there's nothing to draw, and anyway it throws a warning
228 return False
229 pc = ax.pcolormesh(*args, **kwargs)
230
231 if getattr(ax, 'qcodes_colorbar', None):
232 # update_normal doesn't seem to work...
233 ax.qcodes_colorbar.update_bruteforce(pc)
234 else:
235 # TODO: what if there are several colormeshes on this subplot,
236 # do they get the same colorscale?
237 # We should make sure they do, and have it include
238 # the full range of both.
239 ax.qcodes_colorbar = self.fig.colorbar(pc, ax=ax)
240
241 # ideally this should have been in _update_labels, but
242 # the colorbar doesn't necessarily exist there.
243 # I guess we could create the colorbar no matter what,
244 # and just give it a dummy mappable to start, so we could
245 # put this where it belongs.
246 if zunit is None:
247 _, zunit = self.get_label(z)
248 if zlabel is None:
249 zlabel, _ = self.get_label(z)
250
251 label = "{} ({})".format(zlabel, zunit)
252 ax.qcodes_colorbar.set_label(label)
253
254 return pc
255
256 def save(self, filename=None):
257 """
258 Save current plot to filename, by default
259 to the location corresponding to the default
260 title.
261
262 Args:
263 filename (Optional[str]): Location of the file
264 """
265 default = "{}.png".format(self.get_default_title())
266 filename = filename or default
267 self.fig.savefig(filename)
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/plots/qcmatplotlib.py b/qcodes/plots/qcmatplotlib.py
--- a/qcodes/plots/qcmatplotlib.py
+++ b/qcodes/plots/qcmatplotlib.py
@@ -53,12 +53,16 @@
if isinstance(subplots, Mapping):
self.fig, self.subplots = plt.subplots(figsize=figsize, num=num,
- **subplots)
+ **subplots, squeeze=False)
else:
self.fig, self.subplots = plt.subplots(*subplots, num=num,
- figsize=figsize)
- if not hasattr(self.subplots, '__len__'):
- self.subplots = (self.subplots,)
+ figsize=figsize, squeeze=False)
+
+ # squeeze=False ensures that subplots is always a 2D array independent of the number
+ # of subplots.
+ # However the qcodes api assumes that subplots is always a 1D array
+ # so flatten here
+ self.subplots = self.subplots.flatten()
self.title = self.fig.suptitle('')
| {"golden_diff": "diff --git a/qcodes/plots/qcmatplotlib.py b/qcodes/plots/qcmatplotlib.py\n--- a/qcodes/plots/qcmatplotlib.py\n+++ b/qcodes/plots/qcmatplotlib.py\n@@ -53,12 +53,16 @@\n \n if isinstance(subplots, Mapping):\n self.fig, self.subplots = plt.subplots(figsize=figsize, num=num,\n- **subplots)\n+ **subplots, squeeze=False)\n else:\n self.fig, self.subplots = plt.subplots(*subplots, num=num,\n- figsize=figsize)\n- if not hasattr(self.subplots, '__len__'):\n- self.subplots = (self.subplots,)\n+ figsize=figsize, squeeze=False)\n+\n+ # squeeze=False ensures that subplots is always a 2D array independent of the number\n+ # of subplots.\n+ # However the qcodes api assumes that subplots is always a 1D array\n+ # so flatten here\n+ self.subplots = self.subplots.flatten()\n \n self.title = self.fig.suptitle('')\n", "issue": "subplots do not work with MatPlot\nA minimal example:\r\n```\r\nimport qcodes\r\nimport qcodes.tests.data_mocks\r\n\r\nds=qcodes.tests.data_mocks.DataSet1D()\r\np = qcodes.MatPlot()\r\np.add(ds.default_parameter_array() )\r\n\r\np = qcodes.MatPlot(subplots=(2,2))\r\np.add(ds.default_parameter_array()) # fail\r\np.add(ds.default_parameter_array(), subplot=1) # fail\r\n```\r\n\r\n@giulioungaretti \nsubplots do not work with MatPlot\nA minimal example:\r\n```\r\nimport qcodes\r\nimport qcodes.tests.data_mocks\r\n\r\nds=qcodes.tests.data_mocks.DataSet1D()\r\np = qcodes.MatPlot()\r\np.add(ds.default_parameter_array() )\r\n\r\np = qcodes.MatPlot(subplots=(2,2))\r\np.add(ds.default_parameter_array()) # fail\r\np.add(ds.default_parameter_array(), subplot=1) # fail\r\n```\r\n\r\n@giulioungaretti \n", "before_files": [{"content": "\"\"\"\nLive plotting in Jupyter notebooks\nusing the nbagg backend and matplotlib\n\"\"\"\nfrom collections import Mapping\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nimport numpy as np\nfrom numpy.ma import masked_invalid, getmask\n\nfrom .base import BasePlot\n\n\nclass MatPlot(BasePlot):\n \"\"\"\n Plot x/y lines or x/y/z heatmap data. The first trace may be included\n in the constructor, other traces can be added with MatPlot.add()\n\n Args:\n *args: shortcut to provide the x/y/z data. See BasePlot.add\n\n figsize (Tuple[Float, Float]): (width, height) tuple in inches to pass to plt.figure\n default (8, 5)\n\n interval: period in seconds between update checks\n\n subplots: either a sequence (args) or mapping (kwargs) to pass to\n plt.subplots. default is a single simple subplot (1, 1)\n you can use this to pass kwargs to the plt.figure constructor\n\n num: integer or None\n specifies the index of the matplotlib figure window to use. 
If None\n then open a new window\n\n **kwargs: passed along to MatPlot.add() to add the first data trace\n \"\"\"\n def __init__(self, *args, figsize=None, interval=1, subplots=None, num=None,\n **kwargs):\n\n super().__init__(interval)\n\n self._init_plot(subplots, figsize, num=num)\n if args or kwargs:\n self.add(*args, **kwargs)\n\n def _init_plot(self, subplots=None, figsize=None, num=None):\n if figsize is None:\n figsize = (8, 5)\n\n if subplots is None:\n subplots = (1, 1)\n\n if isinstance(subplots, Mapping):\n self.fig, self.subplots = plt.subplots(figsize=figsize, num=num,\n **subplots)\n else:\n self.fig, self.subplots = plt.subplots(*subplots, num=num,\n figsize=figsize)\n if not hasattr(self.subplots, '__len__'):\n self.subplots = (self.subplots,)\n\n self.title = self.fig.suptitle('')\n\n def clear(self, subplots=None, figsize=None):\n \"\"\"\n Clears the plot window and removes all subplots and traces\n so that the window can be reused.\n \"\"\"\n self.traces = []\n self.fig.clf()\n self._init_plot(subplots, figsize, num=self.fig.number)\n\n def add_to_plot(self, **kwargs):\n \"\"\"\n adds one trace to this MatPlot.\n\n kwargs: with the following exceptions (mostly the data!), these are\n passed directly to the matplotlib plotting routine.\n\n `subplot`: the 1-based axes number to append to (default 1)\n\n if kwargs include `z`, we will draw a heatmap (ax.pcolormesh):\n `x`, `y`, and `z` are passed as positional args to pcolormesh\n\n without `z` we draw a scatter/lines plot (ax.plot):\n `x`, `y`, and `fmt` (if present) are passed as positional args\n \"\"\"\n # TODO some way to specify overlaid axes?\n ax = self._get_axes(kwargs)\n if 'z' in kwargs:\n plot_object = self._draw_pcolormesh(ax, **kwargs)\n else:\n plot_object = self._draw_plot(ax, **kwargs)\n\n self._update_labels(ax, kwargs)\n prev_default_title = self.get_default_title()\n\n self.traces.append({\n 'config': kwargs,\n 'plot_object': plot_object\n })\n\n if prev_default_title == self.title.get_text():\n # in case the user has updated title, don't change it anymore\n self.title.set_text(self.get_default_title())\n\n def _get_axes(self, config):\n return self.subplots[config.get('subplot', 1) - 1]\n\n def _update_labels(self, ax, config):\n for axletter in (\"x\", \"y\"):\n if axletter+'label' in config:\n label = config[axletter+'label']\n else:\n label = None\n\n # find if any kwarg from plot.add in the base class\n # matches xunit or yunit, signaling a custom unit\n if axletter+'unit' in config:\n unit = config[axletter+'unit']\n else:\n unit = None\n\n # find ( more hope to) unit and label from\n # the data array inside the config\n getter = getattr(ax, \"get_{}label\".format(axletter))\n if axletter in config and not getter():\n # now if we did not have any kwarg for label or unit\n # fallback to the data_array\n if unit is None:\n _, unit = self.get_label(config[axletter])\n if label is None:\n label, _ = self.get_label(config[axletter])\n elif getter():\n # The axis already has label. Assume that is correct\n # We should probably check consistent units and error or warn\n # if not consistent. It's also not at all clear how to handle\n # labels/names as these will in general not be consistent on\n # at least one axis\n return\n axsetter = getattr(ax, \"set_{}label\".format(axletter))\n axsetter(\"{} ({})\".format(label, unit))\n\n def update_plot(self):\n \"\"\"\n update the plot. 
The DataSets themselves have already been updated\n in update, here we just push the changes to the plot.\n \"\"\"\n # matplotlib doesn't know how to autoscale to a pcolormesh after the\n # first draw (relim ignores it...) so we have to do this ourselves\n bboxes = dict(zip(self.subplots, [[] for p in self.subplots]))\n\n for trace in self.traces:\n config = trace['config']\n plot_object = trace['plot_object']\n if 'z' in config:\n # pcolormesh doesn't seem to allow editing x and y data, only z\n # so instead, we'll remove and re-add the data.\n if plot_object:\n plot_object.remove()\n\n ax = self._get_axes(config)\n plot_object = self._draw_pcolormesh(ax, **config)\n trace['plot_object'] = plot_object\n\n if plot_object:\n bboxes[plot_object.axes].append(\n plot_object.get_datalim(plot_object.axes.transData))\n else:\n for axletter in 'xy':\n setter = 'set_' + axletter + 'data'\n if axletter in config:\n getattr(plot_object, setter)(config[axletter])\n\n for ax in self.subplots:\n if ax.get_autoscale_on():\n ax.relim()\n if bboxes[ax]:\n bbox = Bbox.union(bboxes[ax])\n if np.all(np.isfinite(ax.dataLim)):\n # should take care of the case of lines + heatmaps\n # where there's already a finite dataLim from relim\n ax.dataLim.set(Bbox.union(ax.dataLim, bbox))\n else:\n # when there's only a heatmap, relim gives inf bounds\n # so just completely overwrite it\n ax.dataLim = bbox\n ax.autoscale()\n\n self.fig.canvas.draw()\n\n def _draw_plot(self, ax, y, x=None, fmt=None, subplot=1,\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xunit=None,\n yunit=None,\n zunit=None,\n **kwargs):\n # NOTE(alexj)stripping out subplot because which subplot we're in is already\n # described by ax, and it's not a kwarg to matplotlib's ax.plot. But I\n # didn't want to strip it out of kwargs earlier because it should stay\n # part of trace['config'].\n args = [arg for arg in [x, y, fmt] if arg is not None]\n line, = ax.plot(*args, **kwargs)\n return line\n\n def _draw_pcolormesh(self, ax, z, x=None, y=None, subplot=1,\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xunit=None,\n yunit=None,\n zunit=None,\n **kwargs):\n # NOTE(alexj)stripping out subplot because which subplot we're in is already\n # described by ax, and it's not a kwarg to matplotlib's ax.plot. 
But I\n # didn't want to strip it out of kwargs earlier because it should stay\n # part of trace['config'].\n args = [masked_invalid(arg) for arg in [x, y, z]\n if arg is not None]\n\n for arg in args:\n if np.all(getmask(arg)):\n # if any entire array is masked, don't draw at all\n # there's nothing to draw, and anyway it throws a warning\n return False\n pc = ax.pcolormesh(*args, **kwargs)\n\n if getattr(ax, 'qcodes_colorbar', None):\n # update_normal doesn't seem to work...\n ax.qcodes_colorbar.update_bruteforce(pc)\n else:\n # TODO: what if there are several colormeshes on this subplot,\n # do they get the same colorscale?\n # We should make sure they do, and have it include\n # the full range of both.\n ax.qcodes_colorbar = self.fig.colorbar(pc, ax=ax)\n\n # ideally this should have been in _update_labels, but\n # the colorbar doesn't necessarily exist there.\n # I guess we could create the colorbar no matter what,\n # and just give it a dummy mappable to start, so we could\n # put this where it belongs.\n if zunit is None:\n _, zunit = self.get_label(z)\n if zlabel is None:\n zlabel, _ = self.get_label(z)\n\n label = \"{} ({})\".format(zlabel, zunit)\n ax.qcodes_colorbar.set_label(label)\n\n return pc\n\n def save(self, filename=None):\n \"\"\"\n Save current plot to filename, by default\n to the location corresponding to the default \n title.\n\n Args:\n filename (Optional[str]): Location of the file\n \"\"\"\n default = \"{}.png\".format(self.get_default_title())\n filename = filename or default\n self.fig.savefig(filename)\n", "path": "qcodes/plots/qcmatplotlib.py"}], "after_files": [{"content": "\"\"\"\nLive plotting in Jupyter notebooks\nusing the nbagg backend and matplotlib\n\"\"\"\nfrom collections import Mapping\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.transforms import Bbox\nimport numpy as np\nfrom numpy.ma import masked_invalid, getmask\n\nfrom .base import BasePlot\n\n\nclass MatPlot(BasePlot):\n \"\"\"\n Plot x/y lines or x/y/z heatmap data. The first trace may be included\n in the constructor, other traces can be added with MatPlot.add()\n\n Args:\n *args: shortcut to provide the x/y/z data. See BasePlot.add\n\n figsize (Tuple[Float, Float]): (width, height) tuple in inches to pass to plt.figure\n default (8, 5)\n\n interval: period in seconds between update checks\n\n subplots: either a sequence (args) or mapping (kwargs) to pass to\n plt.subplots. default is a single simple subplot (1, 1)\n you can use this to pass kwargs to the plt.figure constructor\n\n num: integer or None\n specifies the index of the matplotlib figure window to use. 
If None\n then open a new window\n\n **kwargs: passed along to MatPlot.add() to add the first data trace\n \"\"\"\n def __init__(self, *args, figsize=None, interval=1, subplots=None, num=None,\n **kwargs):\n\n super().__init__(interval)\n\n self._init_plot(subplots, figsize, num=num)\n if args or kwargs:\n self.add(*args, **kwargs)\n\n def _init_plot(self, subplots=None, figsize=None, num=None):\n if figsize is None:\n figsize = (8, 5)\n\n if subplots is None:\n subplots = (1, 1)\n\n if isinstance(subplots, Mapping):\n self.fig, self.subplots = plt.subplots(figsize=figsize, num=num,\n **subplots, squeeze=False)\n else:\n self.fig, self.subplots = plt.subplots(*subplots, num=num,\n figsize=figsize, squeeze=False)\n\n # squeeze=False ensures that subplots is always a 2D array independent of the number\n # of subplots.\n # However the qcodes api assumes that subplots is always a 1D array\n # so flatten here\n self.subplots = self.subplots.flatten()\n\n self.title = self.fig.suptitle('')\n\n def clear(self, subplots=None, figsize=None):\n \"\"\"\n Clears the plot window and removes all subplots and traces\n so that the window can be reused.\n \"\"\"\n self.traces = []\n self.fig.clf()\n self._init_plot(subplots, figsize, num=self.fig.number)\n\n def add_to_plot(self, **kwargs):\n \"\"\"\n adds one trace to this MatPlot.\n\n kwargs: with the following exceptions (mostly the data!), these are\n passed directly to the matplotlib plotting routine.\n\n `subplot`: the 1-based axes number to append to (default 1)\n\n if kwargs include `z`, we will draw a heatmap (ax.pcolormesh):\n `x`, `y`, and `z` are passed as positional args to pcolormesh\n\n without `z` we draw a scatter/lines plot (ax.plot):\n `x`, `y`, and `fmt` (if present) are passed as positional args\n \"\"\"\n # TODO some way to specify overlaid axes?\n ax = self._get_axes(kwargs)\n if 'z' in kwargs:\n plot_object = self._draw_pcolormesh(ax, **kwargs)\n else:\n plot_object = self._draw_plot(ax, **kwargs)\n\n self._update_labels(ax, kwargs)\n prev_default_title = self.get_default_title()\n\n self.traces.append({\n 'config': kwargs,\n 'plot_object': plot_object\n })\n\n if prev_default_title == self.title.get_text():\n # in case the user has updated title, don't change it anymore\n self.title.set_text(self.get_default_title())\n\n def _get_axes(self, config):\n return self.subplots[config.get('subplot', 1) - 1]\n\n def _update_labels(self, ax, config):\n for axletter in (\"x\", \"y\"):\n if axletter+'label' in config:\n label = config[axletter+'label']\n else:\n label = None\n\n # find if any kwarg from plot.add in the base class\n # matches xunit or yunit, signaling a custom unit\n if axletter+'unit' in config:\n unit = config[axletter+'unit']\n else:\n unit = None\n\n # find ( more hope to) unit and label from\n # the data array inside the config\n getter = getattr(ax, \"get_{}label\".format(axletter))\n if axletter in config and not getter():\n # now if we did not have any kwarg for label or unit\n # fallback to the data_array\n if unit is None:\n _, unit = self.get_label(config[axletter])\n if label is None:\n label, _ = self.get_label(config[axletter])\n elif getter():\n # The axis already has label. Assume that is correct\n # We should probably check consistent units and error or warn\n # if not consistent. 
It's also not at all clear how to handle\n # labels/names as these will in general not be consistent on\n # at least one axis\n return\n axsetter = getattr(ax, \"set_{}label\".format(axletter))\n axsetter(\"{} ({})\".format(label, unit))\n\n def update_plot(self):\n \"\"\"\n update the plot. The DataSets themselves have already been updated\n in update, here we just push the changes to the plot.\n \"\"\"\n # matplotlib doesn't know how to autoscale to a pcolormesh after the\n # first draw (relim ignores it...) so we have to do this ourselves\n bboxes = dict(zip(self.subplots, [[] for p in self.subplots]))\n\n for trace in self.traces:\n config = trace['config']\n plot_object = trace['plot_object']\n if 'z' in config:\n # pcolormesh doesn't seem to allow editing x and y data, only z\n # so instead, we'll remove and re-add the data.\n if plot_object:\n plot_object.remove()\n\n ax = self._get_axes(config)\n plot_object = self._draw_pcolormesh(ax, **config)\n trace['plot_object'] = plot_object\n\n if plot_object:\n bboxes[plot_object.axes].append(\n plot_object.get_datalim(plot_object.axes.transData))\n else:\n for axletter in 'xy':\n setter = 'set_' + axletter + 'data'\n if axletter in config:\n getattr(plot_object, setter)(config[axletter])\n\n for ax in self.subplots:\n if ax.get_autoscale_on():\n ax.relim()\n if bboxes[ax]:\n bbox = Bbox.union(bboxes[ax])\n if np.all(np.isfinite(ax.dataLim)):\n # should take care of the case of lines + heatmaps\n # where there's already a finite dataLim from relim\n ax.dataLim.set(Bbox.union(ax.dataLim, bbox))\n else:\n # when there's only a heatmap, relim gives inf bounds\n # so just completely overwrite it\n ax.dataLim = bbox\n ax.autoscale()\n\n self.fig.canvas.draw()\n\n def _draw_plot(self, ax, y, x=None, fmt=None, subplot=1,\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xunit=None,\n yunit=None,\n zunit=None,\n **kwargs):\n # NOTE(alexj)stripping out subplot because which subplot we're in is already\n # described by ax, and it's not a kwarg to matplotlib's ax.plot. But I\n # didn't want to strip it out of kwargs earlier because it should stay\n # part of trace['config'].\n args = [arg for arg in [x, y, fmt] if arg is not None]\n line, = ax.plot(*args, **kwargs)\n return line\n\n def _draw_pcolormesh(self, ax, z, x=None, y=None, subplot=1,\n xlabel=None,\n ylabel=None,\n zlabel=None,\n xunit=None,\n yunit=None,\n zunit=None,\n **kwargs):\n # NOTE(alexj)stripping out subplot because which subplot we're in is already\n # described by ax, and it's not a kwarg to matplotlib's ax.plot. 
But I\n # didn't want to strip it out of kwargs earlier because it should stay\n # part of trace['config'].\n args = [masked_invalid(arg) for arg in [x, y, z]\n if arg is not None]\n\n for arg in args:\n if np.all(getmask(arg)):\n # if any entire array is masked, don't draw at all\n # there's nothing to draw, and anyway it throws a warning\n return False\n pc = ax.pcolormesh(*args, **kwargs)\n\n if getattr(ax, 'qcodes_colorbar', None):\n # update_normal doesn't seem to work...\n ax.qcodes_colorbar.update_bruteforce(pc)\n else:\n # TODO: what if there are several colormeshes on this subplot,\n # do they get the same colorscale?\n # We should make sure they do, and have it include\n # the full range of both.\n ax.qcodes_colorbar = self.fig.colorbar(pc, ax=ax)\n\n # ideally this should have been in _update_labels, but\n # the colorbar doesn't necessarily exist there.\n # I guess we could create the colorbar no matter what,\n # and just give it a dummy mappable to start, so we could\n # put this where it belongs.\n if zunit is None:\n _, zunit = self.get_label(z)\n if zlabel is None:\n zlabel, _ = self.get_label(z)\n\n label = \"{} ({})\".format(zlabel, zunit)\n ax.qcodes_colorbar.set_label(label)\n\n return pc\n\n def save(self, filename=None):\n \"\"\"\n Save current plot to filename, by default\n to the location corresponding to the default \n title.\n\n Args:\n filename (Optional[str]): Location of the file\n \"\"\"\n default = \"{}.png\".format(self.get_default_title())\n filename = filename or default\n self.fig.savefig(filename)\n", "path": "qcodes/plots/qcmatplotlib.py"}]} | 3,436 | 236 |
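As an aside on the qcodes record above: the recorded fix passes `squeeze=False` to `plt.subplots` so that matplotlib always returns a 2-D array of axes, which is then flattened into the 1-D sequence the rest of the plotting code expects. A minimal sketch of that behavior in plain matplotlib (independent of qcodes; the variable names are illustrative only) looks like this:

```python
import matplotlib.pyplot as plt

# With squeeze=False the returned axes container is always a 2-D ndarray,
# even for a single subplot, so calling code never has to special-case it.
fig, axes = plt.subplots(1, 1, squeeze=False)
print(axes.shape)   # (1, 1)

fig2, axes2 = plt.subplots(2, 2, squeeze=False)
print(axes2.shape)  # (2, 2)

# Flattening yields a 1-D sequence, so a 1-based `subplot` argument can be
# mapped to an index with `subplot - 1` regardless of the grid shape.
flat = axes2.flatten()
print(len(flat))    # 4
```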
gh_patches_debug_25874 | rasdani/github-patches | git_diff | encode__starlette-985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Arbitrary path traversal possible with StaticFiles
If you pass in an absolute path by using a URL formatted like this: http://localhost:5000/staticmount/c%3a%5cwindows%5cwin.ini
Then you can read any file on the host machine.
This is because of the following Python behavior:
```python
>>> dir = "c:\workspace\somepoject\somepackag\statics"
>>> path = "c:\windows\win.ini"
>>> os.path.join(dir, path)
'c:\\windows\\win.ini'
```
So staticfiles.py needs to be updated by adding a check for absolute paths inside lookup_path(...).
--- END ISSUE ---
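To make the proposed check concrete, here is a minimal, hypothetical sketch (a standalone helper, not Starlette's actual code; the function name and the use of `os.path.realpath` with `os.path.commonprefix` are assumptions) of how a lookup can reject requests that resolve outside the static directory:

```python
import os

def is_safe_lookup(directory: str, requested: str) -> bool:
    # os.path.join discards `directory` entirely when `requested` is absolute,
    # which is exactly the hole described above, so check the resolved result.
    full_path = os.path.realpath(os.path.join(directory, requested))
    base = os.path.realpath(directory)
    # Only serve files whose resolved path still starts with the static root.
    return os.path.commonprefix([full_path, base]) == base

static_root = os.path.realpath("statics")
print(is_safe_lookup(static_root, "css/app.css"))       # True (resolves under ./statics)
print(is_safe_lookup(static_root, "/etc/passwd"))        # False on POSIX
print(is_safe_lookup(static_root, "../../etc/passwd"))   # False
```

Note that `os.path.commonprefix` compares strings character by character rather than path components, so a stricter variant could use `os.path.commonpath` instead.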
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/staticfiles.py`
Content:
```
1 import importlib.util
2 import os
3 import stat
4 import typing
5 from email.utils import parsedate
6
7 from aiofiles.os import stat as aio_stat
8
9 from starlette.datastructures import URL, Headers
10 from starlette.responses import (
11 FileResponse,
12 PlainTextResponse,
13 RedirectResponse,
14 Response,
15 )
16 from starlette.types import Receive, Scope, Send
17
18
19 class NotModifiedResponse(Response):
20 NOT_MODIFIED_HEADERS = (
21 "cache-control",
22 "content-location",
23 "date",
24 "etag",
25 "expires",
26 "vary",
27 )
28
29 def __init__(self, headers: Headers):
30 super().__init__(
31 status_code=304,
32 headers={
33 name: value
34 for name, value in headers.items()
35 if name in self.NOT_MODIFIED_HEADERS
36 },
37 )
38
39
40 class StaticFiles:
41 def __init__(
42 self,
43 *,
44 directory: str = None,
45 packages: typing.List[str] = None,
46 html: bool = False,
47 check_dir: bool = True,
48 ) -> None:
49 self.directory = directory
50 self.packages = packages
51 self.all_directories = self.get_directories(directory, packages)
52 self.html = html
53 self.config_checked = False
54 if check_dir and directory is not None and not os.path.isdir(directory):
55 raise RuntimeError(f"Directory '{directory}' does not exist")
56
57 def get_directories(
58 self, directory: str = None, packages: typing.List[str] = None
59 ) -> typing.List[str]:
60 """
61 Given `directory` and `packages` arguments, return a list of all the
62 directories that should be used for serving static files from.
63 """
64 directories = []
65 if directory is not None:
66 directories.append(directory)
67
68 for package in packages or []:
69 spec = importlib.util.find_spec(package)
70 assert spec is not None, f"Package {package!r} could not be found."
71 assert (
72 spec.origin is not None
73 ), f"Directory 'statics' in package {package!r} could not be found."
74 directory = os.path.normpath(os.path.join(spec.origin, "..", "statics"))
75 assert os.path.isdir(
76 directory
77 ), f"Directory 'statics' in package {package!r} could not be found."
78 directories.append(directory)
79
80 return directories
81
82 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
83 """
84 The ASGI entry point.
85 """
86 assert scope["type"] == "http"
87
88 if not self.config_checked:
89 await self.check_config()
90 self.config_checked = True
91
92 path = self.get_path(scope)
93 response = await self.get_response(path, scope)
94 await response(scope, receive, send)
95
96 def get_path(self, scope: Scope) -> str:
97 """
98 Given the ASGI scope, return the `path` string to serve up,
99 with OS specific path seperators, and any '..', '.' components removed.
100 """
101 return os.path.normpath(os.path.join(*scope["path"].split("/")))
102
103 async def get_response(self, path: str, scope: Scope) -> Response:
104 """
105 Returns an HTTP response, given the incoming path, method and request headers.
106 """
107 if scope["method"] not in ("GET", "HEAD"):
108 return PlainTextResponse("Method Not Allowed", status_code=405)
109
110 if path.startswith(".."):
111 # Most clients will normalize the path, so we shouldn't normally
112 # get this, but don't allow misbehaving clients to break out of
113 # the static files directory.
114 return PlainTextResponse("Not Found", status_code=404)
115
116 full_path, stat_result = await self.lookup_path(path)
117
118 if stat_result and stat.S_ISREG(stat_result.st_mode):
119 # We have a static file to serve.
120 return self.file_response(full_path, stat_result, scope)
121
122 elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
123 # We're in HTML mode, and have got a directory URL.
124 # Check if we have 'index.html' file to serve.
125 index_path = os.path.join(path, "index.html")
126 full_path, stat_result = await self.lookup_path(index_path)
127 if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
128 if not scope["path"].endswith("/"):
129 # Directory URLs should redirect to always end in "/".
130 url = URL(scope=scope)
131 url = url.replace(path=url.path + "/")
132 return RedirectResponse(url=url)
133 return self.file_response(full_path, stat_result, scope)
134
135 if self.html:
136 # Check for '404.html' if we're in HTML mode.
137 full_path, stat_result = await self.lookup_path("404.html")
138 if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
139 return self.file_response(
140 full_path, stat_result, scope, status_code=404
141 )
142
143 return PlainTextResponse("Not Found", status_code=404)
144
145 async def lookup_path(
146 self, path: str
147 ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
148 for directory in self.all_directories:
149 full_path = os.path.join(directory, path)
150 try:
151 stat_result = await aio_stat(full_path)
152 return (full_path, stat_result)
153 except FileNotFoundError:
154 pass
155 return ("", None)
156
157 def file_response(
158 self,
159 full_path: str,
160 stat_result: os.stat_result,
161 scope: Scope,
162 status_code: int = 200,
163 ) -> Response:
164 method = scope["method"]
165 request_headers = Headers(scope=scope)
166
167 response = FileResponse(
168 full_path, status_code=status_code, stat_result=stat_result, method=method
169 )
170 if self.is_not_modified(response.headers, request_headers):
171 return NotModifiedResponse(response.headers)
172 return response
173
174 async def check_config(self) -> None:
175 """
176 Perform a one-off configuration check that StaticFiles is actually
177 pointed at a directory, so that we can raise loud errors rather than
178 just returning 404 responses.
179 """
180 if self.directory is None:
181 return
182
183 try:
184 stat_result = await aio_stat(self.directory)
185 except FileNotFoundError:
186 raise RuntimeError(
187 f"StaticFiles directory '{self.directory}' does not exist."
188 )
189 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
190 raise RuntimeError(
191 f"StaticFiles path '{self.directory}' is not a directory."
192 )
193
194 def is_not_modified(
195 self, response_headers: Headers, request_headers: Headers
196 ) -> bool:
197 """
198 Given the request and response headers, return `True` if an HTTP
199 "Not Modified" response could be returned instead.
200 """
201 try:
202 if_none_match = request_headers["if-none-match"]
203 etag = response_headers["etag"]
204 if if_none_match == etag:
205 return True
206 except KeyError:
207 pass
208
209 try:
210 if_modified_since = parsedate(request_headers["if-modified-since"])
211 last_modified = parsedate(response_headers["last-modified"])
212 if (
213 if_modified_since is not None
214 and last_modified is not None
215 and if_modified_since >= last_modified
216 ):
217 return True
218 except KeyError:
219 pass
220
221 return False
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -107,12 +107,6 @@
if scope["method"] not in ("GET", "HEAD"):
return PlainTextResponse("Method Not Allowed", status_code=405)
- if path.startswith(".."):
- # Most clients will normalize the path, so we shouldn't normally
- # get this, but don't allow misbehaving clients to break out of
- # the static files directory.
- return PlainTextResponse("Not Found", status_code=404)
-
full_path, stat_result = await self.lookup_path(path)
if stat_result and stat.S_ISREG(stat_result.st_mode):
@@ -147,6 +141,12 @@
) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
full_path = os.path.join(directory, path)
+ if (
+ os.path.commonprefix([os.path.realpath(full_path), directory])
+ != directory
+ ):
+ # Don't allow misbehaving clients to break out of the static files directory.
+ continue
try:
stat_result = await aio_stat(full_path)
return (full_path, stat_result)
| {"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -107,12 +107,6 @@\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n \n- if path.startswith(\"..\"):\n- # Most clients will normalize the path, so we shouldn't normally\n- # get this, but don't allow misbehaving clients to break out of\n- # the static files directory.\n- return PlainTextResponse(\"Not Found\", status_code=404)\n-\n full_path, stat_result = await self.lookup_path(path)\n \n if stat_result and stat.S_ISREG(stat_result.st_mode):\n@@ -147,6 +141,12 @@\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.join(directory, path)\n+ if (\n+ os.path.commonprefix([os.path.realpath(full_path), directory])\n+ != directory\n+ ):\n+ # Don't allow misbehaving clients to break out of the static files directory.\n+ continue\n try:\n stat_result = await aio_stat(full_path)\n return (full_path, stat_result)\n", "issue": "Arbitrary path traversal possible with StaticFiles\nIf you pass in an absolute path by using an URL formatted like this: http://localhost:5000/staticmount/c%3a%5cwindows%5cwin.ini\r\n\r\nThen you can read any file on the host machine.\r\n\r\nThis is because of the following python behavior:\r\n\r\n```python\r\n>>> dir = \"c:\\workspace\\somepoject\\somepackag\\statics\"\r\n>>> path = \"c:\\windows\\win.ini\"\r\n>>> os.path.join(dir, path)\r\n'c:\\\\windows\\\\win.ini'\r\n```\r\n\r\nSo staticfiles.py needs to be updated by placing in a check for absolute paths inside lookup_path(...).\n", "before_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.responses import (\n FileResponse,\n PlainTextResponse,\n RedirectResponse,\n Response,\n)\nfrom starlette.types import Receive, Scope, Send\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: str = None,\n packages: typing.List[str] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self, directory: str = None, packages: typing.List[str] = None\n ) -> typing.List[str]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert (\n spec.origin is not None\n ), f\"Directory 'statics' in package {package!r} could not be 
found.\"\n directory = os.path.normpath(os.path.join(spec.origin, \"..\", \"statics\"))\n assert os.path.isdir(\n directory\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n directories.append(directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path seperators, and any '..', '.' components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n\n if path.startswith(\"..\"):\n # Most clients will normalize the path, so we shouldn't normally\n # get this, but don't allow misbehaving clients to break out of\n # the static files directory.\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n full_path, stat_result = await self.lookup_path(path)\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await self.lookup_path(index_path)\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await self.lookup_path(\"404.html\")\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n return self.file_response(\n full_path, stat_result, scope, status_code=404\n )\n\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n async def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.join(directory, path)\n try:\n stat_result = await aio_stat(full_path)\n return (full_path, stat_result)\n except FileNotFoundError:\n pass\n return (\"\", None)\n\n def file_response(\n self,\n full_path: str,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise 
loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await aio_stat(self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}], "after_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nfrom aiofiles.os import stat as aio_stat\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.responses import (\n FileResponse,\n PlainTextResponse,\n RedirectResponse,\n Response,\n)\nfrom starlette.types import Receive, Scope, Send\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: str = None,\n packages: typing.List[str] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self, directory: str = None, packages: typing.List[str] = None\n ) -> typing.List[str]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert (\n spec.origin is not None\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n directory = os.path.normpath(os.path.join(spec.origin, \"..\", \"statics\"))\n assert os.path.isdir(\n directory\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n directories.append(directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = 
self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path seperators, and any '..', '.' components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n\n full_path, stat_result = await self.lookup_path(path)\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await self.lookup_path(index_path)\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await self.lookup_path(\"404.html\")\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n return self.file_response(\n full_path, stat_result, scope, status_code=404\n )\n\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n async def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.join(directory, path)\n if (\n os.path.commonprefix([os.path.realpath(full_path), directory])\n != directory\n ):\n # Don't allow misbehaving clients to break out of the static files directory.\n continue\n try:\n stat_result = await aio_stat(full_path)\n return (full_path, stat_result)\n except FileNotFoundError:\n pass\n return (\"\", None)\n\n def file_response(\n self,\n full_path: str,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await aio_stat(self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response 
headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}]} | 2,583 | 297 |
gh_patches_debug_25792 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
zipkin exporter does not work with jaeger collector for spanIds/traceIds/parentIds starting with a 0
**Describe your environment**
I am using Jaeger as the backend and can only collect in the Zipkin format (don't ask me why...).
I am using opentelemetry-ext-zipkin to do so.
**Steps to reproduce**
Upload a couple of spans until one of them randomly has an ID with a zero as its first hex digit, and you will get an error like this:
```
Traces cannot be uploaded; status code: 400, message Unable to process request body: validation failure list:
id in body should be at least 16 chars long
traceId in body should be at least 16 chars long
```
**Cause of the problem and proposed fix**
This is due to the `format(traceId, "x")` calls in the code, which do not do any zero padding; they can be replaced with `format(traceId, "032x")` and `format(spanId, "016x")`.
This fixes the problem for the jaeger-collector for sure, but I will let you check it against the real Zipkin collector; I suppose it will not have issues with zero-padded hex.
I am currently writing a PR, which I will link in the next few minutes.
I will let you decide whether it is legitimate (I think it is), or whether to push for a more permissive acceptance on the jaeger-collector side instead.
--- END ISSUE ---
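To make the padding point concrete, here is a small, self-contained illustration (the ID values are arbitrary examples chosen to have leading zeros, not values taken from the report) of why the fixed-width format specifications matter for Zipkin's JSON encoding:

```python
span_id = 0x00F067AA0BA902B7                    # 64-bit ID whose top byte is zero
trace_id = 0x0AF7651916CD43DD8448EB211C80319C   # 128-bit ID with a leading zero nibble

print(format(span_id, "x"))      # 'f067aa0ba902b7'   -> only 14 chars, rejected as too short
print(format(span_id, "016x"))   # '00f067aa0ba902b7' -> always 16 chars

print(len(format(trace_id, "x")))     # 31
print(len(format(trace_id, "032x")))  # 32 -> a valid fixed-width traceId
```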
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.
17
18 Usage
19 -----
20
21 The **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.
22 This exporter always send traces to the configured Zipkin collector using HTTP.
23
24
25 .. _Zipkin: https://zipkin.io/
26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
27
28 .. code:: python
29
30 from opentelemetry import trace
31 from opentelemetry.exporter import zipkin
32 from opentelemetry.sdk.trace import TracerProvider
33 from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
34
35 trace.set_tracer_provider(TracerProvider())
36 tracer = trace.get_tracer(__name__)
37
38 # create a ZipkinSpanExporter
39 zipkin_exporter = zipkin.ZipkinSpanExporter(
40 service_name="my-helloworld-service",
41 # optional:
42 # host_name="localhost",
43 # port=9411,
44 # endpoint="/api/v2/spans",
45 # protocol="http",
46 # ipv4="",
47 # ipv6="",
48 # retry=False,
49 )
50
51 # Create a BatchExportSpanProcessor and add the exporter to it
52 span_processor = BatchExportSpanProcessor(zipkin_exporter)
53
54 # add to the tracer
55 trace.get_tracer_provider().add_span_processor(span_processor)
56
57 with tracer.start_as_current_span("foo"):
58 print("Hello world!")
59
60 API
61 ---
62 """
63
64 import json
65 import logging
66 from typing import Optional, Sequence
67
68 import requests
69
70 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
71 from opentelemetry.trace import Span, SpanContext, SpanKind
72
73 DEFAULT_ENDPOINT = "/api/v2/spans"
74 DEFAULT_HOST_NAME = "localhost"
75 DEFAULT_PORT = 9411
76 DEFAULT_PROTOCOL = "http"
77 DEFAULT_RETRY = False
78 ZIPKIN_HEADERS = {"Content-Type": "application/json"}
79
80 SPAN_KIND_MAP = {
81 SpanKind.INTERNAL: None,
82 SpanKind.SERVER: "SERVER",
83 SpanKind.CLIENT: "CLIENT",
84 SpanKind.PRODUCER: "PRODUCER",
85 SpanKind.CONSUMER: "CONSUMER",
86 }
87
88 SUCCESS_STATUS_CODES = (200, 202)
89
90 logger = logging.getLogger(__name__)
91
92
93 class ZipkinSpanExporter(SpanExporter):
94 """Zipkin span exporter for OpenTelemetry.
95
96 Args:
97 service_name: Service that logged an annotation in a trace.Classifier
98 when query for spans.
99 host_name: The host name of the Zipkin server
100 port: The port of the Zipkin server
101 endpoint: The endpoint of the Zipkin server
102 protocol: The protocol used for the request.
103 ipv4: Primary IPv4 address associated with this connection.
104 ipv6: Primary IPv6 address associated with this connection.
105 retry: Set to True to configure the exporter to retry on failure.
106 """
107
108 def __init__(
109 self,
110 service_name: str,
111 host_name: str = DEFAULT_HOST_NAME,
112 port: int = DEFAULT_PORT,
113 endpoint: str = DEFAULT_ENDPOINT,
114 protocol: str = DEFAULT_PROTOCOL,
115 ipv4: Optional[str] = None,
116 ipv6: Optional[str] = None,
117 retry: Optional[str] = DEFAULT_RETRY,
118 ):
119 self.service_name = service_name
120 self.host_name = host_name
121 self.port = port
122 self.endpoint = endpoint
123 self.protocol = protocol
124 self.url = "{}://{}:{}{}".format(
125 self.protocol, self.host_name, self.port, self.endpoint
126 )
127 self.ipv4 = ipv4
128 self.ipv6 = ipv6
129 self.retry = retry
130
131 def export(self, spans: Sequence[Span]) -> SpanExportResult:
132 zipkin_spans = self._translate_to_zipkin(spans)
133 result = requests.post(
134 url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS
135 )
136
137 if result.status_code not in SUCCESS_STATUS_CODES:
138 logger.error(
139 "Traces cannot be uploaded; status code: %s, message %s",
140 result.status_code,
141 result.text,
142 )
143
144 if self.retry:
145 return SpanExportResult.FAILURE
146 return SpanExportResult.FAILURE
147 return SpanExportResult.SUCCESS
148
149 def _translate_to_zipkin(self, spans: Sequence[Span]):
150
151 local_endpoint = {"serviceName": self.service_name, "port": self.port}
152
153 if self.ipv4 is not None:
154 local_endpoint["ipv4"] = self.ipv4
155
156 if self.ipv6 is not None:
157 local_endpoint["ipv6"] = self.ipv6
158
159 zipkin_spans = []
160 for span in spans:
161 context = span.get_context()
162 trace_id = context.trace_id
163 span_id = context.span_id
164
165 # Timestamp in zipkin spans is int of microseconds.
166 # see: https://zipkin.io/pages/instrumenting.html
167 start_timestamp_mus = _nsec_to_usec_round(span.start_time)
168 duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)
169
170 zipkin_span = {
171 "traceId": format(trace_id, "x"),
172 "id": format(span_id, "x"),
173 "name": span.name,
174 "timestamp": start_timestamp_mus,
175 "duration": duration_mus,
176 "localEndpoint": local_endpoint,
177 "kind": SPAN_KIND_MAP[span.kind],
178 "tags": _extract_tags_from_span(span),
179 "annotations": _extract_annotations_from_events(span.events),
180 }
181
182 if context.trace_flags.sampled:
183 zipkin_span["debug"] = True
184
185 if isinstance(span.parent, Span):
186 zipkin_span["parentId"] = format(
187 span.parent.get_context().span_id, "x"
188 )
189 elif isinstance(span.parent, SpanContext):
190 zipkin_span["parentId"] = format(span.parent.span_id, "x")
191
192 zipkin_spans.append(zipkin_span)
193 return zipkin_spans
194
195 def shutdown(self) -> None:
196 pass
197
198
199 def _extract_tags_from_dict(tags_dict):
200 tags = {}
201 if not tags_dict:
202 return tags
203 for attribute_key, attribute_value in tags_dict.items():
204 if isinstance(attribute_value, (int, bool, float)):
205 value = str(attribute_value)
206 elif isinstance(attribute_value, str):
207 value = attribute_value[:128]
208 else:
209 logger.warning("Could not serialize tag %s", attribute_key)
210 continue
211 tags[attribute_key] = value
212 return tags
213
214
215 def _extract_tags_from_span(span: Span):
216 tags = _extract_tags_from_dict(getattr(span, "attributes", None))
217 if span.resource:
218 tags.update(_extract_tags_from_dict(span.resource.labels))
219 return tags
220
221
222 def _extract_annotations_from_events(events):
223 return (
224 [
225 {"timestamp": _nsec_to_usec_round(e.timestamp), "value": e.name}
226 for e in events
227 ]
228 if events
229 else None
230 )
231
232
233 def _nsec_to_usec_round(nsec):
234 """Round nanoseconds to microseconds"""
235 return (nsec + 500) // 10 ** 3
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
--- a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
+++ b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py
@@ -168,8 +168,9 @@
duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)
zipkin_span = {
- "traceId": format(trace_id, "x"),
- "id": format(span_id, "x"),
+ # Ensure left-zero-padding of traceId, spanId, parentId
+ "traceId": format(trace_id, "032x"),
+ "id": format(span_id, "016x"),
"name": span.name,
"timestamp": start_timestamp_mus,
"duration": duration_mus,
@@ -184,10 +185,10 @@
if isinstance(span.parent, Span):
zipkin_span["parentId"] = format(
- span.parent.get_context().span_id, "x"
+ span.parent.get_context().span_id, "016x"
)
elif isinstance(span.parent, SpanContext):
- zipkin_span["parentId"] = format(span.parent.span_id, "x")
+ zipkin_span["parentId"] = format(span.parent.span_id, "016x")
zipkin_spans.append(zipkin_span)
return zipkin_spans
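As a quick, hypothetical sanity check of the patched formatting (not part of the exporter's test suite), one can confirm that the encoded IDs keep their fixed width even for small numeric values:

```python
import json

# Every encoded ID keeps its fixed width, even for tiny numeric values.
for span_id in (1, 0x0F, 0x00F067AA0BA902B7):
    assert len(format(span_id, "016x")) == 16

payload = json.dumps({"traceId": format(1, "032x"), "id": format(1, "016x")})
print(payload)  # traceId is 32 hex chars, id is 16 hex chars, both zero-padded
```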
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n--- a/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n+++ b/exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py\n@@ -168,8 +168,9 @@\n duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)\n \n zipkin_span = {\n- \"traceId\": format(trace_id, \"x\"),\n- \"id\": format(span_id, \"x\"),\n+ # Ensure left-zero-padding of traceId, spanId, parentId\n+ \"traceId\": format(trace_id, \"032x\"),\n+ \"id\": format(span_id, \"016x\"),\n \"name\": span.name,\n \"timestamp\": start_timestamp_mus,\n \"duration\": duration_mus,\n@@ -184,10 +185,10 @@\n \n if isinstance(span.parent, Span):\n zipkin_span[\"parentId\"] = format(\n- span.parent.get_context().span_id, \"x\"\n+ span.parent.get_context().span_id, \"016x\"\n )\n elif isinstance(span.parent, SpanContext):\n- zipkin_span[\"parentId\"] = format(span.parent.span_id, \"x\")\n+ zipkin_span[\"parentId\"] = format(span.parent.span_id, \"016x\")\n \n zipkin_spans.append(zipkin_span)\n return zipkin_spans\n", "issue": "zipkin exporter does not work with jaeger collector for spanIds/traceIds/parentIds starting with a 0\n**Describe your environment**\r\nI am using Jaeger as backend and can only collect with the zipkin format (Don't ask me why...). \r\nI am using opentelemetry-ext-zipkin to do so\r\n\r\n**Steps to reproduce**\r\nUpload a couple of spans until randomly one has an Id with a zero as first hex digit, and you will get an error like this:\r\n```\r\nTraces cannot be uploaded; status code: 400, message Unable to process request body: validation failure list:\r\nid in body should be at least 16 chars long\r\ntraceId in body should be at least 16 chars long\r\n```\r\n\r\n**Cause of the problem and proposed fix**\r\nThis is due to the `format(traceId, \"x\")` in the code which does not do any zero padding, which can be replaced with `format(traceId, \"032x\")` and `format(spanId, \"016x\")`.\r\nIt fixes the problem on jaeger-collector for sure, but I will let you check it for the real zipkin collector, I suppose it will not have issues with zero-padded hex.\r\n\r\nI am currently writing a PR, which I will link in the next few minutes.\r\nI will let you decide if it is legitimate, I think it is, or instead push a more permissive acceptance on the jaeger-collector side.\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.\nThis exporter always send traces to the configured Zipkin collector using HTTP.\n\n\n.. _Zipkin: https://zipkin.io/\n.. 
_OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter import zipkin\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n\n # create a ZipkinSpanExporter\n zipkin_exporter = zipkin.ZipkinSpanExporter(\n service_name=\"my-helloworld-service\",\n # optional:\n # host_name=\"localhost\",\n # port=9411,\n # endpoint=\"/api/v2/spans\",\n # protocol=\"http\",\n # ipv4=\"\",\n # ipv6=\"\",\n # retry=False,\n )\n\n # Create a BatchExportSpanProcessor and add the exporter to it\n span_processor = BatchExportSpanProcessor(zipkin_exporter)\n\n # add to the tracer\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nAPI\n---\n\"\"\"\n\nimport json\nimport logging\nfrom typing import Optional, Sequence\n\nimport requests\n\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.trace import Span, SpanContext, SpanKind\n\nDEFAULT_ENDPOINT = \"/api/v2/spans\"\nDEFAULT_HOST_NAME = \"localhost\"\nDEFAULT_PORT = 9411\nDEFAULT_PROTOCOL = \"http\"\nDEFAULT_RETRY = False\nZIPKIN_HEADERS = {\"Content-Type\": \"application/json\"}\n\nSPAN_KIND_MAP = {\n SpanKind.INTERNAL: None,\n SpanKind.SERVER: \"SERVER\",\n SpanKind.CLIENT: \"CLIENT\",\n SpanKind.PRODUCER: \"PRODUCER\",\n SpanKind.CONSUMER: \"CONSUMER\",\n}\n\nSUCCESS_STATUS_CODES = (200, 202)\n\nlogger = logging.getLogger(__name__)\n\n\nclass ZipkinSpanExporter(SpanExporter):\n \"\"\"Zipkin span exporter for OpenTelemetry.\n\n Args:\n service_name: Service that logged an annotation in a trace.Classifier\n when query for spans.\n host_name: The host name of the Zipkin server\n port: The port of the Zipkin server\n endpoint: The endpoint of the Zipkin server\n protocol: The protocol used for the request.\n ipv4: Primary IPv4 address associated with this connection.\n ipv6: Primary IPv6 address associated with this connection.\n retry: Set to True to configure the exporter to retry on failure.\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n host_name: str = DEFAULT_HOST_NAME,\n port: int = DEFAULT_PORT,\n endpoint: str = DEFAULT_ENDPOINT,\n protocol: str = DEFAULT_PROTOCOL,\n ipv4: Optional[str] = None,\n ipv6: Optional[str] = None,\n retry: Optional[str] = DEFAULT_RETRY,\n ):\n self.service_name = service_name\n self.host_name = host_name\n self.port = port\n self.endpoint = endpoint\n self.protocol = protocol\n self.url = \"{}://{}:{}{}\".format(\n self.protocol, self.host_name, self.port, self.endpoint\n )\n self.ipv4 = ipv4\n self.ipv6 = ipv6\n self.retry = retry\n\n def export(self, spans: Sequence[Span]) -> SpanExportResult:\n zipkin_spans = self._translate_to_zipkin(spans)\n result = requests.post(\n url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS\n )\n\n if result.status_code not in SUCCESS_STATUS_CODES:\n logger.error(\n \"Traces cannot be uploaded; status code: %s, message %s\",\n result.status_code,\n result.text,\n )\n\n if self.retry:\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n return SpanExportResult.SUCCESS\n\n def _translate_to_zipkin(self, spans: Sequence[Span]):\n\n local_endpoint = {\"serviceName\": self.service_name, \"port\": self.port}\n\n if self.ipv4 is not None:\n local_endpoint[\"ipv4\"] = 
self.ipv4\n\n if self.ipv6 is not None:\n local_endpoint[\"ipv6\"] = self.ipv6\n\n zipkin_spans = []\n for span in spans:\n context = span.get_context()\n trace_id = context.trace_id\n span_id = context.span_id\n\n # Timestamp in zipkin spans is int of microseconds.\n # see: https://zipkin.io/pages/instrumenting.html\n start_timestamp_mus = _nsec_to_usec_round(span.start_time)\n duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)\n\n zipkin_span = {\n \"traceId\": format(trace_id, \"x\"),\n \"id\": format(span_id, \"x\"),\n \"name\": span.name,\n \"timestamp\": start_timestamp_mus,\n \"duration\": duration_mus,\n \"localEndpoint\": local_endpoint,\n \"kind\": SPAN_KIND_MAP[span.kind],\n \"tags\": _extract_tags_from_span(span),\n \"annotations\": _extract_annotations_from_events(span.events),\n }\n\n if context.trace_flags.sampled:\n zipkin_span[\"debug\"] = True\n\n if isinstance(span.parent, Span):\n zipkin_span[\"parentId\"] = format(\n span.parent.get_context().span_id, \"x\"\n )\n elif isinstance(span.parent, SpanContext):\n zipkin_span[\"parentId\"] = format(span.parent.span_id, \"x\")\n\n zipkin_spans.append(zipkin_span)\n return zipkin_spans\n\n def shutdown(self) -> None:\n pass\n\n\ndef _extract_tags_from_dict(tags_dict):\n tags = {}\n if not tags_dict:\n return tags\n for attribute_key, attribute_value in tags_dict.items():\n if isinstance(attribute_value, (int, bool, float)):\n value = str(attribute_value)\n elif isinstance(attribute_value, str):\n value = attribute_value[:128]\n else:\n logger.warning(\"Could not serialize tag %s\", attribute_key)\n continue\n tags[attribute_key] = value\n return tags\n\n\ndef _extract_tags_from_span(span: Span):\n tags = _extract_tags_from_dict(getattr(span, \"attributes\", None))\n if span.resource:\n tags.update(_extract_tags_from_dict(span.resource.labels))\n return tags\n\n\ndef _extract_annotations_from_events(events):\n return (\n [\n {\"timestamp\": _nsec_to_usec_round(e.timestamp), \"value\": e.name}\n for e in events\n ]\n if events\n else None\n )\n\n\ndef _nsec_to_usec_round(nsec):\n \"\"\"Round nanoseconds to microseconds\"\"\"\n return (nsec + 500) // 10 ** 3\n", "path": "exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows to export tracing data to `Zipkin <https://zipkin.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Zipkin Exporter** allows to export `OpenTelemetry`_ traces to `Zipkin`_.\nThis exporter always send traces to the configured Zipkin collector using HTTP.\n\n\n.. _Zipkin: https://zipkin.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. 
code:: python\n\n from opentelemetry import trace\n from opentelemetry.exporter import zipkin\n from opentelemetry.sdk.trace import TracerProvider\n from opentelemetry.sdk.trace.export import BatchExportSpanProcessor\n\n trace.set_tracer_provider(TracerProvider())\n tracer = trace.get_tracer(__name__)\n\n # create a ZipkinSpanExporter\n zipkin_exporter = zipkin.ZipkinSpanExporter(\n service_name=\"my-helloworld-service\",\n # optional:\n # host_name=\"localhost\",\n # port=9411,\n # endpoint=\"/api/v2/spans\",\n # protocol=\"http\",\n # ipv4=\"\",\n # ipv6=\"\",\n # retry=False,\n )\n\n # Create a BatchExportSpanProcessor and add the exporter to it\n span_processor = BatchExportSpanProcessor(zipkin_exporter)\n\n # add to the tracer\n trace.get_tracer_provider().add_span_processor(span_processor)\n\n with tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n\nAPI\n---\n\"\"\"\n\nimport json\nimport logging\nfrom typing import Optional, Sequence\n\nimport requests\n\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.trace import Span, SpanContext, SpanKind\n\nDEFAULT_ENDPOINT = \"/api/v2/spans\"\nDEFAULT_HOST_NAME = \"localhost\"\nDEFAULT_PORT = 9411\nDEFAULT_PROTOCOL = \"http\"\nDEFAULT_RETRY = False\nZIPKIN_HEADERS = {\"Content-Type\": \"application/json\"}\n\nSPAN_KIND_MAP = {\n SpanKind.INTERNAL: None,\n SpanKind.SERVER: \"SERVER\",\n SpanKind.CLIENT: \"CLIENT\",\n SpanKind.PRODUCER: \"PRODUCER\",\n SpanKind.CONSUMER: \"CONSUMER\",\n}\n\nSUCCESS_STATUS_CODES = (200, 202)\n\nlogger = logging.getLogger(__name__)\n\n\nclass ZipkinSpanExporter(SpanExporter):\n \"\"\"Zipkin span exporter for OpenTelemetry.\n\n Args:\n service_name: Service that logged an annotation in a trace.Classifier\n when query for spans.\n host_name: The host name of the Zipkin server\n port: The port of the Zipkin server\n endpoint: The endpoint of the Zipkin server\n protocol: The protocol used for the request.\n ipv4: Primary IPv4 address associated with this connection.\n ipv6: Primary IPv6 address associated with this connection.\n retry: Set to True to configure the exporter to retry on failure.\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n host_name: str = DEFAULT_HOST_NAME,\n port: int = DEFAULT_PORT,\n endpoint: str = DEFAULT_ENDPOINT,\n protocol: str = DEFAULT_PROTOCOL,\n ipv4: Optional[str] = None,\n ipv6: Optional[str] = None,\n retry: Optional[str] = DEFAULT_RETRY,\n ):\n self.service_name = service_name\n self.host_name = host_name\n self.port = port\n self.endpoint = endpoint\n self.protocol = protocol\n self.url = \"{}://{}:{}{}\".format(\n self.protocol, self.host_name, self.port, self.endpoint\n )\n self.ipv4 = ipv4\n self.ipv6 = ipv6\n self.retry = retry\n\n def export(self, spans: Sequence[Span]) -> SpanExportResult:\n zipkin_spans = self._translate_to_zipkin(spans)\n result = requests.post(\n url=self.url, data=json.dumps(zipkin_spans), headers=ZIPKIN_HEADERS\n )\n\n if result.status_code not in SUCCESS_STATUS_CODES:\n logger.error(\n \"Traces cannot be uploaded; status code: %s, message %s\",\n result.status_code,\n result.text,\n )\n\n if self.retry:\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n return SpanExportResult.SUCCESS\n\n def _translate_to_zipkin(self, spans: Sequence[Span]):\n\n local_endpoint = {\"serviceName\": self.service_name, \"port\": self.port}\n\n if self.ipv4 is not None:\n local_endpoint[\"ipv4\"] = self.ipv4\n\n if self.ipv6 is not None:\n local_endpoint[\"ipv6\"] = self.ipv6\n\n 
zipkin_spans = []\n for span in spans:\n context = span.get_context()\n trace_id = context.trace_id\n span_id = context.span_id\n\n # Timestamp in zipkin spans is int of microseconds.\n # see: https://zipkin.io/pages/instrumenting.html\n start_timestamp_mus = _nsec_to_usec_round(span.start_time)\n duration_mus = _nsec_to_usec_round(span.end_time - span.start_time)\n\n zipkin_span = {\n # Ensure left-zero-padding of traceId, spanId, parentId\n \"traceId\": format(trace_id, \"032x\"),\n \"id\": format(span_id, \"016x\"),\n \"name\": span.name,\n \"timestamp\": start_timestamp_mus,\n \"duration\": duration_mus,\n \"localEndpoint\": local_endpoint,\n \"kind\": SPAN_KIND_MAP[span.kind],\n \"tags\": _extract_tags_from_span(span),\n \"annotations\": _extract_annotations_from_events(span.events),\n }\n\n if context.trace_flags.sampled:\n zipkin_span[\"debug\"] = True\n\n if isinstance(span.parent, Span):\n zipkin_span[\"parentId\"] = format(\n span.parent.get_context().span_id, \"016x\"\n )\n elif isinstance(span.parent, SpanContext):\n zipkin_span[\"parentId\"] = format(span.parent.span_id, \"016x\")\n\n zipkin_spans.append(zipkin_span)\n return zipkin_spans\n\n def shutdown(self) -> None:\n pass\n\n\ndef _extract_tags_from_dict(tags_dict):\n tags = {}\n if not tags_dict:\n return tags\n for attribute_key, attribute_value in tags_dict.items():\n if isinstance(attribute_value, (int, bool, float)):\n value = str(attribute_value)\n elif isinstance(attribute_value, str):\n value = attribute_value[:128]\n else:\n logger.warning(\"Could not serialize tag %s\", attribute_key)\n continue\n tags[attribute_key] = value\n return tags\n\n\ndef _extract_tags_from_span(span: Span):\n tags = _extract_tags_from_dict(getattr(span, \"attributes\", None))\n if span.resource:\n tags.update(_extract_tags_from_dict(span.resource.labels))\n return tags\n\n\ndef _extract_annotations_from_events(events):\n return (\n [\n {\"timestamp\": _nsec_to_usec_round(e.timestamp), \"value\": e.name}\n for e in events\n ]\n if events\n else None\n )\n\n\ndef _nsec_to_usec_round(nsec):\n \"\"\"Round nanoseconds to microseconds\"\"\"\n return (nsec + 500) // 10 ** 3\n", "path": "exporter/opentelemetry-exporter-zipkin/src/opentelemetry/exporter/zipkin/__init__.py"}]} | 2,894 | 372 |
gh_patches_debug_5155 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2901 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No Support for ItemBatcher & ToleratedFailurePercentage in Map State of StateMachine definition
### CloudFormation Lint Version
0.79.7
### What operating system are you using?
WSL Ubuntu
### Describe the bug
E2532 errors are thrown for ItemBatcher and ToleratedFailurePercentage for Map states in StateMachine definitions even though they are valid options for a Map state: https://states-language.net/#map-state
```
E2532 State Machine Definition key (ToleratedFailurePercentage) for State (Iterate through ODS List) of Type (Map) is not valid
templates/step-functions.yml:53:7
E2532 State Machine Definition key (ItemBatcher) for State (Iterate through ODS List) of Type (Map) is not valid
templates/step-functions.yml:53:7
```
### Expected behavior
Because they are valid keys for configuring a Map state, as per the docs here (https://states-language.net/#map-state), cfn-lint should not throw these errors.
### Reproduction template
```
AWSTemplateFormatVersion: '2010-09-09'
Resources:
DistrictSpecificMultiTenantConfigStateMachine:
Type: AWS::StepFunctions::StateMachine
Properties:
DefinitionString: |-
{
"Comment": "Map",
"StartAt": "map",
"States": {
"map": {
"Type": "Map",
"Iterator": {
"StartAt": "Create DB",
"States": {
"Create DB": {
"Type": "Task",
"Resource": "arn:aws:states:::lambda:invoke",
"OutputPath": "$.Payload",
"Parameters": {
"Payload.$": "$",
"FunctionName": "arn:aws:lambda:my_function:$LATEST"
},
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException"
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2
}
],
"Next": "Insert DB Record"
},
"Insert DB Record": {
"Type": "Task",
"Resource": "arn:aws:states:::lambda:invoke",
"OutputPath": "$.Payload",
"Parameters": {
"Payload.$": "$",
"FunctionName": "arn:aws:lambda:my_function2:$LATEST"
},
"Retry": [
{
"ErrorEquals": [
"Lambda.ServiceException",
"Lambda.AWSLambdaException",
"Lambda.SdkClientException",
"Lambda.TooManyRequestsException"
],
"IntervalSeconds": 2,
"MaxAttempts": 6,
"BackoffRate": 2
}
],
"End": true
}
},
"ProcessorConfig": {
"Mode": "DISTRIBUTED",
"ExecutionType": "STANDARD"
}
},
"End": true,
"MaxConcurrency": 1,
"ItemReader": {
"Resource": "arn:aws:states:::s3:getObject",
"ReaderConfig": {
"InputType": "CSV",
"CSVHeaderLocation": "FIRST_ROW"
},
"Parameters": {
"Bucket": "SeedDataBucket",
"Key": "test.csv"
}
},
"ToleratedFailurePercentage": 5,
"ItemBatcher": {
"MaxItemsPerBatch": 10
}
}
}
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/stepfunctions/StateMachine.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import json
6
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class StateMachine(CloudFormationLintRule):
11 """Check State Machine Definition"""
12
13 id = "E2532"
14 shortdesc = "Check State Machine Definition for proper syntax"
15 description = (
16 "Check the State Machine String Definition to make sure its JSON. "
17 "Validate basic syntax of the file to determine validity."
18 )
19 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html"
20 tags = ["resources", "stepfunctions"]
21
22 def __init__(self):
23 """Init"""
24 super().__init__()
25 self.resource_property_types.append("AWS::StepFunctions::StateMachine")
26
27 def _check_state_json(self, def_json, state_name, path):
28 """Check State JSON Definition"""
29 matches = []
30
31 # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html
32 common_state_keys = [
33 "Next",
34 "End",
35 "Type",
36 "Comment",
37 "InputPath",
38 "OutputPath",
39 ]
40 common_state_required_keys = [
41 "Type",
42 ]
43 state_key_types = {
44 "Pass": ["Result", "ResultPath", "Parameters"],
45 "Task": [
46 "Credentials",
47 "Resource",
48 "Parameters",
49 "ResultPath",
50 "ResultSelector",
51 "Retry",
52 "Catch",
53 "TimeoutSeconds",
54 "TimeoutSecondsPath",
55 "Parameters",
56 "HeartbeatSeconds",
57 "HeartbeatSecondsPath",
58 ],
59 "Map": [
60 "MaxConcurrency",
61 "Iterator",
62 "ItemsPath",
63 "ItemProcessor",
64 "ItemReader",
65 "ItemSelector",
66 "ResultPath",
67 "ResultSelector",
68 "Retry",
69 "Catch",
70 "Parameters",
71 ],
72 "Choice": ["Choices", "Default"],
73 "Wait": ["Seconds", "Timestamp", "SecondsPath", "TimestampPath"],
74 "Succeed": [],
75 "Fail": ["Cause", "CausePath", "Error", "ErrorPath"],
76 "Parallel": [
77 "Branches",
78 "ResultPath",
79 "ResultSelector",
80 "Parameters",
81 "Retry",
82 "Catch",
83 ],
84 }
85 state_required_types = {
86 "Pass": [],
87 "Task": ["Resource"],
88 "Choice": ["Choices"],
89 "Wait": [],
90 "Succeed": [],
91 "Fail": [],
92 "Parallel": ["Branches"],
93 }
94
95 for req_key in common_state_required_keys:
96 if req_key not in def_json:
97 message = f"State Machine Definition required key ({req_key}) for State ({state_name}) is missing"
98 matches.append(RuleMatch(path, message))
99 return matches
100
101 state_type = def_json.get("Type")
102
103 if state_type in state_key_types:
104 for state_key, _ in def_json.items():
105 if state_key not in common_state_keys + state_key_types.get(
106 state_type, []
107 ):
108 message = f"State Machine Definition key ({state_key}) for State ({state_name}) of Type ({state_type}) is not valid"
109 matches.append(RuleMatch(path, message))
110 for req_key in common_state_required_keys + state_required_types.get(
111 state_type, []
112 ):
113 if req_key not in def_json:
114 message = f"State Machine Definition required key ({req_key}) for State ({state_name}) of Type ({state_type}) is missing"
115 matches.append(RuleMatch(path, message))
116 return matches
117 else:
118 message = f"State Machine Definition Type ({state_type}) is not valid"
119 matches.append(RuleMatch(path, message))
120
121 return matches
122
123 def _check_definition_json(self, def_json, path):
124 """Check JSON Definition"""
125 matches = []
126
127 top_level_keys = ["Comment", "StartAt", "TimeoutSeconds", "Version", "States"]
128 top_level_required_keys = ["StartAt", "States"]
129 for top_key, _ in def_json.items():
130 if top_key not in top_level_keys:
131 message = f"State Machine Definition key ({top_key}) is not valid"
132 matches.append(RuleMatch(path, message))
133
134 for req_key in top_level_required_keys:
135 if req_key not in def_json:
136 message = (
137 f"State Machine Definition required key ({req_key}) is missing"
138 )
139 matches.append(RuleMatch(path, message))
140
141 for state_name, state_value in def_json.get("States", {}).items():
142 matches.extend(self._check_state_json(state_value, state_name, path))
143 return matches
144
145 def check_value(self, value, path, fail_on_loads=True):
146 """Check Definition Value"""
147 matches = []
148 try:
149 def_json = json.loads(value)
150 # pylint: disable=W0703
151 except Exception as err:
152 if fail_on_loads:
153 message = f"State Machine Definition needs to be formatted as JSON. Error {err}"
154 matches.append(RuleMatch(path, message))
155 return matches
156
157 self.logger.debug("State Machine definition could not be parsed. Skipping")
158 return matches
159
160 matches.extend(self._check_definition_json(def_json, path))
161 return matches
162
163 def check_sub(self, value, path):
164 """Check Sub Object"""
165 matches = []
166 if isinstance(value, list):
167 matches.extend(self.check_value(value[0], path, False))
168 elif isinstance(value, str):
169 matches.extend(self.check_value(value, path, False))
170
171 return matches
172
173 def match_resource_properties(self, properties, _, path, cfn):
174 """Check CloudFormation Properties"""
175 matches = []
176
177 matches.extend(
178 cfn.check_value(
179 obj=properties,
180 key="DefinitionString",
181 path=path[:],
182 check_value=self.check_value,
183 check_sub=self.check_sub,
184 )
185 )
186
187 return matches
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
@@ -68,6 +68,8 @@
"Retry",
"Catch",
"Parameters",
+ "ToleratedFailurePercentage",
+ "ItemBatcher",
],
"Choice": ["Choices", "Default"],
"Wait": ["Seconds", "Timestamp", "SecondsPath", "TimestampPath"],
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -68,6 +68,8 @@\n \"Retry\",\n \"Catch\",\n \"Parameters\",\n+ \"ToleratedFailurePercentage\",\n+ \"ItemBatcher\",\n ],\n \"Choice\": [\"Choices\", \"Default\"],\n \"Wait\": [\"Seconds\", \"Timestamp\", \"SecondsPath\", \"TimestampPath\"],\n", "issue": "No Support for ItemBatcher & ToleratedFailurePercentage in Map State of StateMachine definition\n### CloudFormation Lint Version\r\n\r\n0.79.7\r\n\r\n### What operating system are you using?\r\n\r\nWSL Ubuntu\r\n\r\n### Describe the bug\r\n\r\nE2532 errors are thrown for ItemBatcher and ToleratedFailurePercentage for Map states in StateMachine definitions even though they are valid options for a Map state: https://states-language.net/#map-state\r\n\r\n```\r\nE2532 State Machine Definition key (ToleratedFailurePercentage) for State (Iterate through ODS List) of Type (Map) is not valid\r\ntemplates/step-functions.yml:53:7\r\n\r\nE2532 State Machine Definition key (ItemBatcher) for State (Iterate through ODS List) of Type (Map) is not valid\r\ntemplates/step-functions.yml:53:7\r\n```\r\n\r\n### Expected behavior\r\n\r\nBecause they are valid Keys to configure a map state as per the docs here: https://states-language.net/#map-state the cfn-lint should not throw errors.\r\n\r\n### Reproduction template\r\n\r\n```AWSTemplateFormatVersion: '2010-09-09'\r\nResources:\r\n DistrictSpecificMultiTenantConfigStateMachine:\r\n Type: AWS::StepFunctions::StateMachine\r\n Properties:\r\n DefinitionString: |-\r\n {\r\n \"Comment\": \"Map\",\r\n \"StartAt\": \"map\",\r\n \"States\": {\r\n \"map\": {\r\n \"Type\": \"Map\",\r\n \"Iterator\": {\r\n \"StartAt\": \"Create DB\",\r\n \"States\": {\r\n \"Create DB\": {\r\n \"Type\": \"Task\",\r\n \"Resource\": \"arn:aws:states:::lambda:invoke\",\r\n \"OutputPath\": \"$.Payload\",\r\n \"Parameters\": {\r\n \"Payload.$\": \"$\",\r\n \"FunctionName\": \"arn:aws:lambda:my_function:$LATEST\"\r\n },\r\n \"Retry\": [\r\n {\r\n \"ErrorEquals\": [\r\n \"Lambda.ServiceException\",\r\n \"Lambda.AWSLambdaException\",\r\n \"Lambda.SdkClientException\"\r\n ],\r\n \"IntervalSeconds\": 2,\r\n \"MaxAttempts\": 6,\r\n \"BackoffRate\": 2\r\n }\r\n ],\r\n \"Next\": \"Insert DB Record\"\r\n },\r\n \"Insert DB Record\": {\r\n \"Type\": \"Task\",\r\n \"Resource\": \"arn:aws:states:::lambda:invoke\",\r\n \"OutputPath\": \"$.Payload\",\r\n \"Parameters\": {\r\n \"Payload.$\": \"$\",\r\n \"FunctionName\": \"arn:aws:lambda:my_function2:$LATEST\"\r\n },\r\n \"Retry\": [\r\n {\r\n \"ErrorEquals\": [\r\n \"Lambda.ServiceException\",\r\n \"Lambda.AWSLambdaException\",\r\n \"Lambda.SdkClientException\",\r\n \"Lambda.TooManyRequestsException\"\r\n ],\r\n \"IntervalSeconds\": 2,\r\n \"MaxAttempts\": 6,\r\n \"BackoffRate\": 2\r\n }\r\n ],\r\n \"End\": true\r\n }\r\n },\r\n \"ProcessorConfig\": {\r\n \"Mode\": \"DISTRIBUTED\",\r\n \"ExecutionType\": \"STANDARD\"\r\n }\r\n },\r\n \"End\": true,\r\n \"MaxConcurrency\": 1,\r\n \"ItemReader\": {\r\n \"Resource\": \"arn:aws:states:::s3:getObject\",\r\n \"ReaderConfig\": {\r\n \"InputType\": \"CSV\",\r\n \"CSVHeaderLocation\": \"FIRST_ROW\"\r\n },\r\n \"Parameters\": {\r\n \"Bucket\": \"SeedDataBucket\",\r\n \"Key\": \"test.csv\"\r\n }\r\n },\r\n \"ToleratedFailurePercentage\": 5,\r\n \"ItemBatcher\": {\r\n \"MaxItemsPerBatch\": 
10\r\n }\r\n }\r\n }\r\n }\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport json\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n\n id = \"E2532\"\n shortdesc = \"Check State Machine Definition for proper syntax\"\n description = (\n \"Check the State Machine String Definition to make sure its JSON. \"\n \"Validate basic syntax of the file to determine validity.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html\"\n tags = [\"resources\", \"stepfunctions\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.resource_property_types.append(\"AWS::StepFunctions::StateMachine\")\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n \"Next\",\n \"End\",\n \"Type\",\n \"Comment\",\n \"InputPath\",\n \"OutputPath\",\n ]\n common_state_required_keys = [\n \"Type\",\n ]\n state_key_types = {\n \"Pass\": [\"Result\", \"ResultPath\", \"Parameters\"],\n \"Task\": [\n \"Credentials\",\n \"Resource\",\n \"Parameters\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"TimeoutSeconds\",\n \"TimeoutSecondsPath\",\n \"Parameters\",\n \"HeartbeatSeconds\",\n \"HeartbeatSecondsPath\",\n ],\n \"Map\": [\n \"MaxConcurrency\",\n \"Iterator\",\n \"ItemsPath\",\n \"ItemProcessor\",\n \"ItemReader\",\n \"ItemSelector\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"Parameters\",\n ],\n \"Choice\": [\"Choices\", \"Default\"],\n \"Wait\": [\"Seconds\", \"Timestamp\", \"SecondsPath\", \"TimestampPath\"],\n \"Succeed\": [],\n \"Fail\": [\"Cause\", \"CausePath\", \"Error\", \"ErrorPath\"],\n \"Parallel\": [\n \"Branches\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Parameters\",\n \"Retry\",\n \"Catch\",\n ],\n }\n state_required_types = {\n \"Pass\": [],\n \"Task\": [\"Resource\"],\n \"Choice\": [\"Choices\"],\n \"Wait\": [],\n \"Succeed\": [],\n \"Fail\": [],\n \"Parallel\": [\"Branches\"],\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get(\"Type\")\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(\n state_type, []\n ):\n message = f\"State Machine Definition key ({state_key}) for State ({state_name}) of Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(\n state_type, []\n ):\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) of Type ({state_type}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = f\"State Machine Definition Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = 
[\"Comment\", \"StartAt\", \"TimeoutSeconds\", \"Version\", \"States\"]\n top_level_required_keys = [\"StartAt\", \"States\"]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = f\"State Machine Definition key ({top_key}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = (\n f\"State Machine Definition required key ({req_key}) is missing\"\n )\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get(\"States\", {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = f\"State Machine Definition needs to be formatted as JSON. Error {err}\"\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug(\"State Machine definition could not be parsed. Skipping\")\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, str):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"DefinitionString\",\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport json\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n\n id = \"E2532\"\n shortdesc = \"Check State Machine Definition for proper syntax\"\n description = (\n \"Check the State Machine String Definition to make sure its JSON. 
\"\n \"Validate basic syntax of the file to determine validity.\"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html\"\n tags = [\"resources\", \"stepfunctions\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.resource_property_types.append(\"AWS::StepFunctions::StateMachine\")\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n \"Next\",\n \"End\",\n \"Type\",\n \"Comment\",\n \"InputPath\",\n \"OutputPath\",\n ]\n common_state_required_keys = [\n \"Type\",\n ]\n state_key_types = {\n \"Pass\": [\"Result\", \"ResultPath\", \"Parameters\"],\n \"Task\": [\n \"Credentials\",\n \"Resource\",\n \"Parameters\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"TimeoutSeconds\",\n \"TimeoutSecondsPath\",\n \"Parameters\",\n \"HeartbeatSeconds\",\n \"HeartbeatSecondsPath\",\n ],\n \"Map\": [\n \"MaxConcurrency\",\n \"Iterator\",\n \"ItemsPath\",\n \"ItemProcessor\",\n \"ItemReader\",\n \"ItemSelector\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Retry\",\n \"Catch\",\n \"Parameters\",\n \"ToleratedFailurePercentage\",\n \"ItemBatcher\",\n ],\n \"Choice\": [\"Choices\", \"Default\"],\n \"Wait\": [\"Seconds\", \"Timestamp\", \"SecondsPath\", \"TimestampPath\"],\n \"Succeed\": [],\n \"Fail\": [\"Cause\", \"CausePath\", \"Error\", \"ErrorPath\"],\n \"Parallel\": [\n \"Branches\",\n \"ResultPath\",\n \"ResultSelector\",\n \"Parameters\",\n \"Retry\",\n \"Catch\",\n ],\n }\n state_required_types = {\n \"Pass\": [],\n \"Task\": [\"Resource\"],\n \"Choice\": [\"Choices\"],\n \"Wait\": [],\n \"Succeed\": [],\n \"Fail\": [],\n \"Parallel\": [\"Branches\"],\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get(\"Type\")\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(\n state_type, []\n ):\n message = f\"State Machine Definition key ({state_key}) for State ({state_name}) of Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(\n state_type, []\n ):\n if req_key not in def_json:\n message = f\"State Machine Definition required key ({req_key}) for State ({state_name}) of Type ({state_type}) is missing\"\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = f\"State Machine Definition Type ({state_type}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\"Comment\", \"StartAt\", \"TimeoutSeconds\", \"Version\", \"States\"]\n top_level_required_keys = [\"StartAt\", \"States\"]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = f\"State Machine Definition key ({top_key}) is not valid\"\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = (\n f\"State Machine Definition required key ({req_key}) is missing\"\n )\n 
matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get(\"States\", {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = f\"State Machine Definition needs to be formatted as JSON. Error {err}\"\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug(\"State Machine definition could not be parsed. Skipping\")\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, str):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties,\n key=\"DefinitionString\",\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub,\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}]} | 2,809 | 130 |
gh_patches_debug_23190 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1661 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Costa Rica does not have data available between 12 am and 1 am
```
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 130, in launch_parsers
**parser_kwargs)
File "/home/electricitymap/parsers/CR.py", line 191, in fetch_production
df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
File "/usr/local/lib/python3.6/site-packages/pandas/io/html.py", line 915, in read_html
keep_default_na=keep_default_na)
File "/usr/local/lib/python3.6/site-packages/pandas/io/html.py", line 749, in _parse
raise_with_traceback(retained)
File "/usr/local/lib/python3.6/site-packages/pandas/compat/__init__.py", line 385, in raise_with_traceback
raise exc.with_traceback(traceback)
ValueError: No tables found matching pattern 'Angostura'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/CR.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 import logging
5
6 import arrow
7 import pandas as pd
8 import requests
9 from bs4 import BeautifulSoup
10
11 TIMEZONE = 'America/Costa_Rica'
12 DATE_FORMAT = 'DD/MM/YYYY'
13 MONTH_FORMAT = 'MM/YYYY'
14 POWER_PLANTS = {
15 u'Aeroenergía': 'wind',
16 u'Altamira': 'wind',
17 u'Angostura': 'hydro',
18 u'Arenal': 'hydro',
19 u'Balsa Inferior': 'hydro',
20 u'Barranca': 'unknown',
21 u'Barro Morado': 'geothermal',
22 u'Bijagua': 'hydro',
23 u'Birris12': 'hydro',
24 u'Birris3': 'hydro',
25 u'Boca de Pozo': 'hydro',
26 u'CNFL': 'unknown',
27 u'Cachí': 'hydro',
28 u'Campos Azules': 'wind',
29 u'Canalete': 'unknown',
30 u'Cariblanco': 'hydro',
31 u'Carrillos': 'hydro',
32 u'Caño Grande': 'hydro',
33 u'Caño Grande III': 'hydro',
34 u'Chiripa': 'wind',
35 u'Chocosuelas': 'hydro',
36 u'Chucás': 'hydro',
37 u'Cubujuquí': 'hydro',
38 u'Daniel Gutiérrez': 'hydro',
39 u'Dengo': 'hydro',
40 u'Don Pedro': 'hydro',
41 u'Doña Julia': 'hydro',
42 u'Echandi': 'hydro',
43 u'El Angel': 'hydro',
44 u'El Angel Ampliación': 'hydro',
45 u'El Embalse': 'hydro',
46 u'El General': 'hydro',
47 u'El Viejo': 'biomass',
48 u'Garabito': 'oil',
49 u'Garita': 'hydro',
50 u'Guápiles': 'oil',
51 u'Hidrozarcas': 'hydro',
52 u'La Esperanza (CoopeL)': 'hydro',
53 u'La Joya': 'hydro',
54 u'Los Negros': 'hydro',
55 u'Los Negros II': 'hydro',
56 u'Los Santos': 'wind',
57 u'MOVASA': 'wind',
58 u'Matamoros': 'unknown',
59 u'Miravalles I': 'geothermal',
60 u'Miravalles II': 'geothermal',
61 u'Miravalles III': 'geothermal',
62 u'Miravalles V': 'geothermal',
63 u'Moín I': 'oil',
64 u'Moín II': 'oil',
65 u'Moín III': 'oil',
66 u'Orosí': 'wind',
67 u'Orotina': 'unknown',
68 u'Otros': 'unknown',
69 u'PE Mogote': 'wind',
70 u'PE Río Naranjo': 'hydro',
71 u'PEG': 'wind',
72 u'Pailas': 'geothermal',
73 u'Parque Solar Juanilama': 'solar',
74 u'Parque Solar Miravalles': 'solar',
75 u'Peñas Blancas': 'hydro',
76 u'Pirrís': 'hydro',
77 u'Plantas Eólicas': 'wind',
78 u'Platanar': 'hydro',
79 u'Pocosol': 'hydro',
80 u'Poás I y II': 'hydro',
81 u'Reventazón': 'hydro',
82 u'Río Lajas': 'hydro',
83 u'Río Macho': 'hydro',
84 u'San Antonio': 'oil',
85 u'San Lorenzo (C)': 'hydro',
86 u'Sandillal': 'hydro',
87 u'Suerkata': 'hydro',
88 u'Taboga': 'biomass',
89 u'Tacares': 'hydro',
90 u'Tejona': 'wind',
91 u'Tilawind': 'wind',
92 u'Torito': 'hydro',
93 u'Toro I': 'hydro',
94 u'Toro II': 'hydro',
95 u'Toro III': 'hydro',
96 u'Tuis (JASEC)': 'hydro',
97 u'Valle Central': 'wind',
98 u'Vara Blanca': 'hydro',
99 u'Ventanas-Garita': 'hydro',
100 u'Vientos de La Perla': 'wind',
101 u'Vientos de Miramar': 'wind',
102 u'Vientos del Este': 'wind',
103 u'Volcán': 'hydro',
104 }
105
106 CHARACTERISTIC_NAME = 'Angostura'
107
108
109 def empty_record(zone_key):
110 return {
111 'zoneKey': zone_key,
112 'capacity': {},
113 'production': {
114 'biomass': 0.0,
115 'coal': 0.0,
116 'gas': 0.0,
117 'hydro': 0.0,
118 'nuclear': 0.0,
119 'oil': 0.0,
120 'solar': 0.0,
121 'wind': 0.0,
122 'geothermal': 0.0,
123 'unknown': 0.0
124 },
125 'storage': {},
126 'source': 'grupoice.com'
127 }
128
129
130 def df_to_data(zone_key, day, df, logger):
131 df = df.dropna(axis=1, how='any')
132 # Check for empty dataframe
133 if df.shape == (1, 1):
134 return []
135 df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')
136 df = df.iloc[:, :-1]
137
138 results = []
139 unknown_plants = set()
140 hour = 0
141 for column in df:
142 data = empty_record(zone_key)
143 data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime
144 for index, value in df[column].items():
145 source = POWER_PLANTS.get(index)
146 if not source:
147 source = 'unknown'
148 unknown_plants.add(index)
149 data['datetime'] = data_time
150 data['production'][source] += max(0.0, value)
151 hour += 1
152 results.append(data)
153
154 for plant in unknown_plants:
155 logger.warning(u'{} is not mapped to generation type'.format(plant),
156 extra={'key': zone_key})
157
158 return results
159
160
161 def fetch_production(zone_key='CR', session=None,
162 target_datetime=None, logger=logging.getLogger(__name__)):
163 # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
164 target_datetime = arrow.get(target_datetime).to(TIMEZONE)
165
166 if target_datetime < arrow.get('2012-07-01'):
167 # data availability limit found by manual trial and error
168 logger.error('CR API does not provide data before 2012-07-01, '
169 '{} was requested'.format(target_datetime),
170 extra={"key": zone_key})
171 return None
172
173 # Do not use existing session as some amount of cache is taking place
174 r = requests.session()
175 url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
176 response = r.get(url)
177
178 soup = BeautifulSoup(response.text, 'html.parser')
179 jsf_view_state = soup.find("input", {"name": 'javax.faces.ViewState'})['value']
180
181 data = [
182 ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),
183 ('formPosdespacho:pickFecha', ''),
184 ('formPosdespacho_SUBMIT', 1),
185 ('javax.faces.ViewState', jsf_view_state),
186 ]
187 response = r.post(url, data=data)
188
189 # tell pandas which table to use by providing CHARACTERISTIC_NAME
190 df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
191
192 results = df_to_data(zone_key, target_datetime, df, logger)
193
194 return results
195
196
197 def fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):
198 """Requests the last known power exchange (in MW) between two regions
199
200 Arguments:
201 zone_key1 -- the first country code
202 zone_key2 -- the second country code; order of the two codes in params doesn't matter
203 session (optional) -- request session passed in order to re-use an existing session
204
205 Return:
206 A dictionary in the form:
207 {
208 'sortedZoneKeys': 'DK->NO',
209 'datetime': '2017-01-01T00:00:00Z',
210 'netFlow': 0.0,
211 'source': 'mysource.com'
212 }
213
214 where net flow is from DK into NO
215 """
216 if target_datetime:
217 raise NotImplementedError('This parser is not yet able to parse past dates')
218
219 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
220
221 df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)
222
223 if sorted_zone_keys == 'CR->NI':
224 flow = df['NICR'][0]
225 elif sorted_zone_keys == 'CR->PA':
226 flow = -1 * df['CRPA'][0]
227 else:
228 raise NotImplementedError('This exchange pair is not implemented')
229
230 data = {
231 'datetime': arrow.now(TIMEZONE).datetime,
232 'sortedZoneKeys': sorted_zone_keys,
233 'netFlow': flow,
234 'source': 'enteoperador.org'
235 }
236
237 return data
238
239
240 if __name__ == '__main__':
241 """Main method, never used by the Electricity Map backend, but handy for testing."""
242
243 from pprint import pprint
244
245 print('fetch_production() ->')
246 pprint(fetch_production())
247
248 print('fetch_production(target_datetime=arrow.get("2018-03-13T12:00Z") ->')
249 pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))
250
251 # this should work
252 print('fetch_production(target_datetime=arrow.get("2013-03-13T12:00Z") ->')
253 pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))
254
255 # this should return None
256 print('fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z") ->')
257 pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))
258
259 print('fetch_exchange() ->')
260 print(fetch_exchange())
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/CR.py b/parsers/CR.py
--- a/parsers/CR.py
+++ b/parsers/CR.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
# coding=utf-8
+import datetime as dt
import logging
import arrow
@@ -160,9 +161,17 @@
def fetch_production(zone_key='CR', session=None,
target_datetime=None, logger=logging.getLogger(__name__)):
- # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
+ # ensure we have an arrow object.
+ # if no target_datetime is specified, this defaults to now.
target_datetime = arrow.get(target_datetime).to(TIMEZONE)
+ # if before 01:30am on the current day then fetch previous day due to
+ # data lag.
+ today = arrow.get().to(TIMEZONE).date()
+ if target_datetime.date() == today:
+ target_datetime = target_datetime if target_datetime.time() >= dt.time(1, 30) \
+ else target_datetime.shift(days=-1)
+
if target_datetime < arrow.get('2012-07-01'):
# data availability limit found by manual trial and error
logger.error('CR API does not provide data before 2012-07-01, '
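As the comment in the patch explains, the root cause is a publication lag: the posdespacho page for the current day is not available until roughly 01:30 local time, so a request between midnight and then finds no table to parse. Below is a minimal standard-library sketch of the same fallback rule; the helper is hypothetical and written only for illustration, while the parser itself keeps using arrow as shown above and only applies the shift when the requested date is the current day.

```python
import datetime as dt

LAG_CUTOFF = dt.time(1, 30)  # assumed publication lag, taken from the patch comment


def resolve_fetch_date(now: dt.datetime) -> dt.date:
    """Return the day whose posdespacho data should be requested at local time `now`."""
    if now.time() < LAG_CUTOFF:
        # Before ~01:30 the current day's table does not exist yet, so fall back a day.
        return (now - dt.timedelta(days=1)).date()
    return now.date()


print(resolve_fetch_date(dt.datetime(2018, 9, 1, 0, 40)))   # 2018-08-31
print(resolve_fetch_date(dt.datetime(2018, 9, 1, 12, 0)))   # 2018-09-01
```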
| {"golden_diff": "diff --git a/parsers/CR.py b/parsers/CR.py\n--- a/parsers/CR.py\n+++ b/parsers/CR.py\n@@ -1,6 +1,7 @@\n #!/usr/bin/env python3\n # coding=utf-8\n \n+import datetime as dt\n import logging\n \n import arrow\n@@ -160,9 +161,17 @@\n \n def fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n- # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.\n+ # ensure we have an arrow object.\n+ # if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n \n+ # if before 01:30am on the current day then fetch previous day due to\n+ # data lag.\n+ today = arrow.get().to(TIMEZONE).date()\n+ if target_datetime.date() == today:\n+ target_datetime = target_datetime if target_datetime.time() >= dt.time(1, 30) \\\n+ else target_datetime.shift(days=-1)\n+\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n", "issue": "Costa Rica does not have data available between 12am-1am\n```\r\nTraceback (most recent call last):\r\n File \"/home/feeder/lib/fetch_data.py\", line 130, in launch_parsers\r\n **parser_kwargs)\r\n File \"/home/electricitymap/parsers/CR.py\", line 191, in fetch_production\r\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/html.py\", line 915, in read_html\r\n keep_default_na=keep_default_na)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/html.py\", line 749, in _parse\r\n raise_with_traceback(retained)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/compat/__init__.py\", line 385, in raise_with_traceback\r\n raise exc.with_traceback(traceback)\r\nValueError: No tables found matching pattern 'Angostura'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\nimport logging\n\nimport arrow\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nTIMEZONE = 'America/Costa_Rica'\nDATE_FORMAT = 'DD/MM/YYYY'\nMONTH_FORMAT = 'MM/YYYY'\nPOWER_PLANTS = {\n u'Aeroenerg\u00eda': 'wind',\n u'Altamira': 'wind',\n u'Angostura': 'hydro',\n u'Arenal': 'hydro',\n u'Balsa Inferior': 'hydro',\n u'Barranca': 'unknown',\n u'Barro Morado': 'geothermal',\n u'Bijagua': 'hydro',\n u'Birris12': 'hydro',\n u'Birris3': 'hydro',\n u'Boca de Pozo': 'hydro',\n u'CNFL': 'unknown',\n u'Cach\u00ed': 'hydro',\n u'Campos Azules': 'wind',\n u'Canalete': 'unknown',\n u'Cariblanco': 'hydro',\n u'Carrillos': 'hydro',\n u'Ca\u00f1o Grande': 'hydro',\n u'Ca\u00f1o Grande III': 'hydro',\n u'Chiripa': 'wind',\n u'Chocosuelas': 'hydro',\n u'Chuc\u00e1s': 'hydro',\n u'Cubujuqu\u00ed': 'hydro',\n u'Daniel Guti\u00e9rrez': 'hydro',\n u'Dengo': 'hydro',\n u'Don Pedro': 'hydro',\n u'Do\u00f1a Julia': 'hydro',\n u'Echandi': 'hydro',\n u'El Angel': 'hydro',\n u'El Angel Ampliaci\u00f3n': 'hydro',\n u'El Embalse': 'hydro',\n u'El General': 'hydro',\n u'El Viejo': 'biomass',\n u'Garabito': 'oil',\n u'Garita': 'hydro',\n u'Gu\u00e1piles': 'oil',\n u'Hidrozarcas': 'hydro',\n u'La Esperanza (CoopeL)': 'hydro',\n u'La Joya': 'hydro',\n u'Los Negros': 'hydro',\n u'Los Negros II': 'hydro',\n u'Los Santos': 'wind',\n u'MOVASA': 'wind',\n u'Matamoros': 'unknown',\n u'Miravalles I': 'geothermal',\n u'Miravalles II': 'geothermal',\n u'Miravalles III': 'geothermal',\n u'Miravalles V': 'geothermal',\n 
u'Mo\u00edn I': 'oil',\n u'Mo\u00edn II': 'oil',\n u'Mo\u00edn III': 'oil',\n u'Oros\u00ed': 'wind',\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n u'PE R\u00edo Naranjo': 'hydro',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n u'Parque Solar Miravalles': 'solar',\n u'Pe\u00f1as Blancas': 'hydro',\n u'Pirr\u00eds': 'hydro',\n u'Plantas E\u00f3licas': 'wind',\n u'Platanar': 'hydro',\n u'Pocosol': 'hydro',\n u'Po\u00e1s I y II': 'hydro',\n u'Reventaz\u00f3n': 'hydro',\n u'R\u00edo Lajas': 'hydro',\n u'R\u00edo Macho': 'hydro',\n u'San Antonio': 'oil',\n u'San Lorenzo (C)': 'hydro',\n u'Sandillal': 'hydro',\n u'Suerkata': 'hydro',\n u'Taboga': 'biomass',\n u'Tacares': 'hydro',\n u'Tejona': 'wind',\n u'Tilawind': 'wind',\n u'Torito': 'hydro',\n u'Toro I': 'hydro',\n u'Toro II': 'hydro',\n u'Toro III': 'hydro',\n u'Tuis (JASEC)': 'hydro',\n u'Valle Central': 'wind',\n u'Vara Blanca': 'hydro',\n u'Ventanas-Garita': 'hydro',\n u'Vientos de La Perla': 'wind',\n u'Vientos de Miramar': 'wind',\n u'Vientos del Este': 'wind',\n u'Volc\u00e1n': 'hydro',\n}\n\nCHARACTERISTIC_NAME = 'Angostura'\n\n\ndef empty_record(zone_key):\n return {\n 'zoneKey': zone_key,\n 'capacity': {},\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {},\n 'source': 'grupoice.com'\n }\n\n\ndef df_to_data(zone_key, day, df, logger):\n df = df.dropna(axis=1, how='any')\n # Check for empty dataframe\n if df.shape == (1, 1):\n return []\n df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')\n df = df.iloc[:, :-1]\n\n results = []\n unknown_plants = set()\n hour = 0\n for column in df:\n data = empty_record(zone_key)\n data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime\n for index, value in df[column].items():\n source = POWER_PLANTS.get(index)\n if not source:\n source = 'unknown'\n unknown_plants.add(index)\n data['datetime'] = data_time\n data['production'][source] += max(0.0, value)\n hour += 1\n results.append(data)\n\n for plant in unknown_plants:\n logger.warning(u'{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n\n return results\n\n\ndef fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n # ensure we have an arrow object. 
if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n '{} was requested'.format(target_datetime),\n extra={\"key\": zone_key})\n return None\n\n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n jsf_view_state = soup.find(\"input\", {\"name\": 'javax.faces.ViewState'})['value']\n\n data = [\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n ('formPosdespacho_SUBMIT', 1),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n response = r.post(url, data=data)\n\n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n\n results = df_to_data(zone_key, target_datetime, df, logger)\n\n return results\n\n\ndef fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)\n\n if sorted_zone_keys == 'CR->NI':\n flow = df['NICR'][0]\n elif sorted_zone_keys == 'CR->PA':\n flow = -1 * df['CRPA'][0]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': arrow.now(TIMEZONE).datetime,\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': flow,\n 'source': 'enteoperador.org'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))\n\n # this should work\n print('fetch_production(target_datetime=arrow.get(\"2013-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))\n\n # this should return None\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))\n\n print('fetch_exchange() ->')\n print(fetch_exchange())\n", "path": "parsers/CR.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\nimport datetime as dt\nimport logging\n\nimport arrow\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nTIMEZONE = 'America/Costa_Rica'\nDATE_FORMAT = 'DD/MM/YYYY'\nMONTH_FORMAT 
= 'MM/YYYY'\nPOWER_PLANTS = {\n u'Aeroenerg\u00eda': 'wind',\n u'Altamira': 'wind',\n u'Angostura': 'hydro',\n u'Arenal': 'hydro',\n u'Balsa Inferior': 'hydro',\n u'Barranca': 'unknown',\n u'Barro Morado': 'geothermal',\n u'Bijagua': 'hydro',\n u'Birris12': 'hydro',\n u'Birris3': 'hydro',\n u'Boca de Pozo': 'hydro',\n u'CNFL': 'unknown',\n u'Cach\u00ed': 'hydro',\n u'Campos Azules': 'wind',\n u'Canalete': 'unknown',\n u'Cariblanco': 'hydro',\n u'Carrillos': 'hydro',\n u'Ca\u00f1o Grande': 'hydro',\n u'Ca\u00f1o Grande III': 'hydro',\n u'Chiripa': 'wind',\n u'Chocosuelas': 'hydro',\n u'Chuc\u00e1s': 'hydro',\n u'Cubujuqu\u00ed': 'hydro',\n u'Daniel Guti\u00e9rrez': 'hydro',\n u'Dengo': 'hydro',\n u'Don Pedro': 'hydro',\n u'Do\u00f1a Julia': 'hydro',\n u'Echandi': 'hydro',\n u'El Angel': 'hydro',\n u'El Angel Ampliaci\u00f3n': 'hydro',\n u'El Embalse': 'hydro',\n u'El General': 'hydro',\n u'El Viejo': 'biomass',\n u'Garabito': 'oil',\n u'Garita': 'hydro',\n u'Gu\u00e1piles': 'oil',\n u'Hidrozarcas': 'hydro',\n u'La Esperanza (CoopeL)': 'hydro',\n u'La Joya': 'hydro',\n u'Los Negros': 'hydro',\n u'Los Negros II': 'hydro',\n u'Los Santos': 'wind',\n u'MOVASA': 'wind',\n u'Matamoros': 'unknown',\n u'Miravalles I': 'geothermal',\n u'Miravalles II': 'geothermal',\n u'Miravalles III': 'geothermal',\n u'Miravalles V': 'geothermal',\n u'Mo\u00edn I': 'oil',\n u'Mo\u00edn II': 'oil',\n u'Mo\u00edn III': 'oil',\n u'Oros\u00ed': 'wind',\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n u'PE R\u00edo Naranjo': 'hydro',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n u'Parque Solar Miravalles': 'solar',\n u'Pe\u00f1as Blancas': 'hydro',\n u'Pirr\u00eds': 'hydro',\n u'Plantas E\u00f3licas': 'wind',\n u'Platanar': 'hydro',\n u'Pocosol': 'hydro',\n u'Po\u00e1s I y II': 'hydro',\n u'Reventaz\u00f3n': 'hydro',\n u'R\u00edo Lajas': 'hydro',\n u'R\u00edo Macho': 'hydro',\n u'San Antonio': 'oil',\n u'San Lorenzo (C)': 'hydro',\n u'Sandillal': 'hydro',\n u'Suerkata': 'hydro',\n u'Taboga': 'biomass',\n u'Tacares': 'hydro',\n u'Tejona': 'wind',\n u'Tilawind': 'wind',\n u'Torito': 'hydro',\n u'Toro I': 'hydro',\n u'Toro II': 'hydro',\n u'Toro III': 'hydro',\n u'Tuis (JASEC)': 'hydro',\n u'Valle Central': 'wind',\n u'Vara Blanca': 'hydro',\n u'Ventanas-Garita': 'hydro',\n u'Vientos de La Perla': 'wind',\n u'Vientos de Miramar': 'wind',\n u'Vientos del Este': 'wind',\n u'Volc\u00e1n': 'hydro',\n}\n\nCHARACTERISTIC_NAME = 'Angostura'\n\n\ndef empty_record(zone_key):\n return {\n 'zoneKey': zone_key,\n 'capacity': {},\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {},\n 'source': 'grupoice.com'\n }\n\n\ndef df_to_data(zone_key, day, df, logger):\n df = df.dropna(axis=1, how='any')\n # Check for empty dataframe\n if df.shape == (1, 1):\n return []\n df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')\n df = df.iloc[:, :-1]\n\n results = []\n unknown_plants = set()\n hour = 0\n for column in df:\n data = empty_record(zone_key)\n data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime\n for index, value in df[column].items():\n source = POWER_PLANTS.get(index)\n if not source:\n source = 'unknown'\n unknown_plants.add(index)\n data['datetime'] = data_time\n data['production'][source] += max(0.0, value)\n hour += 1\n results.append(data)\n\n for plant in 
unknown_plants:\n logger.warning(u'{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n\n return results\n\n\ndef fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n # ensure we have an arrow object.\n # if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n\n # if before 01:30am on the current day then fetch previous day due to\n # data lag.\n today = arrow.get().to(TIMEZONE).date()\n if target_datetime.date() == today:\n target_datetime = target_datetime if target_datetime.time() >= dt.time(1, 30) \\\n else target_datetime.shift(days=-1)\n\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n '{} was requested'.format(target_datetime),\n extra={\"key\": zone_key})\n return None\n\n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n jsf_view_state = soup.find(\"input\", {\"name\": 'javax.faces.ViewState'})['value']\n\n data = [\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n ('formPosdespacho_SUBMIT', 1),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n response = r.post(url, data=data)\n\n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n\n results = df_to_data(zone_key, target_datetime, df, logger)\n\n return results\n\n\ndef fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)\n\n if sorted_zone_keys == 'CR->NI':\n flow = df['NICR'][0]\n elif sorted_zone_keys == 'CR->PA':\n flow = -1 * df['CRPA'][0]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': arrow.now(TIMEZONE).datetime,\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': flow,\n 'source': 'enteoperador.org'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))\n\n # this should work\n print('fetch_production(target_datetime=arrow.get(\"2013-03-13T12:00Z\") ->')\n 
pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))\n\n # this should return None\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))\n\n print('fetch_exchange() ->')\n print(fetch_exchange())\n", "path": "parsers/CR.py"}]} | 3,553 | 311 |
gh_patches_debug_12176 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1818 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to remove image from event in dashboard
If an image has been selected for an event there is currently no way to remove the image after saving.
A shitty workaround is to inspect the hidden input field and set the value to an empty string.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/gallery/widgets.py`
Content:
```
1 # -*- coding: utf8 -*-
2 #
3 # Created by 'myth' on 10/14/15
4
5 from django.conf import settings
6 from django.core.urlresolvers import reverse_lazy
7 from django.forms import HiddenInput, TextInput
8 from django.forms.utils import flatatt, force_text, format_html
9
10 from apps.gallery.models import ResponsiveImage
11
12
13 WIDGET_STRING = """<br /><input{} />\r\n
14 <div id="single-image-field-thumbnail">{}</div>
15 <a href="#" class="btn btn-primary" id="add-responsive-image">\r\n
16 <i class="fa fa-plus fa-lg"></i> Velg</a>\r\n
17 <a href="{}" class="btn btn-primary" target="_blank">\r\n
18 <i class="fa fa-image fa-lg"></i> Last opp</a><br>\r\n
19 <div id="image-selection-wrapper">\r\n
20 <h2 id="image-selection-title">Velg bilde</h2>\r\n
21 <div class="row">\r\n
22 <div class="col-md-12">\r\n
23 <div class="input-group">\r\n
24 <input type="text" id="image-gallery-search" class="form-control" placeholder="Skriv inn søkeord...">\r\n
25 <span class="input-group-btn">\r\n
26 <a class="btn btn-primary" id="image-gallery-search-button" type="button">Søk!</a>\r\n
27 </span>\r\n
28 </div>\r\n
29 </div>\r\n
30 </div>\r\n
31 <hr />\r\n
32 <div class="row" id="image-gallery-search-results"></div>\r\n
33 </div>\r\n"""
34
35
36 class SingleImageInput(HiddenInput):
37 """
38 SingleImageField adds wrapper HTML around the hidden input field containing the ResponsiveImage ID
39 """
40
41 def __init__(self, attrs=None):
42 super(SingleImageInput, self).__init__(attrs)
43 self.input_type = 'hidden'
44
45 def render(self, name, value, attrs=None):
46 """
47 Renders this field widget as HTML
48 :param name: Field input name
49 :param value: Field input value
50 :param attrs: Field input attributes
51 :return: An HTML string representing this widget
52 """
53
54 if value is None:
55 value = ''
56
57 img_thumb = 'Det er ikke valgt noe bilde.'
58 final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
59 if value != '':
60 # Only add the value attribute if the value is non-empty
61 final_attrs['value'] = force_text(self._format_value(value))
62 img = ResponsiveImage.objects.get(pk=value)
63 img_thumb = format_html(
64 '<img src="{}" alt title="{}"/>',
65 settings.MEDIA_URL + str(img.thumbnail),
66 str(img.name),
67 encoding='utf-8'
68 )
69
70 upload_url = reverse_lazy('gallery_dashboard:upload')
71
72 return format_html(WIDGET_STRING, flatatt(final_attrs), img_thumb, upload_url)
73
74
75 class TagInputField(TextInput):
76 """
77 Adds some extras to a TextInputField to support space or comma separated tagging
78 """
79
80 def __init__(self, attrs=None):
81 super(TagInputField, self).__init__(attrs=attrs)
82
83 def render(self, name, value, attrs=None):
84 """
85 Renders this field widget as HTML
86 :param name: Field input name
87 :param value: Field input value
88 :param attrs: Field input attributes
89 :return: An HTML string representing this widget
90 """
91
92 return super(TagInputField, self).render(name, value, attrs=attrs)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/gallery/widgets.py b/apps/gallery/widgets.py
--- a/apps/gallery/widgets.py
+++ b/apps/gallery/widgets.py
@@ -15,7 +15,9 @@
<a href="#" class="btn btn-primary" id="add-responsive-image">\r\n
<i class="fa fa-plus fa-lg"></i> Velg</a>\r\n
<a href="{}" class="btn btn-primary" target="_blank">\r\n
-<i class="fa fa-image fa-lg"></i> Last opp</a><br>\r\n
+<i class="fa fa-image fa-lg"></i> Last opp</a>\r\n
+<a href="#" class="btn btn-danger" id="dashboard-gallery-remove-image">\r\n
+<i class="fa fa-times fa-lg"></i> Fjern bilde</a><br>\r\n
<div id="image-selection-wrapper">\r\n
<h2 id="image-selection-title">Velg bilde</h2>\r\n
<div class="row">\r\n
| {"golden_diff": "diff --git a/apps/gallery/widgets.py b/apps/gallery/widgets.py\n--- a/apps/gallery/widgets.py\n+++ b/apps/gallery/widgets.py\n@@ -15,7 +15,9 @@\n <a href=\"#\" class=\"btn btn-primary\" id=\"add-responsive-image\">\\r\\n\n <i class=\"fa fa-plus fa-lg\"></i> Velg</a>\\r\\n\n <a href=\"{}\" class=\"btn btn-primary\" target=\"_blank\">\\r\\n\n-<i class=\"fa fa-image fa-lg\"></i> Last opp</a><br>\\r\\n\n+<i class=\"fa fa-image fa-lg\"></i> Last opp</a>\\r\\n\n+<a href=\"#\" class=\"btn btn-danger\" id=\"dashboard-gallery-remove-image\">\\r\\n\n+<i class=\"fa fa-times fa-lg\"></i> Fjern bilde</a><br>\\r\\n\n <div id=\"image-selection-wrapper\">\\r\\n\n <h2 id=\"image-selection-title\">Velg bilde</h2>\\r\\n\n <div class=\"row\">\\r\\n\n", "issue": "Unable to remove image from event in dashboard\nIf an image has been selected for an event there is currently no way to remove the image after saving. \r\n\r\nA shitty workaround is to inspect the hidden input field and set the value to an empty string.\n", "before_files": [{"content": "# -*- coding: utf8 -*-\n#\n# Created by 'myth' on 10/14/15\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.forms import HiddenInput, TextInput\nfrom django.forms.utils import flatatt, force_text, format_html\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nWIDGET_STRING = \"\"\"<br /><input{} />\\r\\n\n<div id=\"single-image-field-thumbnail\">{}</div>\n<a href=\"#\" class=\"btn btn-primary\" id=\"add-responsive-image\">\\r\\n\n<i class=\"fa fa-plus fa-lg\"></i> Velg</a>\\r\\n\n<a href=\"{}\" class=\"btn btn-primary\" target=\"_blank\">\\r\\n\n<i class=\"fa fa-image fa-lg\"></i> Last opp</a><br>\\r\\n\n<div id=\"image-selection-wrapper\">\\r\\n\n<h2 id=\"image-selection-title\">Velg bilde</h2>\\r\\n\n<div class=\"row\">\\r\\n\n<div class=\"col-md-12\">\\r\\n\n<div class=\"input-group\">\\r\\n\n<input type=\"text\" id=\"image-gallery-search\" class=\"form-control\" placeholder=\"Skriv inn s\u00f8keord...\">\\r\\n\n<span class=\"input-group-btn\">\\r\\n\n<a class=\"btn btn-primary\" id=\"image-gallery-search-button\" type=\"button\">S\u00f8k!</a>\\r\\n\n</span>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n<hr />\\r\\n\n<div class=\"row\" id=\"image-gallery-search-results\"></div>\\r\\n\n</div>\\r\\n\"\"\"\n\n\nclass SingleImageInput(HiddenInput):\n \"\"\"\n SingleImageField adds wrapper HTML around the hidden input field containing the ResponsiveImage ID\n \"\"\"\n\n def __init__(self, attrs=None):\n super(SingleImageInput, self).__init__(attrs)\n self.input_type = 'hidden'\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n if value is None:\n value = ''\n\n img_thumb = 'Det er ikke valgt noe bilde.'\n final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)\n if value != '':\n # Only add the value attribute if the value is non-empty\n final_attrs['value'] = force_text(self._format_value(value))\n img = ResponsiveImage.objects.get(pk=value)\n img_thumb = format_html(\n '<img src=\"{}\" alt title=\"{}\"/>',\n settings.MEDIA_URL + str(img.thumbnail),\n str(img.name),\n encoding='utf-8'\n )\n\n upload_url = reverse_lazy('gallery_dashboard:upload')\n\n return format_html(WIDGET_STRING, flatatt(final_attrs), img_thumb, upload_url)\n\n\nclass TagInputField(TextInput):\n 
\"\"\"\n Adds some extras to a TextInputField to support space or comma separated tagging\n \"\"\"\n\n def __init__(self, attrs=None):\n super(TagInputField, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n return super(TagInputField, self).render(name, value, attrs=attrs)\n", "path": "apps/gallery/widgets.py"}], "after_files": [{"content": "# -*- coding: utf8 -*-\n#\n# Created by 'myth' on 10/14/15\n\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.forms import HiddenInput, TextInput\nfrom django.forms.utils import flatatt, force_text, format_html\n\nfrom apps.gallery.models import ResponsiveImage\n\n\nWIDGET_STRING = \"\"\"<br /><input{} />\\r\\n\n<div id=\"single-image-field-thumbnail\">{}</div>\n<a href=\"#\" class=\"btn btn-primary\" id=\"add-responsive-image\">\\r\\n\n<i class=\"fa fa-plus fa-lg\"></i> Velg</a>\\r\\n\n<a href=\"{}\" class=\"btn btn-primary\" target=\"_blank\">\\r\\n\n<i class=\"fa fa-image fa-lg\"></i> Last opp</a>\\r\\n\n<a href=\"#\" class=\"btn btn-danger\" id=\"dashboard-gallery-remove-image\">\\r\\n\n<i class=\"fa fa-times fa-lg\"></i> Fjern bilde</a><br>\\r\\n\n<div id=\"image-selection-wrapper\">\\r\\n\n<h2 id=\"image-selection-title\">Velg bilde</h2>\\r\\n\n<div class=\"row\">\\r\\n\n<div class=\"col-md-12\">\\r\\n\n<div class=\"input-group\">\\r\\n\n<input type=\"text\" id=\"image-gallery-search\" class=\"form-control\" placeholder=\"Skriv inn s\u00f8keord...\">\\r\\n\n<span class=\"input-group-btn\">\\r\\n\n<a class=\"btn btn-primary\" id=\"image-gallery-search-button\" type=\"button\">S\u00f8k!</a>\\r\\n\n</span>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n</div>\\r\\n\n<hr />\\r\\n\n<div class=\"row\" id=\"image-gallery-search-results\"></div>\\r\\n\n</div>\\r\\n\"\"\"\n\n\nclass SingleImageInput(HiddenInput):\n \"\"\"\n SingleImageField adds wrapper HTML around the hidden input field containing the ResponsiveImage ID\n \"\"\"\n\n def __init__(self, attrs=None):\n super(SingleImageInput, self).__init__(attrs)\n self.input_type = 'hidden'\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n if value is None:\n value = ''\n\n img_thumb = 'Det er ikke valgt noe bilde.'\n final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)\n if value != '':\n # Only add the value attribute if the value is non-empty\n final_attrs['value'] = force_text(self._format_value(value))\n img = ResponsiveImage.objects.get(pk=value)\n img_thumb = format_html(\n '<img src=\"{}\" alt title=\"{}\"/>',\n settings.MEDIA_URL + str(img.thumbnail),\n str(img.name),\n encoding='utf-8'\n )\n\n upload_url = reverse_lazy('gallery_dashboard:upload')\n\n return format_html(WIDGET_STRING, flatatt(final_attrs), img_thumb, upload_url)\n\n\nclass TagInputField(TextInput):\n \"\"\"\n Adds some extras to a TextInputField to support space or comma separated tagging\n \"\"\"\n\n def __init__(self, attrs=None):\n super(TagInputField, self).__init__(attrs=attrs)\n\n def render(self, name, value, attrs=None):\n \"\"\"\n Renders this field widget as HTML\n :param name: Field input name\n :param value: Field input 
value\n :param attrs: Field input attributes\n :return: An HTML string representing this widget\n \"\"\"\n\n return super(TagInputField, self).render(name, value, attrs=attrs)\n", "path": "apps/gallery/widgets.py"}]} | 1,281 | 229 |
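The golden diff for this row only adds the `dashboard-gallery-remove-image` anchor to `WIDGET_STRING`; actually clearing the hidden input when the button is clicked is left to the dashboard's JavaScript, which is outside the diff. A minimal sketch of a test that the new control is rendered, assuming a configured Django test environment for the onlineweb4 project (the test name is illustrative):

```python
from apps.gallery import widgets


def test_widget_string_contains_remove_image_button():
    # After the patch the widget template should expose a control that the
    # dashboard JavaScript can hook into to clear the hidden input field.
    assert 'id="dashboard-gallery-remove-image"' in widgets.WIDGET_STRING
    assert "Fjern bilde" in widgets.WIDGET_STRING
```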
gh_patches_debug_28339 | rasdani/github-patches | git_diff | mlflow__mlflow-5914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace `unittest.TestCase.assertRaises` with `unittest.TestCase.assertRaisesRegex`
Some tests use `unittest.TestCase.assertRaises` to test that an exception is raised for illegal operations, but they need to be replaced with `unittest.TestCase.assertRaisesRegex`.
### Why do we need this change?
Let's say we have a function that raises an exception:
```python
def throw_exception(...):
if condition_1:
raise TypeError("condition_1")
if condition_2:
raise TypeError("condition_2")
...
```
If we test this function using `assertRaises`:
```python
class MyTest(unittest.TestCase):
def test_throw_exception(self):
# Does `throw_exception` really raise the second TypeError?
# It might throw the first TypeError, then the test will pass.
with self.assertRaises(TypeError):
throw_exception(...) # should raise TypeError("condition_2")
```
If we test this function using `assertRaisesRegex`:
```python
class MyTest(unittest.TestCase):
def test_throw_exception(self):
# This test fails when `throw_exception` raises the first TypeError.
        with self.assertRaisesRegex(TypeError, "condition_2"):
throw_exception(...) # should raise TypeError("condition_2")
```
### Example
https://github.com/mlflow/mlflow/blob/fe6618823a2e6038149ee0da675503d2764552ca/tests/store/tracking/test_sqlalchemy_store.py#L107
The code above needs to be fixed to the following:
```python
# "<string that matches the error message>" must be replaced
with self.assertRaisesRegex(MlflowException, "<string that matches the error message>") as e:
```
### References
- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaises
- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegex
### Instructions
https://github.com/mlflow/mlflow/blob/101ad6e8eb383c769178df0df83d1d2a1cea6b4a/pylint_plugins/assert_raises_without_msg.py#L20-L33
Ping me with the file you want to work on :)
| File | Assignee | PR | Done |
| :---------------------------------------------------- | :---------- | :---- | :--- |
| `tests/entities/test_run_status.py` | @Sumanth077 | | |
| `tests/store/model_registry/test_sqlalchemy_store.py` | @ognis1205 | #5875 | ✅ |
| `tests/store/db/test_utils.py` | @erich-db | | |
| `tests/store/tracking/__init__.py` | @Sumanth077 | | |
| `tests/store/tracking/test_file_store.py` | @andy1122 | | |
| `tests/store/tracking/test_sqlalchemy_store.py` | @ognis1205 | #5875 | ✅ |
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylint_plugins/unittest_assert_raises.py`
Content:
```
1 import os
2
3 import astroid
4 from pylint.interfaces import IAstroidChecker
5 from pylint.checkers import BaseChecker
6
7
8 def _is_unittest_assert_raises(node: astroid.Call):
9 return isinstance(node.func, astroid.Attribute) and node.func.as_string() == "self.assertRaises"
10
11
12 IGNORE_FILES = list(
13 map(
14 os.path.abspath,
15 [
16 # Instructions
17 # ============
18 # 1. Select a file in the list below and remove it.
19 # 2. Run pylint and confirm it fails.
20 # 3. Fix the lines printed out in the previous step.
21 # 4. Run pylint again and confirm it succeeds now.
22 # 5. Run pytest and confirm the changed lines don't fail.
23 # 6. Open a PR.
24 "tests/entities/test_run_status.py",
25 "tests/store/db/test_utils.py",
26 "tests/store/tracking/__init__.py",
27 "tests/store/tracking/test_file_store.py",
28 ],
29 )
30 )
31
32
33 def _should_ignore(path: str):
34 return path in IGNORE_FILES
35
36
37 class UnittestAssertRaises(BaseChecker):
38 __implements__ = IAstroidChecker
39
40 name = "unittest-assert-raises"
41 msgs = {
42 "W0003": (
43 "`assertRaises` must be replaced with `assertRaisesRegex`",
44 name,
45 "Use `assertRaisesRegex` instead",
46 ),
47 }
48 priority = -1
49
50 def visit_call(self, node: astroid.Call):
51 if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):
52 self.add_message(self.name, node=node)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py
--- a/pylint_plugins/unittest_assert_raises.py
+++ b/pylint_plugins/unittest_assert_raises.py
@@ -1,5 +1,3 @@
-import os
-
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
@@ -9,31 +7,6 @@
return isinstance(node.func, astroid.Attribute) and node.func.as_string() == "self.assertRaises"
-IGNORE_FILES = list(
- map(
- os.path.abspath,
- [
- # Instructions
- # ============
- # 1. Select a file in the list below and remove it.
- # 2. Run pylint and confirm it fails.
- # 3. Fix the lines printed out in the previous step.
- # 4. Run pylint again and confirm it succeeds now.
- # 5. Run pytest and confirm the changed lines don't fail.
- # 6. Open a PR.
- "tests/entities/test_run_status.py",
- "tests/store/db/test_utils.py",
- "tests/store/tracking/__init__.py",
- "tests/store/tracking/test_file_store.py",
- ],
- )
-)
-
-
-def _should_ignore(path: str):
- return path in IGNORE_FILES
-
-
class UnittestAssertRaises(BaseChecker):
__implements__ = IAstroidChecker
@@ -48,5 +21,5 @@
priority = -1
def visit_call(self, node: astroid.Call):
- if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):
+ if _is_unittest_assert_raises(node):
self.add_message(self.name, node=node)
| {"golden_diff": "diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py\n--- a/pylint_plugins/unittest_assert_raises.py\n+++ b/pylint_plugins/unittest_assert_raises.py\n@@ -1,5 +1,3 @@\n-import os\n-\n import astroid\n from pylint.interfaces import IAstroidChecker\n from pylint.checkers import BaseChecker\n@@ -9,31 +7,6 @@\n return isinstance(node.func, astroid.Attribute) and node.func.as_string() == \"self.assertRaises\"\n \n \n-IGNORE_FILES = list(\n- map(\n- os.path.abspath,\n- [\n- # Instructions\n- # ============\n- # 1. Select a file in the list below and remove it.\n- # 2. Run pylint and confirm it fails.\n- # 3. Fix the lines printed out in the previous step.\n- # 4. Run pylint again and confirm it succeeds now.\n- # 5. Run pytest and confirm the changed lines don't fail.\n- # 6. Open a PR.\n- \"tests/entities/test_run_status.py\",\n- \"tests/store/db/test_utils.py\",\n- \"tests/store/tracking/__init__.py\",\n- \"tests/store/tracking/test_file_store.py\",\n- ],\n- )\n-)\n-\n-\n-def _should_ignore(path: str):\n- return path in IGNORE_FILES\n-\n-\n class UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n \n@@ -48,5 +21,5 @@\n priority = -1\n \n def visit_call(self, node: astroid.Call):\n- if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):\n+ if _is_unittest_assert_raises(node):\n self.add_message(self.name, node=node)\n", "issue": "Replace `unittest.TestCase.assertRaises` with `unittest.TestCase.assertRaisesRegex`\nSome tests use `unittest.TestCase.assertRaises` to test an exception is raised for illegal operations, but they need to be replaces with `unittest.TestCase.assertRaisesRegex`.\r\n\r\n### Why do we need this change?\r\n\r\nLet's say we have a function that raises an exception:\r\n\r\n```python\r\ndef throw_exception(...):\r\n if condition_1:\r\n raise TypeError(\"condition_1\")\r\n if condition_2:\r\n raise TypeError(\"condition_2\")\r\n ...\r\n```\r\n\r\nIf we test this function using `assertRaises`:\r\n\r\n```python\r\nclass MyTest(unittest.TestCase):\r\n def test_throw_exception(self):\r\n # Does `throw_exception` really raise the second TypeError?\r\n # It might throw the first TypeError, then the test will pass.\r\n with self.assertRaises(TypeError):\r\n throw_exception(...) # should raise TypeError(\"condition_2\")\r\n```\r\n\r\nIf we test this function using `assertRaisesRegex`:\r\n\r\n```python\r\nclass MyTest(unittest.TestCase):\r\n def test_throw_exception(self):\r\n # This test fails when `throw_exception` raises the first TypeError.\r\n with self. assertRaisesRegex(TypeError, \"condition_b\"):\r\n throw_exception(...) 
# should raise TypeError(\"condition_2\")\r\n```\r\n\r\n### Example\r\n\r\nhttps://github.com/mlflow/mlflow/blob/fe6618823a2e6038149ee0da675503d2764552ca/tests/store/tracking/test_sqlalchemy_store.py#L107\r\n\r\nThe code above needs to be fixed to the following:\r\n\r\n```python\r\n # \"<string that matches the error message>\" must be replaced\r\n with self.assertRaisesRegex(MlflowException, \"<string that matches the error message>\") as e:\r\n```\r\n\r\n### References\r\n\r\n- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaises\r\n- https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertRaisesRegex\r\n\r\n### Instructions\r\n\r\nhttps://github.com/mlflow/mlflow/blob/101ad6e8eb383c769178df0df83d1d2a1cea6b4a/pylint_plugins/assert_raises_without_msg.py#L20-L33\r\n\r\nPing me with the file you want to work on :)\r\n\r\n| File | Assignee | PR | Done |\r\n| :---------------------------------------------------- | :---------- | :---- | :--- |\r\n| `tests/entities/test_run_status.py` | @Sumanth077 | | |\r\n| `tests/store/model_registry/test_sqlalchemy_store.py` | @ognis1205 | #5875 | \u2705 |\r\n| `tests/store/db/test_utils.py` | @erich-db | | |\r\n| `tests/store/tracking/__init__.py` | @Sumanth077 | | |\r\n| `tests/store/tracking/test_file_store.py` | @andy1122 | | |\r\n| `tests/store/tracking/test_sqlalchemy_store.py` | @ognis1205 | #5875 | \u2705 |\r\n\n", "before_files": [{"content": "import os\n\nimport astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\n\ndef _is_unittest_assert_raises(node: astroid.Call):\n return isinstance(node.func, astroid.Attribute) and node.func.as_string() == \"self.assertRaises\"\n\n\nIGNORE_FILES = list(\n map(\n os.path.abspath,\n [\n # Instructions\n # ============\n # 1. Select a file in the list below and remove it.\n # 2. Run pylint and confirm it fails.\n # 3. Fix the lines printed out in the previous step.\n # 4. Run pylint again and confirm it succeeds now.\n # 5. Run pytest and confirm the changed lines don't fail.\n # 6. 
Open a PR.\n \"tests/entities/test_run_status.py\",\n \"tests/store/db/test_utils.py\",\n \"tests/store/tracking/__init__.py\",\n \"tests/store/tracking/test_file_store.py\",\n ],\n )\n)\n\n\ndef _should_ignore(path: str):\n return path in IGNORE_FILES\n\n\nclass UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"unittest-assert-raises\"\n msgs = {\n \"W0003\": (\n \"`assertRaises` must be replaced with `assertRaisesRegex`\",\n name,\n \"Use `assertRaisesRegex` instead\",\n ),\n }\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if not _should_ignore(node.root().file) and _is_unittest_assert_raises(node):\n self.add_message(self.name, node=node)\n", "path": "pylint_plugins/unittest_assert_raises.py"}], "after_files": [{"content": "import astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\n\ndef _is_unittest_assert_raises(node: astroid.Call):\n return isinstance(node.func, astroid.Attribute) and node.func.as_string() == \"self.assertRaises\"\n\n\nclass UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"unittest-assert-raises\"\n msgs = {\n \"W0003\": (\n \"`assertRaises` must be replaced with `assertRaisesRegex`\",\n name,\n \"Use `assertRaisesRegex` instead\",\n ),\n }\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if _is_unittest_assert_raises(node):\n self.add_message(self.name, node=node)\n", "path": "pylint_plugins/unittest_assert_raises.py"}]} | 1,383 | 391 |
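Once the ignore list is removed, the W0003 checker flags every `self.assertRaises(...)` call in the repository, so the remaining files in the issue's table have to be rewritten by hand. A minimal, self-contained sketch of the rewrite the checker pushes test authors toward (the example function and messages are illustrative, not mlflow code):

```python
import unittest


def create_experiment(name):
    if not isinstance(name, str):
        raise ValueError("Invalid experiment name: must be a string")
    if not name:
        raise ValueError("Invalid experiment name: must be non-empty")
    return name


class CreateExperimentTest(unittest.TestCase):
    def test_empty_name_is_rejected(self):
        # Old style, now flagged by W0003: passes as long as *any* ValueError
        # is raised, even the "must be a string" one from the wrong branch.
        #
        # with self.assertRaises(ValueError):
        #     create_experiment("")
        #
        # Preferred style: the message pattern pins down which error we expect.
        with self.assertRaisesRegex(ValueError, "must be non-empty"):
            create_experiment("")


if __name__ == "__main__":
    unittest.main()
```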
gh_patches_debug_7346 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add subclass test case for model handle test
We only define a functional model in test_module.
https://github.com/sql-machine-learning/elasticdl/blob/35d0647a737d93e6969764cabe541ac8078d13bf/elasticdl/python/tests/test_module.py#L16-L20
We should add a subclassed one as well to ensure the ModelHandler logic is correct.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/common/model_handler.py`
Content:
```
1 import abc
2
3 import tensorflow as tf
4
5 from elasticdl.proto import elasticdl_pb2
6 from elasticdl.python.common import model_utils
7 from elasticdl.python.common.constants import DistributionStrategy
8 from elasticdl.python.common.log_utils import default_logger as logger
9 from elasticdl.python.common.tensor import Tensor
10 from elasticdl.python.elasticdl.layers.embedding import Embedding
11
12
13 class ModelHandler(metaclass=abc.ABCMeta):
14 """Generate the model to train in ElasticDL for different distributed
15 strategies and export trained model in ElasticDL to SavedModel.
16 """
17
18 @abc.abstractmethod
19 def get_model_to_train(self, model):
20 """Generate a model to train in ElasticDL.
21
22 Args:
23 model: A native keras model instance.
24
25 Returns:
26 A keras model instance for ElasticDL training.
27 """
28
29 @abc.abstractmethod
30 def get_model_to_export(self, model, dataset):
31 """Get the model which can be exported a SavedModel
32 by tf.saved_model.save.
33
34 Args:
35 model: A keras model instance trained by ElasticDL and
36 it may contains `elasticdl.layers.Embedding` layers.
37 dataset: A `tf.data.Dataset` instance which has the same outputs as
38 the training dataset.
39
40 Returns:
41 A keras model instance trained by ElasticDL.
42 """
43
44 @classmethod
45 def get_model_handler(cls, distribution_strategy=None, stub=None):
46 """Create a model handler to process the model for the
47 distributed strategy.
48
49 Args:
50 distribution_strategy (string): distribution strategy name
51 stub: A stub to communicate with parameter server(s) or the master,
52 e.g. `elasticdl_pb2_grpc.MasterStub`.
53
54 Return:
55 ModelHandler subclass instance.
56 """
57 if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:
58 return ParameterServerModelHandler(stub=stub)
59 else:
60 return DefaultModelHandler()
61
62
63 class DefaultModelHandler(ModelHandler):
64 """Return the origin model to train and export."""
65
66 def get_model_to_train(self, model):
67 return model
68
69 def get_model_to_export(self, model, dataset):
70 """
71 Get model with inputs and trained parameters to export.
72 """
73 if not model.inputs:
74 model._build_model_with_inputs(inputs=dataset, targets=None)
75 return model
76
77
78 class ParameterServerModelHandler(ModelHandler):
79 """Model handler for parameter server strategy.
80 For training, The handler will replace `tf.keras.layers.Embedding`
81 layers with`elasticdl.layers.Embedding` for training.
82 For saving model, the handler will restore Keras model definition and
83 pull trained parameters from parameter server(s) for the model.
84 """
85
86 def __init__(self, stub=None):
87 """
88 Arguments:
89 stub: A stub to get parameters from parameter server(s) or
90 the master,e.g. `elasticdl_pb2_grpc.MasterStub`
91 """
92 self._stub = stub
93
94 def get_model_to_train(self, model):
95 """Replace the tf.keras.layers.Embedding layer in the model with
96 an elasticdl.layers.Embedding layer in ParameterServerStrategy.
97 """
98 if type(model) == tf.keras.Sequential or model._is_graph_network:
99 model = self._replace_embedding_layer_to_clone_model(
100 model, tf.keras.layers.Embedding, Embedding
101 )
102 else:
103 model = self._replace_embedding_attributes_for_subclass(
104 model, tf.keras.layers.Embedding, Embedding
105 )
106 return model
107
108 def get_model_to_export(self, model, dataset):
109 """Get the model which can be exported to a SavedModel by
110 `tf.saved_model.save`.
111 """
112 model = self._restore_keras_model_def(model)
113 if not model.inputs:
114 # build model to add inputs and outputs that
115 # can be consumed by tf-serving
116 model._build_model_with_inputs(inputs=dataset, targets=None)
117
118 trained_params = self._get_trained_params(model)
119 for var in model.trainable_variables:
120 var.assign(trained_params[var.name])
121 return model
122
123 def _restore_keras_model_def(self, model):
124 """Restore Keras model definition by replacing
125 `elasticdl.layers.Embedding` layers with
126 `tf.keras.layers.Embedding` layers.
127 """
128 # clear keras model session to avoid clutter from old models/layers.
129 tf.keras.backend.clear_session()
130 if (
131 type(model) == tf.keras.models.Model
132 and not model._is_graph_network
133 ):
134 model = self._replace_embedding_attributes_for_subclass(
135 model, Embedding, tf.keras.layers.Embedding
136 )
137 else:
138 model = self._replace_embedding_layer_to_clone_model(
139 model, Embedding, tf.keras.layers.Embedding
140 )
141 return model
142
143 def _replace_embedding_layer_to_clone_model(
144 self, model, src_embedding_class, dst_embedding_class
145 ):
146 """Clone a new model by cloning model and replace the
147 src_embedding_class layer with a dst_embedding_class.
148 """
149
150 def _clone_function(layer):
151 if type(layer) == src_embedding_class:
152 logger.debug(
153 "Replace {} with {}".format(
154 src_embedding_class, dst_embedding_class
155 )
156 )
157 embedding_layer = dst_embedding_class(
158 output_dim=layer.output_dim,
159 input_dim=layer.input_dim,
160 embeddings_initializer=layer.embeddings_initializer,
161 mask_zero=layer.mask_zero,
162 input_length=layer.input_length,
163 )
164 return embedding_layer
165 return layer
166
167 return tf.keras.models.clone_model(
168 model, clone_function=_clone_function
169 )
170
171 def _replace_embedding_attributes_for_subclass(
172 self, model, src_embedding_class, dst_embedding_class
173 ):
174 """Replace the keras embedding attribute with
175 elasticdl.layers.Embedding layer.
176 """
177 for name, value in model.__dict__.items():
178 if type(value) == src_embedding_class:
179 embedding_layer = dst_embedding_class(
180 output_dim=value.output_dim,
181 input_dim=value.input_dim,
182 embeddings_initializer=value.embeddings_initializer,
183 mask_zero=value.mask_zero,
184 input_length=value.input_length,
185 )
186 setattr(model, name, embedding_layer)
187 return model
188
189 def _get_trained_params(self, model):
190 """Get all trained variable values of the model
191 """
192 trained_params = self._get_non_embedding_variables(
193 -1, elasticdl_pb2.MINIMUM
194 )
195 trained_embedding_params = self._get_trained_embedding_params(model)
196 trained_params.update(trained_embedding_params)
197 return trained_params
198
199 def _get_trained_embedding_params(self, model):
200 """Get trained embedding table from PS
201 """
202 embedding_params = {}
203 embedding_layers = model_utils.find_layer(model, Embedding)
204 for embedding_layer in embedding_layers:
205 # TODO get all embedding vectors of the embedding layer from PS
206 pass
207 return embedding_params
208
209 # TODO: Get model from parameter servers not the master if
210 # parameter servers are ready.
211 def _get_non_embedding_variables(self, version, method):
212 """Get model from master, and update model_version
213 """
214 req = elasticdl_pb2.GetModelRequest()
215 req.version = version
216 req.method = method
217 model = self._stub.GetModel(req, None)
218 variables = {}
219 for tensor_pb in model.param:
220 tensor = Tensor.from_tensor_pb(tensor_pb)
221 variables[tensor.name] = tensor.to_ndarray()
222 return variables
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/common/model_handler.py b/elasticdl/python/common/model_handler.py
--- a/elasticdl/python/common/model_handler.py
+++ b/elasticdl/python/common/model_handler.py
@@ -128,7 +128,7 @@
# clear keras model session to avoid clutter from old models/layers.
tf.keras.backend.clear_session()
if (
- type(model) == tf.keras.models.Model
+ isinstance(model, tf.keras.models.Model)
and not model._is_graph_network
):
model = self._replace_embedding_attributes_for_subclass(
| {"golden_diff": "diff --git a/elasticdl/python/common/model_handler.py b/elasticdl/python/common/model_handler.py\n--- a/elasticdl/python/common/model_handler.py\n+++ b/elasticdl/python/common/model_handler.py\n@@ -128,7 +128,7 @@\n # clear keras model session to avoid clutter from old models/layers.\n tf.keras.backend.clear_session()\n if (\n- type(model) == tf.keras.models.Model\n+ isinstance(model, tf.keras.models.Model)\n and not model._is_graph_network\n ):\n model = self._replace_embedding_attributes_for_subclass(\n", "issue": "add subclass test case for model handle test \nWe only define a functional model in test_module.\r\nhttps://github.com/sql-machine-learning/elasticdl/blob/35d0647a737d93e6969764cabe541ac8078d13bf/elasticdl/python/tests/test_module.py#L16-L20\r\n\r\nWe should add subclass one to ensure ModelHandle logic correct.\n", "before_files": [{"content": "import abc\n\nimport tensorflow as tf\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common import model_utils\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.tensor import Tensor\nfrom elasticdl.python.elasticdl.layers.embedding import Embedding\n\n\nclass ModelHandler(metaclass=abc.ABCMeta):\n \"\"\"Generate the model to train in ElasticDL for different distributed\n strategies and export trained model in ElasticDL to SavedModel.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_train(self, model):\n \"\"\"Generate a model to train in ElasticDL.\n\n Args:\n model: A native keras model instance.\n\n Returns:\n A keras model instance for ElasticDL training.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported a SavedModel\n by tf.saved_model.save.\n\n Args:\n model: A keras model instance trained by ElasticDL and\n it may contains `elasticdl.layers.Embedding` layers.\n dataset: A `tf.data.Dataset` instance which has the same outputs as\n the training dataset.\n\n Returns:\n A keras model instance trained by ElasticDL.\n \"\"\"\n\n @classmethod\n def get_model_handler(cls, distribution_strategy=None, stub=None):\n \"\"\"Create a model handler to process the model for the\n distributed strategy.\n\n Args:\n distribution_strategy (string): distribution strategy name\n stub: A stub to communicate with parameter server(s) or the master,\n e.g. 
`elasticdl_pb2_grpc.MasterStub`.\n\n Return:\n ModelHandler subclass instance.\n \"\"\"\n if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:\n return ParameterServerModelHandler(stub=stub)\n else:\n return DefaultModelHandler()\n\n\nclass DefaultModelHandler(ModelHandler):\n \"\"\"Return the origin model to train and export.\"\"\"\n\n def get_model_to_train(self, model):\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"\n Get model with inputs and trained parameters to export.\n \"\"\"\n if not model.inputs:\n model._build_model_with_inputs(inputs=dataset, targets=None)\n return model\n\n\nclass ParameterServerModelHandler(ModelHandler):\n \"\"\"Model handler for parameter server strategy.\n For training, The handler will replace `tf.keras.layers.Embedding`\n layers with`elasticdl.layers.Embedding` for training.\n For saving model, the handler will restore Keras model definition and\n pull trained parameters from parameter server(s) for the model.\n \"\"\"\n\n def __init__(self, stub=None):\n \"\"\"\n Arguments:\n stub: A stub to get parameters from parameter server(s) or\n the master,e.g. `elasticdl_pb2_grpc.MasterStub`\n \"\"\"\n self._stub = stub\n\n def get_model_to_train(self, model):\n \"\"\"Replace the tf.keras.layers.Embedding layer in the model with\n an elasticdl.layers.Embedding layer in ParameterServerStrategy.\n \"\"\"\n if type(model) == tf.keras.Sequential or model._is_graph_network:\n model = self._replace_embedding_layer_to_clone_model(\n model, tf.keras.layers.Embedding, Embedding\n )\n else:\n model = self._replace_embedding_attributes_for_subclass(\n model, tf.keras.layers.Embedding, Embedding\n )\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported to a SavedModel by\n `tf.saved_model.save`.\n \"\"\"\n model = self._restore_keras_model_def(model)\n if not model.inputs:\n # build model to add inputs and outputs that\n # can be consumed by tf-serving\n model._build_model_with_inputs(inputs=dataset, targets=None)\n\n trained_params = self._get_trained_params(model)\n for var in model.trainable_variables:\n var.assign(trained_params[var.name])\n return model\n\n def _restore_keras_model_def(self, model):\n \"\"\"Restore Keras model definition by replacing\n `elasticdl.layers.Embedding` layers with\n `tf.keras.layers.Embedding` layers.\n \"\"\"\n # clear keras model session to avoid clutter from old models/layers.\n tf.keras.backend.clear_session()\n if (\n type(model) == tf.keras.models.Model\n and not model._is_graph_network\n ):\n model = self._replace_embedding_attributes_for_subclass(\n model, Embedding, tf.keras.layers.Embedding\n )\n else:\n model = self._replace_embedding_layer_to_clone_model(\n model, Embedding, tf.keras.layers.Embedding\n )\n return model\n\n def _replace_embedding_layer_to_clone_model(\n self, model, src_embedding_class, dst_embedding_class\n ):\n \"\"\"Clone a new model by cloning model and replace the\n src_embedding_class layer with a dst_embedding_class.\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) == src_embedding_class:\n logger.debug(\n \"Replace {} with {}\".format(\n src_embedding_class, dst_embedding_class\n )\n )\n embedding_layer = dst_embedding_class(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, 
clone_function=_clone_function\n )\n\n def _replace_embedding_attributes_for_subclass(\n self, model, src_embedding_class, dst_embedding_class\n ):\n \"\"\"Replace the keras embedding attribute with\n elasticdl.layers.Embedding layer.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(value) == src_embedding_class:\n embedding_layer = dst_embedding_class(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=value.embeddings_initializer,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n return model\n\n def _get_trained_params(self, model):\n \"\"\"Get all trained variable values of the model\n \"\"\"\n trained_params = self._get_non_embedding_variables(\n -1, elasticdl_pb2.MINIMUM\n )\n trained_embedding_params = self._get_trained_embedding_params(model)\n trained_params.update(trained_embedding_params)\n return trained_params\n\n def _get_trained_embedding_params(self, model):\n \"\"\"Get trained embedding table from PS\n \"\"\"\n embedding_params = {}\n embedding_layers = model_utils.find_layer(model, Embedding)\n for embedding_layer in embedding_layers:\n # TODO get all embedding vectors of the embedding layer from PS\n pass\n return embedding_params\n\n # TODO: Get model from parameter servers not the master if\n # parameter servers are ready.\n def _get_non_embedding_variables(self, version, method):\n \"\"\"Get model from master, and update model_version\n \"\"\"\n req = elasticdl_pb2.GetModelRequest()\n req.version = version\n req.method = method\n model = self._stub.GetModel(req, None)\n variables = {}\n for tensor_pb in model.param:\n tensor = Tensor.from_tensor_pb(tensor_pb)\n variables[tensor.name] = tensor.to_ndarray()\n return variables\n", "path": "elasticdl/python/common/model_handler.py"}], "after_files": [{"content": "import abc\n\nimport tensorflow as tf\n\nfrom elasticdl.proto import elasticdl_pb2\nfrom elasticdl.python.common import model_utils\nfrom elasticdl.python.common.constants import DistributionStrategy\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.common.tensor import Tensor\nfrom elasticdl.python.elasticdl.layers.embedding import Embedding\n\n\nclass ModelHandler(metaclass=abc.ABCMeta):\n \"\"\"Generate the model to train in ElasticDL for different distributed\n strategies and export trained model in ElasticDL to SavedModel.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_train(self, model):\n \"\"\"Generate a model to train in ElasticDL.\n\n Args:\n model: A native keras model instance.\n\n Returns:\n A keras model instance for ElasticDL training.\n \"\"\"\n\n @abc.abstractmethod\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported a SavedModel\n by tf.saved_model.save.\n\n Args:\n model: A keras model instance trained by ElasticDL and\n it may contains `elasticdl.layers.Embedding` layers.\n dataset: A `tf.data.Dataset` instance which has the same outputs as\n the training dataset.\n\n Returns:\n A keras model instance trained by ElasticDL.\n \"\"\"\n\n @classmethod\n def get_model_handler(cls, distribution_strategy=None, stub=None):\n \"\"\"Create a model handler to process the model for the\n distributed strategy.\n\n Args:\n distribution_strategy (string): distribution strategy name\n stub: A stub to communicate with parameter server(s) or the master,\n e.g. 
`elasticdl_pb2_grpc.MasterStub`.\n\n Return:\n ModelHandler subclass instance.\n \"\"\"\n if distribution_strategy == DistributionStrategy.PARAMETER_SERVER:\n return ParameterServerModelHandler(stub=stub)\n else:\n return DefaultModelHandler()\n\n\nclass DefaultModelHandler(ModelHandler):\n \"\"\"Return the origin model to train and export.\"\"\"\n\n def get_model_to_train(self, model):\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"\n Get model with inputs and trained parameters to export.\n \"\"\"\n if not model.inputs:\n model._build_model_with_inputs(inputs=dataset, targets=None)\n return model\n\n\nclass ParameterServerModelHandler(ModelHandler):\n \"\"\"Model handler for parameter server strategy.\n For training, The handler will replace `tf.keras.layers.Embedding`\n layers with`elasticdl.layers.Embedding` for training.\n For saving model, the handler will restore Keras model definition and\n pull trained parameters from parameter server(s) for the model.\n \"\"\"\n\n def __init__(self, stub=None):\n \"\"\"\n Arguments:\n stub: A stub to get parameters from parameter server(s) or\n the master,e.g. `elasticdl_pb2_grpc.MasterStub`\n \"\"\"\n self._stub = stub\n\n def get_model_to_train(self, model):\n \"\"\"Replace the tf.keras.layers.Embedding layer in the model with\n an elasticdl.layers.Embedding layer in ParameterServerStrategy.\n \"\"\"\n if type(model) == tf.keras.Sequential or model._is_graph_network:\n model = self._replace_embedding_layer_to_clone_model(\n model, tf.keras.layers.Embedding, Embedding\n )\n else:\n model = self._replace_embedding_attributes_for_subclass(\n model, tf.keras.layers.Embedding, Embedding\n )\n return model\n\n def get_model_to_export(self, model, dataset):\n \"\"\"Get the model which can be exported to a SavedModel by\n `tf.saved_model.save`.\n \"\"\"\n model = self._restore_keras_model_def(model)\n if not model.inputs:\n # build model to add inputs and outputs that\n # can be consumed by tf-serving\n model._build_model_with_inputs(inputs=dataset, targets=None)\n\n trained_params = self._get_trained_params(model)\n for var in model.trainable_variables:\n var.assign(trained_params[var.name])\n return model\n\n def _restore_keras_model_def(self, model):\n \"\"\"Restore Keras model definition by replacing\n `elasticdl.layers.Embedding` layers with\n `tf.keras.layers.Embedding` layers.\n \"\"\"\n # clear keras model session to avoid clutter from old models/layers.\n tf.keras.backend.clear_session()\n if (\n isinstance(model, tf.keras.models.Model)\n and not model._is_graph_network\n ):\n model = self._replace_embedding_attributes_for_subclass(\n model, Embedding, tf.keras.layers.Embedding\n )\n else:\n model = self._replace_embedding_layer_to_clone_model(\n model, Embedding, tf.keras.layers.Embedding\n )\n return model\n\n def _replace_embedding_layer_to_clone_model(\n self, model, src_embedding_class, dst_embedding_class\n ):\n \"\"\"Clone a new model by cloning model and replace the\n src_embedding_class layer with a dst_embedding_class.\n \"\"\"\n\n def _clone_function(layer):\n if type(layer) == src_embedding_class:\n logger.debug(\n \"Replace {} with {}\".format(\n src_embedding_class, dst_embedding_class\n )\n )\n embedding_layer = dst_embedding_class(\n output_dim=layer.output_dim,\n input_dim=layer.input_dim,\n embeddings_initializer=layer.embeddings_initializer,\n mask_zero=layer.mask_zero,\n input_length=layer.input_length,\n )\n return embedding_layer\n return layer\n\n return tf.keras.models.clone_model(\n model, 
clone_function=_clone_function\n )\n\n def _replace_embedding_attributes_for_subclass(\n self, model, src_embedding_class, dst_embedding_class\n ):\n \"\"\"Replace the keras embedding attribute with\n elasticdl.layers.Embedding layer.\n \"\"\"\n for name, value in model.__dict__.items():\n if type(value) == src_embedding_class:\n embedding_layer = dst_embedding_class(\n output_dim=value.output_dim,\n input_dim=value.input_dim,\n embeddings_initializer=value.embeddings_initializer,\n mask_zero=value.mask_zero,\n input_length=value.input_length,\n )\n setattr(model, name, embedding_layer)\n return model\n\n def _get_trained_params(self, model):\n \"\"\"Get all trained variable values of the model\n \"\"\"\n trained_params = self._get_non_embedding_variables(\n -1, elasticdl_pb2.MINIMUM\n )\n trained_embedding_params = self._get_trained_embedding_params(model)\n trained_params.update(trained_embedding_params)\n return trained_params\n\n def _get_trained_embedding_params(self, model):\n \"\"\"Get trained embedding table from PS\n \"\"\"\n embedding_params = {}\n embedding_layers = model_utils.find_layer(model, Embedding)\n for embedding_layer in embedding_layers:\n # TODO get all embedding vectors of the embedding layer from PS\n pass\n return embedding_params\n\n # TODO: Get model from parameter servers not the master if\n # parameter servers are ready.\n def _get_non_embedding_variables(self, version, method):\n \"\"\"Get model from master, and update model_version\n \"\"\"\n req = elasticdl_pb2.GetModelRequest()\n req.version = version\n req.method = method\n model = self._stub.GetModel(req, None)\n variables = {}\n for tensor_pb in model.param:\n tensor = Tensor.from_tensor_pb(tensor_pb)\n variables[tensor.name] = tensor.to_ndarray()\n return variables\n", "path": "elasticdl/python/common/model_handler.py"}]} | 2,504 | 127 |
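The one-line fix in this row matters because a subclassed Keras model is an instance of a subclass of `tf.keras.Model`, so the exact-type comparison in `_restore_keras_model_def` was always false for it and execution fell through to the `clone_model` branch, which cannot clone subclassed models. A minimal sketch of the distinction, assuming TensorFlow 2.x (`_is_graph_network` is a private attribute and may differ across versions):

```python
import tensorflow as tf


class MyModel(tf.keras.Model):
    """Subclassed model with an embedding, similar to the handler's use case."""

    def __init__(self):
        super().__init__()
        self.embedding = tf.keras.layers.Embedding(input_dim=10, output_dim=4)
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.dense(self.embedding(inputs))


model = MyModel()

print(type(model) == tf.keras.models.Model)      # False -> old check missed it
print(isinstance(model, tf.keras.models.Model))  # True  -> patched check works
print(model._is_graph_network)                   # False for subclassed models
```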
gh_patches_debug_20045 | rasdani/github-patches | git_diff | Kinto__kinto-1136 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide previous object in history deletion entries
Provide previous object in history deletion entries
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__init__.py`
Content:
```
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated, Everyone
8
9 from kinto.authorization import RouteFactory
10
11
12 # Module version, as defined in PEP-0396.
13 __version__ = pkg_resources.get_distribution(__package__).version
14
15 # Implemented HTTP API Version
16 HTTP_API_VERSION = '1.15'
17
18 # Main kinto logger
19 logger = logging.getLogger(__name__)
20
21
22 DEFAULT_SETTINGS = {
23 'flush_endpoint_enabled': False,
24 'retry_after_seconds': 3,
25 'cache_backend': 'kinto.core.cache.memory',
26 'permission_backend': 'kinto.core.permission.memory',
27 'storage_backend': 'kinto.core.storage.memory',
28 'project_docs': 'https://kinto.readthedocs.io/',
29 'bucket_create_principals': Authenticated,
30 'permissions_read_principals': Everyone,
31 'multiauth.authorization_policy': (
32 'kinto.authorization.AuthorizationPolicy'),
33 'experimental_collection_schema_validation': False,
34 'experimental_permissions_endpoint': False,
35 'http_api_version': HTTP_API_VERSION,
36 'bucket_id_generator': 'kinto.views.NameGenerator',
37 'collection_id_generator': 'kinto.views.NameGenerator',
38 'group_id_generator': 'kinto.views.NameGenerator',
39 'record_id_generator': 'kinto.views.RelaxedUUID'
40 }
41
42
43 def main(global_config, config=None, **settings):
44 if not config:
45 config = Configurator(settings=settings, root_factory=RouteFactory)
46
47 # Force project name, since it determines settings prefix.
48 config.add_settings({'kinto.project_name': 'kinto'})
49
50 kinto.core.initialize(config,
51 version=__version__,
52 default_settings=DEFAULT_SETTINGS)
53
54 settings = config.get_settings()
55
56 # Expose capability
57 schema_enabled = asbool(
58 settings['experimental_collection_schema_validation']
59 )
60 if schema_enabled:
61 config.add_api_capability(
62 "schema",
63 description="Validates collection records with JSON schemas.",
64 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
65 "collections.html#collection-json-schema")
66
67 # Scan Kinto views.
68 kwargs = {}
69
70 flush_enabled = asbool(settings['flush_endpoint_enabled'])
71 if flush_enabled:
72 config.add_api_capability(
73 "flush_endpoint",
74 description="The __flush__ endpoint can be used to remove all "
75 "data from all backends.",
76 url="https://kinto.readthedocs.io/en/latest/configuration/"
77 "settings.html#activating-the-flush-endpoint")
78 else:
79 kwargs['ignore'] = ['kinto.views.flush']
80
81 # Permissions endpoint enabled if permission backend is setup.
82 permissions_endpoint_enabled = (
83 asbool(settings['experimental_permissions_endpoint']) and
84 hasattr(config.registry, 'permission'))
85 if permissions_endpoint_enabled:
86 config.add_api_capability(
87 "permissions_endpoint",
88 description="The permissions endpoint can be used to list all "
89 "user objects permissions.",
90 url="https://kinto.readthedocs.io/en/latest/configuration/"
91 "settings.html#activating-the-permissions-endpoint")
92 else:
93 kwargs.setdefault('ignore', []).append('kinto.views.permissions')
94
95 config.scan("kinto.views", **kwargs)
96
97 app = config.make_wsgi_app()
98
99 # Install middleware (no-op if disabled)
100 return kinto.core.install_middlewares(app, settings)
101
```
Path: `kinto/plugins/history/listener.py`
Content:
```
1 from pyramid.settings import aslist
2
3 from kinto.core.utils import instance_uri
4 from datetime import datetime
5
6
7 def on_resource_changed(event):
8 """
9 Everytime an object is created/changed/deleted, we create an entry in the
10 ``history`` resource. The entries are served as read-only in the
11 :mod:`kinto.plugins.history.views` module.
12 """
13 payload = event.payload
14 resource_name = payload['resource_name']
15 event_uri = payload['uri']
16
17 bucket_id = None
18 bucket_uri = None
19 collection_uri = None
20
21 storage = event.request.registry.storage
22 permission = event.request.registry.permission
23 settings = event.request.registry.settings
24
25 excluded_resources = aslist(settings.get('history.exclude_resources', ''))
26
27 targets = []
28 for impacted in event.impacted_records:
29 target = impacted['new']
30 obj_id = target['id']
31
32 try:
33 bucket_id = payload['bucket_id']
34 except KeyError:
35 # e.g. DELETE /buckets
36 bucket_id = obj_id
37 bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)
38
39 if bucket_uri in excluded_resources:
40 continue
41
42 if 'collection_id' in payload:
43 collection_id = payload['collection_id']
44 collection_uri = instance_uri(event.request,
45 'collection',
46 bucket_id=bucket_id,
47 id=collection_id)
48 if collection_uri in excluded_resources:
49 continue
50
51 # On POST .../records, the URI does not contain the newly created
52 # record id.
53 parts = event_uri.split('/')
54 if resource_name in parts[-1]:
55 parts.append(obj_id)
56 else:
57 # Make sure the id is correct on grouped events.
58 parts[-1] = obj_id
59 uri = '/'.join(parts)
60
61 if uri in excluded_resources:
62 continue
63
64 targets.append((uri, target))
65
66 if not targets:
67 return # Nothing to do.
68
69 # Prepare a list of object ids to be fetched from permission backend,
70 # and fetch them all at once. Use a mapping for later convenience.
71 all_perms_objects_ids = [oid for (oid, _) in targets]
72 all_perms_objects_ids.append(bucket_uri)
73 if collection_uri is not None:
74 all_perms_objects_ids.append(collection_uri)
75 all_perms_objects_ids = list(set(all_perms_objects_ids))
76 all_permissions = permission.get_objects_permissions(all_perms_objects_ids)
77 perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))
78
79 bucket_perms = perms_by_object_id[bucket_uri]
80 collection_perms = {}
81 if collection_uri is not None:
82 collection_perms = perms_by_object_id[collection_uri]
83
84 # The principals allowed to read the bucket and collection.
85 # (Note: ``write`` means ``read``)
86 read_principals = set(bucket_perms.get('read', []))
87 read_principals.update(bucket_perms.get('write', []))
88 read_principals.update(collection_perms.get('read', []))
89 read_principals.update(collection_perms.get('write', []))
90
91 # Create a history entry for each impacted record.
92 for (uri, target) in targets:
93 obj_id = target['id']
94 # Prepare the history entry attributes.
95 perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}
96 eventattrs = dict(**payload)
97 eventattrs.pop('timestamp', None) # Already in target `last_modified`.
98 eventattrs.pop('bucket_id', None)
99 eventattrs['{}_id'.format(resource_name)] = obj_id
100 eventattrs['uri'] = uri
101 attrs = dict(date=datetime.now().isoformat(),
102 target={'data': target, 'permissions': perms},
103 **eventattrs)
104
105 # Create a record for the 'history' resource, whose parent_id is
106 # the bucket URI (c.f. views.py).
107 # Note: this will be rolledback if the transaction is rolledback.
108 entry = storage.create(parent_id=bucket_uri,
109 collection_id='history',
110 record=attrs)
111
112 # The read permission on the newly created history entry is the union
113 # of the record permissions with the one from bucket and collection.
114 entry_principals = set(read_principals)
115 entry_principals.update(perms.get('read', []))
116 entry_principals.update(perms.get('write', []))
117 entry_perms = {'read': list(entry_principals)}
118 # /buckets/{id}/history is the URI for the list of history entries.
119 entry_perm_id = '/buckets/{}/history/{}'.format(bucket_id, entry['id'])
120 permission.replace_object_permissions(entry_perm_id, entry_perms)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -13,7 +13,7 @@
__version__ = pkg_resources.get_distribution(__package__).version
# Implemented HTTP API Version
-HTTP_API_VERSION = '1.15'
+HTTP_API_VERSION = '1.16'
# Main kinto logger
logger = logging.getLogger(__name__)
diff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py
--- a/kinto/plugins/history/listener.py
+++ b/kinto/plugins/history/listener.py
@@ -11,6 +11,7 @@
:mod:`kinto.plugins.history.views` module.
"""
payload = event.payload
+ action = payload['action']
resource_name = payload['resource_name']
event_uri = payload['uri']
@@ -26,7 +27,7 @@
targets = []
for impacted in event.impacted_records:
- target = impacted['new']
+ target = impacted['old'] if action == 'delete' else impacted['new']
obj_id = target['id']
try:
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -13,7 +13,7 @@\n __version__ = pkg_resources.get_distribution(__package__).version\n \n # Implemented HTTP API Version\n-HTTP_API_VERSION = '1.15'\n+HTTP_API_VERSION = '1.16'\n \n # Main kinto logger\n logger = logging.getLogger(__name__)\ndiff --git a/kinto/plugins/history/listener.py b/kinto/plugins/history/listener.py\n--- a/kinto/plugins/history/listener.py\n+++ b/kinto/plugins/history/listener.py\n@@ -11,6 +11,7 @@\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n+ action = payload['action']\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n \n@@ -26,7 +27,7 @@\n \n targets = []\n for impacted in event.impacted_records:\n- target = impacted['new']\n+ target = impacted['old'] if action == 'delete' else impacted['new']\n obj_id = target['id']\n \n try:\n", "issue": "Provide previous object in history deletion entries\n\nProvide previous object in history deletion entries\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.15'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = 
['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n permissions_endpoint_enabled = (\n asbool(settings['experimental_permissions_endpoint']) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n \"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}, {"content": "from pyramid.settings import aslist\n\nfrom kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n settings = event.request.registry.settings\n\n excluded_resources = aslist(settings.get('history.exclude_resources', ''))\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if bucket_uri in excluded_resources:\n continue\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n if collection_uri in excluded_resources:\n continue\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n if uri in excluded_resources:\n continue\n\n targets.append((uri, target))\n\n if not targets:\n return # Nothing to do.\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. 
Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n eventattrs['{}_id'.format(resource_name)] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/{}/history/{}'.format(bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}], "after_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.16'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 
'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = ['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n permissions_endpoint_enabled = (\n asbool(settings['experimental_permissions_endpoint']) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n \"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}, {"content": "from pyramid.settings import aslist\n\nfrom kinto.core.utils import instance_uri\nfrom datetime import datetime\n\n\ndef on_resource_changed(event):\n \"\"\"\n Everytime an object is created/changed/deleted, we create an entry in the\n ``history`` resource. The entries are served as read-only in the\n :mod:`kinto.plugins.history.views` module.\n \"\"\"\n payload = event.payload\n action = payload['action']\n resource_name = payload['resource_name']\n event_uri = payload['uri']\n\n bucket_id = None\n bucket_uri = None\n collection_uri = None\n\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n settings = event.request.registry.settings\n\n excluded_resources = aslist(settings.get('history.exclude_resources', ''))\n\n targets = []\n for impacted in event.impacted_records:\n target = impacted['old'] if action == 'delete' else impacted['new']\n obj_id = target['id']\n\n try:\n bucket_id = payload['bucket_id']\n except KeyError:\n # e.g. 
DELETE /buckets\n bucket_id = obj_id\n bucket_uri = instance_uri(event.request, 'bucket', id=bucket_id)\n\n if bucket_uri in excluded_resources:\n continue\n\n if 'collection_id' in payload:\n collection_id = payload['collection_id']\n collection_uri = instance_uri(event.request,\n 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n if collection_uri in excluded_resources:\n continue\n\n # On POST .../records, the URI does not contain the newly created\n # record id.\n parts = event_uri.split('/')\n if resource_name in parts[-1]:\n parts.append(obj_id)\n else:\n # Make sure the id is correct on grouped events.\n parts[-1] = obj_id\n uri = '/'.join(parts)\n\n if uri in excluded_resources:\n continue\n\n targets.append((uri, target))\n\n if not targets:\n return # Nothing to do.\n\n # Prepare a list of object ids to be fetched from permission backend,\n # and fetch them all at once. Use a mapping for later convenience.\n all_perms_objects_ids = [oid for (oid, _) in targets]\n all_perms_objects_ids.append(bucket_uri)\n if collection_uri is not None:\n all_perms_objects_ids.append(collection_uri)\n all_perms_objects_ids = list(set(all_perms_objects_ids))\n all_permissions = permission.get_objects_permissions(all_perms_objects_ids)\n perms_by_object_id = dict(zip(all_perms_objects_ids, all_permissions))\n\n bucket_perms = perms_by_object_id[bucket_uri]\n collection_perms = {}\n if collection_uri is not None:\n collection_perms = perms_by_object_id[collection_uri]\n\n # The principals allowed to read the bucket and collection.\n # (Note: ``write`` means ``read``)\n read_principals = set(bucket_perms.get('read', []))\n read_principals.update(bucket_perms.get('write', []))\n read_principals.update(collection_perms.get('read', []))\n read_principals.update(collection_perms.get('write', []))\n\n # Create a history entry for each impacted record.\n for (uri, target) in targets:\n obj_id = target['id']\n # Prepare the history entry attributes.\n perms = {k: list(v) for k, v in perms_by_object_id[uri].items()}\n eventattrs = dict(**payload)\n eventattrs.pop('timestamp', None) # Already in target `last_modified`.\n eventattrs.pop('bucket_id', None)\n eventattrs['{}_id'.format(resource_name)] = obj_id\n eventattrs['uri'] = uri\n attrs = dict(date=datetime.now().isoformat(),\n target={'data': target, 'permissions': perms},\n **eventattrs)\n\n # Create a record for the 'history' resource, whose parent_id is\n # the bucket URI (c.f. views.py).\n # Note: this will be rolledback if the transaction is rolledback.\n entry = storage.create(parent_id=bucket_uri,\n collection_id='history',\n record=attrs)\n\n # The read permission on the newly created history entry is the union\n # of the record permissions with the one from bucket and collection.\n entry_principals = set(read_principals)\n entry_principals.update(perms.get('read', []))\n entry_principals.update(perms.get('write', []))\n entry_perms = {'read': list(entry_principals)}\n # /buckets/{id}/history is the URI for the list of history entries.\n entry_perm_id = '/buckets/{}/history/{}'.format(bucket_id, entry['id'])\n permission.replace_object_permissions(entry_perm_id, entry_perms)\n", "path": "kinto/plugins/history/listener.py"}]} | 2,485 | 266 |
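A note on the kinto fix above: the whole change hinges on selecting `impacted['old']` when the action is a delete, so the history entry preserves the object as it existed before deletion. A minimal standalone sketch of that selection rule — the `impacted` dict here is a hypothetical stand-in for kinto's real event payload, not its actual API:

```python
def pick_target(action: str, impacted: dict) -> dict:
    """Record the previous version on delete, the new version otherwise."""
    return impacted["old"] if action == "delete" else impacted["new"]


# Hypothetical delete event: 'new' is only a tombstone, 'old' holds the real data.
impacted = {
    "old": {"id": "abc", "title": "draft"},
    "new": {"id": "abc", "deleted": True},
}
assert pick_target("delete", impacted)["title"] == "draft"
```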
gh_patches_debug_40242 | rasdani/github-patches | git_diff | SCons__scons-3862 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add timestamp override to Zip builder
Zip should have an option to override the timestamp on files in the archive instead of taking it from the file system. This can be useful for repeatable builds or for anonymizing the archive.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `SCons/Tool/zip.py`
Content:
```
1 """SCons.Tool.zip
2
3 Tool-specific initialization for zip.
4
5 There normally shouldn't be any need to import this module directly.
6 It will usually be imported through the generic SCons.Tool.Tool()
7 selection method.
8
9 """
10
11 #
12 # __COPYRIGHT__
13 #
14 # Permission is hereby granted, free of charge, to any person obtaining
15 # a copy of this software and associated documentation files (the
16 # "Software"), to deal in the Software without restriction, including
17 # without limitation the rights to use, copy, modify, merge, publish,
18 # distribute, sublicense, and/or sell copies of the Software, and to
19 # permit persons to whom the Software is furnished to do so, subject to
20 # the following conditions:
21 #
22 # The above copyright notice and this permission notice shall be included
23 # in all copies or substantial portions of the Software.
24 #
25 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
26 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
27 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
29 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
30 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
31 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 #
33
34 __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
35
36 import os.path
37
38 import SCons.Builder
39 import SCons.Defaults
40 import SCons.Node.FS
41 import SCons.Util
42
43 import zipfile
44
45 zip_compression = zipfile.ZIP_DEFLATED
46
47
48 def zip(target, source, env):
49 compression = env.get('ZIPCOMPRESSION', 0)
50 zf = zipfile.ZipFile(str(target[0]), 'w', compression)
51 for s in source:
52 if s.isdir():
53 for dirpath, dirnames, filenames in os.walk(str(s)):
54 for fname in filenames:
55 path = os.path.join(dirpath, fname)
56 if os.path.isfile(path):
57 zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
58 else:
59 zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
60 zf.close()
61
62 # Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating
63 # env, then it will ignore ZIPCOMSTR set afterwards.
64 zipAction = SCons.Action.Action(zip, "$ZIPCOMSTR", varlist=['ZIPCOMPRESSION'])
65
66 ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
67 source_factory=SCons.Node.FS.Entry,
68 source_scanner=SCons.Defaults.DirScanner,
69 suffix='$ZIPSUFFIX',
70 multi=1)
71
72
73 def generate(env):
74 """Add Builders and construction variables for zip to an Environment."""
75 try:
76 bld = env['BUILDERS']['Zip']
77 except KeyError:
78 bld = ZipBuilder
79 env['BUILDERS']['Zip'] = bld
80
81 env['ZIP'] = 'zip'
82 env['ZIPFLAGS'] = SCons.Util.CLVar('')
83 env['ZIPCOM'] = zipAction
84 env['ZIPCOMPRESSION'] = zip_compression
85 env['ZIPSUFFIX'] = '.zip'
86 env['ZIPROOT'] = SCons.Util.CLVar('')
87
88
89 def exists(env):
90 return True
91
92 # Local Variables:
93 # tab-width:4
94 # indent-tabs-mode:nil
95 # End:
96 # vim: set expandtab tabstop=4 shiftwidth=4:
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/SCons/Tool/zip.py b/SCons/Tool/zip.py
--- a/SCons/Tool/zip.py
+++ b/SCons/Tool/zip.py
@@ -8,8 +8,9 @@
"""
+# MIT License
#
-# __COPYRIGHT__
+# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@@ -29,39 +30,62 @@
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
-import os.path
+import os
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
+import time
import zipfile
+
zip_compression = zipfile.ZIP_DEFLATED
-def zip(target, source, env):
- compression = env.get('ZIPCOMPRESSION', 0)
- zf = zipfile.ZipFile(str(target[0]), 'w', compression)
+def _create_zipinfo_for_file(fname, arcname, date_time, compression):
+ st = os.stat(fname)
+ if not date_time:
+ mtime = time.localtime(st.st_mtime)
+ date_time = mtime[0:6]
+ zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time)
+ zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes
+ zinfo.compress_type = compression
+ zinfo.file_size = st.st_size
+ return zinfo
+
+
+def zip_builder(target, source, env):
+ compression = env.get('ZIPCOMPRESSION', zipfile.ZIP_STORED)
+ zip_root = str(env.get('ZIPROOT', ''))
+ date_time = env.get('ZIP_OVERRIDE_TIMESTAMP')
+
+ files = []
for s in source:
if s.isdir():
for dirpath, dirnames, filenames in os.walk(str(s)):
for fname in filenames:
path = os.path.join(dirpath, fname)
if os.path.isfile(path):
- zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))
+ files.append(path)
else:
- zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))
- zf.close()
+ files.append(str(s))
+
+ with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:
+ for fname in files:
+ arcname = os.path.relpath(fname, zip_root)
+ # TODO: Switch to ZipInfo.from_file when 3.6 becomes the base python version
+ zinfo = _create_zipinfo_for_file(fname, arcname, date_time, compression)
+ with open(fname, "rb") as f:
+ zf.writestr(zinfo, f.read())
+
# Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating
# env, then it will ignore ZIPCOMSTR set afterwards.
-zipAction = SCons.Action.Action(zip, "$ZIPCOMSTR", varlist=['ZIPCOMPRESSION'])
+zipAction = SCons.Action.Action(zip_builder, "$ZIPCOMSTR",
+ varlist=['ZIPCOMPRESSION', 'ZIPROOT', 'ZIP_OVERRIDE_TIMESTAMP'])
ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory=SCons.Node.FS.Entry,
| {"golden_diff": "diff --git a/SCons/Tool/zip.py b/SCons/Tool/zip.py\n--- a/SCons/Tool/zip.py\n+++ b/SCons/Tool/zip.py\n@@ -8,8 +8,9 @@\n \n \"\"\"\n \n+# MIT License\n #\n-# __COPYRIGHT__\n+# Copyright The SCons Foundation\n #\n # Permission is hereby granted, free of charge, to any person obtaining\n # a copy of this software and associated documentation files (the\n@@ -29,39 +30,62 @@\n # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-#\n-\n-__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n \n-import os.path\n+import os\n \n import SCons.Builder\n import SCons.Defaults\n import SCons.Node.FS\n import SCons.Util\n \n+import time\n import zipfile\n \n+\n zip_compression = zipfile.ZIP_DEFLATED\n \n \n-def zip(target, source, env):\n- compression = env.get('ZIPCOMPRESSION', 0)\n- zf = zipfile.ZipFile(str(target[0]), 'w', compression)\n+def _create_zipinfo_for_file(fname, arcname, date_time, compression):\n+ st = os.stat(fname)\n+ if not date_time:\n+ mtime = time.localtime(st.st_mtime)\n+ date_time = mtime[0:6]\n+ zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time)\n+ zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n+ zinfo.compress_type = compression\n+ zinfo.file_size = st.st_size\n+ return zinfo\n+\n+\n+def zip_builder(target, source, env):\n+ compression = env.get('ZIPCOMPRESSION', zipfile.ZIP_STORED)\n+ zip_root = str(env.get('ZIPROOT', ''))\n+ date_time = env.get('ZIP_OVERRIDE_TIMESTAMP')\n+\n+ files = []\n for s in source:\n if s.isdir():\n for dirpath, dirnames, filenames in os.walk(str(s)):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n if os.path.isfile(path):\n- zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))\n+ files.append(path)\n else:\n- zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))\n- zf.close()\n+ files.append(str(s))\n+\n+ with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:\n+ for fname in files:\n+ arcname = os.path.relpath(fname, zip_root)\n+ # TODO: Switch to ZipInfo.from_file when 3.6 becomes the base python version\n+ zinfo = _create_zipinfo_for_file(fname, arcname, date_time, compression)\n+ with open(fname, \"rb\") as f:\n+ zf.writestr(zinfo, f.read())\n+\n \n # Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating\n # env, then it will ignore ZIPCOMSTR set afterwards.\n-zipAction = SCons.Action.Action(zip, \"$ZIPCOMSTR\", varlist=['ZIPCOMPRESSION'])\n+zipAction = SCons.Action.Action(zip_builder, \"$ZIPCOMSTR\",\n+ varlist=['ZIPCOMPRESSION', 'ZIPROOT', 'ZIP_OVERRIDE_TIMESTAMP'])\n \n ZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),\n source_factory=SCons.Node.FS.Entry,\n", "issue": "Add timestamp override to Zip builder\nZip should have an option to override the timestamp on files in the archive instead of taking it from the file system. 
This can be useful for repeatable builds or for anonymizing the archive.\n", "before_files": [{"content": "\"\"\"SCons.Tool.zip\n\nTool-specific initialization for zip.\n\nThere normally shouldn't be any need to import this module directly.\nIt will usually be imported through the generic SCons.Tool.Tool()\nselection method.\n\n\"\"\"\n\n#\n# __COPYRIGHT__\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__revision__ = \"__FILE__ __REVISION__ __DATE__ __DEVELOPER__\"\n\nimport os.path\n\nimport SCons.Builder\nimport SCons.Defaults\nimport SCons.Node.FS\nimport SCons.Util\n\nimport zipfile\n\nzip_compression = zipfile.ZIP_DEFLATED\n\n\ndef zip(target, source, env):\n compression = env.get('ZIPCOMPRESSION', 0)\n zf = zipfile.ZipFile(str(target[0]), 'w', compression)\n for s in source:\n if s.isdir():\n for dirpath, dirnames, filenames in os.walk(str(s)):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n if os.path.isfile(path):\n zf.write(path, os.path.relpath(path, str(env.get('ZIPROOT', ''))))\n else:\n zf.write(str(s), os.path.relpath(str(s), str(env.get('ZIPROOT', ''))))\n zf.close()\n\n# Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating\n# env, then it will ignore ZIPCOMSTR set afterwards.\nzipAction = SCons.Action.Action(zip, \"$ZIPCOMSTR\", varlist=['ZIPCOMPRESSION'])\n\nZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),\n source_factory=SCons.Node.FS.Entry,\n source_scanner=SCons.Defaults.DirScanner,\n suffix='$ZIPSUFFIX',\n multi=1)\n\n\ndef generate(env):\n \"\"\"Add Builders and construction variables for zip to an Environment.\"\"\"\n try:\n bld = env['BUILDERS']['Zip']\n except KeyError:\n bld = ZipBuilder\n env['BUILDERS']['Zip'] = bld\n\n env['ZIP'] = 'zip'\n env['ZIPFLAGS'] = SCons.Util.CLVar('')\n env['ZIPCOM'] = zipAction\n env['ZIPCOMPRESSION'] = zip_compression\n env['ZIPSUFFIX'] = '.zip'\n env['ZIPROOT'] = SCons.Util.CLVar('')\n\n\ndef exists(env):\n return True\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "SCons/Tool/zip.py"}], "after_files": [{"content": "\"\"\"SCons.Tool.zip\n\nTool-specific initialization for zip.\n\nThere normally shouldn't be any need to import this module directly.\nIt will usually be imported through the generic SCons.Tool.Tool()\nselection method.\n\n\"\"\"\n\n# MIT License\n#\n# Copyright The SCons Foundation\n#\n# Permission is hereby granted, free of charge, to any 
person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os\n\nimport SCons.Builder\nimport SCons.Defaults\nimport SCons.Node.FS\nimport SCons.Util\n\nimport time\nimport zipfile\n\n\nzip_compression = zipfile.ZIP_DEFLATED\n\n\ndef _create_zipinfo_for_file(fname, arcname, date_time, compression):\n st = os.stat(fname)\n if not date_time:\n mtime = time.localtime(st.st_mtime)\n date_time = mtime[0:6]\n zinfo = zipfile.ZipInfo(filename=arcname, date_time=date_time)\n zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes\n zinfo.compress_type = compression\n zinfo.file_size = st.st_size\n return zinfo\n\n\ndef zip_builder(target, source, env):\n compression = env.get('ZIPCOMPRESSION', zipfile.ZIP_STORED)\n zip_root = str(env.get('ZIPROOT', ''))\n date_time = env.get('ZIP_OVERRIDE_TIMESTAMP')\n\n files = []\n for s in source:\n if s.isdir():\n for dirpath, dirnames, filenames in os.walk(str(s)):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n if os.path.isfile(path):\n files.append(path)\n else:\n files.append(str(s))\n\n with zipfile.ZipFile(str(target[0]), 'w', compression) as zf:\n for fname in files:\n arcname = os.path.relpath(fname, zip_root)\n # TODO: Switch to ZipInfo.from_file when 3.6 becomes the base python version\n zinfo = _create_zipinfo_for_file(fname, arcname, date_time, compression)\n with open(fname, \"rb\") as f:\n zf.writestr(zinfo, f.read())\n\n\n# Fix PR #3569 - If you don't specify ZIPCOM and ZIPCOMSTR when creating\n# env, then it will ignore ZIPCOMSTR set afterwards.\nzipAction = SCons.Action.Action(zip_builder, \"$ZIPCOMSTR\",\n varlist=['ZIPCOMPRESSION', 'ZIPROOT', 'ZIP_OVERRIDE_TIMESTAMP'])\n\nZipBuilder = SCons.Builder.Builder(action=SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),\n source_factory=SCons.Node.FS.Entry,\n source_scanner=SCons.Defaults.DirScanner,\n suffix='$ZIPSUFFIX',\n multi=1)\n\n\ndef generate(env):\n \"\"\"Add Builders and construction variables for zip to an Environment.\"\"\"\n try:\n bld = env['BUILDERS']['Zip']\n except KeyError:\n bld = ZipBuilder\n env['BUILDERS']['Zip'] = bld\n\n env['ZIP'] = 'zip'\n env['ZIPFLAGS'] = SCons.Util.CLVar('')\n env['ZIPCOM'] = zipAction\n env['ZIPCOMPRESSION'] = zip_compression\n env['ZIPSUFFIX'] = '.zip'\n env['ZIPROOT'] = SCons.Util.CLVar('')\n\n\ndef exists(env):\n return True\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "SCons/Tool/zip.py"}]} | 1,254 | 810 |
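The heart of the SCons patch above is building a `zipfile.ZipInfo` with an explicit `date_time` six-tuple instead of letting `zipfile` read the file's mtime; that is what makes the archive reproducible. A minimal sketch of the same pattern outside SCons — the fixed timestamp value and the helper name are illustrative assumptions, not part of the patch:

```python
import os
import zipfile

FIXED_DATE_TIME = (1980, 1, 1, 0, 0, 0)  # earliest timestamp the zip format allows


def add_with_fixed_timestamp(zf: zipfile.ZipFile, fname: str, arcname: str) -> None:
    """Write `fname` into the open archive `zf`, overriding the mtime-based timestamp."""
    st = os.stat(fname)
    zinfo = zipfile.ZipInfo(filename=arcname, date_time=FIXED_DATE_TIME)
    zinfo.external_attr = (st.st_mode & 0xFFFF) << 16  # keep Unix permission bits
    zinfo.compress_type = zipfile.ZIP_DEFLATED
    with open(fname, "rb") as f:
        zf.writestr(zinfo, f.read())
```

Running the same build twice then yields bit-identical archives, which is exactly the repeatable-build case the issue asks for.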
gh_patches_debug_23861 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-1669 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IL: Capture "Subject Matter" for `event`s
IL events have a Subject Matter field on the legislature's website, which we should capture, probably as the `description` value for that `event` object.
For example, look at one of the hearings linked from [here](http://www.ilga.gov/senate/committees/hearing.asp?CommitteeID=1927), like this:
<img width="864" alt="screen shot 2017-04-23 at 00 19 37" src="https://cloud.githubusercontent.com/assets/4959135/25310740/bb6f24b8-27ba-11e7-96e3-9b102819581c.png">
cc @jonrogoff, @justgosh
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/il/events.py`
Content:
```
1 import datetime as dt
2 import re
3
4 from openstates.utils import LXMLMixin
5 from billy.scrape.events import Event, EventScraper
6
7 import lxml.html
8 import pytz
9
10 urls = {
11 "upper": "http://www.ilga.gov/senate/schedules/weeklyhearings.asp",
12 "lower": "http://www.ilga.gov/house/schedules/weeklyhearings.asp"
13 }
14
15
16 class ILEventScraper(EventScraper, LXMLMixin):
17 jurisdiction = 'il'
18 _tz = pytz.timezone('US/Eastern')
19
20 def scrape_page(self, url, session, chamber):
21 page = self.lxmlize(url)
22
23 ctty_name = page.xpath("//span[@class='heading']")[0].text_content()
24
25 tables = page.xpath("//table[@cellpadding='3']")
26 info = tables[0]
27 rows = info.xpath(".//tr")
28 metainf = {}
29 for row in rows:
30 tds = row.xpath(".//td")
31 key = tds[0].text_content().strip()
32 value = tds[1].text_content().strip()
33 metainf[key] = value
34
35 where = metainf['Location:']
36 description = ctty_name
37
38 datetime = metainf['Scheduled Date:']
39 datetime = re.sub("\s+", " ", datetime)
40 repl = {
41 "AM": " AM",
42 "PM": " PM" # Space shim.
43 }
44 for r in repl:
45 datetime = datetime.replace(r, repl[r])
46 datetime = dt.datetime.strptime(datetime, "%b %d, %Y %I:%M %p")
47
48 event = Event(session, datetime, 'committee:meeting',
49 description, location=where)
50 event.add_source(url)
51
52 if ctty_name.startswith('Hearing Notice For'):
53 ctty_name.replace('Hearing Notice For', '')
54 event.add_participant('host', ctty_name, 'committee', chamber=chamber)
55
56 bills = tables[1]
57 for bill in bills.xpath(".//tr")[1:]:
58 tds = bill.xpath(".//td")
59 if len(tds) < 4:
60 continue
61 # First, let's get the bill ID:
62 bill_id = tds[0].text_content()
63 event.add_related_bill(bill_id,
64 description=description,
65 type='consideration')
66
67 self.save_event(event)
68
69 def scrape(self, chamber, session):
70 try:
71 url = urls[chamber]
72 except KeyError:
73 return # Not for us.
74 page = self.lxmlize(url)
75 tables = page.xpath("//table[@width='550']")
76 for table in tables:
77 meetings = table.xpath(".//a")
78 for meeting in meetings:
79 self.scrape_page(meeting.attrib['href'],
80 session, chamber)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openstates/il/events.py b/openstates/il/events.py
--- a/openstates/il/events.py
+++ b/openstates/il/events.py
@@ -4,7 +4,6 @@
from openstates.utils import LXMLMixin
from billy.scrape.events import Event, EventScraper
-import lxml.html
import pytz
urls = {
@@ -20,8 +19,8 @@
def scrape_page(self, url, session, chamber):
page = self.lxmlize(url)
- ctty_name = page.xpath("//span[@class='heading']")[0].text_content()
-
+ ctty_name = page.xpath("//span[@class='heading']")[0].text_content().replace(
+ "Hearing Notice For ", "")
tables = page.xpath("//table[@cellpadding='3']")
info = tables[0]
rows = info.xpath(".//tr")
@@ -33,7 +32,8 @@
metainf[key] = value
where = metainf['Location:']
- description = ctty_name
+ subject_matter = metainf['Subject Matter:']
+ description = "{}, {}".format(ctty_name, subject_matter)
datetime = metainf['Scheduled Date:']
datetime = re.sub("\s+", " ", datetime)
| {"golden_diff": "diff --git a/openstates/il/events.py b/openstates/il/events.py\n--- a/openstates/il/events.py\n+++ b/openstates/il/events.py\n@@ -4,7 +4,6 @@\n from openstates.utils import LXMLMixin\n from billy.scrape.events import Event, EventScraper\n \n-import lxml.html\n import pytz\n \n urls = {\n@@ -20,8 +19,8 @@\n def scrape_page(self, url, session, chamber):\n page = self.lxmlize(url)\n \n- ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content()\n-\n+ ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content().replace(\n+ \"Hearing Notice For \", \"\")\n tables = page.xpath(\"//table[@cellpadding='3']\")\n info = tables[0]\n rows = info.xpath(\".//tr\")\n@@ -33,7 +32,8 @@\n metainf[key] = value\n \n where = metainf['Location:']\n- description = ctty_name\n+ subject_matter = metainf['Subject Matter:']\n+ description = \"{}, {}\".format(ctty_name, subject_matter)\n \n datetime = metainf['Scheduled Date:']\n datetime = re.sub(\"\\s+\", \" \", datetime)\n", "issue": "IL: Capture \"Subject Matter\" for `event`s\nIL events have a Subject Matter field on the legislature's website, which we should capture, probably as the `description` value for that `event` object.\r\n\r\nFor example, look at one of the hearings linked from [here](http://www.ilga.gov/senate/committees/hearing.asp?CommitteeID=1927), like this:\r\n\r\n<img width=\"864\" alt=\"screen shot 2017-04-23 at 00 19 37\" src=\"https://cloud.githubusercontent.com/assets/4959135/25310740/bb6f24b8-27ba-11e7-96e3-9b102819581c.png\">\r\n\r\ncc @jonrogoff, @justgosh\n", "before_files": [{"content": "import datetime as dt\nimport re\n\nfrom openstates.utils import LXMLMixin\nfrom billy.scrape.events import Event, EventScraper\n\nimport lxml.html\nimport pytz\n\nurls = {\n \"upper\": \"http://www.ilga.gov/senate/schedules/weeklyhearings.asp\",\n \"lower\": \"http://www.ilga.gov/house/schedules/weeklyhearings.asp\"\n}\n\n\nclass ILEventScraper(EventScraper, LXMLMixin):\n jurisdiction = 'il'\n _tz = pytz.timezone('US/Eastern')\n\n def scrape_page(self, url, session, chamber):\n page = self.lxmlize(url)\n\n ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content()\n\n tables = page.xpath(\"//table[@cellpadding='3']\")\n info = tables[0]\n rows = info.xpath(\".//tr\")\n metainf = {}\n for row in rows:\n tds = row.xpath(\".//td\")\n key = tds[0].text_content().strip()\n value = tds[1].text_content().strip()\n metainf[key] = value\n\n where = metainf['Location:']\n description = ctty_name\n\n datetime = metainf['Scheduled Date:']\n datetime = re.sub(\"\\s+\", \" \", datetime)\n repl = {\n \"AM\": \" AM\",\n \"PM\": \" PM\" # Space shim.\n }\n for r in repl:\n datetime = datetime.replace(r, repl[r])\n datetime = dt.datetime.strptime(datetime, \"%b %d, %Y %I:%M %p\")\n\n event = Event(session, datetime, 'committee:meeting',\n description, location=where)\n event.add_source(url)\n\n if ctty_name.startswith('Hearing Notice For'):\n ctty_name.replace('Hearing Notice For', '')\n event.add_participant('host', ctty_name, 'committee', chamber=chamber)\n\n bills = tables[1]\n for bill in bills.xpath(\".//tr\")[1:]:\n tds = bill.xpath(\".//td\")\n if len(tds) < 4:\n continue\n # First, let's get the bill ID:\n bill_id = tds[0].text_content()\n event.add_related_bill(bill_id,\n description=description,\n type='consideration')\n\n self.save_event(event)\n\n def scrape(self, chamber, session):\n try:\n url = urls[chamber]\n except KeyError:\n return # Not for us.\n page = self.lxmlize(url)\n tables = 
page.xpath(\"//table[@width='550']\")\n for table in tables:\n meetings = table.xpath(\".//a\")\n for meeting in meetings:\n self.scrape_page(meeting.attrib['href'],\n session, chamber)\n", "path": "openstates/il/events.py"}], "after_files": [{"content": "import datetime as dt\nimport re\n\nfrom openstates.utils import LXMLMixin\nfrom billy.scrape.events import Event, EventScraper\n\nimport pytz\n\nurls = {\n \"upper\": \"http://www.ilga.gov/senate/schedules/weeklyhearings.asp\",\n \"lower\": \"http://www.ilga.gov/house/schedules/weeklyhearings.asp\"\n}\n\n\nclass ILEventScraper(EventScraper, LXMLMixin):\n jurisdiction = 'il'\n _tz = pytz.timezone('US/Eastern')\n\n def scrape_page(self, url, session, chamber):\n page = self.lxmlize(url)\n\n ctty_name = page.xpath(\"//span[@class='heading']\")[0].text_content().replace(\n \"Hearing Notice For \", \"\")\n tables = page.xpath(\"//table[@cellpadding='3']\")\n info = tables[0]\n rows = info.xpath(\".//tr\")\n metainf = {}\n for row in rows:\n tds = row.xpath(\".//td\")\n key = tds[0].text_content().strip()\n value = tds[1].text_content().strip()\n metainf[key] = value\n\n where = metainf['Location:']\n subject_matter = metainf['Subject Matter:']\n description = \"{}, {}\".format(ctty_name, subject_matter)\n\n datetime = metainf['Scheduled Date:']\n datetime = re.sub(\"\\s+\", \" \", datetime)\n repl = {\n \"AM\": \" AM\",\n \"PM\": \" PM\" # Space shim.\n }\n for r in repl:\n datetime = datetime.replace(r, repl[r])\n datetime = dt.datetime.strptime(datetime, \"%b %d, %Y %I:%M %p\")\n\n event = Event(session, datetime, 'committee:meeting',\n description, location=where)\n event.add_source(url)\n\n if ctty_name.startswith('Hearing Notice For'):\n ctty_name.replace('Hearing Notice For', '')\n event.add_participant('host', ctty_name, 'committee', chamber=chamber)\n\n bills = tables[1]\n for bill in bills.xpath(\".//tr\")[1:]:\n tds = bill.xpath(\".//td\")\n if len(tds) < 4:\n continue\n # First, let's get the bill ID:\n bill_id = tds[0].text_content()\n event.add_related_bill(bill_id,\n description=description,\n type='consideration')\n\n self.save_event(event)\n\n def scrape(self, chamber, session):\n try:\n url = urls[chamber]\n except KeyError:\n return # Not for us.\n page = self.lxmlize(url)\n tables = page.xpath(\"//table[@width='550']\")\n for table in tables:\n meetings = table.xpath(\".//a\")\n for meeting in meetings:\n self.scrape_page(meeting.attrib['href'],\n session, chamber)\n", "path": "openstates/il/events.py"}]} | 1,217 | 285 |
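For the IL events fix above, the behavioural change is easiest to see with concrete values: the scraped key/value rows land in `metainf`, and the event description becomes the committee name plus the subject matter. The values below are made up for illustration; only the string handling mirrors the patch:

```python
ctty_name = "Hearing Notice For Appropriations I"
metainf = {
    "Location:": "Room 212, Capitol Building",
    "Subject Matter:": "FY2018 budget implementation",
}

committee = ctty_name.replace("Hearing Notice For ", "")
description = "{}, {}".format(committee, metainf["Subject Matter:"])
assert description == "Appropriations I, FY2018 budget implementation"
```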
gh_patches_debug_13472 | rasdani/github-patches | git_diff | freqtrade__freqtrade-3798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Telegram Balance not updating correctly after sell
## Describe your environment
* Operating system: Ubuntu 18.04
* Python Version: 2.7.17
* CCXT version: 1.34.7
* Freqtrade Version: develop-4c349342
## Describe the problem:
Balance is not updated after a sell on Bittrex live. All positions that were ever bought but then sold are shown in Telegram via /balance, as if they were actually held in the wallet, and they are summed up in the estimated value too. The stake currency is updated correctly and the bot trades with correct values.
### Steps to reproduce:
1. Live trade on Bittrex and have a successful buy and sell on the same position by the bot.
2. Call /balance in Telegram.
3. In the response all successfully sold positions show up as not pending, held and available.
### Observed Results:
* What happened?
    Calling /balance in Telegram shows all assets ever bought (tested for 3 days), although they were successfully sold.
* What did you expect to happen?
    /balance shows the actually held positions/wallet. For instance, if no positions are held, only the stake currency would be shown.
### Relevant code exceptions or logs
2020-09-22 11:42:52 freqtrade.worker: INFO - Bot heartbeat. PID=25032, version='develop-4c349342', state='RUNNING'
2020-09-22 11:43:13 freqtrade.rpc.telegram: INFO - Executing handler: _balance for chat_id: XXX
2020-09-22 11:43:15 freqtrade.wallets: INFO - Wallets synced.
2020-09-22 11:43:15 freqtrade.rpc.rpc: WARNING - Could not get rate for pair BTXCRD.
2020-09-22 11:43:52 freqtrade.worker: INFO - Bot heartbeat. PID=25032, version='develop-4c349342', state='RUNNING'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/wallets.py`
Content:
```
1 # pragma pylint: disable=W0603
2 """ Wallet """
3
4 import logging
5 from typing import Any, Dict, NamedTuple
6
7 import arrow
8
9 from freqtrade.exchange import Exchange
10 from freqtrade.persistence import Trade
11
12 logger = logging.getLogger(__name__)
13
14
15 # wallet data structure
16 class Wallet(NamedTuple):
17 currency: str
18 free: float = 0
19 used: float = 0
20 total: float = 0
21
22
23 class Wallets:
24
25 def __init__(self, config: dict, exchange: Exchange) -> None:
26 self._config = config
27 self._exchange = exchange
28 self._wallets: Dict[str, Wallet] = {}
29 self.start_cap = config['dry_run_wallet']
30 self._last_wallet_refresh = 0
31 self.update()
32
33 def get_free(self, currency: str) -> float:
34 balance = self._wallets.get(currency)
35 if balance and balance.free:
36 return balance.free
37 else:
38 return 0
39
40 def get_used(self, currency: str) -> float:
41 balance = self._wallets.get(currency)
42 if balance and balance.used:
43 return balance.used
44 else:
45 return 0
46
47 def get_total(self, currency: str) -> float:
48 balance = self._wallets.get(currency)
49 if balance and balance.total:
50 return balance.total
51 else:
52 return 0
53
54 def _update_dry(self) -> None:
55 """
56 Update from database in dry-run mode
57 - Apply apply profits of closed trades on top of stake amount
58 - Subtract currently tied up stake_amount in open trades
59 - update balances for currencies currently in trades
60 """
61 # Recreate _wallets to reset closed trade balances
62 _wallets = {}
63 closed_trades = Trade.get_trades(Trade.is_open.is_(False)).all()
64 open_trades = Trade.get_trades(Trade.is_open.is_(True)).all()
65 tot_profit = sum([trade.calc_profit() for trade in closed_trades])
66 tot_in_trades = sum([trade.stake_amount for trade in open_trades])
67
68 current_stake = self.start_cap + tot_profit - tot_in_trades
69 _wallets[self._config['stake_currency']] = Wallet(
70 self._config['stake_currency'],
71 current_stake,
72 0,
73 current_stake
74 )
75
76 for trade in open_trades:
77 curr = self._exchange.get_pair_base_currency(trade.pair)
78 _wallets[curr] = Wallet(
79 curr,
80 trade.amount,
81 0,
82 trade.amount
83 )
84 self._wallets = _wallets
85
86 def _update_live(self) -> None:
87 balances = self._exchange.get_balances()
88
89 for currency in balances:
90 self._wallets[currency] = Wallet(
91 currency,
92 balances[currency].get('free', None),
93 balances[currency].get('used', None),
94 balances[currency].get('total', None)
95 )
96
97 def update(self, require_update: bool = True) -> None:
98 """
99 Updates wallets from the configured version.
100 By default, updates from the exchange.
101 Update-skipping should only be used for user-invoked /balance calls, since
102 for trading operations, the latest balance is needed.
103 :param require_update: Allow skipping an update if balances were recently refreshed
104 """
105 if (require_update or (self._last_wallet_refresh + 3600 < arrow.utcnow().timestamp)):
106 if self._config['dry_run']:
107 self._update_dry()
108 else:
109 self._update_live()
110 logger.info('Wallets synced.')
111 self._last_wallet_refresh = arrow.utcnow().timestamp
112
113 def get_all_balances(self) -> Dict[str, Any]:
114 return self._wallets
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/wallets.py b/freqtrade/wallets.py
--- a/freqtrade/wallets.py
+++ b/freqtrade/wallets.py
@@ -2,6 +2,7 @@
""" Wallet """
import logging
+from copy import deepcopy
from typing import Any, Dict, NamedTuple
import arrow
@@ -93,6 +94,10 @@
balances[currency].get('used', None),
balances[currency].get('total', None)
)
+ # Remove currencies no longer in get_balances output
+ for currency in deepcopy(self._wallets):
+ if currency not in balances:
+ del self._wallets[currency]
def update(self, require_update: bool = True) -> None:
"""
| {"golden_diff": "diff --git a/freqtrade/wallets.py b/freqtrade/wallets.py\n--- a/freqtrade/wallets.py\n+++ b/freqtrade/wallets.py\n@@ -2,6 +2,7 @@\n \"\"\" Wallet \"\"\"\n \n import logging\n+from copy import deepcopy\n from typing import Any, Dict, NamedTuple\n \n import arrow\n@@ -93,6 +94,10 @@\n balances[currency].get('used', None),\n balances[currency].get('total', None)\n )\n+ # Remove currencies no longer in get_balances output\n+ for currency in deepcopy(self._wallets):\n+ if currency not in balances:\n+ del self._wallets[currency]\n \n def update(self, require_update: bool = True) -> None:\n \"\"\"\n", "issue": "Telegram Balance not updating correctly after sell\n## Describe your environment\r\n\r\n * Operating system: Ubuntu 18.04\r\n * Python Version: 2.7.17\r\n * CCXT version: 1.34.7\r\n * Freqtrade Version: develop-4c349342\r\n \r\n## Describe the problem:\r\n\r\nBalance is not updated after sell on Bittrex live. All positions ever bought, but where sold, are shown in Telegram via /balance, as if they where actually held and in the wallet and are summed up in the estimated value too. The stake currency is updated correctly and the bot trades with correct values.\r\n\r\n### Steps to reproduce:\r\n\r\n 1. Live trade on Bittrex and have a successful buy and sell on the same position by the bot.\r\n 2. Call /balance in Telegram.\r\n 3. In the response all successfully sold positions show up as not pending, held and available.\r\n \r\n### Observed Results:\r\n\r\n * What happened?\r\n Calling /balance in Telegram shows all ever bought assets (tested for 3 Days) although they where successfully sold.\r\n * What did you expect to happen?\r\n /balance shows actual held positions/wallet. For instance, If no positions are held, the stake currency only would be shown.\r\n\r\n### Relevant code exceptions or logs\r\n\r\n2020-09-22 11:42:52 freqtrade.worker: INFO - Bot heartbeat. PID=25032, version='develop-4c349342', state='RUNNING'\r\n2020-09-22 11:43:13 freqtrade.rpc.telegram: INFO - Executing handler: _balance for chat_id: XXX\r\n2020-09-22 11:43:15 freqtrade.wallets: INFO - Wallets synced.\r\n2020-09-22 11:43:15 freqtrade.rpc.rpc: WARNING - Could not get rate for pair BTXCRD.\r\n2020-09-22 11:43:52 freqtrade.worker: INFO - Bot heartbeat. 
PID=25032, version='develop-4c349342', state='RUNNING'\r\n\n", "before_files": [{"content": "# pragma pylint: disable=W0603\n\"\"\" Wallet \"\"\"\n\nimport logging\nfrom typing import Any, Dict, NamedTuple\n\nimport arrow\n\nfrom freqtrade.exchange import Exchange\nfrom freqtrade.persistence import Trade\n\nlogger = logging.getLogger(__name__)\n\n\n# wallet data structure\nclass Wallet(NamedTuple):\n currency: str\n free: float = 0\n used: float = 0\n total: float = 0\n\n\nclass Wallets:\n\n def __init__(self, config: dict, exchange: Exchange) -> None:\n self._config = config\n self._exchange = exchange\n self._wallets: Dict[str, Wallet] = {}\n self.start_cap = config['dry_run_wallet']\n self._last_wallet_refresh = 0\n self.update()\n\n def get_free(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.free:\n return balance.free\n else:\n return 0\n\n def get_used(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.used:\n return balance.used\n else:\n return 0\n\n def get_total(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.total:\n return balance.total\n else:\n return 0\n\n def _update_dry(self) -> None:\n \"\"\"\n Update from database in dry-run mode\n - Apply apply profits of closed trades on top of stake amount\n - Subtract currently tied up stake_amount in open trades\n - update balances for currencies currently in trades\n \"\"\"\n # Recreate _wallets to reset closed trade balances\n _wallets = {}\n closed_trades = Trade.get_trades(Trade.is_open.is_(False)).all()\n open_trades = Trade.get_trades(Trade.is_open.is_(True)).all()\n tot_profit = sum([trade.calc_profit() for trade in closed_trades])\n tot_in_trades = sum([trade.stake_amount for trade in open_trades])\n\n current_stake = self.start_cap + tot_profit - tot_in_trades\n _wallets[self._config['stake_currency']] = Wallet(\n self._config['stake_currency'],\n current_stake,\n 0,\n current_stake\n )\n\n for trade in open_trades:\n curr = self._exchange.get_pair_base_currency(trade.pair)\n _wallets[curr] = Wallet(\n curr,\n trade.amount,\n 0,\n trade.amount\n )\n self._wallets = _wallets\n\n def _update_live(self) -> None:\n balances = self._exchange.get_balances()\n\n for currency in balances:\n self._wallets[currency] = Wallet(\n currency,\n balances[currency].get('free', None),\n balances[currency].get('used', None),\n balances[currency].get('total', None)\n )\n\n def update(self, require_update: bool = True) -> None:\n \"\"\"\n Updates wallets from the configured version.\n By default, updates from the exchange.\n Update-skipping should only be used for user-invoked /balance calls, since\n for trading operations, the latest balance is needed.\n :param require_update: Allow skipping an update if balances were recently refreshed\n \"\"\"\n if (require_update or (self._last_wallet_refresh + 3600 < arrow.utcnow().timestamp)):\n if self._config['dry_run']:\n self._update_dry()\n else:\n self._update_live()\n logger.info('Wallets synced.')\n self._last_wallet_refresh = arrow.utcnow().timestamp\n\n def get_all_balances(self) -> Dict[str, Any]:\n return self._wallets\n", "path": "freqtrade/wallets.py"}], "after_files": [{"content": "# pragma pylint: disable=W0603\n\"\"\" Wallet \"\"\"\n\nimport logging\nfrom copy import deepcopy\nfrom typing import Any, Dict, NamedTuple\n\nimport arrow\n\nfrom freqtrade.exchange import Exchange\nfrom freqtrade.persistence import Trade\n\nlogger = 
logging.getLogger(__name__)\n\n\n# wallet data structure\nclass Wallet(NamedTuple):\n currency: str\n free: float = 0\n used: float = 0\n total: float = 0\n\n\nclass Wallets:\n\n def __init__(self, config: dict, exchange: Exchange) -> None:\n self._config = config\n self._exchange = exchange\n self._wallets: Dict[str, Wallet] = {}\n self.start_cap = config['dry_run_wallet']\n self._last_wallet_refresh = 0\n self.update()\n\n def get_free(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.free:\n return balance.free\n else:\n return 0\n\n def get_used(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.used:\n return balance.used\n else:\n return 0\n\n def get_total(self, currency: str) -> float:\n balance = self._wallets.get(currency)\n if balance and balance.total:\n return balance.total\n else:\n return 0\n\n def _update_dry(self) -> None:\n \"\"\"\n Update from database in dry-run mode\n - Apply apply profits of closed trades on top of stake amount\n - Subtract currently tied up stake_amount in open trades\n - update balances for currencies currently in trades\n \"\"\"\n # Recreate _wallets to reset closed trade balances\n _wallets = {}\n closed_trades = Trade.get_trades(Trade.is_open.is_(False)).all()\n open_trades = Trade.get_trades(Trade.is_open.is_(True)).all()\n tot_profit = sum([trade.calc_profit() for trade in closed_trades])\n tot_in_trades = sum([trade.stake_amount for trade in open_trades])\n\n current_stake = self.start_cap + tot_profit - tot_in_trades\n _wallets[self._config['stake_currency']] = Wallet(\n self._config['stake_currency'],\n current_stake,\n 0,\n current_stake\n )\n\n for trade in open_trades:\n curr = self._exchange.get_pair_base_currency(trade.pair)\n _wallets[curr] = Wallet(\n curr,\n trade.amount,\n 0,\n trade.amount\n )\n self._wallets = _wallets\n\n def _update_live(self) -> None:\n balances = self._exchange.get_balances()\n\n for currency in balances:\n self._wallets[currency] = Wallet(\n currency,\n balances[currency].get('free', None),\n balances[currency].get('used', None),\n balances[currency].get('total', None)\n )\n # Remove currencies no longer in get_balances output\n for currency in deepcopy(self._wallets):\n if currency not in balances:\n del self._wallets[currency]\n\n def update(self, require_update: bool = True) -> None:\n \"\"\"\n Updates wallets from the configured version.\n By default, updates from the exchange.\n Update-skipping should only be used for user-invoked /balance calls, since\n for trading operations, the latest balance is needed.\n :param require_update: Allow skipping an update if balances were recently refreshed\n \"\"\"\n if (require_update or (self._last_wallet_refresh + 3600 < arrow.utcnow().timestamp)):\n if self._config['dry_run']:\n self._update_dry()\n else:\n self._update_live()\n logger.info('Wallets synced.')\n self._last_wallet_refresh = arrow.utcnow().timestamp\n\n def get_all_balances(self) -> Dict[str, Any]:\n return self._wallets\n", "path": "freqtrade/wallets.py"}]} | 1,821 | 173 |
gh_patches_debug_36188 | rasdani/github-patches | git_diff | mit-ll-responsible-ai__hydra-zen-116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError with package `random`
Executing
```python
import random
instantiate(builds(random.uniform, 2, 4))
```
Results in
```
TypeError: Error instantiating 'random.Random.uniform' : uniform() missing 1 required positional argument: 'b'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/hydra_zen/structured_configs/_utils.py`
Content:
```
1 # Copyright (c) 2021 Massachusetts Institute of Technology
2 # SPDX-License-Identifier: MIT
3
4 import sys
5 from dataclasses import MISSING, Field, field as _field, is_dataclass
6 from enum import Enum
7 from typing import (
8 Any,
9 Callable,
10 Dict,
11 List,
12 Mapping,
13 Optional,
14 Tuple,
15 TypeVar,
16 Union,
17 cast,
18 overload,
19 )
20
21 from typing_extensions import Final
22
23 try:
24 from typing import get_args, get_origin
25 except ImportError: # pragma: no cover
26 # remove at Python 3.7 end-of-life
27 from collections.abc import Callable as _Callable
28
29 def get_origin(obj: Any) -> Union[None, type]:
30 """Get the unsubscripted version of a type.
31
32 Parameters
33 ----------
34 obj : Any
35
36 Returns
37 -------
38 Union[None, type]
39 Return None for unsupported types.
40
41 Notes
42 -----
43 Bare `Generic` not supported by this hacked version of `get_origin`
44
45 Examples
46 --------
47 >>> assert get_origin(Literal[42]) is Literal
48 >>> assert get_origin(int) is None
49 >>> assert get_origin(ClassVar[int]) is ClassVar
50 >>> assert get_origin(Generic[T]) is Generic
51 >>> assert get_origin(Union[T, int]) is Union
52 >>> assert get_origin(List[Tuple[T, T]][int]) == list
53 """
54 return getattr(obj, "__origin__", None)
55
56 def get_args(obj: Any) -> Union[Tuple[type, ...], Tuple[List[type], type]]:
57 """Get type arguments with all substitutions performed.
58
59 Parameters
60 ----------
61 obj : Any
62
63 Returns
64 -------
65 Union[Tuple[type, ...], Tuple[List[type], type]]
66 Callable[[t1, ...], r] -> ([t1, ...], r)
67
68 Examples
69 --------
70 >>> assert get_args(Dict[str, int]) == (str, int)
71 >>> assert get_args(int) == ()
72 >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str)
73 >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
74 >>> assert get_args(Callable[[], T][int]) == ([], int)
75 """
76 if hasattr(obj, "__origin__") and hasattr(obj, "__args__"):
77 args = obj.__args__
78 if get_origin(obj) is _Callable and args and args[0] is not Ellipsis:
79 args = (list(args[:-1]), args[-1])
80 return args
81 return ()
82
83
84 COMMON_MODULES_WITH_OBFUSCATED_IMPORTS: Tuple[str, ...] = (
85 "numpy",
86 "numpy.random",
87 "jax.numpy",
88 "jax",
89 "torch",
90 )
91 UNKNOWN_NAME: Final[str] = "<unknown>"
92 HYDRA_SUPPORTED_PRIMITIVES: Final = {int, float, bool, str, Enum}
93 KNOWN_MUTABLE_TYPES = (list, dict, set)
94
95 T = TypeVar("T")
96
97
98 # The typeshed definition of `field` has an inaccurate annotation:
99 # https://github.com/python/typeshed/blob/b9e1d7d522fe90b98e07d43a764bbe60216bc2c4/stdlib/dataclasses.pyi#L109
100 # This makes it impossible for `make_dataclass` to by type-correct in the eyes of
101 # static checkers. See https://github.com/microsoft/pyright/issues/1680 for discussion.
102 #
103 # We happen to make rather heavy use of `make_dataclass`, thus we..*sigh*.. we provide
104 # our own overloads for `field`.
105 @overload # `default` and `default_factory` are optional and mutually exclusive.
106 def field(
107 *,
108 default: Any,
109 init: bool = ...,
110 repr: bool = ...,
111 hash: Optional[bool] = ...,
112 compare: bool = ...,
113 metadata: Optional[Mapping[Any, Any]] = ...,
114 ) -> Field: # pragma: no cover
115 ...
116
117
118 @overload
119 def field(
120 *,
121 default_factory: Callable[[], Any],
122 init: bool = ...,
123 repr: bool = ...,
124 hash: Optional[bool] = ...,
125 compare: bool = ...,
126 metadata: Optional[Mapping[Any, Any]] = ...,
127 ) -> Field: # pragma: no cover
128 ...
129
130
131 def field(
132 *,
133 default=MISSING,
134 default_factory=MISSING,
135 init=True,
136 repr=True,
137 hash=None,
138 compare=True,
139 metadata=None,
140 ) -> Field:
141 if default is MISSING:
142 return cast(
143 Field,
144 _field(
145 default_factory=default_factory,
146 init=init,
147 repr=repr,
148 hash=hash,
149 compare=compare,
150 metadata=metadata,
151 ),
152 )
153 else:
154 return cast(
155 Field,
156 _field(
157 default=default,
158 init=init,
159 repr=repr,
160 hash=hash,
161 compare=compare,
162 metadata=metadata,
163 ),
164 )
165
166
167 def safe_name(obj: Any, repr_allowed=True) -> str:
168 """Tries to get a descriptive name for an object. Returns '<unknown>`
169 instead of raising - useful for writing descriptive/dafe error messages."""
170 if hasattr(obj, "__qualname__"):
171 return obj.__qualname__
172
173 if hasattr(obj, "__name__"):
174 return obj.__name__
175
176 if repr_allowed and hasattr(obj, "__repr__"):
177 return repr(obj)
178
179 return UNKNOWN_NAME
180
181
182 def building_error_prefix(target) -> str:
183 return f"Building: {safe_name(target)} ..\n"
184
185
186 def get_obj_path(obj: Any) -> str:
187 name = safe_name(obj, repr_allowed=False)
188
189 if name == UNKNOWN_NAME:
190 raise AttributeError(f"{obj} does not have a `__name__` attribute")
191
192 module = getattr(obj, "__module__", None)
193
194 if "<" in name or module is None:
195 # NumPy's ufuncs do not have an inspectable `__module__` attribute, so we
196 # check to see if the object lives in NumPy's top-level namespace.
197 #
198 # or..
199 #
200 # Qualname produced a name from a local namespace.
201 # E.g. jax.numpy.add.__qualname__ is '_maybe_bool_binop.<locals>.fn'
202 # Thus we defer to the name of the object and look for it in the
203 # top-level namespace of the known suspects
204 #
205 # or...
206 #
207 # module is None, which is apparently a thing..: numpy.random.rand.__module__ is None
208
209 # don't use qualname for obfuscated paths
210 name = obj.__name__
211 for new_module in COMMON_MODULES_WITH_OBFUSCATED_IMPORTS:
212 if getattr(sys.modules.get(new_module), name, None) is obj:
213 module = new_module
214 break
215 else: # pragma: no cover
216 name = safe_name(obj)
217 raise ModuleNotFoundError(f"{name} is not importable")
218
219 return f"{module}.{name}"
220
221
222 NoneType = type(None)
223
224
225 def sanitized_type(
226 type_: type, *, primitive_only: bool = False, wrap_optional: bool = False
227 ) -> type:
228 """Returns ``type_`` unchanged if it is supported as an annotation by hydra,
229 otherwise returns ``Any``.
230
231 Examples
232 --------
233 >>> sanitized_type(int)
234 int
235
236 >>> sanitized_type(frozenset) # not supported by hydra
237 typing.Any
238
239 >>> sanitized_type(int, wrap_optional=True)
240 Union[
241 >>> sanitized_type(List[int])
242 List[int]
243
244 >>> sanitized_type(List[int], primitive_only=True)
245 Any
246
247 >>> sanitized_type(Dict[str, frozenset])
248 Dict[str, Any]
249 """
250
251 # Warning: mutating `type_` will mutate the signature being inspected
252 # Even calling deepcopy(`type_`) silently fails to prevent this.
253 origin = get_origin(type_)
254
255 if origin is not None:
256 if primitive_only:
257 return Any
258
259 args = get_args(type_)
260 if origin is Union:
261 # Hydra only supports Optional[<type>] unions
262 if len(args) != 2 or type(None) not in args:
263 # isn't Optional[<type>]
264 return Any
265
266 args = cast(Tuple[type, type], args)
267
268 optional_type, none_type = args
269 if not isinstance(None, none_type):
270 optional_type = none_type
271 optional_type: Optional[Any]
272 optional_type = sanitized_type(optional_type)
273
274 if optional_type is Any: # Union[Any, T] is just Any
275 return Any
276 return Union[optional_type, NoneType] # type: ignore
277
278 if origin is list or origin is List:
279 return List[sanitized_type(args[0], primitive_only=True)] if args else type_ # type: ignore
280
281 if origin is dict or origin is Dict:
282 return (
283 Dict[
284 sanitized_type(args[0], primitive_only=True), # type: ignore
285 sanitized_type(args[1], primitive_only=True), # type: ignore
286 ]
287 if args
288 else type_
289 )
290
291 if origin is tuple or origin is Tuple:
292 # hydra silently supports tuples of homogenous types
293 # It has some weird behavior. It treats `Tuple[t1, t2, ...]` as `List[t1]`
294 # It isn't clear that we want to perpetrate this on our end..
295 # So we deal with inhomogeneous types as e.g. `Tuple[str, int]` -> `Tuple[Any, Any]`.
296 #
297 # Otherwise we preserve the annotation as accurately as possible
298 if not args:
299 return Any # bare Tuple not supported by hydra
300 args = cast(Tuple[type, ...], args)
301 unique_args = set(args)
302 has_ellipses = Ellipsis in unique_args
303
304 _unique_type = (
305 sanitized_type(args[0], primitive_only=True)
306 if len(unique_args) == 1 or (len(unique_args) == 2 and has_ellipses)
307 else Any
308 )
309 if has_ellipses:
310 return Tuple[_unique_type, ...] # type: ignore
311 else:
312 return Tuple[(_unique_type,) * len(args)] # type: ignore
313
314 return Any
315
316 if (
317 type_ is Any
318 or type_ in HYDRA_SUPPORTED_PRIMITIVES
319 or is_dataclass(type_)
320 or (isinstance(type_, type) and issubclass(type_, Enum))
321 ):
322 if wrap_optional and type_ is not Any: # pragma: no cover
323 # normally get_type_hints automatically resolves Optional[...]
324 # when None is set as the default, but this has been flaky
325 # for some pytorch-lightning classes. So we just do it ourselves...
326 # It might be worth removing this later since none of our standard tests
327 # cover it.
328 type_ = Optional[type_] # type: ignore
329 return type_
330
331 # Needed to cover python 3.6 where __origin__ doesn't normalize to type
332 if not primitive_only and type_ in {List, Tuple, Dict}: # pragma: no cover
333 if wrap_optional and type_ is not Any:
334 type_ = Optional[type_] # type: ignore
335 return type_
336
337 return Any
338
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/hydra_zen/structured_configs/_utils.py b/src/hydra_zen/structured_configs/_utils.py
--- a/src/hydra_zen/structured_configs/_utils.py
+++ b/src/hydra_zen/structured_configs/_utils.py
@@ -82,6 +82,7 @@
COMMON_MODULES_WITH_OBFUSCATED_IMPORTS: Tuple[str, ...] = (
+ "random",
"numpy",
"numpy.random",
"jax.numpy",
@@ -167,8 +168,6 @@
def safe_name(obj: Any, repr_allowed=True) -> str:
"""Tries to get a descriptive name for an object. Returns '<unknown>`
instead of raising - useful for writing descriptive/dafe error messages."""
- if hasattr(obj, "__qualname__"):
- return obj.__qualname__
if hasattr(obj, "__name__"):
return obj.__name__
@@ -190,8 +189,9 @@
raise AttributeError(f"{obj} does not have a `__name__` attribute")
module = getattr(obj, "__module__", None)
+ qualname = getattr(obj, "__qualname__", None)
- if "<" in name or module is None:
+ if (qualname is not None and "<" in qualname) or module is None:
# NumPy's ufuncs do not have an inspectable `__module__` attribute, so we
# check to see if the object lives in NumPy's top-level namespace.
#
@@ -204,16 +204,16 @@
#
# or...
#
- # module is None, which is apparently a thing..: numpy.random.rand.__module__ is None
+ # module is None, which is apparently a thing..:
+ # __module__ is None for both numpy.random.rand and random.random
+ #
# don't use qualname for obfuscated paths
- name = obj.__name__
for new_module in COMMON_MODULES_WITH_OBFUSCATED_IMPORTS:
if getattr(sys.modules.get(new_module), name, None) is obj:
module = new_module
break
- else: # pragma: no cover
- name = safe_name(obj)
+ else:
raise ModuleNotFoundError(f"{name} is not importable")
return f"{module}.{name}"
| {"golden_diff": "diff --git a/src/hydra_zen/structured_configs/_utils.py b/src/hydra_zen/structured_configs/_utils.py\n--- a/src/hydra_zen/structured_configs/_utils.py\n+++ b/src/hydra_zen/structured_configs/_utils.py\n@@ -82,6 +82,7 @@\n \n \n COMMON_MODULES_WITH_OBFUSCATED_IMPORTS: Tuple[str, ...] = (\n+ \"random\",\n \"numpy\",\n \"numpy.random\",\n \"jax.numpy\",\n@@ -167,8 +168,6 @@\n def safe_name(obj: Any, repr_allowed=True) -> str:\n \"\"\"Tries to get a descriptive name for an object. Returns '<unknown>`\n instead of raising - useful for writing descriptive/dafe error messages.\"\"\"\n- if hasattr(obj, \"__qualname__\"):\n- return obj.__qualname__\n \n if hasattr(obj, \"__name__\"):\n return obj.__name__\n@@ -190,8 +189,9 @@\n raise AttributeError(f\"{obj} does not have a `__name__` attribute\")\n \n module = getattr(obj, \"__module__\", None)\n+ qualname = getattr(obj, \"__qualname__\", None)\n \n- if \"<\" in name or module is None:\n+ if (qualname is not None and \"<\" in qualname) or module is None:\n # NumPy's ufuncs do not have an inspectable `__module__` attribute, so we\n # check to see if the object lives in NumPy's top-level namespace.\n #\n@@ -204,16 +204,16 @@\n #\n # or...\n #\n- # module is None, which is apparently a thing..: numpy.random.rand.__module__ is None\n+ # module is None, which is apparently a thing..:\n+ # __module__ is None for both numpy.random.rand and random.random\n+ #\n \n # don't use qualname for obfuscated paths\n- name = obj.__name__\n for new_module in COMMON_MODULES_WITH_OBFUSCATED_IMPORTS:\n if getattr(sys.modules.get(new_module), name, None) is obj:\n module = new_module\n break\n- else: # pragma: no cover\n- name = safe_name(obj)\n+ else:\n raise ModuleNotFoundError(f\"{name} is not importable\")\n \n return f\"{module}.{name}\"\n", "issue": "TypeError with package `random`\nExecuting\r\n\r\n```python\r\nimport random\r\ninstantiate(builds(random.uniform, 2, 4))\r\n```\r\n\r\nResults in\r\n\r\n```\r\nTypeError: Error instantiating 'random.Random.uniform' : uniform() missing 1 required positional argument: 'b'\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nimport sys\nfrom dataclasses import MISSING, Field, field as _field, is_dataclass\nfrom enum import Enum\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nfrom typing_extensions import Final\n\ntry:\n from typing import get_args, get_origin\nexcept ImportError: # pragma: no cover\n # remove at Python 3.7 end-of-life\n from collections.abc import Callable as _Callable\n\n def get_origin(obj: Any) -> Union[None, type]:\n \"\"\"Get the unsubscripted version of a type.\n\n Parameters\n ----------\n obj : Any\n\n Returns\n -------\n Union[None, type]\n Return None for unsupported types.\n\n Notes\n -----\n Bare `Generic` not supported by this hacked version of `get_origin`\n\n Examples\n --------\n >>> assert get_origin(Literal[42]) is Literal\n >>> assert get_origin(int) is None\n >>> assert get_origin(ClassVar[int]) is ClassVar\n >>> assert get_origin(Generic[T]) is Generic\n >>> assert get_origin(Union[T, int]) is Union\n >>> assert get_origin(List[Tuple[T, T]][int]) == list\n \"\"\"\n return getattr(obj, \"__origin__\", None)\n\n def get_args(obj: Any) -> Union[Tuple[type, ...], Tuple[List[type], type]]:\n \"\"\"Get type arguments with all substitutions performed.\n\n Parameters\n ----------\n obj : Any\n\n 
Returns\n -------\n Union[Tuple[type, ...], Tuple[List[type], type]]\n Callable[[t1, ...], r] -> ([t1, ...], r)\n\n Examples\n --------\n >>> assert get_args(Dict[str, int]) == (str, int)\n >>> assert get_args(int) == ()\n >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str)\n >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])\n >>> assert get_args(Callable[[], T][int]) == ([], int)\n \"\"\"\n if hasattr(obj, \"__origin__\") and hasattr(obj, \"__args__\"):\n args = obj.__args__\n if get_origin(obj) is _Callable and args and args[0] is not Ellipsis:\n args = (list(args[:-1]), args[-1])\n return args\n return ()\n\n\nCOMMON_MODULES_WITH_OBFUSCATED_IMPORTS: Tuple[str, ...] = (\n \"numpy\",\n \"numpy.random\",\n \"jax.numpy\",\n \"jax\",\n \"torch\",\n)\nUNKNOWN_NAME: Final[str] = \"<unknown>\"\nHYDRA_SUPPORTED_PRIMITIVES: Final = {int, float, bool, str, Enum}\nKNOWN_MUTABLE_TYPES = (list, dict, set)\n\nT = TypeVar(\"T\")\n\n\n# The typeshed definition of `field` has an inaccurate annotation:\n# https://github.com/python/typeshed/blob/b9e1d7d522fe90b98e07d43a764bbe60216bc2c4/stdlib/dataclasses.pyi#L109\n# This makes it impossible for `make_dataclass` to by type-correct in the eyes of\n# static checkers. See https://github.com/microsoft/pyright/issues/1680 for discussion.\n#\n# We happen to make rather heavy use of `make_dataclass`, thus we..*sigh*.. we provide\n# our own overloads for `field`.\n@overload # `default` and `default_factory` are optional and mutually exclusive.\ndef field(\n *,\n default: Any,\n init: bool = ...,\n repr: bool = ...,\n hash: Optional[bool] = ...,\n compare: bool = ...,\n metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field: # pragma: no cover\n ...\n\n\n@overload\ndef field(\n *,\n default_factory: Callable[[], Any],\n init: bool = ...,\n repr: bool = ...,\n hash: Optional[bool] = ...,\n compare: bool = ...,\n metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field: # pragma: no cover\n ...\n\n\ndef field(\n *,\n default=MISSING,\n default_factory=MISSING,\n init=True,\n repr=True,\n hash=None,\n compare=True,\n metadata=None,\n) -> Field:\n if default is MISSING:\n return cast(\n Field,\n _field(\n default_factory=default_factory,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n ),\n )\n else:\n return cast(\n Field,\n _field(\n default=default,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n ),\n )\n\n\ndef safe_name(obj: Any, repr_allowed=True) -> str:\n \"\"\"Tries to get a descriptive name for an object. Returns '<unknown>`\n instead of raising - useful for writing descriptive/dafe error messages.\"\"\"\n if hasattr(obj, \"__qualname__\"):\n return obj.__qualname__\n\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n\n if repr_allowed and hasattr(obj, \"__repr__\"):\n return repr(obj)\n\n return UNKNOWN_NAME\n\n\ndef building_error_prefix(target) -> str:\n return f\"Building: {safe_name(target)} ..\\n\"\n\n\ndef get_obj_path(obj: Any) -> str:\n name = safe_name(obj, repr_allowed=False)\n\n if name == UNKNOWN_NAME:\n raise AttributeError(f\"{obj} does not have a `__name__` attribute\")\n\n module = getattr(obj, \"__module__\", None)\n\n if \"<\" in name or module is None:\n # NumPy's ufuncs do not have an inspectable `__module__` attribute, so we\n # check to see if the object lives in NumPy's top-level namespace.\n #\n # or..\n #\n # Qualname produced a name from a local namespace.\n # E.g. 
jax.numpy.add.__qualname__ is '_maybe_bool_binop.<locals>.fn'\n # Thus we defer to the name of the object and look for it in the\n # top-level namespace of the known suspects\n #\n # or...\n #\n # module is None, which is apparently a thing..: numpy.random.rand.__module__ is None\n\n # don't use qualname for obfuscated paths\n name = obj.__name__\n for new_module in COMMON_MODULES_WITH_OBFUSCATED_IMPORTS:\n if getattr(sys.modules.get(new_module), name, None) is obj:\n module = new_module\n break\n else: # pragma: no cover\n name = safe_name(obj)\n raise ModuleNotFoundError(f\"{name} is not importable\")\n\n return f\"{module}.{name}\"\n\n\nNoneType = type(None)\n\n\ndef sanitized_type(\n type_: type, *, primitive_only: bool = False, wrap_optional: bool = False\n) -> type:\n \"\"\"Returns ``type_`` unchanged if it is supported as an annotation by hydra,\n otherwise returns ``Any``.\n\n Examples\n --------\n >>> sanitized_type(int)\n int\n\n >>> sanitized_type(frozenset) # not supported by hydra\n typing.Any\n\n >>> sanitized_type(int, wrap_optional=True)\n Union[\n >>> sanitized_type(List[int])\n List[int]\n\n >>> sanitized_type(List[int], primitive_only=True)\n Any\n\n >>> sanitized_type(Dict[str, frozenset])\n Dict[str, Any]\n \"\"\"\n\n # Warning: mutating `type_` will mutate the signature being inspected\n # Even calling deepcopy(`type_`) silently fails to prevent this.\n origin = get_origin(type_)\n\n if origin is not None:\n if primitive_only:\n return Any\n\n args = get_args(type_)\n if origin is Union:\n # Hydra only supports Optional[<type>] unions\n if len(args) != 2 or type(None) not in args:\n # isn't Optional[<type>]\n return Any\n\n args = cast(Tuple[type, type], args)\n\n optional_type, none_type = args\n if not isinstance(None, none_type):\n optional_type = none_type\n optional_type: Optional[Any]\n optional_type = sanitized_type(optional_type)\n\n if optional_type is Any: # Union[Any, T] is just Any\n return Any\n return Union[optional_type, NoneType] # type: ignore\n\n if origin is list or origin is List:\n return List[sanitized_type(args[0], primitive_only=True)] if args else type_ # type: ignore\n\n if origin is dict or origin is Dict:\n return (\n Dict[\n sanitized_type(args[0], primitive_only=True), # type: ignore\n sanitized_type(args[1], primitive_only=True), # type: ignore\n ]\n if args\n else type_\n )\n\n if origin is tuple or origin is Tuple:\n # hydra silently supports tuples of homogenous types\n # It has some weird behavior. It treats `Tuple[t1, t2, ...]` as `List[t1]`\n # It isn't clear that we want to perpetrate this on our end..\n # So we deal with inhomogeneous types as e.g. `Tuple[str, int]` -> `Tuple[Any, Any]`.\n #\n # Otherwise we preserve the annotation as accurately as possible\n if not args:\n return Any # bare Tuple not supported by hydra\n args = cast(Tuple[type, ...], args)\n unique_args = set(args)\n has_ellipses = Ellipsis in unique_args\n\n _unique_type = (\n sanitized_type(args[0], primitive_only=True)\n if len(unique_args) == 1 or (len(unique_args) == 2 and has_ellipses)\n else Any\n )\n if has_ellipses:\n return Tuple[_unique_type, ...] 
# type: ignore\n else:\n return Tuple[(_unique_type,) * len(args)] # type: ignore\n\n return Any\n\n if (\n type_ is Any\n or type_ in HYDRA_SUPPORTED_PRIMITIVES\n or is_dataclass(type_)\n or (isinstance(type_, type) and issubclass(type_, Enum))\n ):\n if wrap_optional and type_ is not Any: # pragma: no cover\n # normally get_type_hints automatically resolves Optional[...]\n # when None is set as the default, but this has been flaky\n # for some pytorch-lightning classes. So we just do it ourselves...\n # It might be worth removing this later since none of our standard tests\n # cover it.\n type_ = Optional[type_] # type: ignore\n return type_\n\n # Needed to cover python 3.6 where __origin__ doesn't normalize to type\n if not primitive_only and type_ in {List, Tuple, Dict}: # pragma: no cover\n if wrap_optional and type_ is not Any:\n type_ = Optional[type_] # type: ignore\n return type_\n\n return Any\n", "path": "src/hydra_zen/structured_configs/_utils.py"}], "after_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nimport sys\nfrom dataclasses import MISSING, Field, field as _field, is_dataclass\nfrom enum import Enum\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Mapping,\n Optional,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nfrom typing_extensions import Final\n\ntry:\n from typing import get_args, get_origin\nexcept ImportError: # pragma: no cover\n # remove at Python 3.7 end-of-life\n from collections.abc import Callable as _Callable\n\n def get_origin(obj: Any) -> Union[None, type]:\n \"\"\"Get the unsubscripted version of a type.\n\n Parameters\n ----------\n obj : Any\n\n Returns\n -------\n Union[None, type]\n Return None for unsupported types.\n\n Notes\n -----\n Bare `Generic` not supported by this hacked version of `get_origin`\n\n Examples\n --------\n >>> assert get_origin(Literal[42]) is Literal\n >>> assert get_origin(int) is None\n >>> assert get_origin(ClassVar[int]) is ClassVar\n >>> assert get_origin(Generic[T]) is Generic\n >>> assert get_origin(Union[T, int]) is Union\n >>> assert get_origin(List[Tuple[T, T]][int]) == list\n \"\"\"\n return getattr(obj, \"__origin__\", None)\n\n def get_args(obj: Any) -> Union[Tuple[type, ...], Tuple[List[type], type]]:\n \"\"\"Get type arguments with all substitutions performed.\n\n Parameters\n ----------\n obj : Any\n\n Returns\n -------\n Union[Tuple[type, ...], Tuple[List[type], type]]\n Callable[[t1, ...], r] -> ([t1, ...], r)\n\n Examples\n --------\n >>> assert get_args(Dict[str, int]) == (str, int)\n >>> assert get_args(int) == ()\n >>> assert get_args(Union[int, Union[T, int], str][int]) == (int, str)\n >>> assert get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])\n >>> assert get_args(Callable[[], T][int]) == ([], int)\n \"\"\"\n if hasattr(obj, \"__origin__\") and hasattr(obj, \"__args__\"):\n args = obj.__args__\n if get_origin(obj) is _Callable and args and args[0] is not Ellipsis:\n args = (list(args[:-1]), args[-1])\n return args\n return ()\n\n\nCOMMON_MODULES_WITH_OBFUSCATED_IMPORTS: Tuple[str, ...] 
= (\n \"random\",\n \"numpy\",\n \"numpy.random\",\n \"jax.numpy\",\n \"jax\",\n \"torch\",\n)\nUNKNOWN_NAME: Final[str] = \"<unknown>\"\nHYDRA_SUPPORTED_PRIMITIVES: Final = {int, float, bool, str, Enum}\nKNOWN_MUTABLE_TYPES = (list, dict, set)\n\nT = TypeVar(\"T\")\n\n\n# The typeshed definition of `field` has an inaccurate annotation:\n# https://github.com/python/typeshed/blob/b9e1d7d522fe90b98e07d43a764bbe60216bc2c4/stdlib/dataclasses.pyi#L109\n# This makes it impossible for `make_dataclass` to by type-correct in the eyes of\n# static checkers. See https://github.com/microsoft/pyright/issues/1680 for discussion.\n#\n# We happen to make rather heavy use of `make_dataclass`, thus we..*sigh*.. we provide\n# our own overloads for `field`.\n@overload # `default` and `default_factory` are optional and mutually exclusive.\ndef field(\n *,\n default: Any,\n init: bool = ...,\n repr: bool = ...,\n hash: Optional[bool] = ...,\n compare: bool = ...,\n metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field: # pragma: no cover\n ...\n\n\n@overload\ndef field(\n *,\n default_factory: Callable[[], Any],\n init: bool = ...,\n repr: bool = ...,\n hash: Optional[bool] = ...,\n compare: bool = ...,\n metadata: Optional[Mapping[Any, Any]] = ...,\n) -> Field: # pragma: no cover\n ...\n\n\ndef field(\n *,\n default=MISSING,\n default_factory=MISSING,\n init=True,\n repr=True,\n hash=None,\n compare=True,\n metadata=None,\n) -> Field:\n if default is MISSING:\n return cast(\n Field,\n _field(\n default_factory=default_factory,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n ),\n )\n else:\n return cast(\n Field,\n _field(\n default=default,\n init=init,\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n ),\n )\n\n\ndef safe_name(obj: Any, repr_allowed=True) -> str:\n \"\"\"Tries to get a descriptive name for an object. Returns '<unknown>`\n instead of raising - useful for writing descriptive/dafe error messages.\"\"\"\n\n if hasattr(obj, \"__name__\"):\n return obj.__name__\n\n if repr_allowed and hasattr(obj, \"__repr__\"):\n return repr(obj)\n\n return UNKNOWN_NAME\n\n\ndef building_error_prefix(target) -> str:\n return f\"Building: {safe_name(target)} ..\\n\"\n\n\ndef get_obj_path(obj: Any) -> str:\n name = safe_name(obj, repr_allowed=False)\n\n if name == UNKNOWN_NAME:\n raise AttributeError(f\"{obj} does not have a `__name__` attribute\")\n\n module = getattr(obj, \"__module__\", None)\n qualname = getattr(obj, \"__qualname__\", None)\n\n if (qualname is not None and \"<\" in qualname) or module is None:\n # NumPy's ufuncs do not have an inspectable `__module__` attribute, so we\n # check to see if the object lives in NumPy's top-level namespace.\n #\n # or..\n #\n # Qualname produced a name from a local namespace.\n # E.g. 
jax.numpy.add.__qualname__ is '_maybe_bool_binop.<locals>.fn'\n # Thus we defer to the name of the object and look for it in the\n # top-level namespace of the known suspects\n #\n # or...\n #\n # module is None, which is apparently a thing..:\n # __module__ is None for both numpy.random.rand and random.random\n #\n\n # don't use qualname for obfuscated paths\n for new_module in COMMON_MODULES_WITH_OBFUSCATED_IMPORTS:\n if getattr(sys.modules.get(new_module), name, None) is obj:\n module = new_module\n break\n else:\n raise ModuleNotFoundError(f\"{name} is not importable\")\n\n return f\"{module}.{name}\"\n\n\nNoneType = type(None)\n\n\ndef sanitized_type(\n type_: type, *, primitive_only: bool = False, wrap_optional: bool = False\n) -> type:\n \"\"\"Returns ``type_`` unchanged if it is supported as an annotation by hydra,\n otherwise returns ``Any``.\n\n Examples\n --------\n >>> sanitized_type(int)\n int\n\n >>> sanitized_type(frozenset) # not supported by hydra\n typing.Any\n\n >>> sanitized_type(int, wrap_optional=True)\n Union[\n >>> sanitized_type(List[int])\n List[int]\n\n >>> sanitized_type(List[int], primitive_only=True)\n Any\n\n >>> sanitized_type(Dict[str, frozenset])\n Dict[str, Any]\n \"\"\"\n\n # Warning: mutating `type_` will mutate the signature being inspected\n # Even calling deepcopy(`type_`) silently fails to prevent this.\n origin = get_origin(type_)\n\n if origin is not None:\n if primitive_only:\n return Any\n\n args = get_args(type_)\n if origin is Union:\n # Hydra only supports Optional[<type>] unions\n if len(args) != 2 or type(None) not in args:\n # isn't Optional[<type>]\n return Any\n\n args = cast(Tuple[type, type], args)\n\n optional_type, none_type = args\n if not isinstance(None, none_type):\n optional_type = none_type\n optional_type: Optional[Any]\n optional_type = sanitized_type(optional_type)\n\n if optional_type is Any: # Union[Any, T] is just Any\n return Any\n return Union[optional_type, NoneType] # type: ignore\n\n if origin is list or origin is List:\n return List[sanitized_type(args[0], primitive_only=True)] if args else type_ # type: ignore\n\n if origin is dict or origin is Dict:\n return (\n Dict[\n sanitized_type(args[0], primitive_only=True), # type: ignore\n sanitized_type(args[1], primitive_only=True), # type: ignore\n ]\n if args\n else type_\n )\n\n if origin is tuple or origin is Tuple:\n # hydra silently supports tuples of homogenous types\n # It has some weird behavior. It treats `Tuple[t1, t2, ...]` as `List[t1]`\n # It isn't clear that we want to perpetrate this on our end..\n # So we deal with inhomogeneous types as e.g. `Tuple[str, int]` -> `Tuple[Any, Any]`.\n #\n # Otherwise we preserve the annotation as accurately as possible\n if not args:\n return Any # bare Tuple not supported by hydra\n args = cast(Tuple[type, ...], args)\n unique_args = set(args)\n has_ellipses = Ellipsis in unique_args\n\n _unique_type = (\n sanitized_type(args[0], primitive_only=True)\n if len(unique_args) == 1 or (len(unique_args) == 2 and has_ellipses)\n else Any\n )\n if has_ellipses:\n return Tuple[_unique_type, ...] 
# type: ignore\n else:\n return Tuple[(_unique_type,) * len(args)] # type: ignore\n\n return Any\n\n if (\n type_ is Any\n or type_ in HYDRA_SUPPORTED_PRIMITIVES\n or is_dataclass(type_)\n or (isinstance(type_, type) and issubclass(type_, Enum))\n ):\n if wrap_optional and type_ is not Any: # pragma: no cover\n # normally get_type_hints automatically resolves Optional[...]\n # when None is set as the default, but this has been flaky\n # for some pytorch-lightning classes. So we just do it ourselves...\n # It might be worth removing this later since none of our standard tests\n # cover it.\n type_ = Optional[type_] # type: ignore\n return type_\n\n # Needed to cover python 3.6 where __origin__ doesn't normalize to type\n if not primitive_only and type_ in {List, Tuple, Dict}: # pragma: no cover\n if wrap_optional and type_ is not Any:\n type_ = Optional[type_] # type: ignore\n return type_\n\n return Any\n", "path": "src/hydra_zen/structured_configs/_utils.py"}]} | 3,759 | 529 |
gh_patches_debug_16602 | rasdani/github-patches | git_diff | svthalia__concrexit-2500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Partner page partner blocks keep showing the loading animation
### Describe the bug
The partner blocks on the partner page keep showing the loading animation.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to [the Thalia partner page](https://thalia.nu/career/).
2. Scroll down and see the loading animation on the partner blocks.
### Expected behaviour
The loading animation should stop when the partners have been loaded.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/thaliawebsite/templatetags/grid_item.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5
6 @register.inclusion_tag("includes/grid_item.html")
7 def grid_item(
8 title=None,
9 meta_text="",
10 url=None,
11 image_url=None,
12 ribbon=None,
13 class_name="",
14 anchor_attrs="",
15 ):
16 return {
17 "title": title,
18 "url": url,
19 "image_url": image_url,
20 "meta_text": meta_text,
21 "ribbon": ribbon,
22 "class_name": class_name,
23 "anchor_attrs": anchor_attrs,
24 }
25
```
Path: `website/partners/templatetags/partner_cards.py`
Content:
```
1 from django import template
2 from django.conf import settings
3 from django.template.defaultfilters import striptags, truncatechars
4
5 from thaliawebsite.templatetags.bleach_tags import bleach
6 from thaliawebsite.templatetags.grid_item import grid_item
7 from utils.media.services import get_thumbnail_url
8 from partners.models import Vacancy
9
10 register = template.Library()
11
12
13 @register.inclusion_tag("includes/grid_item.html")
14 def partner_card(partner):
15 """Return grid item showing partner."""
16 image_url = ""
17 if partner.logo:
18 image_url = get_thumbnail_url(
19 partner.logo, settings.THUMBNAIL_SIZES["medium"], fit=False
20 )
21
22 meta_text = truncatechars(bleach(striptags(partner.company_profile)), 80)
23
24 return grid_item(
25 title=partner.name,
26 meta_text='<p class="px-2 d-none d-md-block">{}</p>'.format(meta_text),
27 url=partner.get_absolute_url,
28 image_url=image_url,
29 class_name="partner-card contain-logo",
30 )
31
32
33 @register.inclusion_tag("includes/grid_item.html")
34 def partner_image_card(image):
35 """Return grid item showing partner image."""
36 class_name = "partner-image-card"
37 image_url = get_thumbnail_url(image, settings.THUMBNAIL_SIZES["medium"])
38
39 return grid_item(
40 title="",
41 url=get_thumbnail_url(image, settings.THUMBNAIL_SIZES["large"], fit=False),
42 image_url=image_url,
43 class_name=class_name,
44 anchor_attrs='data-fancybox="gallery"',
45 )
46
47
48 @register.inclusion_tag("partners/vacancy_card.html")
49 def vacancy_card(vacancy):
50 """Return grid item showing vacancy."""
51 image_url = None
52 if vacancy.get_company_logo():
53 image_url = get_thumbnail_url(
54 vacancy.get_company_logo(), settings.THUMBNAIL_SIZES["medium"], fit=False
55 )
56
57 description = truncatechars(bleach(striptags(vacancy.description)), 300)
58 extra_class = "external-vacancy"
59 url = "#vacancy-{}".format(vacancy.id)
60 keywords = vacancy.keywords.split(",")
61 location = vacancy.location
62 if vacancy.partner and vacancy.partner.is_active:
63 url = "{}#vacancy-{}".format(vacancy.partner.get_absolute_url(), vacancy.id)
64 extra_class = ""
65
66 return {
67 "title": vacancy.title,
68 "company_name": vacancy.get_company_name(),
69 "image_url": image_url,
70 "description": description,
71 "location": location,
72 "keywords": keywords,
73 "url": url,
74 "extra_class": extra_class,
75 }
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/partners/templatetags/partner_cards.py b/website/partners/templatetags/partner_cards.py
--- a/website/partners/templatetags/partner_cards.py
+++ b/website/partners/templatetags/partner_cards.py
@@ -27,6 +27,7 @@
url=partner.get_absolute_url,
image_url=image_url,
class_name="partner-card contain-logo",
+ show_loading_animation=False,
)
diff --git a/website/thaliawebsite/templatetags/grid_item.py b/website/thaliawebsite/templatetags/grid_item.py
--- a/website/thaliawebsite/templatetags/grid_item.py
+++ b/website/thaliawebsite/templatetags/grid_item.py
@@ -12,6 +12,7 @@
ribbon=None,
class_name="",
anchor_attrs="",
+ show_loading_animation=True,
):
return {
"title": title,
@@ -21,4 +22,5 @@
"ribbon": ribbon,
"class_name": class_name,
"anchor_attrs": anchor_attrs,
+ "show_loading_animation": show_loading_animation,
}
| {"golden_diff": "diff --git a/website/partners/templatetags/partner_cards.py b/website/partners/templatetags/partner_cards.py\n--- a/website/partners/templatetags/partner_cards.py\n+++ b/website/partners/templatetags/partner_cards.py\n@@ -27,6 +27,7 @@\n url=partner.get_absolute_url,\n image_url=image_url,\n class_name=\"partner-card contain-logo\",\n+ show_loading_animation=False,\n )\n \n \ndiff --git a/website/thaliawebsite/templatetags/grid_item.py b/website/thaliawebsite/templatetags/grid_item.py\n--- a/website/thaliawebsite/templatetags/grid_item.py\n+++ b/website/thaliawebsite/templatetags/grid_item.py\n@@ -12,6 +12,7 @@\n ribbon=None,\n class_name=\"\",\n anchor_attrs=\"\",\n+ show_loading_animation=True,\n ):\n return {\n \"title\": title,\n@@ -21,4 +22,5 @@\n \"ribbon\": ribbon,\n \"class_name\": class_name,\n \"anchor_attrs\": anchor_attrs,\n+ \"show_loading_animation\": show_loading_animation,\n }\n", "issue": "Partner page partner blocks keep showing the loading animation\n### Describe the bug\r\nPartner page partner blocks keep showing the loading animation.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to [the Thalia partner page](https://thalia.nu/career/).\r\n2. Scroll down and see the loading animation on the partner blocks.\r\n\r\n### Expected behaviour\r\nThe loading animation should stop when the partners have been loaded.\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef grid_item(\n title=None,\n meta_text=\"\",\n url=None,\n image_url=None,\n ribbon=None,\n class_name=\"\",\n anchor_attrs=\"\",\n):\n return {\n \"title\": title,\n \"url\": url,\n \"image_url\": image_url,\n \"meta_text\": meta_text,\n \"ribbon\": ribbon,\n \"class_name\": class_name,\n \"anchor_attrs\": anchor_attrs,\n }\n", "path": "website/thaliawebsite/templatetags/grid_item.py"}, {"content": "from django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import striptags, truncatechars\n\nfrom thaliawebsite.templatetags.bleach_tags import bleach\nfrom thaliawebsite.templatetags.grid_item import grid_item\nfrom utils.media.services import get_thumbnail_url\nfrom partners.models import Vacancy\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_card(partner):\n \"\"\"Return grid item showing partner.\"\"\"\n image_url = \"\"\n if partner.logo:\n image_url = get_thumbnail_url(\n partner.logo, settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n meta_text = truncatechars(bleach(striptags(partner.company_profile)), 80)\n\n return grid_item(\n title=partner.name,\n meta_text='<p class=\"px-2 d-none d-md-block\">{}</p>'.format(meta_text),\n url=partner.get_absolute_url,\n image_url=image_url,\n class_name=\"partner-card contain-logo\",\n )\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_image_card(image):\n \"\"\"Return grid item showing partner image.\"\"\"\n class_name = \"partner-image-card\"\n image_url = get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"medium\"])\n\n return grid_item(\n title=\"\",\n url=get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"large\"], fit=False),\n image_url=image_url,\n class_name=class_name,\n anchor_attrs='data-fancybox=\"gallery\"',\n )\n\n\[email protected]_tag(\"partners/vacancy_card.html\")\ndef vacancy_card(vacancy):\n \"\"\"Return grid item showing vacancy.\"\"\"\n image_url = None\n if 
vacancy.get_company_logo():\n image_url = get_thumbnail_url(\n vacancy.get_company_logo(), settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n description = truncatechars(bleach(striptags(vacancy.description)), 300)\n extra_class = \"external-vacancy\"\n url = \"#vacancy-{}\".format(vacancy.id)\n keywords = vacancy.keywords.split(\",\")\n location = vacancy.location\n if vacancy.partner and vacancy.partner.is_active:\n url = \"{}#vacancy-{}\".format(vacancy.partner.get_absolute_url(), vacancy.id)\n extra_class = \"\"\n\n return {\n \"title\": vacancy.title,\n \"company_name\": vacancy.get_company_name(),\n \"image_url\": image_url,\n \"description\": description,\n \"location\": location,\n \"keywords\": keywords,\n \"url\": url,\n \"extra_class\": extra_class,\n }\n", "path": "website/partners/templatetags/partner_cards.py"}], "after_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef grid_item(\n title=None,\n meta_text=\"\",\n url=None,\n image_url=None,\n ribbon=None,\n class_name=\"\",\n anchor_attrs=\"\",\n show_loading_animation=True,\n):\n return {\n \"title\": title,\n \"url\": url,\n \"image_url\": image_url,\n \"meta_text\": meta_text,\n \"ribbon\": ribbon,\n \"class_name\": class_name,\n \"anchor_attrs\": anchor_attrs,\n \"show_loading_animation\": show_loading_animation,\n }\n", "path": "website/thaliawebsite/templatetags/grid_item.py"}, {"content": "from django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import striptags, truncatechars\n\nfrom thaliawebsite.templatetags.bleach_tags import bleach\nfrom thaliawebsite.templatetags.grid_item import grid_item\nfrom utils.media.services import get_thumbnail_url\nfrom partners.models import Vacancy\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_card(partner):\n \"\"\"Return grid item showing partner.\"\"\"\n image_url = \"\"\n if partner.logo:\n image_url = get_thumbnail_url(\n partner.logo, settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n meta_text = truncatechars(bleach(striptags(partner.company_profile)), 80)\n\n return grid_item(\n title=partner.name,\n meta_text='<p class=\"px-2 d-none d-md-block\">{}</p>'.format(meta_text),\n url=partner.get_absolute_url,\n image_url=image_url,\n class_name=\"partner-card contain-logo\",\n show_loading_animation=False,\n )\n\n\[email protected]_tag(\"includes/grid_item.html\")\ndef partner_image_card(image):\n \"\"\"Return grid item showing partner image.\"\"\"\n class_name = \"partner-image-card\"\n image_url = get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"medium\"])\n\n return grid_item(\n title=\"\",\n url=get_thumbnail_url(image, settings.THUMBNAIL_SIZES[\"large\"], fit=False),\n image_url=image_url,\n class_name=class_name,\n anchor_attrs='data-fancybox=\"gallery\"',\n )\n\n\[email protected]_tag(\"partners/vacancy_card.html\")\ndef vacancy_card(vacancy):\n \"\"\"Return grid item showing vacancy.\"\"\"\n image_url = None\n if vacancy.get_company_logo():\n image_url = get_thumbnail_url(\n vacancy.get_company_logo(), settings.THUMBNAIL_SIZES[\"medium\"], fit=False\n )\n\n description = truncatechars(bleach(striptags(vacancy.description)), 300)\n extra_class = \"external-vacancy\"\n url = \"#vacancy-{}\".format(vacancy.id)\n keywords = vacancy.keywords.split(\",\")\n location = vacancy.location\n if vacancy.partner and vacancy.partner.is_active:\n url = 
\"{}#vacancy-{}\".format(vacancy.partner.get_absolute_url(), vacancy.id)\n extra_class = \"\"\n\n return {\n \"title\": vacancy.title,\n \"company_name\": vacancy.get_company_name(),\n \"image_url\": image_url,\n \"description\": description,\n \"location\": location,\n \"keywords\": keywords,\n \"url\": url,\n \"extra_class\": extra_class,\n }\n", "path": "website/partners/templatetags/partner_cards.py"}]} | 1,249 | 267 |
gh_patches_debug_19699 | rasdani/github-patches | git_diff | ibis-project__ibis-3990 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: comparing bool expr to bool literal generates invalid sql
It looks like one of the recent refactorings may have broken comparisons of boolean to boolean:
This test:
```python
def test_bool_bool():
import ibis
from ibis.backends.base.sql.compiler import Compiler
t = ibis.table(
[('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],
'airlines',
)
x = ibis.literal(True)
top = t[(t.dest.cast('int64') == 0) == x]
result = Compiler.to_sql(top)
print(result)
```
produces this SQL:
```sql
SELECT *
FROM airlines
WHERE CAST(`dest` AS bigint) = 0 = TRUE
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/base/sql/registry/helpers.py`
Content:
```
1 import ibis.common.exceptions as com
2 import ibis.expr.datatypes as dt
3 import ibis.expr.operations as ops
4 import ibis.expr.types as ir
5 from ibis.backends.base.sql.registry import identifiers
6
7
8 def format_call(translator, func, *args):
9 formatted_args = []
10 for arg in args:
11 fmt_arg = translator.translate(arg)
12 formatted_args.append(fmt_arg)
13
14 return '{}({})'.format(func, ', '.join(formatted_args))
15
16
17 def quote_identifier(name, quotechar='`', force=False):
18 """Add quotes to the `name` identifier if needed."""
19 if force or name.count(' ') or name in identifiers.base_identifiers:
20 return '{0}{1}{0}'.format(quotechar, name)
21 else:
22 return name
23
24
25 def needs_parens(op):
26 if isinstance(op, ir.Expr):
27 op = op.op()
28 op_klass = type(op)
29 # function calls don't need parens
30 return op_klass in {
31 ops.Negate,
32 ops.IsNull,
33 ops.NotNull,
34 ops.Add,
35 ops.Subtract,
36 ops.Multiply,
37 ops.Divide,
38 ops.Power,
39 ops.Modulus,
40 ops.Equals,
41 ops.NotEquals,
42 ops.GreaterEqual,
43 ops.Greater,
44 ops.LessEqual,
45 ops.Less,
46 ops.IdenticalTo,
47 ops.And,
48 ops.Or,
49 ops.Xor,
50 }
51
52
53 parenthesize = '({})'.format
54
55
56 sql_type_names = {
57 'int8': 'tinyint',
58 'int16': 'smallint',
59 'int32': 'int',
60 'int64': 'bigint',
61 'float': 'float',
62 'float32': 'float',
63 'double': 'double',
64 'float64': 'double',
65 'string': 'string',
66 'boolean': 'boolean',
67 'timestamp': 'timestamp',
68 'decimal': 'decimal',
69 }
70
71
72 def type_to_sql_string(tval):
73 if isinstance(tval, dt.Decimal):
74 return f'decimal({tval.precision}, {tval.scale})'
75 name = tval.name.lower()
76 try:
77 return sql_type_names[name]
78 except KeyError:
79 raise com.UnsupportedBackendType(name)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/backends/base/sql/registry/helpers.py b/ibis/backends/base/sql/registry/helpers.py
--- a/ibis/backends/base/sql/registry/helpers.py
+++ b/ibis/backends/base/sql/registry/helpers.py
@@ -22,32 +22,34 @@
return name
-def needs_parens(op):
- if isinstance(op, ir.Expr):
- op = op.op()
- op_klass = type(op)
- # function calls don't need parens
- return op_klass in {
- ops.Negate,
- ops.IsNull,
- ops.NotNull,
- ops.Add,
- ops.Subtract,
- ops.Multiply,
- ops.Divide,
- ops.Power,
- ops.Modulus,
- ops.Equals,
- ops.NotEquals,
- ops.GreaterEqual,
- ops.Greater,
- ops.LessEqual,
- ops.Less,
- ops.IdenticalTo,
- ops.And,
- ops.Or,
- ops.Xor,
- }
+_NEEDS_PARENS_OPS = (
+ ops.Negate,
+ ops.IsNull,
+ ops.NotNull,
+ ops.Add,
+ ops.Subtract,
+ ops.Multiply,
+ ops.Divide,
+ ops.Power,
+ ops.Modulus,
+ ops.Equals,
+ ops.NotEquals,
+ ops.GreaterEqual,
+ ops.Greater,
+ ops.LessEqual,
+ ops.Less,
+ ops.IdenticalTo,
+ ops.And,
+ ops.Or,
+ ops.Xor,
+)
+
+
+def needs_parens(expr: ir.Expr):
+ op = expr.op()
+ if isinstance(op, ops.Alias):
+ op = op.arg.op()
+ return isinstance(op, _NEEDS_PARENS_OPS)
parenthesize = '({})'.format
| {"golden_diff": "diff --git a/ibis/backends/base/sql/registry/helpers.py b/ibis/backends/base/sql/registry/helpers.py\n--- a/ibis/backends/base/sql/registry/helpers.py\n+++ b/ibis/backends/base/sql/registry/helpers.py\n@@ -22,32 +22,34 @@\n return name\n \n \n-def needs_parens(op):\n- if isinstance(op, ir.Expr):\n- op = op.op()\n- op_klass = type(op)\n- # function calls don't need parens\n- return op_klass in {\n- ops.Negate,\n- ops.IsNull,\n- ops.NotNull,\n- ops.Add,\n- ops.Subtract,\n- ops.Multiply,\n- ops.Divide,\n- ops.Power,\n- ops.Modulus,\n- ops.Equals,\n- ops.NotEquals,\n- ops.GreaterEqual,\n- ops.Greater,\n- ops.LessEqual,\n- ops.Less,\n- ops.IdenticalTo,\n- ops.And,\n- ops.Or,\n- ops.Xor,\n- }\n+_NEEDS_PARENS_OPS = (\n+ ops.Negate,\n+ ops.IsNull,\n+ ops.NotNull,\n+ ops.Add,\n+ ops.Subtract,\n+ ops.Multiply,\n+ ops.Divide,\n+ ops.Power,\n+ ops.Modulus,\n+ ops.Equals,\n+ ops.NotEquals,\n+ ops.GreaterEqual,\n+ ops.Greater,\n+ ops.LessEqual,\n+ ops.Less,\n+ ops.IdenticalTo,\n+ ops.And,\n+ ops.Or,\n+ ops.Xor,\n+)\n+\n+\n+def needs_parens(expr: ir.Expr):\n+ op = expr.op()\n+ if isinstance(op, ops.Alias):\n+ op = op.arg.op()\n+ return isinstance(op, _NEEDS_PARENS_OPS)\n \n \n parenthesize = '({})'.format\n", "issue": "bug: comparing bool expr to bool literal generates invalid sql\nIt looks like one of the recent refactorings may have broken comparisons of boolean to boolean:\r\n\r\nThis test:\r\n\r\n```python\r\ndef test_bool_bool():\r\n import ibis\r\n from ibis.backends.base.sql.compiler import Compiler\r\n\r\n t = ibis.table(\r\n [('dest', 'string'), ('origin', 'string'), ('arrdelay', 'int32')],\r\n 'airlines',\r\n )\r\n\r\n x = ibis.literal(True)\r\n top = t[(t.dest.cast('int64') == 0) == x]\r\n\r\n result = Compiler.to_sql(top)\r\n print(result)\r\n```\r\n\r\nproduces this SQL:\r\n\r\n```sql\r\nSELECT *\r\nFROM airlines\r\nWHERE CAST(`dest` AS bigint) = 0 = TRUE\r\n```\n", "before_files": [{"content": "import ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nfrom ibis.backends.base.sql.registry import identifiers\n\n\ndef format_call(translator, func, *args):\n formatted_args = []\n for arg in args:\n fmt_arg = translator.translate(arg)\n formatted_args.append(fmt_arg)\n\n return '{}({})'.format(func, ', '.join(formatted_args))\n\n\ndef quote_identifier(name, quotechar='`', force=False):\n \"\"\"Add quotes to the `name` identifier if needed.\"\"\"\n if force or name.count(' ') or name in identifiers.base_identifiers:\n return '{0}{1}{0}'.format(quotechar, name)\n else:\n return name\n\n\ndef needs_parens(op):\n if isinstance(op, ir.Expr):\n op = op.op()\n op_klass = type(op)\n # function calls don't need parens\n return op_klass in {\n ops.Negate,\n ops.IsNull,\n ops.NotNull,\n ops.Add,\n ops.Subtract,\n ops.Multiply,\n ops.Divide,\n ops.Power,\n ops.Modulus,\n ops.Equals,\n ops.NotEquals,\n ops.GreaterEqual,\n ops.Greater,\n ops.LessEqual,\n ops.Less,\n ops.IdenticalTo,\n ops.And,\n ops.Or,\n ops.Xor,\n }\n\n\nparenthesize = '({})'.format\n\n\nsql_type_names = {\n 'int8': 'tinyint',\n 'int16': 'smallint',\n 'int32': 'int',\n 'int64': 'bigint',\n 'float': 'float',\n 'float32': 'float',\n 'double': 'double',\n 'float64': 'double',\n 'string': 'string',\n 'boolean': 'boolean',\n 'timestamp': 'timestamp',\n 'decimal': 'decimal',\n}\n\n\ndef type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return f'decimal({tval.precision}, {tval.scale})'\n name = tval.name.lower()\n try:\n return 
sql_type_names[name]\n except KeyError:\n raise com.UnsupportedBackendType(name)\n", "path": "ibis/backends/base/sql/registry/helpers.py"}], "after_files": [{"content": "import ibis.common.exceptions as com\nimport ibis.expr.datatypes as dt\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nfrom ibis.backends.base.sql.registry import identifiers\n\n\ndef format_call(translator, func, *args):\n formatted_args = []\n for arg in args:\n fmt_arg = translator.translate(arg)\n formatted_args.append(fmt_arg)\n\n return '{}({})'.format(func, ', '.join(formatted_args))\n\n\ndef quote_identifier(name, quotechar='`', force=False):\n \"\"\"Add quotes to the `name` identifier if needed.\"\"\"\n if force or name.count(' ') or name in identifiers.base_identifiers:\n return '{0}{1}{0}'.format(quotechar, name)\n else:\n return name\n\n\n_NEEDS_PARENS_OPS = (\n ops.Negate,\n ops.IsNull,\n ops.NotNull,\n ops.Add,\n ops.Subtract,\n ops.Multiply,\n ops.Divide,\n ops.Power,\n ops.Modulus,\n ops.Equals,\n ops.NotEquals,\n ops.GreaterEqual,\n ops.Greater,\n ops.LessEqual,\n ops.Less,\n ops.IdenticalTo,\n ops.And,\n ops.Or,\n ops.Xor,\n)\n\n\ndef needs_parens(expr: ir.Expr):\n op = expr.op()\n if isinstance(op, ops.Alias):\n op = op.arg.op()\n return isinstance(op, _NEEDS_PARENS_OPS)\n\n\nparenthesize = '({})'.format\n\n\nsql_type_names = {\n 'int8': 'tinyint',\n 'int16': 'smallint',\n 'int32': 'int',\n 'int64': 'bigint',\n 'float': 'float',\n 'float32': 'float',\n 'double': 'double',\n 'float64': 'double',\n 'string': 'string',\n 'boolean': 'boolean',\n 'timestamp': 'timestamp',\n 'decimal': 'decimal',\n}\n\n\ndef type_to_sql_string(tval):\n if isinstance(tval, dt.Decimal):\n return f'decimal({tval.precision}, {tval.scale})'\n name = tval.name.lower()\n try:\n return sql_type_names[name]\n except KeyError:\n raise com.UnsupportedBackendType(name)\n", "path": "ibis/backends/base/sql/registry/helpers.py"}]} | 1,070 | 430 |
gh_patches_debug_6083 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-361 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Question about evaluation metrics
Hello,
I use opennmt-tf to train models for different tasks, and I want to evaluate a model's effectiveness to know how long I should train it. But I'm not sure how to evaluate a sequence tagging model (or another sequence-to-sequence model) on validation data to measure accuracy, recall, precision, and F1-score. I have tried to train on a supported tagging scheme (BIOES) by adding
```
train:
tagging_scheme: BIOES
```
to the parameters in the YAML file, but I couldn't find where the additional evaluation metrics are computed. Could you point me to a tutorial for this?
Thanks.
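
For orientation, the additional metrics are computed in `SequenceTagger.compute_metrics` in the file below, but only when `self.tagging_scheme` is set on the model instance. The chunk-level flags behind the precision/recall metrics come from `flag_bioes_tags`, which can be exercised on its own — a minimal sketch, assuming `opennmt` and `numpy` are importable:
```python
import numpy as np
from opennmt.models.sequence_tagger import flag_bioes_tags

# The BIOES branch in compute_metrics() builds precision/recall/F1 from these
# chunk-level flags; it is only reached when self.tagging_scheme == "bioes".
gold = np.array([[b"B-LOC", b"E-LOC", b"O"]])
pred = np.array([[b"B-LOC", b"E-LOC", b"O"]])
gold_flags, pred_flags = flag_bioes_tags(gold, pred, sequence_length=np.array([3]))
print(gold_flags, pred_flags)  # one flag per chunk: here a true positive and a true negative
```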
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/models/sequence_tagger.py`
Content:
```
1 """Sequence tagger."""
2
3 import tensorflow as tf
4 import numpy as np
5
6 from opennmt import inputters
7 from opennmt.models.model import Model
8 from opennmt.utils.misc import print_bytes
9 from opennmt.utils.losses import cross_entropy_sequence_loss
10
11
12 class SequenceTagger(Model):
13 """A sequence tagger."""
14
15 def __init__(self,
16 inputter,
17 encoder,
18 labels_vocabulary_file_key,
19 tagging_scheme=None,
20 crf_decoding=False,
21 daisy_chain_variables=False,
22 name="seqtagger"):
23 """Initializes a sequence tagger.
24
25 Args:
26 inputter: A :class:`opennmt.inputters.inputter.Inputter` to process the
27 input data.
28 encoder: A :class:`opennmt.encoders.encoder.Encoder` to encode the input.
29 labels_vocabulary_file_key: The data configuration key of the labels
30 vocabulary file containing one label per line.
31 tagging_scheme: The tagging scheme used. For supported schemes (currently
32 only BIOES), additional evaluation metrics could be computed such as
33 precision, recall, etc.
34 crf_decoding: If ``True``, add a CRF layer after the encoder.
35 daisy_chain_variables: If ``True``, copy variables in a daisy chain
36 between devices for this model. Not compatible with RNN based models.
37 name: The name of this model.
38 """
39 super(SequenceTagger, self).__init__(
40 name,
41 features_inputter=inputter,
42 labels_inputter=TagsInputter(labels_vocabulary_file_key),
43 daisy_chain_variables=daisy_chain_variables)
44 self.encoder = encoder
45 self.crf_decoding = crf_decoding
46 self.transition_params = None
47 if tagging_scheme:
48 self.tagging_scheme = tagging_scheme.lower()
49 else:
50 self.tagging_scheme = None
51
52 def _call(self, features, labels, params, mode):
53 training = mode == tf.estimator.ModeKeys.TRAIN
54 length = self.features_inputter.get_length(features)
55
56 with tf.variable_scope("encoder"):
57 inputs = self.features_inputter.make_inputs(features, training=training)
58 encoder_outputs, _, encoder_sequence_length = self.encoder.encode(
59 inputs,
60 sequence_length=length,
61 mode=mode)
62
63 with tf.variable_scope("generator"):
64 logits = tf.layers.dense(
65 encoder_outputs,
66 self.labels_inputter.vocabulary_size)
67
68 num_tags = self.labels_inputter.vocabulary_size
69 self.transition_params = tf.get_variable("transitions", shape=[num_tags, num_tags])
70 if mode != tf.estimator.ModeKeys.TRAIN:
71 if self.crf_decoding:
72 tags_id, _ = tf.contrib.crf.crf_decode(
73 logits,
74 self.transition_params,
75 encoder_sequence_length)
76 tags_id = tf.cast(tags_id, tf.int64)
77 else:
78 tags_prob = tf.nn.softmax(logits)
79 tags_id = tf.argmax(tags_prob, axis=2)
80
81 labels_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()
82
83 # A tensor can not be both fed and fetched,
84 # so identity a new tensor of "length" for export model to predict
85 output_sequence_length = tf.identity(encoder_sequence_length)
86
87 predictions = {
88 "length": output_sequence_length,
89 "tags": labels_vocab_rev.lookup(tags_id)
90 }
91 else:
92 predictions = None
93
94 return logits, predictions
95
96 def compute_loss(self, outputs, labels, training=True, params=None):
97 if params is None:
98 params = {}
99 if self.crf_decoding:
100 log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
101 outputs,
102 tf.cast(labels["tags_id"], tf.int32),
103 labels["length"],
104 transition_params=self.transition_params)
105 loss = tf.reduce_sum(-log_likelihood)
106 loss_normalizer = tf.cast(tf.shape(log_likelihood)[0], loss.dtype)
107 return loss, loss_normalizer
108 else:
109 return cross_entropy_sequence_loss(
110 outputs,
111 labels["tags_id"],
112 labels["length"],
113 label_smoothing=params.get("label_smoothing", 0.0),
114 average_in_time=params.get("average_loss_in_time", False),
115 training=training)
116
117 def compute_metrics(self, predictions, labels):
118 weights = tf.sequence_mask(
119 labels["length"], maxlen=tf.shape(labels["tags"])[1], dtype=tf.float32)
120
121 eval_metric_ops = {}
122 eval_metric_ops["accuracy"] = tf.metrics.accuracy(
123 labels["tags"], predictions["tags"], weights=weights)
124
125 if self.tagging_scheme in ("bioes",):
126 flag_fn = None
127 if self.tagging_scheme == "bioes":
128 flag_fn = flag_bioes_tags
129
130 gold_flags, predicted_flags = tf.py_func(
131 flag_fn,
132 [labels["tags"], predictions["tags"], labels["length"]],
133 [tf.bool, tf.bool],
134 stateful=False)
135
136 precision_metric = tf.metrics.precision(gold_flags, predicted_flags)
137 recall_metric = tf.metrics.recall(gold_flags, predicted_flags)
138
139 precision = precision_metric[0]
140 recall = recall_metric[0]
141 f1 = (2 * precision * recall) / (recall + precision)
142
143 eval_metric_ops["precision"] = precision_metric
144 eval_metric_ops["recall"] = recall_metric
145 eval_metric_ops["f1"] = (f1, tf.no_op())
146
147 return eval_metric_ops
148
149 def print_prediction(self, prediction, params=None, stream=None):
150 tags = prediction["tags"][:prediction["length"]]
151 sent = b" ".join(tags)
152 print_bytes(sent, stream=stream)
153
154
155 class TagsInputter(inputters.TextInputter):
156 """Reading space-separated tags."""
157
158 def __init__(self, vocabulary_file_key):
159 super(TagsInputter, self).__init__(
160 vocabulary_file_key=vocabulary_file_key, num_oov_buckets=0)
161
162 def make_features(self, element=None, features=None, training=None):
163 features = super(TagsInputter, self).make_features(
164 element=element, features=features, training=training)
165 return {
166 "length": features["length"],
167 "tags": features["tokens"],
168 "tags_id": self.vocabulary.lookup(features["tokens"])
169 }
170
171
172 def flag_bioes_tags(gold, predicted, sequence_length=None):
173 """Flags chunk matches for the BIOES tagging scheme.
174
175 This function will produce the gold flags and the predicted flags. For each aligned
176 gold flag ``g`` and predicted flag ``p``:
177
178 * when ``g == p == True``, the chunk has been correctly identified (true positive).
179 * when ``g == False and p == True``, the chunk has been incorrectly identified (false positive).
180 * when ``g == True and p == False``, the chunk has been missed (false negative).
181 * when ``g == p == False``, the chunk has been correctly ignored (true negative).
182
183 Args:
184 gold: The gold tags as a Numpy 2D string array.
185 predicted: The predicted tags as a Numpy 2D string array.
186 sequence_length: The length of each sequence as Numpy array.
187
188 Returns:
189 A tuple ``(gold_flags, predicted_flags)``.
190 """
191 gold_flags = []
192 predicted_flags = []
193
194 def _add_true_positive():
195 gold_flags.append(True)
196 predicted_flags.append(True)
197 def _add_false_positive():
198 gold_flags.append(False)
199 predicted_flags.append(True)
200 def _add_true_negative():
201 gold_flags.append(False)
202 predicted_flags.append(False)
203 def _add_false_negative():
204 gold_flags.append(True)
205 predicted_flags.append(False)
206
207 def _match(ref, hyp, index, length):
208 if ref[index].startswith(b"B"):
209 match = True
210 while index < length and not ref[index].startswith(b"E"):
211 if ref[index] != hyp[index]:
212 match = False
213 index += 1
214 match = match and index < length and ref[index] == hyp[index]
215 return match, index
216 return ref[index] == hyp[index], index
217
218 for b in range(gold.shape[0]):
219 length = sequence_length[b] if sequence_length is not None else gold.shape[1]
220
221 # First pass to detect true positives and true/false negatives.
222 index = 0
223 while index < length:
224 gold_tag = gold[b][index]
225 match, index = _match(gold[b], predicted[b], index, length)
226 if match:
227 if gold_tag == b"O":
228 _add_true_negative()
229 else:
230 _add_true_positive()
231 else:
232 if gold_tag != b"O":
233 _add_false_negative()
234 index += 1
235
236 # Second pass to detect false postives.
237 index = 0
238 while index < length:
239 pred_tag = predicted[b][index]
240 match, index = _match(predicted[b], gold[b], index, length)
241 if not match and pred_tag != b"O":
242 _add_false_positive()
243 index += 1
244
245 return np.array(gold_flags), np.array(predicted_flags)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opennmt/models/sequence_tagger.py b/opennmt/models/sequence_tagger.py
--- a/opennmt/models/sequence_tagger.py
+++ b/opennmt/models/sequence_tagger.py
@@ -49,6 +49,10 @@
else:
self.tagging_scheme = None
+ def initialize(self, metadata):
+ self.tagging_scheme = metadata.get("tagging_scheme", self.tagging_scheme)
+ super(SequenceTagger, self).initialize(metadata)
+
def _call(self, features, labels, params, mode):
training = mode == tf.estimator.ModeKeys.TRAIN
length = self.features_inputter.get_length(features)
| {"golden_diff": "diff --git a/opennmt/models/sequence_tagger.py b/opennmt/models/sequence_tagger.py\n--- a/opennmt/models/sequence_tagger.py\n+++ b/opennmt/models/sequence_tagger.py\n@@ -49,6 +49,10 @@\n else:\n self.tagging_scheme = None\n \n+ def initialize(self, metadata):\n+ self.tagging_scheme = metadata.get(\"tagging_scheme\", self.tagging_scheme)\n+ super(SequenceTagger, self).initialize(metadata)\n+\n def _call(self, features, labels, params, mode):\n training = mode == tf.estimator.ModeKeys.TRAIN\n length = self.features_inputter.get_length(features)\n", "issue": "Question about evaluation metrics\nHello,\r\nI use opennmt-tf to train models for different tasks, and I want to evaluate the effectiveness of a model to know how long should I take for training. But I\u2019m not sure in how to evaluate sequence tagging model or other sequence to sequence model using validation data to measure accuracy, recall, precision, and F1-score. I have tried to train on a supported tagging scheme (BIOES) by adding \r\n```\r\ntrain: \r\n tagging_scheme: BIOES\r\n```\r\nto parameters in YMAL file, but I could\u2019t find where the additional evaluation metrics are computed. Could you provide some tutorial for me?\r\n\r\nThanks.\n", "before_files": [{"content": "\"\"\"Sequence tagger.\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom opennmt import inputters\nfrom opennmt.models.model import Model\nfrom opennmt.utils.misc import print_bytes\nfrom opennmt.utils.losses import cross_entropy_sequence_loss\n\n\nclass SequenceTagger(Model):\n \"\"\"A sequence tagger.\"\"\"\n\n def __init__(self,\n inputter,\n encoder,\n labels_vocabulary_file_key,\n tagging_scheme=None,\n crf_decoding=False,\n daisy_chain_variables=False,\n name=\"seqtagger\"):\n \"\"\"Initializes a sequence tagger.\n\n Args:\n inputter: A :class:`opennmt.inputters.inputter.Inputter` to process the\n input data.\n encoder: A :class:`opennmt.encoders.encoder.Encoder` to encode the input.\n labels_vocabulary_file_key: The data configuration key of the labels\n vocabulary file containing one label per line.\n tagging_scheme: The tagging scheme used. For supported schemes (currently\n only BIOES), additional evaluation metrics could be computed such as\n precision, recall, etc.\n crf_decoding: If ``True``, add a CRF layer after the encoder.\n daisy_chain_variables: If ``True``, copy variables in a daisy chain\n between devices for this model. 
Not compatible with RNN based models.\n name: The name of this model.\n \"\"\"\n super(SequenceTagger, self).__init__(\n name,\n features_inputter=inputter,\n labels_inputter=TagsInputter(labels_vocabulary_file_key),\n daisy_chain_variables=daisy_chain_variables)\n self.encoder = encoder\n self.crf_decoding = crf_decoding\n self.transition_params = None\n if tagging_scheme:\n self.tagging_scheme = tagging_scheme.lower()\n else:\n self.tagging_scheme = None\n\n def _call(self, features, labels, params, mode):\n training = mode == tf.estimator.ModeKeys.TRAIN\n length = self.features_inputter.get_length(features)\n\n with tf.variable_scope(\"encoder\"):\n inputs = self.features_inputter.make_inputs(features, training=training)\n encoder_outputs, _, encoder_sequence_length = self.encoder.encode(\n inputs,\n sequence_length=length,\n mode=mode)\n\n with tf.variable_scope(\"generator\"):\n logits = tf.layers.dense(\n encoder_outputs,\n self.labels_inputter.vocabulary_size)\n\n num_tags = self.labels_inputter.vocabulary_size\n self.transition_params = tf.get_variable(\"transitions\", shape=[num_tags, num_tags])\n if mode != tf.estimator.ModeKeys.TRAIN:\n if self.crf_decoding:\n tags_id, _ = tf.contrib.crf.crf_decode(\n logits,\n self.transition_params,\n encoder_sequence_length)\n tags_id = tf.cast(tags_id, tf.int64)\n else:\n tags_prob = tf.nn.softmax(logits)\n tags_id = tf.argmax(tags_prob, axis=2)\n\n labels_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()\n\n # A tensor can not be both fed and fetched,\n # so identity a new tensor of \"length\" for export model to predict\n output_sequence_length = tf.identity(encoder_sequence_length)\n\n predictions = {\n \"length\": output_sequence_length,\n \"tags\": labels_vocab_rev.lookup(tags_id)\n }\n else:\n predictions = None\n\n return logits, predictions\n\n def compute_loss(self, outputs, labels, training=True, params=None):\n if params is None:\n params = {}\n if self.crf_decoding:\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(\n outputs,\n tf.cast(labels[\"tags_id\"], tf.int32),\n labels[\"length\"],\n transition_params=self.transition_params)\n loss = tf.reduce_sum(-log_likelihood)\n loss_normalizer = tf.cast(tf.shape(log_likelihood)[0], loss.dtype)\n return loss, loss_normalizer\n else:\n return cross_entropy_sequence_loss(\n outputs,\n labels[\"tags_id\"],\n labels[\"length\"],\n label_smoothing=params.get(\"label_smoothing\", 0.0),\n average_in_time=params.get(\"average_loss_in_time\", False),\n training=training)\n\n def compute_metrics(self, predictions, labels):\n weights = tf.sequence_mask(\n labels[\"length\"], maxlen=tf.shape(labels[\"tags\"])[1], dtype=tf.float32)\n\n eval_metric_ops = {}\n eval_metric_ops[\"accuracy\"] = tf.metrics.accuracy(\n labels[\"tags\"], predictions[\"tags\"], weights=weights)\n\n if self.tagging_scheme in (\"bioes\",):\n flag_fn = None\n if self.tagging_scheme == \"bioes\":\n flag_fn = flag_bioes_tags\n\n gold_flags, predicted_flags = tf.py_func(\n flag_fn,\n [labels[\"tags\"], predictions[\"tags\"], labels[\"length\"]],\n [tf.bool, tf.bool],\n stateful=False)\n\n precision_metric = tf.metrics.precision(gold_flags, predicted_flags)\n recall_metric = tf.metrics.recall(gold_flags, predicted_flags)\n\n precision = precision_metric[0]\n recall = recall_metric[0]\n f1 = (2 * precision * recall) / (recall + precision)\n\n eval_metric_ops[\"precision\"] = precision_metric\n eval_metric_ops[\"recall\"] = recall_metric\n eval_metric_ops[\"f1\"] = (f1, tf.no_op())\n\n return eval_metric_ops\n\n 
def print_prediction(self, prediction, params=None, stream=None):\n tags = prediction[\"tags\"][:prediction[\"length\"]]\n sent = b\" \".join(tags)\n print_bytes(sent, stream=stream)\n\n\nclass TagsInputter(inputters.TextInputter):\n \"\"\"Reading space-separated tags.\"\"\"\n\n def __init__(self, vocabulary_file_key):\n super(TagsInputter, self).__init__(\n vocabulary_file_key=vocabulary_file_key, num_oov_buckets=0)\n\n def make_features(self, element=None, features=None, training=None):\n features = super(TagsInputter, self).make_features(\n element=element, features=features, training=training)\n return {\n \"length\": features[\"length\"],\n \"tags\": features[\"tokens\"],\n \"tags_id\": self.vocabulary.lookup(features[\"tokens\"])\n }\n\n\ndef flag_bioes_tags(gold, predicted, sequence_length=None):\n \"\"\"Flags chunk matches for the BIOES tagging scheme.\n\n This function will produce the gold flags and the predicted flags. For each aligned\n gold flag ``g`` and predicted flag ``p``:\n\n * when ``g == p == True``, the chunk has been correctly identified (true positive).\n * when ``g == False and p == True``, the chunk has been incorrectly identified (false positive).\n * when ``g == True and p == False``, the chunk has been missed (false negative).\n * when ``g == p == False``, the chunk has been correctly ignored (true negative).\n\n Args:\n gold: The gold tags as a Numpy 2D string array.\n predicted: The predicted tags as a Numpy 2D string array.\n sequence_length: The length of each sequence as Numpy array.\n\n Returns:\n A tuple ``(gold_flags, predicted_flags)``.\n \"\"\"\n gold_flags = []\n predicted_flags = []\n\n def _add_true_positive():\n gold_flags.append(True)\n predicted_flags.append(True)\n def _add_false_positive():\n gold_flags.append(False)\n predicted_flags.append(True)\n def _add_true_negative():\n gold_flags.append(False)\n predicted_flags.append(False)\n def _add_false_negative():\n gold_flags.append(True)\n predicted_flags.append(False)\n\n def _match(ref, hyp, index, length):\n if ref[index].startswith(b\"B\"):\n match = True\n while index < length and not ref[index].startswith(b\"E\"):\n if ref[index] != hyp[index]:\n match = False\n index += 1\n match = match and index < length and ref[index] == hyp[index]\n return match, index\n return ref[index] == hyp[index], index\n\n for b in range(gold.shape[0]):\n length = sequence_length[b] if sequence_length is not None else gold.shape[1]\n\n # First pass to detect true positives and true/false negatives.\n index = 0\n while index < length:\n gold_tag = gold[b][index]\n match, index = _match(gold[b], predicted[b], index, length)\n if match:\n if gold_tag == b\"O\":\n _add_true_negative()\n else:\n _add_true_positive()\n else:\n if gold_tag != b\"O\":\n _add_false_negative()\n index += 1\n\n # Second pass to detect false postives.\n index = 0\n while index < length:\n pred_tag = predicted[b][index]\n match, index = _match(predicted[b], gold[b], index, length)\n if not match and pred_tag != b\"O\":\n _add_false_positive()\n index += 1\n\n return np.array(gold_flags), np.array(predicted_flags)\n", "path": "opennmt/models/sequence_tagger.py"}], "after_files": [{"content": "\"\"\"Sequence tagger.\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom opennmt import inputters\nfrom opennmt.models.model import Model\nfrom opennmt.utils.misc import print_bytes\nfrom opennmt.utils.losses import cross_entropy_sequence_loss\n\n\nclass SequenceTagger(Model):\n \"\"\"A sequence tagger.\"\"\"\n\n def __init__(self,\n 
inputter,\n encoder,\n labels_vocabulary_file_key,\n tagging_scheme=None,\n crf_decoding=False,\n daisy_chain_variables=False,\n name=\"seqtagger\"):\n \"\"\"Initializes a sequence tagger.\n\n Args:\n inputter: A :class:`opennmt.inputters.inputter.Inputter` to process the\n input data.\n encoder: A :class:`opennmt.encoders.encoder.Encoder` to encode the input.\n labels_vocabulary_file_key: The data configuration key of the labels\n vocabulary file containing one label per line.\n tagging_scheme: The tagging scheme used. For supported schemes (currently\n only BIOES), additional evaluation metrics could be computed such as\n precision, recall, etc.\n crf_decoding: If ``True``, add a CRF layer after the encoder.\n daisy_chain_variables: If ``True``, copy variables in a daisy chain\n between devices for this model. Not compatible with RNN based models.\n name: The name of this model.\n \"\"\"\n super(SequenceTagger, self).__init__(\n name,\n features_inputter=inputter,\n labels_inputter=TagsInputter(labels_vocabulary_file_key),\n daisy_chain_variables=daisy_chain_variables)\n self.encoder = encoder\n self.crf_decoding = crf_decoding\n self.transition_params = None\n if tagging_scheme:\n self.tagging_scheme = tagging_scheme.lower()\n else:\n self.tagging_scheme = None\n\n def initialize(self, metadata):\n self.tagging_scheme = metadata.get(\"tagging_scheme\", self.tagging_scheme)\n super(SequenceTagger, self).initialize(metadata)\n\n def _call(self, features, labels, params, mode):\n training = mode == tf.estimator.ModeKeys.TRAIN\n length = self.features_inputter.get_length(features)\n\n with tf.variable_scope(\"encoder\"):\n inputs = self.features_inputter.make_inputs(features, training=training)\n encoder_outputs, _, encoder_sequence_length = self.encoder.encode(\n inputs,\n sequence_length=length,\n mode=mode)\n\n with tf.variable_scope(\"generator\"):\n logits = tf.layers.dense(\n encoder_outputs,\n self.labels_inputter.vocabulary_size)\n\n num_tags = self.labels_inputter.vocabulary_size\n self.transition_params = tf.get_variable(\"transitions\", shape=[num_tags, num_tags])\n if mode != tf.estimator.ModeKeys.TRAIN:\n if self.crf_decoding:\n tags_id, _ = tf.contrib.crf.crf_decode(\n logits,\n self.transition_params,\n encoder_sequence_length)\n tags_id = tf.cast(tags_id, tf.int64)\n else:\n tags_prob = tf.nn.softmax(logits)\n tags_id = tf.argmax(tags_prob, axis=2)\n\n labels_vocab_rev = self.labels_inputter.vocabulary_lookup_reverse()\n\n # A tensor can not be both fed and fetched,\n # so identity a new tensor of \"length\" for export model to predict\n output_sequence_length = tf.identity(encoder_sequence_length)\n\n predictions = {\n \"length\": output_sequence_length,\n \"tags\": labels_vocab_rev.lookup(tags_id)\n }\n else:\n predictions = None\n\n return logits, predictions\n\n def compute_loss(self, outputs, labels, training=True, params=None):\n if params is None:\n params = {}\n if self.crf_decoding:\n log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(\n outputs,\n tf.cast(labels[\"tags_id\"], tf.int32),\n labels[\"length\"],\n transition_params=self.transition_params)\n loss = tf.reduce_sum(-log_likelihood)\n loss_normalizer = tf.cast(tf.shape(log_likelihood)[0], loss.dtype)\n return loss, loss_normalizer\n else:\n return cross_entropy_sequence_loss(\n outputs,\n labels[\"tags_id\"],\n labels[\"length\"],\n label_smoothing=params.get(\"label_smoothing\", 0.0),\n average_in_time=params.get(\"average_loss_in_time\", False),\n training=training)\n\n def compute_metrics(self, 
predictions, labels):\n weights = tf.sequence_mask(\n labels[\"length\"], maxlen=tf.shape(labels[\"tags\"])[1], dtype=tf.float32)\n\n eval_metric_ops = {}\n eval_metric_ops[\"accuracy\"] = tf.metrics.accuracy(\n labels[\"tags\"], predictions[\"tags\"], weights=weights)\n\n if self.tagging_scheme in (\"bioes\",):\n flag_fn = None\n if self.tagging_scheme == \"bioes\":\n flag_fn = flag_bioes_tags\n\n gold_flags, predicted_flags = tf.py_func(\n flag_fn,\n [labels[\"tags\"], predictions[\"tags\"], labels[\"length\"]],\n [tf.bool, tf.bool],\n stateful=False)\n\n precision_metric = tf.metrics.precision(gold_flags, predicted_flags)\n recall_metric = tf.metrics.recall(gold_flags, predicted_flags)\n\n precision = precision_metric[0]\n recall = recall_metric[0]\n f1 = (2 * precision * recall) / (recall + precision)\n\n eval_metric_ops[\"precision\"] = precision_metric\n eval_metric_ops[\"recall\"] = recall_metric\n eval_metric_ops[\"f1\"] = (f1, tf.no_op())\n\n return eval_metric_ops\n\n def print_prediction(self, prediction, params=None, stream=None):\n tags = prediction[\"tags\"][:prediction[\"length\"]]\n sent = b\" \".join(tags)\n print_bytes(sent, stream=stream)\n\n\nclass TagsInputter(inputters.TextInputter):\n \"\"\"Reading space-separated tags.\"\"\"\n\n def __init__(self, vocabulary_file_key):\n super(TagsInputter, self).__init__(\n vocabulary_file_key=vocabulary_file_key, num_oov_buckets=0)\n\n def make_features(self, element=None, features=None, training=None):\n features = super(TagsInputter, self).make_features(\n element=element, features=features, training=training)\n return {\n \"length\": features[\"length\"],\n \"tags\": features[\"tokens\"],\n \"tags_id\": self.vocabulary.lookup(features[\"tokens\"])\n }\n\n\ndef flag_bioes_tags(gold, predicted, sequence_length=None):\n \"\"\"Flags chunk matches for the BIOES tagging scheme.\n\n This function will produce the gold flags and the predicted flags. 
For each aligned\n gold flag ``g`` and predicted flag ``p``:\n\n * when ``g == p == True``, the chunk has been correctly identified (true positive).\n * when ``g == False and p == True``, the chunk has been incorrectly identified (false positive).\n * when ``g == True and p == False``, the chunk has been missed (false negative).\n * when ``g == p == False``, the chunk has been correctly ignored (true negative).\n\n Args:\n gold: The gold tags as a Numpy 2D string array.\n predicted: The predicted tags as a Numpy 2D string array.\n sequence_length: The length of each sequence as Numpy array.\n\n Returns:\n A tuple ``(gold_flags, predicted_flags)``.\n \"\"\"\n gold_flags = []\n predicted_flags = []\n\n def _add_true_positive():\n gold_flags.append(True)\n predicted_flags.append(True)\n def _add_false_positive():\n gold_flags.append(False)\n predicted_flags.append(True)\n def _add_true_negative():\n gold_flags.append(False)\n predicted_flags.append(False)\n def _add_false_negative():\n gold_flags.append(True)\n predicted_flags.append(False)\n\n def _match(ref, hyp, index, length):\n if ref[index].startswith(b\"B\"):\n match = True\n while index < length and not ref[index].startswith(b\"E\"):\n if ref[index] != hyp[index]:\n match = False\n index += 1\n match = match and index < length and ref[index] == hyp[index]\n return match, index\n return ref[index] == hyp[index], index\n\n for b in range(gold.shape[0]):\n length = sequence_length[b] if sequence_length is not None else gold.shape[1]\n\n # First pass to detect true positives and true/false negatives.\n index = 0\n while index < length:\n gold_tag = gold[b][index]\n match, index = _match(gold[b], predicted[b], index, length)\n if match:\n if gold_tag == b\"O\":\n _add_true_negative()\n else:\n _add_true_positive()\n else:\n if gold_tag != b\"O\":\n _add_false_negative()\n index += 1\n\n # Second pass to detect false postives.\n index = 0\n while index < length:\n pred_tag = predicted[b][index]\n match, index = _match(predicted[b], gold[b], index, length)\n if not match and pred_tag != b\"O\":\n _add_false_positive()\n index += 1\n\n return np.array(gold_flags), np.array(predicted_flags)\n", "path": "opennmt/models/sequence_tagger.py"}]} | 2,996 | 151 |
gh_patches_debug_5310 | rasdani/github-patches | git_diff | pantsbuild__pants-6499 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow `compatibility` attribute to be passed through to generated python targets
To do this:
- Override `_copy_target_attributes` in `ApacheThriftPyGen` to include 'compatibility', so it looks like this (a variant is also sketched after this list):
```
@property
def _copy_target_attributes(self):
"""Propagate these attributes to the synthetic python_library() target."""
return ['provides', 'tags', 'scope', 'compatibility']
```
- See https://github.com/pantsbuild/pants/blob/039051735542d29ae02f4faa09c0c51c47292bf0/contrib/jax_ws/src/python/pants/contrib/jax_ws/tasks/jax_ws_gen.py#L90 as an example
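
A minimal sketch of that override (it assumes the inherited `_copy_target_attributes` property returns a list, as the snippet above and the jax_ws example suggest); extending the parent's value avoids hard-coding attributes the base task already propagates:
```python
from pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase


class ApacheThriftPyGen(ApacheThriftGenBase):
    # Only the added property is shown; the rest of the class stays as in the file below.
    @property
    def _copy_target_attributes(self):
        # Propagate 'compatibility' to the synthetic python_library() target,
        # on top of whatever the base class already copies.
        return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']
```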
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import open
9
10 from pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase
11 from pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary
12 from pants.backend.python.targets.python_library import PythonLibrary
13 from pants.util.dirutil import safe_delete, safe_walk
14
15
16 class ApacheThriftPyGen(ApacheThriftGenBase):
17 """Generate Python source files from thrift IDL files."""
18 gentarget_type = PythonThriftLibrary
19 thrift_generator = 'py'
20 default_gen_options_map = {
21 'new_style': None
22 }
23
24 sources_globs = ('**/*',)
25
26 def synthetic_target_type(self, target):
27 return PythonLibrary
28
29 def execute_codegen(self, target, target_workdir):
30 super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)
31
32 # Thrift generates code with all parent namespaces with empty __init__.py's. Since pants allows
33 # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert
34 # namespace packages to allow for consumption of 2 or more of these packages in the same
35 # PYTHONPATH.
36 for root, _, files in safe_walk(target_workdir):
37 if '__init__.py' not in files: # skip non-packages
38 continue
39
40 init_py_abspath = os.path.join(root, '__init__.py')
41
42 # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,
43 # and it confuses some import mechanisms).
44 if root == target_workdir:
45 safe_delete(init_py_abspath)
46 elif os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package
47 with open(init_py_abspath, 'wb') as f:
48 f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
49 else:
50 # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we
51 # leave as-is.
52 pass
53
54 def ignore_dup(self, tgt1, tgt2, rel_src):
55 # Thrift generates all the intermediate __init__.py files, and they shouldn't
56 # count as dups.
57 return os.path.basename(rel_src) == '__init__.py'
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
--- a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
+++ b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py
@@ -51,6 +51,10 @@
# leave as-is.
pass
+ @property
+ def _copy_target_attributes(self):
+ return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']
+
def ignore_dup(self, tgt1, tgt2, rel_src):
# Thrift generates all the intermediate __init__.py files, and they shouldn't
# count as dups.
| {"golden_diff": "diff --git a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n--- a/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n+++ b/src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py\n@@ -51,6 +51,10 @@\n # leave as-is.\n pass\n \n+ @property\n+ def _copy_target_attributes(self):\n+ return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']\n+\n def ignore_dup(self, tgt1, tgt2, rel_src):\n # Thrift generates all the intermediate __init__.py files, and they shouldn't\n # count as dups.\n", "issue": "Allow `compatibility` attribute to be passed through to generated python targets\nTo do this:\r\n\r\n- Override ` _copy_target_attributes` in `ApacheThriftPyGen` to include 'compatibility', so it looks like this:\r\n\r\n```\r\n @property\r\n def _copy_target_attributes(self):\r\n \"\"\"Propagate these attributes to the synthetic python_library() target.\"\"\"\r\n return ['provides', 'tags', 'scope', 'compatibility']\r\n```\r\n\r\n- See https://github.com/pantsbuild/pants/blob/039051735542d29ae02f4faa09c0c51c47292bf0/contrib/jax_ws/src/python/pants/contrib/jax_ws/tasks/jax_ws_gen.py#L90 as an example\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import open\n\nfrom pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase\nfrom pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary\nfrom pants.backend.python.targets.python_library import PythonLibrary\nfrom pants.util.dirutil import safe_delete, safe_walk\n\n\nclass ApacheThriftPyGen(ApacheThriftGenBase):\n \"\"\"Generate Python source files from thrift IDL files.\"\"\"\n gentarget_type = PythonThriftLibrary\n thrift_generator = 'py'\n default_gen_options_map = {\n 'new_style': None\n }\n\n sources_globs = ('**/*',)\n\n def synthetic_target_type(self, target):\n return PythonLibrary\n\n def execute_codegen(self, target, target_workdir):\n super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)\n\n # Thrift generates code with all parent namespaces with empty __init__.py's. 
Since pants allows\n # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert\n # namespace packages to allow for consumption of 2 or more of these packages in the same\n # PYTHONPATH.\n for root, _, files in safe_walk(target_workdir):\n if '__init__.py' not in files: # skip non-packages\n continue\n\n init_py_abspath = os.path.join(root, '__init__.py')\n\n # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,\n # and it confuses some import mechanisms).\n if root == target_workdir:\n safe_delete(init_py_abspath)\n elif os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package\n with open(init_py_abspath, 'wb') as f:\n f.write(b\"__import__('pkg_resources').declare_namespace(__name__)\")\n else:\n # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we\n # leave as-is.\n pass\n\n def ignore_dup(self, tgt1, tgt2, rel_src):\n # Thrift generates all the intermediate __init__.py files, and they shouldn't\n # count as dups.\n return os.path.basename(rel_src) == '__init__.py'\n", "path": "src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import open\n\nfrom pants.backend.codegen.thrift.lib.apache_thrift_gen_base import ApacheThriftGenBase\nfrom pants.backend.codegen.thrift.python.python_thrift_library import PythonThriftLibrary\nfrom pants.backend.python.targets.python_library import PythonLibrary\nfrom pants.util.dirutil import safe_delete, safe_walk\n\n\nclass ApacheThriftPyGen(ApacheThriftGenBase):\n \"\"\"Generate Python source files from thrift IDL files.\"\"\"\n gentarget_type = PythonThriftLibrary\n thrift_generator = 'py'\n default_gen_options_map = {\n 'new_style': None\n }\n\n sources_globs = ('**/*',)\n\n def synthetic_target_type(self, target):\n return PythonLibrary\n\n def execute_codegen(self, target, target_workdir):\n super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)\n\n # Thrift generates code with all parent namespaces with empty __init__.py's. 
Since pants allows\n # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert\n # namespace packages to allow for consumption of 2 or more of these packages in the same\n # PYTHONPATH.\n for root, _, files in safe_walk(target_workdir):\n if '__init__.py' not in files: # skip non-packages\n continue\n\n init_py_abspath = os.path.join(root, '__init__.py')\n\n # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,\n # and it confuses some import mechanisms).\n if root == target_workdir:\n safe_delete(init_py_abspath)\n elif os.path.getsize(init_py_abspath) == 0: # empty __init__, translate to namespace package\n with open(init_py_abspath, 'wb') as f:\n f.write(b\"__import__('pkg_resources').declare_namespace(__name__)\")\n else:\n # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we\n # leave as-is.\n pass\n\n @property\n def _copy_target_attributes(self):\n return super(ApacheThriftPyGen, self)._copy_target_attributes + ['compatibility']\n\n def ignore_dup(self, tgt1, tgt2, rel_src):\n # Thrift generates all the intermediate __init__.py files, and they shouldn't\n # count as dups.\n return os.path.basename(rel_src) == '__init__.py'\n", "path": "src/python/pants/backend/codegen/thrift/python/apache_thrift_py_gen.py"}]} | 1,114 | 176 |
gh_patches_debug_1357 | rasdani/github-patches | git_diff | aws__aws-cli-4334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken docutils==0.15
Hi community,
Today docutils was updated to 0.15 (https://pypi.org/project/docutils/#history), and it breaks awscli running on Python 2.
```
# aws --version
Traceback (most recent call last):
File "/bin/aws", line 19, in <module>
import awscli.clidriver
File "/usr/lib/python2.7/site-packages/awscli/clidriver.py", line 36, in <module>
from awscli.help import ProviderHelpCommand
File "/usr/lib/python2.7/site-packages/awscli/help.py", line 20, in <module>
from docutils.core import publish_string
File "/usr/lib/python2.7/site-packages/docutils/core.py", line 246
print('\n::: Runtime settings:', file=self._stderr)
^
SyntaxError: invalid syntax
```
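
One possible mitigation — a sketch only, based on the traceback above rather than any documented compatibility statement — is to cap the docutils requirement below 0.15 in the `requires` list of the `setup.py` shown below:
```python
# Sketch of the capped dependency list (mirrors the file below, with an added upper bound).
requires = ['botocore==1.12.191',
            'colorama>=0.2.5,<=0.3.9',
            'docutils>=0.10,<0.15',  # 0.15 fails to import on Python 2 (see traceback above)
            'rsa>=3.1.2,<=3.5.0',
            's3transfer>=0.2.0,<0.3.0']
```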
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 import os.path
4 import re
5 import sys
6
7 from setuptools import setup, find_packages
8
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12
13 def read(*parts):
14 return codecs.open(os.path.join(here, *parts), 'r').read()
15
16
17 def find_version(*file_paths):
18 version_file = read(*file_paths)
19 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
20 version_file, re.M)
21 if version_match:
22 return version_match.group(1)
23 raise RuntimeError("Unable to find version string.")
24
25
26 requires = ['botocore==1.12.191',
27 'colorama>=0.2.5,<=0.3.9',
28 'docutils>=0.10',
29 'rsa>=3.1.2,<=3.5.0',
30 's3transfer>=0.2.0,<0.3.0']
31
32
33 if sys.version_info[:2] == (2, 6):
34 # For python2.6 we have to require argparse since it
35 # was not in stdlib until 2.7.
36 requires.append('argparse>=1.1')
37
38 # For Python 2.6, we have to require a different verion of PyYAML since the latest
39 # versions dropped support for Python 2.6.
40 requires.append('PyYAML>=3.10,<=3.13')
41 else:
42 requires.append('PyYAML>=3.10,<=5.1')
43
44
45 setup_options = dict(
46 name='awscli',
47 version=find_version("awscli", "__init__.py"),
48 description='Universal Command Line Environment for AWS.',
49 long_description=read('README.rst'),
50 author='Amazon Web Services',
51 url='http://aws.amazon.com/cli/',
52 scripts=['bin/aws', 'bin/aws.cmd',
53 'bin/aws_completer', 'bin/aws_zsh_completer.sh',
54 'bin/aws_bash_completer'],
55 packages=find_packages(exclude=['tests*']),
56 package_data={'awscli': ['data/*.json', 'examples/*/*.rst',
57 'examples/*/*.txt', 'examples/*/*/*.txt',
58 'examples/*/*/*.rst', 'topics/*.rst',
59 'topics/*.json']},
60 install_requires=requires,
61 extras_require={
62 ':python_version=="2.6"': [
63 'argparse>=1.1',
64 ]
65 },
66 license="Apache License 2.0",
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Intended Audience :: Developers',
70 'Intended Audience :: System Administrators',
71 'Natural Language :: English',
72 'License :: OSI Approved :: Apache Software License',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 2',
75 'Programming Language :: Python :: 2.6',
76 'Programming Language :: Python :: 2.7',
77 'Programming Language :: Python :: 3',
78 'Programming Language :: Python :: 3.3',
79 'Programming Language :: Python :: 3.4',
80 'Programming Language :: Python :: 3.5',
81 'Programming Language :: Python :: 3.6',
82 'Programming Language :: Python :: 3.7',
83 ],
84 )
85
86 if 'py2exe' in sys.argv:
87 # This will actually give us a py2exe command.
88 import py2exe
89 # And we have some py2exe specific options.
90 setup_options['options'] = {
91 'py2exe': {
92 'optimize': 0,
93 'skip_archive': True,
94 'dll_excludes': ['crypt32.dll'],
95 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',
96 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],
97 }
98 }
99 setup_options['console'] = ['bin/aws']
100
101
102 setup(**setup_options)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@
requires = ['botocore==1.12.191',
'colorama>=0.2.5,<=0.3.9',
- 'docutils>=0.10',
+ 'docutils>=0.10,<0.15',
'rsa>=3.1.2,<=3.5.0',
's3transfer>=0.2.0,<0.3.0']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n \n requires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n- 'docutils>=0.10',\n+ 'docutils>=0.10,<0.15',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n", "issue": "Broken docutils==0.15\nHi community,\r\n\r\nToday docutils were updated to 0.15 (https://pypi.org/project/docutils/#history) and it breaks awscli running on Python 2.\r\n\r\n```\r\n# aws --version\r\nTraceback (most recent call last):\r\n File \"/bin/aws\", line 19, in <module>\r\n import awscli.clidriver\r\n File \"/usr/lib/python2.7/site-packages/awscli/clidriver.py\", line 36, in <module>\r\n from awscli.help import ProviderHelpCommand\r\n File \"/usr/lib/python2.7/site-packages/awscli/help.py\", line 20, in <module>\r\n from docutils.core import publish_string\r\n File \"/usr/lib/python2.7/site-packages/docutils/core.py\", line 246\r\n print('\\n::: Runtime settings:', file=self._stderr)\r\n ^\r\nSyntaxError: invalid syntax\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in 
sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nimport os.path\nimport re\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n return codecs.open(os.path.join(here, *parts), 'r').read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = ['botocore==1.12.191',\n 'colorama>=0.2.5,<=0.3.9',\n 'docutils>=0.10,<0.15',\n 'rsa>=3.1.2,<=3.5.0',\n 's3transfer>=0.2.0,<0.3.0']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have to require argparse since it\n # was not in stdlib until 2.7.\n requires.append('argparse>=1.1')\n\n # For Python 2.6, we have to require a different verion of PyYAML since the latest\n # versions dropped support for Python 2.6.\n requires.append('PyYAML>=3.10,<=3.13')\nelse:\n requires.append('PyYAML>=3.10,<=5.1')\n\n\nsetup_options = dict(\n name='awscli',\n version=find_version(\"awscli\", \"__init__.py\"),\n description='Universal Command Line Environment for AWS.',\n long_description=read('README.rst'),\n author='Amazon Web Services',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh',\n 'bin/aws_bash_completer'],\n packages=find_packages(exclude=['tests*']),\n package_data={'awscli': ['data/*.json', 'examples/*/*.rst',\n 'examples/*/*.txt', 'examples/*/*/*.txt',\n 'examples/*/*/*.rst', 'topics/*.rst',\n 'topics/*.json']},\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'argparse>=1.1',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'dll_excludes': ['crypt32.dll'],\n 'packages': ['docutils', 'urllib', 'httplib', 'HTMLParser',\n 'awscli', 'ConfigParser', 'xml.etree', 'pipes'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]} | 1,521 | 129 |
gh_patches_debug_16828 | rasdani/github-patches | git_diff | streamlit__streamlit-4071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't call st.write() on a class that has _repr_html_
### Summary
When you call `st.write()` on any **class** that has `_repr_html_`, it throws an error.
(But you can call `st.write` on _instances_ of those classes just fine)
### Steps to reproduce
Code snippet:
```
import streamlit as st
import pandas as pd
x = pd.DataFrame([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
"## st.text"
st.text(type(x)) # This works.
"## st.write"
st.write(type(x)) # This doesn't work.
```
**Expected behavior:**
`st.text` and `st.write` print the exact same thing.
**Actual behavior:**
You get this beautiful error:

Why this fails: `st.write` uses `_repr_html_()` when it's available. `DataFrame` contains that method, but it requires a `self` to be passed. So when you call it directly as `DataFrame._repr_html_()` there's no `self` and we get an exception.
**Solution:**
When `st.write()` receives a class as input we should just call `st.text()` on it.
You can use `inspect.isclass` to check for this.
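A standalone sketch of that check (illustrative only, not the actual Streamlit patch); note that `hasattr(pd.DataFrame, "_repr_html_")` is true for the class itself, which is why the `inspect.isclass` test has to come first:
```python
import inspect

import pandas as pd

for obj in (pd.DataFrame, pd.DataFrame({"a": [1, 2]})):
    if inspect.isclass(obj):
        # A class: fall back to plain text instead of calling an unbound _repr_html_().
        print("st.text would show:", obj)
    elif hasattr(obj, "_repr_html_"):
        # An instance: the HTML repr is safe to render.
        print("st.markdown would render the HTML repr of a", type(obj).__name__)
```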
### Is this a regression?
Yes, but not a recent one. It was introduced in https://github.com/streamlit/streamlit/pull/3154, 7 months ago.
### Debug info
- Streamlit version: 1.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/write.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json as json
16 import types
17 from typing import cast, Any, List, Tuple, Type
18
19 import numpy as np
20
21 import streamlit
22 from streamlit import type_util
23 from streamlit.errors import StreamlitAPIException
24 from streamlit.state.session_state import LazySessionState
25
26 # Special methods:
27
28 HELP_TYPES = (
29 types.BuiltinFunctionType,
30 types.BuiltinMethodType,
31 types.FunctionType,
32 types.MethodType,
33 types.ModuleType,
34 ) # type: Tuple[Type[Any], ...]
35
36
37 class WriteMixin:
38 def write(self, *args, **kwargs):
39 """Write arguments to the app.
40
41 This is the Swiss Army knife of Streamlit commands: it does different
42 things depending on what you throw at it. Unlike other Streamlit commands,
43 write() has some unique properties:
44
45 1. You can pass in multiple arguments, all of which will be written.
46 2. Its behavior depends on the input types as follows.
47 3. It returns None, so its "slot" in the App cannot be reused.
48
49 Parameters
50 ----------
51 *args : any
52 One or many objects to print to the App.
53
54 Arguments are handled as follows:
55
56 - write(string) : Prints the formatted Markdown string, with
57 support for LaTeX expression and emoji shortcodes.
58 See docs for st.markdown for more.
59 - write(data_frame) : Displays the DataFrame as a table.
60 - write(error) : Prints an exception specially.
61 - write(func) : Displays information about a function.
62 - write(module) : Displays information about the module.
63 - write(dict) : Displays dict in an interactive widget.
64 - write(mpl_fig) : Displays a Matplotlib figure.
65 - write(altair) : Displays an Altair chart.
66 - write(keras) : Displays a Keras model.
67 - write(graphviz) : Displays a Graphviz graph.
68 - write(plotly_fig) : Displays a Plotly figure.
69 - write(bokeh_fig) : Displays a Bokeh figure.
70 - write(sympy_expr) : Prints SymPy expression using LaTeX.
71 - write(htmlable) : Prints _repr_html_() for the object if available.
72 - write(obj) : Prints str(obj) if otherwise unknown.
73
74 unsafe_allow_html : bool
75 This is a keyword-only argument that defaults to False.
76
77 By default, any HTML tags found in strings will be escaped and
78 therefore treated as pure text. This behavior may be turned off by
79 setting this argument to True.
80
81 That said, *we strongly advise against it*. It is hard to write secure
82 HTML, so by using this argument you may be compromising your users'
83 security. For more information, see:
84
85 https://github.com/streamlit/streamlit/issues/152
86
87 **Also note that `unsafe_allow_html` is a temporary measure and may be
88 removed from Streamlit at any time.**
89
90 If you decide to turn on HTML anyway, we ask you to please tell us your
91 exact use case here:
92 https://discuss.streamlit.io/t/96 .
93
94 This will help us come up with safe APIs that allow you to do what you
95 want.
96
97 Example
98 -------
99
100 Its basic use case is to draw Markdown-formatted text, whenever the
101 input is a string:
102
103 >>> write('Hello, *World!* :sunglasses:')
104
105 .. output::
106 https://static.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE
107 height: 50px
108
109 As mentioned earlier, `st.write()` also accepts other data formats, such as
110 numbers, data frames, styled data frames, and assorted objects:
111
112 >>> st.write(1234)
113 >>> st.write(pd.DataFrame({
114 ... 'first column': [1, 2, 3, 4],
115 ... 'second column': [10, 20, 30, 40],
116 ... }))
117
118 .. output::
119 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD
120 height: 250px
121
122 Finally, you can pass in multiple arguments to do things like:
123
124 >>> st.write('1 + 1 = ', 2)
125 >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')
126
127 .. output::
128 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1
129 height: 300px
130
131 Oh, one more thing: `st.write` accepts chart objects too! For example:
132
133 >>> import pandas as pd
134 >>> import numpy as np
135 >>> import altair as alt
136 >>>
137 >>> df = pd.DataFrame(
138 ... np.random.randn(200, 3),
139 ... columns=['a', 'b', 'c'])
140 ...
141 >>> c = alt.Chart(df).mark_circle().encode(
142 ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])
143 >>>
144 >>> st.write(c)
145
146 .. output::
147 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5
148 height: 200px
149
150 """
151 string_buffer = [] # type: List[str]
152 unsafe_allow_html = kwargs.get("unsafe_allow_html", False)
153
154 # This bans some valid cases like: e = st.empty(); e.write("a", "b").
155 # BUT: 1) such cases are rare, 2) this rule is easy to understand,
156 # and 3) this rule should be removed once we have st.container()
157 if not self.dg._is_top_level and len(args) > 1:
158 raise StreamlitAPIException(
159 "Cannot replace a single element with multiple elements.\n\n"
160 "The `write()` method only supports multiple elements when "
161 "inserting elements rather than replacing. That is, only "
162 "when called as `st.write()` or `st.sidebar.write()`."
163 )
164
165 def flush_buffer():
166 if string_buffer:
167 self.dg.markdown(
168 " ".join(string_buffer),
169 unsafe_allow_html=unsafe_allow_html,
170 )
171 string_buffer[:] = []
172
173 for arg in args:
174 # Order matters!
175 if isinstance(arg, str):
176 string_buffer.append(arg)
177 elif type_util.is_dataframe_like(arg):
178 flush_buffer()
179 if len(np.shape(arg)) > 2:
180 self.dg.text(arg)
181 else:
182 self.dg.dataframe(arg)
183 elif isinstance(arg, Exception):
184 flush_buffer()
185 self.dg.exception(arg)
186 elif isinstance(arg, HELP_TYPES):
187 flush_buffer()
188 self.dg.help(arg)
189 elif type_util.is_altair_chart(arg):
190 flush_buffer()
191 self.dg.altair_chart(arg)
192 elif type_util.is_type(arg, "matplotlib.figure.Figure"):
193 flush_buffer()
194 self.dg.pyplot(arg)
195 elif type_util.is_plotly_chart(arg):
196 flush_buffer()
197 self.dg.plotly_chart(arg)
198 elif type_util.is_type(arg, "bokeh.plotting.figure.Figure"):
199 flush_buffer()
200 self.dg.bokeh_chart(arg)
201 elif type_util.is_graphviz_chart(arg):
202 flush_buffer()
203 self.dg.graphviz_chart(arg)
204 elif type_util.is_sympy_expession(arg):
205 flush_buffer()
206 self.dg.latex(arg)
207 elif type_util.is_keras_model(arg):
208 from tensorflow.python.keras.utils import vis_utils
209
210 flush_buffer()
211 dot = vis_utils.model_to_dot(arg)
212 self.dg.graphviz_chart(dot.to_string())
213 elif isinstance(arg, (dict, list, LazySessionState)):
214 flush_buffer()
215 self.dg.json(arg)
216 elif type_util.is_namedtuple(arg):
217 flush_buffer()
218 self.dg.json(json.dumps(arg._asdict()))
219 elif type_util.is_pydeck(arg):
220 flush_buffer()
221 self.dg.pydeck_chart(arg)
222 elif hasattr(arg, "_repr_html_"):
223 self.dg.markdown(
224 arg._repr_html_(),
225 unsafe_allow_html=True,
226 )
227 else:
228 string_buffer.append("`%s`" % str(arg).replace("`", "\\`"))
229
230 flush_buffer()
231
232 @property
233 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
234 """Get our DeltaGenerator."""
235 return cast("streamlit.delta_generator.DeltaGenerator", self)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/elements/write.py b/lib/streamlit/elements/write.py
--- a/lib/streamlit/elements/write.py
+++ b/lib/streamlit/elements/write.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import inspect
import json as json
import types
from typing import cast, Any, List, Tuple, Type
@@ -219,6 +220,9 @@
elif type_util.is_pydeck(arg):
flush_buffer()
self.dg.pydeck_chart(arg)
+ elif inspect.isclass(arg):
+ flush_buffer()
+ self.dg.text(arg)
elif hasattr(arg, "_repr_html_"):
self.dg.markdown(
arg._repr_html_(),
| {"golden_diff": "diff --git a/lib/streamlit/elements/write.py b/lib/streamlit/elements/write.py\n--- a/lib/streamlit/elements/write.py\n+++ b/lib/streamlit/elements/write.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import inspect\n import json as json\n import types\n from typing import cast, Any, List, Tuple, Type\n@@ -219,6 +220,9 @@\n elif type_util.is_pydeck(arg):\n flush_buffer()\n self.dg.pydeck_chart(arg)\n+ elif inspect.isclass(arg):\n+ flush_buffer()\n+ self.dg.text(arg)\n elif hasattr(arg, \"_repr_html_\"):\n self.dg.markdown(\n arg._repr_html_(),\n", "issue": "Can't call st.write() on a class that has repr_html\n### Summary\r\n\r\nWhen you call `st.write()` any **class** that has `_repr_html_` it throws an error.\r\n\r\n(But you can call `st.write` on _instances_ of those classes just fine)\r\n\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nimport streamlit as st \r\nimport pandas as pd \r\n \r\nx = pd.DataFrame([{\"a\": 1, \"b\": 2}, {\"a\": 3, \"b\": 4}]) \r\n\r\n\"## st.text\"\r\nst.text(type(x)) # This works. \r\n\r\n\"## st.write\"\r\nst.write(type(x)) # This doesn't work. \r\n```\r\n\r\n**Expected behavior:**\r\n\r\n`st.text` and `st.write` print the exact same thing.\r\n\r\n**Actual behavior:**\r\n\r\nYou get this beautiful error:\r\n\r\n\r\n\r\nWhy this fails: `st.write` uses `_repr_html_()` when it's available. `DataFrame` contains that method, but it requires a `self` to be passed. So when you call it directly as `DataFrame._repr_html_()` there's no `self` and we get an exception.\r\n\r\n**Solution:**\r\n\r\nWhen `st.write()` receives a class as input we should just call `st.text()` on it.\r\n\r\nYou can use `inspect.isclass` to check for this.\r\n\r\n\r\n### Is this a regression?\r\n\r\nYes, but not a recent one. It was introduced in https://github.com/streamlit/streamlit/pull/3154 , 7 months ago.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.1\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json as json\nimport types\nfrom typing import cast, Any, List, Tuple, Type\n\nimport numpy as np\n\nimport streamlit\nfrom streamlit import type_util\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.state.session_state import LazySessionState\n\n# Special methods:\n\nHELP_TYPES = (\n types.BuiltinFunctionType,\n types.BuiltinMethodType,\n types.FunctionType,\n types.MethodType,\n types.ModuleType,\n) # type: Tuple[Type[Any], ...]\n\n\nclass WriteMixin:\n def write(self, *args, **kwargs):\n \"\"\"Write arguments to the app.\n\n This is the Swiss Army knife of Streamlit commands: it does different\n things depending on what you throw at it. Unlike other Streamlit commands,\n write() has some unique properties:\n\n 1. You can pass in multiple arguments, all of which will be written.\n 2. Its behavior depends on the input types as follows.\n 3. 
It returns None, so its \"slot\" in the App cannot be reused.\n\n Parameters\n ----------\n *args : any\n One or many objects to print to the App.\n\n Arguments are handled as follows:\n\n - write(string) : Prints the formatted Markdown string, with\n support for LaTeX expression and emoji shortcodes.\n See docs for st.markdown for more.\n - write(data_frame) : Displays the DataFrame as a table.\n - write(error) : Prints an exception specially.\n - write(func) : Displays information about a function.\n - write(module) : Displays information about the module.\n - write(dict) : Displays dict in an interactive widget.\n - write(mpl_fig) : Displays a Matplotlib figure.\n - write(altair) : Displays an Altair chart.\n - write(keras) : Displays a Keras model.\n - write(graphviz) : Displays a Graphviz graph.\n - write(plotly_fig) : Displays a Plotly figure.\n - write(bokeh_fig) : Displays a Bokeh figure.\n - write(sympy_expr) : Prints SymPy expression using LaTeX.\n - write(htmlable) : Prints _repr_html_() for the object if available.\n - write(obj) : Prints str(obj) if otherwise unknown.\n\n unsafe_allow_html : bool\n This is a keyword-only argument that defaults to False.\n\n By default, any HTML tags found in strings will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, *we strongly advise against it*. It is hard to write secure\n HTML, so by using this argument you may be compromising your users'\n security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n **Also note that `unsafe_allow_html` is a temporary measure and may be\n removed from Streamlit at any time.**\n\n If you decide to turn on HTML anyway, we ask you to please tell us your\n exact use case here:\n https://discuss.streamlit.io/t/96 .\n\n This will help us come up with safe APIs that allow you to do what you\n want.\n\n Example\n -------\n\n Its basic use case is to draw Markdown-formatted text, whenever the\n input is a string:\n\n >>> write('Hello, *World!* :sunglasses:')\n\n .. output::\n https://static.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE\n height: 50px\n\n As mentioned earlier, `st.write()` also accepts other data formats, such as\n numbers, data frames, styled data frames, and assorted objects:\n\n >>> st.write(1234)\n >>> st.write(pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... }))\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD\n height: 250px\n\n Finally, you can pass in multiple arguments to do things like:\n\n >>> st.write('1 + 1 = ', 2)\n >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1\n height: 300px\n\n Oh, one more thing: `st.write` accepts chart objects too! For example:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.write(c)\n\n .. 
output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n \"\"\"\n string_buffer = [] # type: List[str]\n unsafe_allow_html = kwargs.get(\"unsafe_allow_html\", False)\n\n # This bans some valid cases like: e = st.empty(); e.write(\"a\", \"b\").\n # BUT: 1) such cases are rare, 2) this rule is easy to understand,\n # and 3) this rule should be removed once we have st.container()\n if not self.dg._is_top_level and len(args) > 1:\n raise StreamlitAPIException(\n \"Cannot replace a single element with multiple elements.\\n\\n\"\n \"The `write()` method only supports multiple elements when \"\n \"inserting elements rather than replacing. That is, only \"\n \"when called as `st.write()` or `st.sidebar.write()`.\"\n )\n\n def flush_buffer():\n if string_buffer:\n self.dg.markdown(\n \" \".join(string_buffer),\n unsafe_allow_html=unsafe_allow_html,\n )\n string_buffer[:] = []\n\n for arg in args:\n # Order matters!\n if isinstance(arg, str):\n string_buffer.append(arg)\n elif type_util.is_dataframe_like(arg):\n flush_buffer()\n if len(np.shape(arg)) > 2:\n self.dg.text(arg)\n else:\n self.dg.dataframe(arg)\n elif isinstance(arg, Exception):\n flush_buffer()\n self.dg.exception(arg)\n elif isinstance(arg, HELP_TYPES):\n flush_buffer()\n self.dg.help(arg)\n elif type_util.is_altair_chart(arg):\n flush_buffer()\n self.dg.altair_chart(arg)\n elif type_util.is_type(arg, \"matplotlib.figure.Figure\"):\n flush_buffer()\n self.dg.pyplot(arg)\n elif type_util.is_plotly_chart(arg):\n flush_buffer()\n self.dg.plotly_chart(arg)\n elif type_util.is_type(arg, \"bokeh.plotting.figure.Figure\"):\n flush_buffer()\n self.dg.bokeh_chart(arg)\n elif type_util.is_graphviz_chart(arg):\n flush_buffer()\n self.dg.graphviz_chart(arg)\n elif type_util.is_sympy_expession(arg):\n flush_buffer()\n self.dg.latex(arg)\n elif type_util.is_keras_model(arg):\n from tensorflow.python.keras.utils import vis_utils\n\n flush_buffer()\n dot = vis_utils.model_to_dot(arg)\n self.dg.graphviz_chart(dot.to_string())\n elif isinstance(arg, (dict, list, LazySessionState)):\n flush_buffer()\n self.dg.json(arg)\n elif type_util.is_namedtuple(arg):\n flush_buffer()\n self.dg.json(json.dumps(arg._asdict()))\n elif type_util.is_pydeck(arg):\n flush_buffer()\n self.dg.pydeck_chart(arg)\n elif hasattr(arg, \"_repr_html_\"):\n self.dg.markdown(\n arg._repr_html_(),\n unsafe_allow_html=True,\n )\n else:\n string_buffer.append(\"`%s`\" % str(arg).replace(\"`\", \"\\\\`\"))\n\n flush_buffer()\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/write.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport json as json\nimport types\nfrom typing import cast, Any, List, Tuple, Type\n\nimport numpy as np\n\nimport streamlit\nfrom streamlit import 
type_util\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.state.session_state import LazySessionState\n\n# Special methods:\n\nHELP_TYPES = (\n types.BuiltinFunctionType,\n types.BuiltinMethodType,\n types.FunctionType,\n types.MethodType,\n types.ModuleType,\n) # type: Tuple[Type[Any], ...]\n\n\nclass WriteMixin:\n def write(self, *args, **kwargs):\n \"\"\"Write arguments to the app.\n\n This is the Swiss Army knife of Streamlit commands: it does different\n things depending on what you throw at it. Unlike other Streamlit commands,\n write() has some unique properties:\n\n 1. You can pass in multiple arguments, all of which will be written.\n 2. Its behavior depends on the input types as follows.\n 3. It returns None, so its \"slot\" in the App cannot be reused.\n\n Parameters\n ----------\n *args : any\n One or many objects to print to the App.\n\n Arguments are handled as follows:\n\n - write(string) : Prints the formatted Markdown string, with\n support for LaTeX expression and emoji shortcodes.\n See docs for st.markdown for more.\n - write(data_frame) : Displays the DataFrame as a table.\n - write(error) : Prints an exception specially.\n - write(func) : Displays information about a function.\n - write(module) : Displays information about the module.\n - write(dict) : Displays dict in an interactive widget.\n - write(mpl_fig) : Displays a Matplotlib figure.\n - write(altair) : Displays an Altair chart.\n - write(keras) : Displays a Keras model.\n - write(graphviz) : Displays a Graphviz graph.\n - write(plotly_fig) : Displays a Plotly figure.\n - write(bokeh_fig) : Displays a Bokeh figure.\n - write(sympy_expr) : Prints SymPy expression using LaTeX.\n - write(htmlable) : Prints _repr_html_() for the object if available.\n - write(obj) : Prints str(obj) if otherwise unknown.\n\n unsafe_allow_html : bool\n This is a keyword-only argument that defaults to False.\n\n By default, any HTML tags found in strings will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, *we strongly advise against it*. It is hard to write secure\n HTML, so by using this argument you may be compromising your users'\n security. For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n **Also note that `unsafe_allow_html` is a temporary measure and may be\n removed from Streamlit at any time.**\n\n If you decide to turn on HTML anyway, we ask you to please tell us your\n exact use case here:\n https://discuss.streamlit.io/t/96 .\n\n This will help us come up with safe APIs that allow you to do what you\n want.\n\n Example\n -------\n\n Its basic use case is to draw Markdown-formatted text, whenever the\n input is a string:\n\n >>> write('Hello, *World!* :sunglasses:')\n\n .. output::\n https://static.streamlit.io/0.50.2-ZWk9/index.html?id=Pn5sjhgNs4a8ZbiUoSTRxE\n height: 50px\n\n As mentioned earlier, `st.write()` also accepts other data formats, such as\n numbers, data frames, styled data frames, and assorted objects:\n\n >>> st.write(1234)\n >>> st.write(pd.DataFrame({\n ... 'first column': [1, 2, 3, 4],\n ... 'second column': [10, 20, 30, 40],\n ... }))\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=FCp9AMJHwHRsWSiqMgUZGD\n height: 250px\n\n Finally, you can pass in multiple arguments to do things like:\n\n >>> st.write('1 + 1 = ', 2)\n >>> st.write('Below is a DataFrame:', data_frame, 'Above is a dataframe.')\n\n .. 
output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=DHkcU72sxYcGarkFbf4kK1\n height: 300px\n\n Oh, one more thing: `st.write` accepts chart objects too! For example:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> import altair as alt\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(200, 3),\n ... columns=['a', 'b', 'c'])\n ...\n >>> c = alt.Chart(df).mark_circle().encode(\n ... x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n >>>\n >>> st.write(c)\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=8jmmXR8iKoZGV4kXaKGYV5\n height: 200px\n\n \"\"\"\n string_buffer = [] # type: List[str]\n unsafe_allow_html = kwargs.get(\"unsafe_allow_html\", False)\n\n # This bans some valid cases like: e = st.empty(); e.write(\"a\", \"b\").\n # BUT: 1) such cases are rare, 2) this rule is easy to understand,\n # and 3) this rule should be removed once we have st.container()\n if not self.dg._is_top_level and len(args) > 1:\n raise StreamlitAPIException(\n \"Cannot replace a single element with multiple elements.\\n\\n\"\n \"The `write()` method only supports multiple elements when \"\n \"inserting elements rather than replacing. That is, only \"\n \"when called as `st.write()` or `st.sidebar.write()`.\"\n )\n\n def flush_buffer():\n if string_buffer:\n self.dg.markdown(\n \" \".join(string_buffer),\n unsafe_allow_html=unsafe_allow_html,\n )\n string_buffer[:] = []\n\n for arg in args:\n # Order matters!\n if isinstance(arg, str):\n string_buffer.append(arg)\n elif type_util.is_dataframe_like(arg):\n flush_buffer()\n if len(np.shape(arg)) > 2:\n self.dg.text(arg)\n else:\n self.dg.dataframe(arg)\n elif isinstance(arg, Exception):\n flush_buffer()\n self.dg.exception(arg)\n elif isinstance(arg, HELP_TYPES):\n flush_buffer()\n self.dg.help(arg)\n elif type_util.is_altair_chart(arg):\n flush_buffer()\n self.dg.altair_chart(arg)\n elif type_util.is_type(arg, \"matplotlib.figure.Figure\"):\n flush_buffer()\n self.dg.pyplot(arg)\n elif type_util.is_plotly_chart(arg):\n flush_buffer()\n self.dg.plotly_chart(arg)\n elif type_util.is_type(arg, \"bokeh.plotting.figure.Figure\"):\n flush_buffer()\n self.dg.bokeh_chart(arg)\n elif type_util.is_graphviz_chart(arg):\n flush_buffer()\n self.dg.graphviz_chart(arg)\n elif type_util.is_sympy_expession(arg):\n flush_buffer()\n self.dg.latex(arg)\n elif type_util.is_keras_model(arg):\n from tensorflow.python.keras.utils import vis_utils\n\n flush_buffer()\n dot = vis_utils.model_to_dot(arg)\n self.dg.graphviz_chart(dot.to_string())\n elif isinstance(arg, (dict, list, LazySessionState)):\n flush_buffer()\n self.dg.json(arg)\n elif type_util.is_namedtuple(arg):\n flush_buffer()\n self.dg.json(json.dumps(arg._asdict()))\n elif type_util.is_pydeck(arg):\n flush_buffer()\n self.dg.pydeck_chart(arg)\n elif inspect.isclass(arg):\n flush_buffer()\n self.dg.text(arg)\n elif hasattr(arg, \"_repr_html_\"):\n self.dg.markdown(\n arg._repr_html_(),\n unsafe_allow_html=True,\n )\n else:\n string_buffer.append(\"`%s`\" % str(arg).replace(\"`\", \"\\\\`\"))\n\n flush_buffer()\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/write.py"}]} | 3,337 | 177 |
gh_patches_debug_16430 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transformer model for summarization: copy_generator.py - IndexError: too many indices for tensor of dimension 0
When I try to train my model, I get the following error:
```
Traceback (most recent call last):
File "onmt_train.py", line 123, in <module>
main(opt)
File "onmt_train.py", line 54, in main
single_main(opt, 0)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/train_single.py", line 154, in main
opt.valid_steps)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/trainer.py", line 175, in train
report_stats)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/trainer.py", line 288, in _gradient_accumulation
trunc_size, self.shard_size, normalization)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/utils/loss.py", line 170, in sharded_compute_loss
loss, stats = self._compute_loss(batch, **shard)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/modules/copy_generator.py", line 188, in _compute_loss
loss = self.criterion(scores, align, target)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/modules/copy_generator.py", line 132, in forward
copy_tok_probs[align == self.unk_index] = 0
IndexError: too many indices for tensor of dimension 0
```
I have no idea where it comes from; it occurs at step 800, even though after 337 steps the network should have seen all of my data (I've set it to checkpoint and validate every 400 steps).
I've also tried to check whether this comes from an empty sequence, but I had explicitly deleted examples that contain fewer than one token (after stripping whitespace).
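The trace points at boolean-mask indexing of a zero-dimensional tensor. A minimal sketch of how that can happen (illustrative shapes only, not taken from the failing run): when a loss shard ends up with a single target token, `squeeze()` drops every size-1 dimension instead of only the last one, and the resulting 0-dim tensor can no longer be indexed with the `align == unk` mask:

```python
import torch

# Hypothetical repro with made-up shapes: one target token, dynamic vocab of 10.
scores = torch.rand(1, 10)
copy_ix = torch.tensor([[3]])
align = torch.tensor([0])

copy_tok_probs = scores.gather(1, copy_ix).squeeze()    # shape () -- 0-dim tensor
# copy_tok_probs[align == 0] = 0                        # IndexError: too many indices
#                                                       # for tensor of dimension 0

copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)   # shape (1,) -- mask indexing works
copy_tok_probs[align == 0] = 0
```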
The command I use for training:
```
train.py -data /home/ubuntu/efs/data/WikiHowSep \
...: -save_model /home/ubuntu/efs/models/WikiHowSep \
...: -layers 4 \
...: -rnn_size 512 \
...: -word_vec_size 512 \
...: -max_grad_norm 0 \
...: -optim adam \
...: -encoder_type transformer \
...: -decoder_type transformer \
...: -position_encoding \
 ...: -dropout 0.2 \
...: -param_init 0 \
...: -warmup_steps 100 \
...: -learning_rate 2 \
...: -decay_method noam \
...: -label_smoothing 0.1 \
...: -adam_beta2 0.998 \
...: -save_checkpoint_steps 400 \
...: -batch_size $batch_size \
...: -batch_type tokens \
...: -normalization tokens \
...: -max_generator_batches 2 \
...: -train_steps $train_steps \
...: -accum_count 4 \
...: -share_embeddings \
...: -copy_attn \
...: -param_init_glorot \
...: -gpu_ranks 0 \
...: -valid_steps 400 \
...: -tensorboard \
...: -tensorboard_log_dir /home/ubuntu/efs/models/tensorboard/WikiHowSep
```
Command I used for preprocessing:
```
preprocess.py -train_src $DATA_PATH/wikihow_train_text.txt \
-train_tgt $DATA_PATH/wikihow_train_headline.txt \
-valid_src $DATA_PATH/wikihow_val_text.txt \
-valid_tgt $DATA_PATH/wikihow_val_headline.txt \
-save_data ~/efs/data/WikiHowSep \
-src_seq_length_trunc 200 \
-tgt_seq_length_trunc 20 \
-dynamic_dict \
-share_vocab \
-shard_size 100000
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/modules/copy_generator.py`
Content:
```
1 """ Generator module """
2 import torch
3 import torch.nn as nn
4
5 import onmt.inputters as inputters
6 from onmt.utils.misc import aeq
7 from onmt.utils.loss import LossComputeBase
8
9
10 class CopyGenerator(nn.Module):
11 """An implementation of pointer-generator networks (See et al., 2017)
12 (https://arxiv.org/abs/1704.04368), which consider copying words
13 directly from the source sequence.
14
15 The main idea is that we have an extended "dynamic dictionary".
16 It contains `|tgt_dict|` words plus an arbitrary number of
17 additional words introduced by the source sentence.
18 For each source sentence we have a `src_map` that maps
19 each source word to an index in `tgt_dict` if it known, or
20 else to an extra word.
21
22 The copy generator is an extended version of the standard
23 generator that computes three values.
24
25 * :math:`p_{softmax}` the standard softmax over `tgt_dict`
26 * :math:`p(z)` the probability of copying a word from
27 the source
28     * :math:`p_{copy}` the probability of copying a particular word.
29 taken from the attention distribution directly.
30
31     The model returns a distribution over the extended dictionary,
32 computed as
33
34 :math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`
35
36
37 .. mermaid::
38
39 graph BT
40 A[input]
41 S[src_map]
42 B[softmax]
43 BB[switch]
44 C[attn]
45 D[copy]
46 O[output]
47 A --> B
48 A --> BB
49 S --> D
50 C --> D
51 D --> O
52 B --> O
53 BB --> O
54
55
56 Args:
57 input_size (int): size of input representation
58 tgt_dict (Vocab): output target dictionary
59
60 """
61
62 def __init__(self, input_size, tgt_dict):
63 super(CopyGenerator, self).__init__()
64 self.linear = nn.Linear(input_size, len(tgt_dict))
65 self.linear_copy = nn.Linear(input_size, 1)
66 self.tgt_dict = tgt_dict
67
68 def forward(self, hidden, attn, src_map):
69 """
70 Compute a distribution over the target dictionary
71         extended by the dynamic dictionary implied by copying
72 source words.
73
74 Args:
75 hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`
76 attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`
77 src_map (`FloatTensor`):
78 A sparse indicator matrix mapping each source word to
79 its index in the "extended" vocab containing.
80 `[src_len, batch, extra_words]`
81 """
82 # CHECKS
83 batch_by_tlen, _ = hidden.size()
84 batch_by_tlen_, slen = attn.size()
85 slen_, batch, cvocab = src_map.size()
86 aeq(batch_by_tlen, batch_by_tlen_)
87 aeq(slen, slen_)
88
89 # Original probabilities.
90 logits = self.linear(hidden)
91 logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')
92 prob = torch.softmax(logits, 1)
93
94 # Probability of copying p(z=1) batch.
95 p_copy = torch.sigmoid(self.linear_copy(hidden))
96         # Probability of not copying: p_{word}(w) * (1 - p(z))
97 out_prob = torch.mul(prob, 1 - p_copy)
98 mul_attn = torch.mul(attn, p_copy)
99 copy_prob = torch.bmm(
100 mul_attn.view(-1, batch, slen).transpose(0, 1),
101 src_map.transpose(0, 1)
102 ).transpose(0, 1)
103 copy_prob = copy_prob.contiguous().view(-1, cvocab)
104 return torch.cat([out_prob, copy_prob], 1)
105
106
107 class CopyGeneratorLoss(nn.Module):
108 """ Copy generator criterion """
109
110 def __init__(self, vocab_size, force_copy, unk_index=0,
111 ignore_index=-100, eps=1e-20):
112 super(CopyGeneratorLoss, self).__init__()
113 self.force_copy = force_copy
114 self.eps = eps
115 self.vocab_size = vocab_size
116 self.ignore_index = ignore_index
117 self.unk_index = unk_index
118
119 def forward(self, scores, align, target):
120 """
121 scores (FloatTensor): (batch_size*tgt_len) x dynamic vocab size
122 align (LongTensor): (batch_size*tgt_len)
123 target (LongTensor): (batch_size*tgt_len)
124 """
125 # probabilities assigned by the model to the gold targets
126 vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze()
127
128 # probability of tokens copied from source
129 copy_ix = align.unsqueeze(1) + self.vocab_size
130 copy_tok_probs = scores.gather(1, copy_ix).squeeze()
131 # Set scores for unk to 0 and add eps
132 copy_tok_probs[align == self.unk_index] = 0
133 copy_tok_probs += self.eps # to avoid -inf logs
134
135 # find the indices in which you do not use the copy mechanism
136 non_copy = align == self.unk_index
137 if not self.force_copy:
138 non_copy = non_copy | (target != self.unk_index)
139
140 probs = torch.where(
141 non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
142 )
143
144 loss = -probs.log() # just NLLLoss; can the module be incorporated?
145 # Drop padding.
146 loss[target == self.ignore_index] = 0
147 return loss
148
149
150 class CopyGeneratorLossCompute(LossComputeBase):
151 """
152 Copy Generator Loss Computation.
153 """
154
155 def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):
156 super(CopyGeneratorLossCompute, self).__init__(criterion, generator)
157 self.tgt_vocab = tgt_vocab
158 self.normalize_by_length = normalize_by_length
159
160 def _make_shard_state(self, batch, output, range_, attns):
161 """ See base class for args description. """
162 if getattr(batch, "alignment", None) is None:
163 raise AssertionError("using -copy_attn you need to pass in "
164 "-dynamic_dict during preprocess stage.")
165
166 return {
167 "output": output,
168 "target": batch.tgt[range_[0] + 1: range_[1]],
169 "copy_attn": attns.get("copy"),
170 "align": batch.alignment[range_[0] + 1: range_[1]]
171 }
172
173 def _compute_loss(self, batch, output, target, copy_attn, align):
174 """
175 Compute the loss. The args must match self._make_shard_state().
176 Args:
177 batch: the current batch.
178 output: the predict output from the model.
179 target: the validate target to compare output with.
180 copy_attn: the copy attention value.
181 align: the align info.
182 """
183 target = target.view(-1)
184 align = align.view(-1)
185 scores = self.generator(self._bottle(output),
186 self._bottle(copy_attn),
187 batch.src_map)
188 loss = self.criterion(scores, align, target)
189
190 # this block does not depend on the loss value computed above
191 # and is used only for stats
192 scores_data = inputters.TextDataset.collapse_copy_scores(
193 self._unbottle(scores.clone(), batch.batch_size),
194 batch, self.tgt_vocab, batch.dataset.src_vocabs)
195 scores_data = self._bottle(scores_data)
196
197 # this block does not depend on the loss value computed above
198 # and is used only for stats
199 # Correct target copy token instead of <unk>
200 # tgt[i] = align[i] + len(tgt_vocab)
201 # for i such that tgt[i] == 0 and align[i] != 0
202 target_data = target.clone()
203 unk = self.criterion.unk_index
204 correct_mask = (target_data == unk) & (align != unk)
205 offset_align = align[correct_mask] + len(self.tgt_vocab)
206 target_data[correct_mask] += offset_align
207
208 # Compute sum of perplexities for stats
209 stats = self._stats(loss.sum().clone(), scores_data, target_data)
210
211 # this part looks like it belongs in CopyGeneratorLoss
212 if self.normalize_by_length:
213 # Compute Loss as NLL divided by seq length
214 tgt_lens = batch.tgt.ne(self.padding_idx).sum(0).float()
215 # Compute Total Loss per sequence in batch
216 loss = loss.view(-1, batch.batch_size).sum(0)
217 # Divide by length of each sequence and sum
218 loss = torch.div(loss, tgt_lens).sum()
219 else:
220 loss = loss.sum()
221
222 return loss, stats
223
```
--- END FILES ---
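As a quick sanity check of the mixture described in the `CopyGenerator` docstring above — `p(w) = p(z=1)·p_copy(w) + p(z=0)·p_softmax(w)` — here is a worked example with made-up numbers (none of these values come from a trained model, and the variable names are only for illustration):

```python
# Illustrative values only.
p_copy_gate = 0.3    # p(z=1): output of the sigmoid switch (linear_copy)
p_softmax_w = 0.10   # probability of w under the tgt_dict softmax
p_copy_w = 0.50      # attention mass on source positions holding w

p_w = p_copy_gate * p_copy_w + (1 - p_copy_gate) * p_softmax_w
print(p_w)           # ~0.22
```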
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/modules/copy_generator.py b/onmt/modules/copy_generator.py
--- a/onmt/modules/copy_generator.py
+++ b/onmt/modules/copy_generator.py
@@ -123,11 +123,11 @@
target (LongTensor): (batch_size*tgt_len)
"""
# probabilities assigned by the model to the gold targets
- vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze()
+ vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
# probability of tokens copied from source
copy_ix = align.unsqueeze(1) + self.vocab_size
- copy_tok_probs = scores.gather(1, copy_ix).squeeze()
+ copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
# Set scores for unk to 0 and add eps
copy_tok_probs[align == self.unk_index] = 0
copy_tok_probs += self.eps # to avoid -inf logs
| {"golden_diff": "diff --git a/onmt/modules/copy_generator.py b/onmt/modules/copy_generator.py\n--- a/onmt/modules/copy_generator.py\n+++ b/onmt/modules/copy_generator.py\n@@ -123,11 +123,11 @@\n target (LongTensor): (batch_size*tgt_len)\n \"\"\"\n # probabilities assigned by the model to the gold targets\n- vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze()\n+ vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)\n \n # probability of tokens copied from source\n copy_ix = align.unsqueeze(1) + self.vocab_size\n- copy_tok_probs = scores.gather(1, copy_ix).squeeze()\n+ copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)\n # Set scores for unk to 0 and add eps\n copy_tok_probs[align == self.unk_index] = 0\n copy_tok_probs += self.eps # to avoid -inf logs\n", "issue": "Transformer model for summarization: copy_generator.py - IndexError: too many indices for tensor of dimension 0\nWhen I try to train my model I get the following\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"onmt_train.py\", line 123, in <module>\r\n main(opt)\r\n File \"onmt_train.py\", line 54, in main\r\n single_main(opt, 0)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/train_single.py\", line 154, in main\r\n opt.valid_steps)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/trainer.py\", line 175, in train\r\n report_stats)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/trainer.py\", line 288, in _gradient_accumulation\r\n trunc_size, self.shard_size, normalization)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/utils/loss.py\", line 170, in sharded_compute_loss\r\n loss, stats = self._compute_loss(batch, **shard)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/modules/copy_generator.py\", line 188, in _compute_loss\r\n loss = self.criterion(scores, align, target)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 477, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/ubuntu/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/onmt/modules/copy_generator.py\", line 132, in forward\r\n copy_tok_probs[align == self.unk_index] = 0\r\nIndexError: too many indices for tensor of dimension 0\r\n```\r\n\r\nI have no idea where it comes from, it comes in step 800, but after 337 steps the network should have seen all my data (I've set it to checkpoint and validate every 400 steps).\r\n\r\nI've also tried to find whether this comes from an empty sequence, but I explicitly deleted examples that contain less than one token (after stripping whitespace).\r\n\r\n\r\nThe command I use for training:\r\n```\r\ntrain.py -data /home/ubuntu/efs/data/WikiHowSep \\\r\n ...: -save_model /home/ubuntu/efs/models/WikiHowSep \\\r\n ...: -layers 4 \\\r\n ...: -rnn_size 512 \\\r\n ...: -word_vec_size 512 \\\r\n ...: -max_grad_norm 0 \\\r\n ...: -optim adam \\\r\n ...: -encoder_type transformer \\\r\n ...: -decoder_type transformer \\\r\n ...: -position_encoding \\\r\n ...: -dropout 0\\.2 \\\r\n ...: -param_init 0 \\\r\n ...: -warmup_steps 100 \\\r\n ...: -learning_rate 2 \\\r\n ...: -decay_method noam \\\r\n ...: -label_smoothing 0.1 \\\r\n ...: -adam_beta2 0.998 \\\r\n ...: -save_checkpoint_steps 400 \\\r\n ...: -batch_size $batch_size \\\r\n ...: -batch_type tokens \\\r\n ...: -normalization tokens \\\r\n ...: -max_generator_batches 2 \\\r\n ...: 
-train_steps $train_steps \\\r\n ...: -accum_count 4 \\\r\n ...: -share_embeddings \\\r\n ...: -copy_attn \\\r\n ...: -param_init_glorot \\\r\n ...: -gpu_ranks 0 \\\r\n ...: -valid_steps 400 \\\r\n ...: -tensorboard \\\r\n ...: -tensorboard_log_dir /home/ubuntu/efs/models/tensorboard/WikiHowSep\r\n```\r\n\r\nCommand I used for preprocessing:\r\n```\r\npreprocess.py -train_src $DATA_PATH/wikihow_train_text.txt \\\r\n -train_tgt $DATA_PATH/wikihow_train_headline.txt \\\r\n -valid_src $DATA_PATH/wikihow_val_text.txt \\\r\n -valid_tgt $DATA_PATH/wikihow_val_headline.txt \\\r\n -save_data ~/efs/data/WikiHowSep \\\r\n -src_seq_length_trunc 200 \\\r\n -tgt_seq_length_trunc 20 \\\r\n -dynamic_dict \\\r\n -share_vocab \\\r\n -shard_size 100000\r\n```\n", "before_files": [{"content": "\"\"\" Generator module \"\"\"\nimport torch\nimport torch.nn as nn\n\nimport onmt.inputters as inputters\nfrom onmt.utils.misc import aeq\nfrom onmt.utils.loss import LossComputeBase\n\n\nclass CopyGenerator(nn.Module):\n \"\"\"An implementation of pointer-generator networks (See et al., 2017)\n (https://arxiv.org/abs/1704.04368), which consider copying words\n directly from the source sequence.\n\n The main idea is that we have an extended \"dynamic dictionary\".\n It contains `|tgt_dict|` words plus an arbitrary number of\n additional words introduced by the source sentence.\n For each source sentence we have a `src_map` that maps\n each source word to an index in `tgt_dict` if it known, or\n else to an extra word.\n\n The copy generator is an extended version of the standard\n generator that computes three values.\n\n * :math:`p_{softmax}` the standard softmax over `tgt_dict`\n * :math:`p(z)` the probability of copying a word from\n the source\n * :math:`p_{copy}` the probility of copying a particular word.\n taken from the attention distribution directly.\n\n The model returns a distribution over the extend dictionary,\n computed as\n\n :math:`p(w) = p(z=1) p_{copy}(w) + p(z=0) p_{softmax}(w)`\n\n\n .. 
mermaid::\n\n graph BT\n A[input]\n S[src_map]\n B[softmax]\n BB[switch]\n C[attn]\n D[copy]\n O[output]\n A --> B\n A --> BB\n S --> D\n C --> D\n D --> O\n B --> O\n BB --> O\n\n\n Args:\n input_size (int): size of input representation\n tgt_dict (Vocab): output target dictionary\n\n \"\"\"\n\n def __init__(self, input_size, tgt_dict):\n super(CopyGenerator, self).__init__()\n self.linear = nn.Linear(input_size, len(tgt_dict))\n self.linear_copy = nn.Linear(input_size, 1)\n self.tgt_dict = tgt_dict\n\n def forward(self, hidden, attn, src_map):\n \"\"\"\n Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by compying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`\n \"\"\"\n # CHECKS\n batch_by_tlen, _ = hidden.size()\n batch_by_tlen_, slen = attn.size()\n slen_, batch, cvocab = src_map.size()\n aeq(batch_by_tlen, batch_by_tlen_)\n aeq(slen, slen_)\n\n # Original probabilities.\n logits = self.linear(hidden)\n logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')\n prob = torch.softmax(logits, 1)\n\n # Probability of copying p(z=1) batch.\n p_copy = torch.sigmoid(self.linear_copy(hidden))\n # Probibility of not copying: p_{word}(w) * (1 - p(z))\n out_prob = torch.mul(prob, 1 - p_copy)\n mul_attn = torch.mul(attn, p_copy)\n copy_prob = torch.bmm(\n mul_attn.view(-1, batch, slen).transpose(0, 1),\n src_map.transpose(0, 1)\n ).transpose(0, 1)\n copy_prob = copy_prob.contiguous().view(-1, cvocab)\n return torch.cat([out_prob, copy_prob], 1)\n\n\nclass CopyGeneratorLoss(nn.Module):\n \"\"\" Copy generator criterion \"\"\"\n\n def __init__(self, vocab_size, force_copy, unk_index=0,\n ignore_index=-100, eps=1e-20):\n super(CopyGeneratorLoss, self).__init__()\n self.force_copy = force_copy\n self.eps = eps\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.unk_index = unk_index\n\n def forward(self, scores, align, target):\n \"\"\"\n scores (FloatTensor): (batch_size*tgt_len) x dynamic vocab size\n align (LongTensor): (batch_size*tgt_len)\n target (LongTensor): (batch_size*tgt_len)\n \"\"\"\n # probabilities assigned by the model to the gold targets\n vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze()\n\n # probability of tokens copied from source\n copy_ix = align.unsqueeze(1) + self.vocab_size\n copy_tok_probs = scores.gather(1, copy_ix).squeeze()\n # Set scores for unk to 0 and add eps\n copy_tok_probs[align == self.unk_index] = 0\n copy_tok_probs += self.eps # to avoid -inf logs\n\n # find the indices in which you do not use the copy mechanism\n non_copy = align == self.unk_index\n if not self.force_copy:\n non_copy = non_copy | (target != self.unk_index)\n\n probs = torch.where(\n non_copy, copy_tok_probs + vocab_probs, copy_tok_probs\n )\n\n loss = -probs.log() # just NLLLoss; can the module be incorporated?\n # Drop padding.\n loss[target == self.ignore_index] = 0\n return loss\n\n\nclass CopyGeneratorLossCompute(LossComputeBase):\n \"\"\"\n Copy Generator Loss Computation.\n \"\"\"\n\n def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):\n super(CopyGeneratorLossCompute, self).__init__(criterion, generator)\n self.tgt_vocab = tgt_vocab\n self.normalize_by_length = normalize_by_length\n\n def 
_make_shard_state(self, batch, output, range_, attns):\n \"\"\" See base class for args description. \"\"\"\n if getattr(batch, \"alignment\", None) is None:\n raise AssertionError(\"using -copy_attn you need to pass in \"\n \"-dynamic_dict during preprocess stage.\")\n\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }\n\n def _compute_loss(self, batch, output, target, copy_attn, align):\n \"\"\"\n Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.\n \"\"\"\n target = target.view(-1)\n align = align.view(-1)\n scores = self.generator(self._bottle(output),\n self._bottle(copy_attn),\n batch.src_map)\n loss = self.criterion(scores, align, target)\n\n # this block does not depend on the loss value computed above\n # and is used only for stats\n scores_data = inputters.TextDataset.collapse_copy_scores(\n self._unbottle(scores.clone(), batch.batch_size),\n batch, self.tgt_vocab, batch.dataset.src_vocabs)\n scores_data = self._bottle(scores_data)\n\n # this block does not depend on the loss value computed above\n # and is used only for stats\n # Correct target copy token instead of <unk>\n # tgt[i] = align[i] + len(tgt_vocab)\n # for i such that tgt[i] == 0 and align[i] != 0\n target_data = target.clone()\n unk = self.criterion.unk_index\n correct_mask = (target_data == unk) & (align != unk)\n offset_align = align[correct_mask] + len(self.tgt_vocab)\n target_data[correct_mask] += offset_align\n\n # Compute sum of perplexities for stats\n stats = self._stats(loss.sum().clone(), scores_data, target_data)\n\n # this part looks like it belongs in CopyGeneratorLoss\n if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n tgt_lens = batch.tgt.ne(self.padding_idx).sum(0).float()\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:\n loss = loss.sum()\n\n return loss, stats\n", "path": "onmt/modules/copy_generator.py"}], "after_files": [{"content": "\"\"\" Generator module \"\"\"\nimport torch\nimport torch.nn as nn\n\nimport onmt.inputters as inputters\nfrom onmt.utils.misc import aeq\nfrom onmt.utils.loss import LossComputeBase\n\n\nclass CopyGenerator(nn.Module):\n \"\"\"An implementation of pointer-generator networks (See et al., 2017)\n (https://arxiv.org/abs/1704.04368), which consider copying words\n directly from the source sequence.\n\n The main idea is that we have an extended \"dynamic dictionary\".\n It contains `|tgt_dict|` words plus an arbitrary number of\n additional words introduced by the source sentence.\n For each source sentence we have a `src_map` that maps\n each source word to an index in `tgt_dict` if it known, or\n else to an extra word.\n\n The copy generator is an extended version of the standard\n generator that computes three values.\n\n * :math:`p_{softmax}` the standard softmax over `tgt_dict`\n * :math:`p(z)` the probability of copying a word from\n the source\n * :math:`p_{copy}` the probility of copying a particular word.\n taken from the attention distribution directly.\n\n The model returns a distribution over the extend dictionary,\n computed as\n\n :math:`p(w) = p(z=1) 
p_{copy}(w) + p(z=0) p_{softmax}(w)`\n\n\n .. mermaid::\n\n graph BT\n A[input]\n S[src_map]\n B[softmax]\n BB[switch]\n C[attn]\n D[copy]\n O[output]\n A --> B\n A --> BB\n S --> D\n C --> D\n D --> O\n B --> O\n BB --> O\n\n\n Args:\n input_size (int): size of input representation\n tgt_dict (Vocab): output target dictionary\n\n \"\"\"\n\n def __init__(self, input_size, tgt_dict):\n super(CopyGenerator, self).__init__()\n self.linear = nn.Linear(input_size, len(tgt_dict))\n self.linear_copy = nn.Linear(input_size, 1)\n self.tgt_dict = tgt_dict\n\n def forward(self, hidden, attn, src_map):\n \"\"\"\n Compute a distribution over the target dictionary\n extended by the dynamic dictionary implied by compying\n source words.\n\n Args:\n hidden (`FloatTensor`): hidden outputs `[batch*tlen, input_size]`\n attn (`FloatTensor`): attn for each `[batch*tlen, input_size]`\n src_map (`FloatTensor`):\n A sparse indicator matrix mapping each source word to\n its index in the \"extended\" vocab containing.\n `[src_len, batch, extra_words]`\n \"\"\"\n # CHECKS\n batch_by_tlen, _ = hidden.size()\n batch_by_tlen_, slen = attn.size()\n slen_, batch, cvocab = src_map.size()\n aeq(batch_by_tlen, batch_by_tlen_)\n aeq(slen, slen_)\n\n # Original probabilities.\n logits = self.linear(hidden)\n logits[:, self.tgt_dict.stoi[inputters.PAD_WORD]] = -float('inf')\n prob = torch.softmax(logits, 1)\n\n # Probability of copying p(z=1) batch.\n p_copy = torch.sigmoid(self.linear_copy(hidden))\n # Probibility of not copying: p_{word}(w) * (1 - p(z))\n out_prob = torch.mul(prob, 1 - p_copy)\n mul_attn = torch.mul(attn, p_copy)\n copy_prob = torch.bmm(\n mul_attn.view(-1, batch, slen).transpose(0, 1),\n src_map.transpose(0, 1)\n ).transpose(0, 1)\n copy_prob = copy_prob.contiguous().view(-1, cvocab)\n return torch.cat([out_prob, copy_prob], 1)\n\n\nclass CopyGeneratorLoss(nn.Module):\n \"\"\" Copy generator criterion \"\"\"\n\n def __init__(self, vocab_size, force_copy, unk_index=0,\n ignore_index=-100, eps=1e-20):\n super(CopyGeneratorLoss, self).__init__()\n self.force_copy = force_copy\n self.eps = eps\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.unk_index = unk_index\n\n def forward(self, scores, align, target):\n \"\"\"\n scores (FloatTensor): (batch_size*tgt_len) x dynamic vocab size\n align (LongTensor): (batch_size*tgt_len)\n target (LongTensor): (batch_size*tgt_len)\n \"\"\"\n # probabilities assigned by the model to the gold targets\n vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)\n\n # probability of tokens copied from source\n copy_ix = align.unsqueeze(1) + self.vocab_size\n copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)\n # Set scores for unk to 0 and add eps\n copy_tok_probs[align == self.unk_index] = 0\n copy_tok_probs += self.eps # to avoid -inf logs\n\n # find the indices in which you do not use the copy mechanism\n non_copy = align == self.unk_index\n if not self.force_copy:\n non_copy = non_copy | (target != self.unk_index)\n\n probs = torch.where(\n non_copy, copy_tok_probs + vocab_probs, copy_tok_probs\n )\n\n loss = -probs.log() # just NLLLoss; can the module be incorporated?\n # Drop padding.\n loss[target == self.ignore_index] = 0\n return loss\n\n\nclass CopyGeneratorLossCompute(LossComputeBase):\n \"\"\"\n Copy Generator Loss Computation.\n \"\"\"\n\n def __init__(self, criterion, generator, tgt_vocab, normalize_by_length):\n super(CopyGeneratorLossCompute, self).__init__(criterion, generator)\n self.tgt_vocab = tgt_vocab\n 
self.normalize_by_length = normalize_by_length\n\n def _make_shard_state(self, batch, output, range_, attns):\n \"\"\" See base class for args description. \"\"\"\n if getattr(batch, \"alignment\", None) is None:\n raise AssertionError(\"using -copy_attn you need to pass in \"\n \"-dynamic_dict during preprocess stage.\")\n\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n \"copy_attn\": attns.get(\"copy\"),\n \"align\": batch.alignment[range_[0] + 1: range_[1]]\n }\n\n def _compute_loss(self, batch, output, target, copy_attn, align):\n \"\"\"\n Compute the loss. The args must match self._make_shard_state().\n Args:\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n copy_attn: the copy attention value.\n align: the align info.\n \"\"\"\n target = target.view(-1)\n align = align.view(-1)\n scores = self.generator(self._bottle(output),\n self._bottle(copy_attn),\n batch.src_map)\n loss = self.criterion(scores, align, target)\n\n # this block does not depend on the loss value computed above\n # and is used only for stats\n scores_data = inputters.TextDataset.collapse_copy_scores(\n self._unbottle(scores.clone(), batch.batch_size),\n batch, self.tgt_vocab, batch.dataset.src_vocabs)\n scores_data = self._bottle(scores_data)\n\n # this block does not depend on the loss value computed above\n # and is used only for stats\n # Correct target copy token instead of <unk>\n # tgt[i] = align[i] + len(tgt_vocab)\n # for i such that tgt[i] == 0 and align[i] != 0\n target_data = target.clone()\n unk = self.criterion.unk_index\n correct_mask = (target_data == unk) & (align != unk)\n offset_align = align[correct_mask] + len(self.tgt_vocab)\n target_data[correct_mask] += offset_align\n\n # Compute sum of perplexities for stats\n stats = self._stats(loss.sum().clone(), scores_data, target_data)\n\n # this part looks like it belongs in CopyGeneratorLoss\n if self.normalize_by_length:\n # Compute Loss as NLL divided by seq length\n tgt_lens = batch.tgt.ne(self.padding_idx).sum(0).float()\n # Compute Total Loss per sequence in batch\n loss = loss.view(-1, batch.batch_size).sum(0)\n # Divide by length of each sequence and sum\n loss = torch.div(loss, tgt_lens).sum()\n else:\n loss = loss.sum()\n\n return loss, stats\n", "path": "onmt/modules/copy_generator.py"}]} | 3,804 | 225 |
gh_patches_debug_29112 | rasdani/github-patches | git_diff | crytic__slither-2239 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[False-Positive]: `msg.value` in a loop when `msg.value` is not transferred
### Describe the issue:
In the following code snippet, Slither detects the following:
<img width="1309" alt="image" src="https://github.com/crytic/slither/assets/31145285/eacf2441-3e2d-464a-97c3-635498e43120">
This seems invalid, as `msg.value` here is only checked in a conditional to ensure no value is sent to this function. In any case, no logic inside this function (including the internal `_transfer` function) forwards `msg.value`.
### Code example to reproduce the issue:
```solidity
modifier noNativeTokens() {
require(msg.value == 0, "Not aimed to receive native tokens");
_;
}
function executeRelayCallBatch(
bytes[] calldata signatures,
uint256[] calldata nonces,
uint256[] calldata validityTimestamps,
uint256[] calldata values,
bytes[] calldata payloads
) public payable noNativeTokens returns (bytes[] memory) {
if (
signatures.length != nonces.length ||
nonces.length != validityTimestamps.length ||
validityTimestamps.length != values.length ||
values.length != payloads.length
) {
revert("Batch ExecuteRelayCall Params Length Mismatch");
}
bytes[] memory castedVotes = new bytes[](payloads.length);
for (uint256 ii; ii < payloads.length; ++ii) {
require(values[ii] == 0, "Batch entry cannot contain value");
// cast each votes one by one
castedVotes[ii] = executeRelayCall(
signatures[ii],
nonces[ii],
validityTimestamps[ii],
payloads[ii]
);
}
return castedVotes;
}
```
### Version:
0.10.0
### Relevant log output:
_No response_
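A rough sketch of what special-casing this guard could look like, built only from names the detector and the project already use (`SolidityVariableComposed`, `Constant`, `ir.read`); the helper name is made up, and this is an illustration of the idea rather than the actual fix:

```python
from slither.core.declarations import SolidityVariableComposed
from slither.slithir.variables.constant import Constant


def is_zero_msg_value_guard(ir) -> bool:
    """True when the operation only compares msg.value against the literal 0,
    e.g. the `require(msg.value == 0, ...)` guard above."""
    msg_value = SolidityVariableComposed("msg.value")
    if msg_value not in ir.read:
        return False
    others = [v for v in ir.read if v != msg_value]
    return bool(others) and all(
        isinstance(v, Constant) and v.value == 0 for v in others
    )
```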
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/statements/msg_value_in_loop.py`
Content:
```
1 from typing import List, Optional
2 from slither.core.cfg.node import NodeType, Node
3 from slither.detectors.abstract_detector import (
4 AbstractDetector,
5 DetectorClassification,
6 DETECTOR_INFO,
7 )
8 from slither.slithir.operations import InternalCall
9 from slither.core.declarations import SolidityVariableComposed, Contract
10 from slither.utils.output import Output
11
12
13 def detect_msg_value_in_loop(contract: Contract) -> List[Node]:
14 results: List[Node] = []
15 for f in contract.functions_entry_points:
16 if f.is_implemented and f.payable:
17 msg_value_in_loop(f.entry_point, 0, [], results)
18 return results
19
20
21 def msg_value_in_loop(
22 node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]
23 ) -> None:
24
25 if node is None:
26 return
27
28 if node in visited:
29 return
30 # shared visited
31 visited.append(node)
32
33 if node.type == NodeType.STARTLOOP:
34 in_loop_counter += 1
35 elif node.type == NodeType.ENDLOOP:
36 in_loop_counter -= 1
37
38 for ir in node.all_slithir_operations():
39 if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read:
40 results.append(ir.node)
41 if isinstance(ir, (InternalCall)):
42 msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)
43
44 for son in node.sons:
45 msg_value_in_loop(son, in_loop_counter, visited, results)
46
47
48 class MsgValueInLoop(AbstractDetector):
49 """
50 Detect the use of msg.value inside a loop
51 """
52
53 ARGUMENT = "msg-value-loop"
54 HELP = "msg.value inside a loop"
55 IMPACT = DetectorClassification.HIGH
56 CONFIDENCE = DetectorClassification.MEDIUM
57
58 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop"
59
60 WIKI_TITLE = "`msg.value` inside a loop"
61 WIKI_DESCRIPTION = "Detect the use of `msg.value` inside a loop."
62
63 # region wiki_exploit_scenario
64 WIKI_EXPLOIT_SCENARIO = """
65 ```solidity
66 contract MsgValueInLoop{
67
68 mapping (address => uint256) balances;
69
70 function bad(address[] memory receivers) public payable {
71 for (uint256 i=0; i < receivers.length; i++) {
72 balances[receivers[i]] += msg.value;
73 }
74 }
75
76 }
77 ```
78 """
79 # endregion wiki_exploit_scenario
80
81 WIKI_RECOMMENDATION = """
82 Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.
83 """
84
85 def _detect(self) -> List[Output]:
86 """"""
87 results: List[Output] = []
88 for c in self.compilation_unit.contracts_derived:
89 values = detect_msg_value_in_loop(c)
90 for node in values:
91 func = node.function
92
93 info: DETECTOR_INFO = [func, " use msg.value in a loop: ", node, "\n"]
94 res = self.generate_result(info)
95 results.append(res)
96
97 return results
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py
--- a/slither/detectors/statements/msg_value_in_loop.py
+++ b/slither/detectors/statements/msg_value_in_loop.py
@@ -8,6 +8,9 @@
from slither.slithir.operations import InternalCall
from slither.core.declarations import SolidityVariableComposed, Contract
from slither.utils.output import Output
+from slither.slithir.variables.constant import Constant
+from slither.core.variables import Variable
+from slither.core.expressions.literal import Literal
def detect_msg_value_in_loop(contract: Contract) -> List[Node]:
@@ -37,6 +40,21 @@
for ir in node.all_slithir_operations():
if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read:
+ # If we find a conditional expression with msg.value and is compared to 0 we don't report it
+ if ir.node.is_conditional() and SolidityVariableComposed("msg.value") in ir.read:
+ compared_to = (
+ ir.read[1]
+ if ir.read[0] == SolidityVariableComposed("msg.value")
+ else ir.read[0]
+ )
+ if (
+ isinstance(compared_to, Constant)
+ and compared_to.value == 0
+ or isinstance(compared_to, Variable)
+ and isinstance(compared_to.expression, Literal)
+ and str(compared_to.expression.value) == "0"
+ ):
+ continue
results.append(ir.node)
if isinstance(ir, (InternalCall)):
msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)
| {"golden_diff": "diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py\n--- a/slither/detectors/statements/msg_value_in_loop.py\n+++ b/slither/detectors/statements/msg_value_in_loop.py\n@@ -8,6 +8,9 @@\n from slither.slithir.operations import InternalCall\n from slither.core.declarations import SolidityVariableComposed, Contract\n from slither.utils.output import Output\n+from slither.slithir.variables.constant import Constant\n+from slither.core.variables import Variable\n+from slither.core.expressions.literal import Literal\n \n \n def detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n@@ -37,6 +40,21 @@\n \n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n+ # If we find a conditional expression with msg.value and is compared to 0 we don't report it\n+ if ir.node.is_conditional() and SolidityVariableComposed(\"msg.value\") in ir.read:\n+ compared_to = (\n+ ir.read[1]\n+ if ir.read[0] == SolidityVariableComposed(\"msg.value\")\n+ else ir.read[0]\n+ )\n+ if (\n+ isinstance(compared_to, Constant)\n+ and compared_to.value == 0\n+ or isinstance(compared_to, Variable)\n+ and isinstance(compared_to.expression, Literal)\n+ and str(compared_to.expression.value) == \"0\"\n+ ):\n+ continue\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n", "issue": "[False-Positive]:`msg.value` in a loop when `msg.value` is not transferred\n### Describe the issue:\n\nIn the following code snippet, Slither detects the following:\r\n\r\n<img width=\"1309\" alt=\"image\" src=\"https://github.com/crytic/slither/assets/31145285/eacf2441-3e2d-464a-97c3-635498e43120\">\r\n\r\nThis seems invalid as `msg.value` here is just checked as a conditional to ensure no value is sent to this function. 
In any case, no logic inside this function (including inside the internal `_transfer` function) forwards `msg.value`.\n\n### Code example to reproduce the issue:\n\n```solidity\r\n\r\n modifier noNativeTokens() {\r\n require(msg.value == 0, \"Not aimed to receive native tokens\");\r\n _;\r\n }\r\n \r\n function executeRelayCallBatch(\r\n bytes[] calldata signatures,\r\n uint256[] calldata nonces,\r\n uint256[] calldata validityTimestamps,\r\n uint256[] calldata values,\r\n bytes[] calldata payloads\r\n ) public payable noNativeTokens returns (bytes[] memory) {\r\n if (\r\n signatures.length != nonces.length ||\r\n nonces.length != validityTimestamps.length ||\r\n validityTimestamps.length != values.length ||\r\n values.length != payloads.length\r\n ) {\r\n revert(\"Batch ExecuteRelayCall Params Length Mismatch\");\r\n }\r\n\r\n bytes[] memory castedVotes = new bytes[](payloads.length);\r\n\r\n for (uint256 ii; ii < payloads.length; ++ii) {\r\n require(values[ii] == 0, \"Batch entry cannot contain value\");\r\n\r\n // cast each votes one by one\r\n castedVotes[ii] = executeRelayCall(\r\n signatures[ii],\r\n nonces[ii],\r\n validityTimestamps[ii],\r\n payloads[ii]\r\n );\r\n }\r\n\r\n return castedVotes;\r\n }\r\n```\n\n### Version:\n\n0.10.0\n\n### Relevant log output:\n\n_No response_\n", "before_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nProvide an 
explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}], "after_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\nfrom slither.slithir.variables.constant import Constant\nfrom slither.core.variables import Variable\nfrom slither.core.expressions.literal import Literal\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n # If we find a conditional expression with msg.value and is compared to 0 we don't report it\n if ir.node.is_conditional() and SolidityVariableComposed(\"msg.value\") in ir.read:\n compared_to = (\n ir.read[1]\n if ir.read[0] == SolidityVariableComposed(\"msg.value\")\n else ir.read[0]\n )\n if (\n isinstance(compared_to, Constant)\n and compared_to.value == 0\n or isinstance(compared_to, Variable)\n and isinstance(compared_to.expression, Literal)\n and str(compared_to.expression.value) == \"0\"\n ):\n continue\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nProvide an explicit array of amounts alongside the receivers array, and check that 
the sum of all amounts matches `msg.value`.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}]} | 1,596 | 386 |
gh_patches_debug_14377 | rasdani/github-patches | git_diff | pantsbuild__pants-9773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Narrow or remove the hole that pantsd punches in the ignore patterns for `.pids`
In #9636, we punched a hole in the ignore patterns in order to allow `pantsd` to watch its own pidfile. But in cases where users had previously used `pantsd`, this resulted in a failure to deal with an orphaned `watchman` socket file (`watchman` is now disabled by default, so this only impacts users who had previously used it) which had previously been ignored.
```
22:32:58:201 [ERROR] 1 Exception encountered:
Exception: Failed to scan directory "$workspace/.pids/watchman": Expected File, Dir or Link, but ".pids/watchman/watchman.sock" (relative to "$workspace") was a FileType(FileType { mode: 49152 })
```
Narrowing the pattern to just the `.pids/pantsd` subdirectory is challenging (possibly not an option) because gitignore patterns do not allow re-including the children (`.pids/pantsd` in this case) of ignored parents (`.pids`, due to `.*`).
--- END ISSUE ---
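The re-inclusion limitation mentioned in the issue follows gitignore semantics, which the merged `--pants-ignore` patterns use: once a directory is excluded, patterns cannot re-include anything beneath it; only the directory itself can be un-ignored. A minimal illustration, assuming the default `.pids` subprocess directory (the patch recorded below takes the second approach, re-ignoring the `watchman` child after punching the hole):

```
# Does not work: a child of an excluded parent cannot be re-included
.*               # excludes .pids (and every other dot-prefixed path)
!.pids/pantsd/   # no effect, .pids is already excluded

# Does work: re-include the parent, then ignore the problematic child
.*
!.pids/
.pids/watchman/
```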
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/init/options_initializer.py`
Content:
```
1 # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import logging
5 import os
6 import sys
7
8 import pkg_resources
9
10 from pants.base.build_environment import pants_version
11 from pants.base.exceptions import BuildConfigurationError
12 from pants.build_graph.build_configuration import BuildConfiguration
13 from pants.goal.goal import Goal
14 from pants.init.extension_loader import load_backends_and_plugins
15 from pants.init.global_subsystems import GlobalSubsystems
16 from pants.init.plugin_resolver import PluginResolver
17 from pants.option.global_options import GlobalOptions
18 from pants.subsystem.subsystem import Subsystem
19 from pants.util.dirutil import fast_relpath_optional
20
21 logger = logging.getLogger(__name__)
22
23
24 class BuildConfigInitializer:
25 """Initializes a BuildConfiguration object.
26
27 This class uses a class-level cache for the internally generated `BuildConfiguration` object,
28 which permits multiple invocations in the same runtime context without re-incurring backend &
29 plugin loading, which can be expensive and cause issues (double task registration, etc).
30 """
31
32 _cached_build_config = None
33
34 @classmethod
35 def get(cls, options_bootstrapper):
36 if cls._cached_build_config is None:
37 cls._cached_build_config = cls(options_bootstrapper).setup()
38 return cls._cached_build_config
39
40 @classmethod
41 def reset(cls):
42 cls._cached_build_config = None
43
44 def __init__(self, options_bootstrapper):
45 self._options_bootstrapper = options_bootstrapper
46 self._bootstrap_options = options_bootstrapper.get_bootstrap_options().for_global_scope()
47 self._working_set = PluginResolver(self._options_bootstrapper).resolve()
48
49 def _load_plugins(self):
50 # Add any extra paths to python path (e.g., for loading extra source backends).
51 for path in self._bootstrap_options.pythonpath:
52 if path not in sys.path:
53 sys.path.append(path)
54 pkg_resources.fixup_namespace_packages(path)
55
56 # Load plugins and backends.
57 return load_backends_and_plugins(
58 self._bootstrap_options.plugins,
59 self._bootstrap_options.plugins2,
60 self._working_set,
61 self._bootstrap_options.backend_packages,
62 self._bootstrap_options.backend_packages2,
63 BuildConfiguration(),
64 )
65
66 def setup(self):
67 """Load backends and plugins.
68
69 :returns: A `BuildConfiguration` object constructed during backend/plugin loading.
70 """
71 return self._load_plugins()
72
73
74 class OptionsInitializer:
75 """Initializes options."""
76
77 @staticmethod
78 def _construct_options(options_bootstrapper, build_configuration):
79 """Parse and register options.
80
81 :returns: An Options object representing the full set of runtime options.
82 """
83 # Now that plugins and backends are loaded, we can gather the known scopes.
84
85 # Gather the optionables that are not scoped to any other. All known scopes are reachable
86 # via these optionables' known_scope_infos() methods.
87 top_level_optionables = (
88 {GlobalOptions}
89 | GlobalSubsystems.get()
90 | build_configuration.optionables()
91 | set(Goal.get_optionables())
92 )
93
94 # Now that we have the known scopes we can get the full options. `get_full_options` will
95 # sort and de-duplicate these for us.
96 known_scope_infos = [
97 si for optionable in top_level_optionables for si in optionable.known_scope_infos()
98 ]
99 return options_bootstrapper.get_full_options(known_scope_infos)
100
101 @staticmethod
102 def compute_pants_ignore(buildroot, global_options):
103 """Computes the merged value of the `--pants-ignore` flag.
104
105 This inherently includes the workdir and distdir locations if they are located under the
106 buildroot.
107 """
108 pants_ignore = list(global_options.pants_ignore)
109
110 def add(absolute_path, include=False):
111 # To ensure that the path is ignored regardless of whether it is a symlink or a directory, we
112 # strip trailing slashes (which would signal that we wanted to ignore only directories).
113 maybe_rel_path = fast_relpath_optional(absolute_path, buildroot)
114 if maybe_rel_path:
115 rel_path = maybe_rel_path.rstrip(os.path.sep)
116 prefix = "!" if include else ""
117 pants_ignore.append(f"{prefix}/{rel_path}")
118
119 add(global_options.pants_workdir)
120 add(global_options.pants_distdir)
121 # NB: We punch a hole in the ignore patterns to allow pants to directly watch process
122 # metadata that is written to disk.
123 add(global_options.pants_subprocessdir, include=True)
124
125 return pants_ignore
126
127 @staticmethod
128 def compute_pantsd_invalidation_globs(buildroot, bootstrap_options):
129 """Computes the merged value of the `--pantsd-invalidation-globs` option.
130
131 Combines --pythonpath and --pants-config-files files that are in {buildroot} dir with those
132 invalidation_globs provided by users.
133 """
134 invalidation_globs = set()
135 globs = set(
136 sys.path
137 + bootstrap_options.pythonpath
138 + bootstrap_options.pants_config_files
139 + bootstrap_options.pantsd_invalidation_globs
140 )
141
142 for glob in globs:
143 if glob.startswith("!"):
144 invalidation_globs.add(glob)
145 continue
146
147 glob_relpath = fast_relpath_optional(glob, buildroot) if os.path.isabs(glob) else glob
148 if glob_relpath:
149 invalidation_globs.update([glob_relpath, glob_relpath + "/**"])
150 else:
151 logger.debug(
152 f"Changes to {glob}, outside of the buildroot, will not be invalidated."
153 )
154
155 return list(sorted(invalidation_globs))
156
157 @classmethod
158 def create(cls, options_bootstrapper, build_configuration, init_subsystems=True):
159 global_bootstrap_options = options_bootstrapper.get_bootstrap_options().for_global_scope()
160
161 if global_bootstrap_options.pants_version != pants_version():
162 raise BuildConfigurationError(
163 f"Version mismatch: Requested version was {global_bootstrap_options.pants_version}, "
164 f"our version is {pants_version()}."
165 )
166
167 # Parse and register options.
168 options = cls._construct_options(options_bootstrapper, build_configuration)
169
170 GlobalOptions.validate_instance(options.for_global_scope())
171
172 if init_subsystems:
173 Subsystem.set_options(options)
174
175 return options
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/init/options_initializer.py b/src/python/pants/init/options_initializer.py
--- a/src/python/pants/init/options_initializer.py
+++ b/src/python/pants/init/options_initializer.py
@@ -118,9 +118,12 @@
add(global_options.pants_workdir)
add(global_options.pants_distdir)
- # NB: We punch a hole in the ignore patterns to allow pants to directly watch process
- # metadata that is written to disk.
+ # TODO: We punch a hole in the ignore patterns to allow pantsd to directly watch process
+ # metadata that is written to disk, but we re-ignore the watchman directory (which
+ # contains a named pipe). Over time, as more of the pantsd server components are ported to
+ # rust, we will be able to remove this special case.
add(global_options.pants_subprocessdir, include=True)
+ add(os.path.join(global_options.pants_subprocessdir, "watchman"))
return pants_ignore
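With this patch, and assuming the default `.pids` subprocess directory, the last two entries produced by `compute_pants_ignore()` become `!/.pids` followed by `/.pids/watchman`: the hole for process metadata is kept, but the directory holding watchman's named pipe (the `watchman.sock` from the traceback above) is ignored again.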
| {"golden_diff": "diff --git a/src/python/pants/init/options_initializer.py b/src/python/pants/init/options_initializer.py\n--- a/src/python/pants/init/options_initializer.py\n+++ b/src/python/pants/init/options_initializer.py\n@@ -118,9 +118,12 @@\n \n add(global_options.pants_workdir)\n add(global_options.pants_distdir)\n- # NB: We punch a hole in the ignore patterns to allow pants to directly watch process\n- # metadata that is written to disk.\n+ # TODO: We punch a hole in the ignore patterns to allow pantsd to directly watch process\n+ # metadata that is written to disk, but we re-ignore the watchman directory (which\n+ # contains a named pipe). Over time, as more of the pantsd server components are ported to\n+ # rust, we will be able to remove this special case.\n add(global_options.pants_subprocessdir, include=True)\n+ add(os.path.join(global_options.pants_subprocessdir, \"watchman\"))\n \n return pants_ignore\n", "issue": "Narrow or remove the hole that pantsd punches in the ignore patterns for `.pids`\nIn #9636, we punched a hole in the ignore patterns in order to allow `pantsd` to watch its own pidfile. But in cases where users had previously used `pantsd`, this resulted in a failure to deal with an orphaned `watchman` socket file (`watchman` is now disabled by default, so this only impacts users who) which had previous been ignored.\r\n\r\n```\r\n22:32:58:201 [ERROR] 1 Exception encountered:\r\n Exception: Failed to scan directory \"$workspace/.pids/watchman\": Expected File, Dir or Link, but \".pids/watchman/watchman.sock\" (relative to \"$workspace\") was a FileType(FileType { mode: 49152 })\r\n```\r\n\r\nNarrowing the pattern to just the `.pids/pantsd` subdirectory is challenging (possibly not an option) because gitignore patterns do not allow re-including the children (`.pids/pantsd` in this case) of ignored parents (`.pids`, due to `.*`).\n", "before_files": [{"content": "# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport logging\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pants.base.build_environment import pants_version\nfrom pants.base.exceptions import BuildConfigurationError\nfrom pants.build_graph.build_configuration import BuildConfiguration\nfrom pants.goal.goal import Goal\nfrom pants.init.extension_loader import load_backends_and_plugins\nfrom pants.init.global_subsystems import GlobalSubsystems\nfrom pants.init.plugin_resolver import PluginResolver\nfrom pants.option.global_options import GlobalOptions\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.dirutil import fast_relpath_optional\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuildConfigInitializer:\n \"\"\"Initializes a BuildConfiguration object.\n\n This class uses a class-level cache for the internally generated `BuildConfiguration` object,\n which permits multiple invocations in the same runtime context without re-incurring backend &\n plugin loading, which can be expensive and cause issues (double task registration, etc).\n \"\"\"\n\n _cached_build_config = None\n\n @classmethod\n def get(cls, options_bootstrapper):\n if cls._cached_build_config is None:\n cls._cached_build_config = cls(options_bootstrapper).setup()\n return cls._cached_build_config\n\n @classmethod\n def reset(cls):\n cls._cached_build_config = None\n\n def __init__(self, options_bootstrapper):\n self._options_bootstrapper = options_bootstrapper\n self._bootstrap_options = 
options_bootstrapper.get_bootstrap_options().for_global_scope()\n self._working_set = PluginResolver(self._options_bootstrapper).resolve()\n\n def _load_plugins(self):\n # Add any extra paths to python path (e.g., for loading extra source backends).\n for path in self._bootstrap_options.pythonpath:\n if path not in sys.path:\n sys.path.append(path)\n pkg_resources.fixup_namespace_packages(path)\n\n # Load plugins and backends.\n return load_backends_and_plugins(\n self._bootstrap_options.plugins,\n self._bootstrap_options.plugins2,\n self._working_set,\n self._bootstrap_options.backend_packages,\n self._bootstrap_options.backend_packages2,\n BuildConfiguration(),\n )\n\n def setup(self):\n \"\"\"Load backends and plugins.\n\n :returns: A `BuildConfiguration` object constructed during backend/plugin loading.\n \"\"\"\n return self._load_plugins()\n\n\nclass OptionsInitializer:\n \"\"\"Initializes options.\"\"\"\n\n @staticmethod\n def _construct_options(options_bootstrapper, build_configuration):\n \"\"\"Parse and register options.\n\n :returns: An Options object representing the full set of runtime options.\n \"\"\"\n # Now that plugins and backends are loaded, we can gather the known scopes.\n\n # Gather the optionables that are not scoped to any other. All known scopes are reachable\n # via these optionables' known_scope_infos() methods.\n top_level_optionables = (\n {GlobalOptions}\n | GlobalSubsystems.get()\n | build_configuration.optionables()\n | set(Goal.get_optionables())\n )\n\n # Now that we have the known scopes we can get the full options. `get_full_options` will\n # sort and de-duplicate these for us.\n known_scope_infos = [\n si for optionable in top_level_optionables for si in optionable.known_scope_infos()\n ]\n return options_bootstrapper.get_full_options(known_scope_infos)\n\n @staticmethod\n def compute_pants_ignore(buildroot, global_options):\n \"\"\"Computes the merged value of the `--pants-ignore` flag.\n\n This inherently includes the workdir and distdir locations if they are located under the\n buildroot.\n \"\"\"\n pants_ignore = list(global_options.pants_ignore)\n\n def add(absolute_path, include=False):\n # To ensure that the path is ignored regardless of whether it is a symlink or a directory, we\n # strip trailing slashes (which would signal that we wanted to ignore only directories).\n maybe_rel_path = fast_relpath_optional(absolute_path, buildroot)\n if maybe_rel_path:\n rel_path = maybe_rel_path.rstrip(os.path.sep)\n prefix = \"!\" if include else \"\"\n pants_ignore.append(f\"{prefix}/{rel_path}\")\n\n add(global_options.pants_workdir)\n add(global_options.pants_distdir)\n # NB: We punch a hole in the ignore patterns to allow pants to directly watch process\n # metadata that is written to disk.\n add(global_options.pants_subprocessdir, include=True)\n\n return pants_ignore\n\n @staticmethod\n def compute_pantsd_invalidation_globs(buildroot, bootstrap_options):\n \"\"\"Computes the merged value of the `--pantsd-invalidation-globs` option.\n\n Combines --pythonpath and --pants-config-files files that are in {buildroot} dir with those\n invalidation_globs provided by users.\n \"\"\"\n invalidation_globs = set()\n globs = set(\n sys.path\n + bootstrap_options.pythonpath\n + bootstrap_options.pants_config_files\n + bootstrap_options.pantsd_invalidation_globs\n )\n\n for glob in globs:\n if glob.startswith(\"!\"):\n invalidation_globs.add(glob)\n continue\n\n glob_relpath = fast_relpath_optional(glob, buildroot) if os.path.isabs(glob) else glob\n if 
glob_relpath:\n invalidation_globs.update([glob_relpath, glob_relpath + \"/**\"])\n else:\n logger.debug(\n f\"Changes to {glob}, outside of the buildroot, will not be invalidated.\"\n )\n\n return list(sorted(invalidation_globs))\n\n @classmethod\n def create(cls, options_bootstrapper, build_configuration, init_subsystems=True):\n global_bootstrap_options = options_bootstrapper.get_bootstrap_options().for_global_scope()\n\n if global_bootstrap_options.pants_version != pants_version():\n raise BuildConfigurationError(\n f\"Version mismatch: Requested version was {global_bootstrap_options.pants_version}, \"\n f\"our version is {pants_version()}.\"\n )\n\n # Parse and register options.\n options = cls._construct_options(options_bootstrapper, build_configuration)\n\n GlobalOptions.validate_instance(options.for_global_scope())\n\n if init_subsystems:\n Subsystem.set_options(options)\n\n return options\n", "path": "src/python/pants/init/options_initializer.py"}], "after_files": [{"content": "# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport logging\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pants.base.build_environment import pants_version\nfrom pants.base.exceptions import BuildConfigurationError\nfrom pants.build_graph.build_configuration import BuildConfiguration\nfrom pants.goal.goal import Goal\nfrom pants.init.extension_loader import load_backends_and_plugins\nfrom pants.init.global_subsystems import GlobalSubsystems\nfrom pants.init.plugin_resolver import PluginResolver\nfrom pants.option.global_options import GlobalOptions\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.dirutil import fast_relpath_optional\n\nlogger = logging.getLogger(__name__)\n\n\nclass BuildConfigInitializer:\n \"\"\"Initializes a BuildConfiguration object.\n\n This class uses a class-level cache for the internally generated `BuildConfiguration` object,\n which permits multiple invocations in the same runtime context without re-incurring backend &\n plugin loading, which can be expensive and cause issues (double task registration, etc).\n \"\"\"\n\n _cached_build_config = None\n\n @classmethod\n def get(cls, options_bootstrapper):\n if cls._cached_build_config is None:\n cls._cached_build_config = cls(options_bootstrapper).setup()\n return cls._cached_build_config\n\n @classmethod\n def reset(cls):\n cls._cached_build_config = None\n\n def __init__(self, options_bootstrapper):\n self._options_bootstrapper = options_bootstrapper\n self._bootstrap_options = options_bootstrapper.get_bootstrap_options().for_global_scope()\n self._working_set = PluginResolver(self._options_bootstrapper).resolve()\n\n def _load_plugins(self):\n # Add any extra paths to python path (e.g., for loading extra source backends).\n for path in self._bootstrap_options.pythonpath:\n if path not in sys.path:\n sys.path.append(path)\n pkg_resources.fixup_namespace_packages(path)\n\n # Load plugins and backends.\n return load_backends_and_plugins(\n self._bootstrap_options.plugins,\n self._bootstrap_options.plugins2,\n self._working_set,\n self._bootstrap_options.backend_packages,\n self._bootstrap_options.backend_packages2,\n BuildConfiguration(),\n )\n\n def setup(self):\n \"\"\"Load backends and plugins.\n\n :returns: A `BuildConfiguration` object constructed during backend/plugin loading.\n \"\"\"\n return self._load_plugins()\n\n\nclass OptionsInitializer:\n \"\"\"Initializes options.\"\"\"\n\n @staticmethod\n def 
_construct_options(options_bootstrapper, build_configuration):\n \"\"\"Parse and register options.\n\n :returns: An Options object representing the full set of runtime options.\n \"\"\"\n # Now that plugins and backends are loaded, we can gather the known scopes.\n\n # Gather the optionables that are not scoped to any other. All known scopes are reachable\n # via these optionables' known_scope_infos() methods.\n top_level_optionables = (\n {GlobalOptions}\n | GlobalSubsystems.get()\n | build_configuration.optionables()\n | set(Goal.get_optionables())\n )\n\n # Now that we have the known scopes we can get the full options. `get_full_options` will\n # sort and de-duplicate these for us.\n known_scope_infos = [\n si for optionable in top_level_optionables for si in optionable.known_scope_infos()\n ]\n return options_bootstrapper.get_full_options(known_scope_infos)\n\n @staticmethod\n def compute_pants_ignore(buildroot, global_options):\n \"\"\"Computes the merged value of the `--pants-ignore` flag.\n\n This inherently includes the workdir and distdir locations if they are located under the\n buildroot.\n \"\"\"\n pants_ignore = list(global_options.pants_ignore)\n\n def add(absolute_path, include=False):\n # To ensure that the path is ignored regardless of whether it is a symlink or a directory, we\n # strip trailing slashes (which would signal that we wanted to ignore only directories).\n maybe_rel_path = fast_relpath_optional(absolute_path, buildroot)\n if maybe_rel_path:\n rel_path = maybe_rel_path.rstrip(os.path.sep)\n prefix = \"!\" if include else \"\"\n pants_ignore.append(f\"{prefix}/{rel_path}\")\n\n add(global_options.pants_workdir)\n add(global_options.pants_distdir)\n # TODO: We punch a hole in the ignore patterns to allow pantsd to directly watch process\n # metadata that is written to disk, but we re-ignore the watchman directory (which\n # contains a named pipe). 
Over time, as more of the pantsd server components are ported to\n # rust, we will be able to remove this special case.\n add(global_options.pants_subprocessdir, include=True)\n add(os.path.join(global_options.pants_subprocessdir, \"watchman\"))\n\n return pants_ignore\n\n @staticmethod\n def compute_pantsd_invalidation_globs(buildroot, bootstrap_options):\n \"\"\"Computes the merged value of the `--pantsd-invalidation-globs` option.\n\n Combines --pythonpath and --pants-config-files files that are in {buildroot} dir with those\n invalidation_globs provided by users.\n \"\"\"\n invalidation_globs = set()\n globs = set(\n sys.path\n + bootstrap_options.pythonpath\n + bootstrap_options.pants_config_files\n + bootstrap_options.pantsd_invalidation_globs\n )\n\n for glob in globs:\n if glob.startswith(\"!\"):\n invalidation_globs.add(glob)\n continue\n\n glob_relpath = fast_relpath_optional(glob, buildroot) if os.path.isabs(glob) else glob\n if glob_relpath:\n invalidation_globs.update([glob_relpath, glob_relpath + \"/**\"])\n else:\n logger.debug(\n f\"Changes to {glob}, outside of the buildroot, will not be invalidated.\"\n )\n\n return list(sorted(invalidation_globs))\n\n @classmethod\n def create(cls, options_bootstrapper, build_configuration, init_subsystems=True):\n global_bootstrap_options = options_bootstrapper.get_bootstrap_options().for_global_scope()\n\n if global_bootstrap_options.pants_version != pants_version():\n raise BuildConfigurationError(\n f\"Version mismatch: Requested version was {global_bootstrap_options.pants_version}, \"\n f\"our version is {pants_version()}.\"\n )\n\n # Parse and register options.\n options = cls._construct_options(options_bootstrapper, build_configuration)\n\n GlobalOptions.validate_instance(options.for_global_scope())\n\n if init_subsystems:\n Subsystem.set_options(options)\n\n return options\n", "path": "src/python/pants/init/options_initializer.py"}]} | 2,294 | 228 |
gh_patches_debug_786 | rasdani/github-patches | git_diff | nilearn__nilearn-4306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tests failing on main with pytest 8.1.0
See https://github.com/nilearn/nilearn/actions/runs/8136733065/job/22233621361
```
test_plotting: install_deps> python -I -m pip install kaleido 'kaleido; platform_system != "Windows"' 'kaleido==0.1.0.post1; platform_system == "Windows"' 'matplotlib>=3.3.0' plotly
.pkg: install_requires> python -I -m pip install hatch-vcs hatchling
.pkg: _optional_hooks> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build
.pkg: get_requires_for_build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build
.pkg: freeze> python -m pip freeze --all
.pkg: editables==0.5,hatch-vcs==0.4.0,hatchling==1.21.1,packaging==23.2,pathspec==0.12.1,pip==24.0,pluggy==1.4.0,setuptools==69.1.1,setuptools-scm==8.0.4,trove-classifiers==2024.3.3,typing_extensions==4.10.0
.pkg: build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build
test_plotting: install_package_deps> python -I -m pip install coverage 'joblib>=1.0.0' lxml 'nibabel>=4.0.0' 'numpy>=1.19.0' packaging 'pandas>=1.1.5' pytest-cov 'pytest>=6.0.0' 'requests>=2.25.0' 'scikit-learn>=1.0.0' 'scipy>=1.8.0'
test_plotting: install_package> python -I -m pip install --force-reinstall --no-deps /Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1+gf91de22.tar.gz
test_plotting: freeze> python -m pip freeze --all
test_plotting: certifi==2024.2.2,charset-normalizer==3.3.2,contourpy==1.2.0,coverage==7.4.3,cycler==0.12.1,fonttools==4.49.0,idna==3.6,iniconfig==2.0.0,joblib==1.3.2,kaleido==0.2.1,kiwisolver==1.4.5,lxml==5.1.0,matplotlib==3.8.3,nibabel==5.2.1,nilearn @ file:///Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1%2Bgf91de22.tar.gz#sha256=b29f617bbb5d9aa3a94aa7518f1006aea9f52d58d945a8b82ed6951bffccb22e,numpy==1.26.4,packaging==23.2,pandas==2.2.1,pillow==10.2.0,pip==24.0,plotly==5.19.0,pluggy==1.4.0,pyparsing==3.1.1,pytest==8.1.0,pytest-cov==4.1.0,python-dateutil==2.9.0.post0,pytz==2024.1,requests==2.31.0,scikit-learn==1.4.1.post1,scipy==1.12.0,six==1.16.0,tenacity==8.2.3,threadpoolctl==3.3.0,tzdata==2024.1,urllib3==2.2.1
test_plotting: commands[0]> pytest --cov=nilearn --cov-report=xml nilearn
============================= test session starts ==============================
platform darwin -- Python 3.12.1, pytest-8.1.0, pluggy-1.4.0 -- /Users/runner/work/nilearn/nilearn/.tox/test_plotting/bin/python
cachedir: .tox/test_plotting/.pytest_cache
rootdir: /Users/runner/work/nilearn/nilearn
configfile: pyproject.toml
plugins: cov-4.1.0
collecting ... collected 3118 items / 1 error
==================================== ERRORS ====================================
______________________ ERROR collecting nilearn/externals ______________________
.tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:167: in register
self._verify_hook(hook, hookimpl)
hook = <HookCaller 'pytest_ignore_collect'>
hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>>
hookimpl_opts = {'hookwrapper': False,
'optionalhook': False,
'specname': None,
'tryfirst': False,
'trylast': False,
'wrapper': False}
method = <function pytest_ignore_collect at 0x12f044680>
name = 'pytest_ignore_collect'
plugin = <module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>
plugin_name = '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'
self = <_pytest.config.PytestPluginManager object at 0x10c395130>
.tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:342: in _verify_hook
raise PluginValidationError(
E pluggy._manager.PluginValidationError: Plugin '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' for hook 'pytest_ignore_collect'
E hookimpl definition: pytest_ignore_collect(path, config)
E Argument(s) {'path'} are declared in the hookimpl but can not be found in the hookspec
hook = <HookCaller 'pytest_ignore_collect'>
hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>>
notinspec = {'path'}
self = <_pytest.config.PytestPluginManager object at 0x10c395130>
=============================== warnings summary ===============================
nilearn/input_data/__init__.py:23
/Users/runner/work/nilearn/nilearn/nilearn/input_data/__init__.py:23: DeprecationWarning: The import path 'nilearn.input_data' is deprecated in version 0.9. Importing from 'nilearn.input_data' will be possible at least until release 0.13.0. Please import from 'nilearn.maskers' instead.
warnings.warn(message, DeprecationWarning)
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
---------- coverage: platform darwin, python 3.12.1-final-0 ----------
Coverage XML written to file coverage.xml
=========================== short test summary info ============================
ERROR nilearn/externals - pluggy._manager.PluginValidationError: Plugin '/Use...
!!!!!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!
========================= 1 warning, 1 error in 17.43s =========================
test_plotting: exit 2 (22.26 seconds) /Users/runner/work/nilearn/nilearn> pytest --cov=nilearn --cov-report=xml nilearn pid=7328
.pkg: _exit> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build
test_plotting: FAIL code 2 (102.15=setup[79.89]+cmd[22.26] seconds)
evaluation failed :( (102.25 seconds)
```
The tests run fine locally:
- with pytest<8.1.0
- OR removing the conftest in nilearn/nilearn/externals/conftest.py (which tells pytest to ignore collecting tests, though there are no tests in that subfolder - EDIT: tests are in the __init__.py file).
--- END ISSUE ---
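The PluginValidationError in the log is pytest 8.1 rejecting the legacy `path` argument, which is no longer part of the `pytest_ignore_collect` hookspec. If the conftest were kept instead of deleted (the recorded fix below removes it), a compatible hookimpl would have to use the newer `collection_path` parameter; a minimal sketch, assuming a recent pytest that provides it:

```python
# nilearn/externals/conftest.py -- sketch of a pytest>=7 style hookimpl
def pytest_ignore_collect(collection_path, config):
    return True
```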
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/externals/conftest.py`
Content:
```
1 # Do not collect any tests in externals. This is more robust than using
2 # --ignore because --ignore needs a path and it is not convenient to pass in
3 # the externals path (very long install-dependent path in site-packages) when
4 # using --pyargs
5
6
7 def pytest_ignore_collect(path, config):
8 return True
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nilearn/externals/conftest.py b/nilearn/externals/conftest.py
deleted file mode 100644
--- a/nilearn/externals/conftest.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Do not collect any tests in externals. This is more robust than using
-# --ignore because --ignore needs a path and it is not convenient to pass in
-# the externals path (very long install-dependent path in site-packages) when
-# using --pyargs
-
-
-def pytest_ignore_collect(path, config):
- return True
| {"golden_diff": "diff --git a/nilearn/externals/conftest.py b/nilearn/externals/conftest.py\ndeleted file mode 100644\n--- a/nilearn/externals/conftest.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-# Do not collect any tests in externals. This is more robust than using\n-# --ignore because --ignore needs a path and it is not convenient to pass in\n-# the externals path (very long install-dependent path in site-packages) when\n-# using --pyargs\n-\n-\n-def pytest_ignore_collect(path, config):\n- return True\n", "issue": "Tests failing on main with pytest 8.1.0\nSee https://github.com/nilearn/nilearn/actions/runs/8136733065/job/22233621361\r\n\r\n```\r\ntest_plotting: install_deps> python -I -m pip install kaleido 'kaleido; platform_system != \"Windows\"' 'kaleido==0.1.0.post1; platform_system == \"Windows\"' 'matplotlib>=3.3.0' plotly\r\n.pkg: install_requires> python -I -m pip install hatch-vcs hatchling\r\n.pkg: _optional_hooks> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build\r\n.pkg: get_requires_for_build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build\r\n.pkg: freeze> python -m pip freeze --all\r\n.pkg: editables==0.5,hatch-vcs==0.4.0,hatchling==1.21.1,packaging==23.2,pathspec==0.12.1,pip==24.0,pluggy==1.4.0,setuptools==69.1.1,setuptools-scm==8.0.4,trove-classifiers==2024.3.3,typing_extensions==4.10.0\r\n.pkg: build_sdist> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build\r\ntest_plotting: install_package_deps> python -I -m pip install coverage 'joblib>=1.0.0' lxml 'nibabel>=4.0.0' 'numpy>=1.19.0' packaging 'pandas>=1.1.5' pytest-cov 'pytest>=6.0.0' 'requests>=2.25.0' 'scikit-learn>=1.0.0' 'scipy>=1.8.0'\r\ntest_plotting: install_package> python -I -m pip install --force-reinstall --no-deps /Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1+gf91de22.tar.gz\r\ntest_plotting: freeze> python -m pip freeze --all\r\ntest_plotting: certifi==2024.2.2,charset-normalizer==3.3.2,contourpy==1.2.0,coverage==7.4.3,cycler==0.12.1,fonttools==4.49.0,idna==3.6,iniconfig==2.0.0,joblib==1.3.2,kaleido==0.2.1,kiwisolver==1.4.5,lxml==5.1.0,matplotlib==3.8.3,nibabel==5.2.1,nilearn @ file:///Users/runner/work/nilearn/nilearn/.tox/.tmp/package/1/nilearn-0.1.dev1%2Bgf91de22.tar.gz#sha256=b29f617bbb5d9aa3a94aa7518f1006aea9f52d58d945a8b82ed6951bffccb22e,numpy==1.26.4,packaging==23.2,pandas==2.2.1,pillow==10.2.0,pip==24.0,plotly==5.19.0,pluggy==1.4.0,pyparsing==3.1.1,pytest==8.1.0,pytest-cov==4.1.0,python-dateutil==2.9.0.post0,pytz==2024.1,requests==2.31.0,scikit-learn==1.4.1.post1,scipy==1.12.0,six==1.16.0,tenacity==8.2.3,threadpoolctl==3.3.0,tzdata==2024.1,urllib3==2.2.1\r\ntest_plotting: commands[0]> pytest --cov=nilearn --cov-report=xml nilearn\r\n============================= test session starts ==============================\r\nplatform darwin -- Python 3.12.1, pytest-8.1.0, pluggy-1.4.0 -- /Users/runner/work/nilearn/nilearn/.tox/test_plotting/bin/python\r\ncachedir: .tox/test_plotting/.pytest_cache\r\nrootdir: /Users/runner/work/nilearn/nilearn\r\nconfigfile: pyproject.toml\r\nplugins: cov-4.1.0\r\ncollecting ... 
collected 3118 items / 1 error\r\n\r\n==================================== ERRORS ====================================\r\n______________________ ERROR collecting nilearn/externals ______________________\r\n.tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:167: in register\r\n self._verify_hook(hook, hookimpl)\r\n hook = <HookCaller 'pytest_ignore_collect'>\r\n hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>>\r\n hookimpl_opts = {'hookwrapper': False,\r\n 'optionalhook': False,\r\n 'specname': None,\r\n 'tryfirst': False,\r\n 'trylast': False,\r\n 'wrapper': False}\r\n method = <function pytest_ignore_collect at 0x12f044680>\r\n name = 'pytest_ignore_collect'\r\n plugin = <module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>\r\n plugin_name = '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'\r\n self = <_pytest.config.PytestPluginManager object at 0x10c395130>\r\n.tox/test_plotting/lib/python3.12/site-packages/pluggy/_manager.py:342: in _verify_hook\r\n raise PluginValidationError(\r\nE pluggy._manager.PluginValidationError: Plugin '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py' for hook 'pytest_ignore_collect'\r\nE hookimpl definition: pytest_ignore_collect(path, config)\r\nE Argument(s) {'path'} are declared in the hookimpl but can not be found in the hookspec\r\n hook = <HookCaller 'pytest_ignore_collect'>\r\n hookimpl = <HookImpl plugin_name='/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py', plugin=<module 'nilearn.externals.conftest' from '/Users/runner/work/nilearn/nilearn/nilearn/externals/conftest.py'>>\r\n notinspec = {'path'}\r\n self = <_pytest.config.PytestPluginManager object at 0x10c395130>\r\n=============================== warnings summary ===============================\r\nnilearn/input_data/__init__.py:23\r\n /Users/runner/work/nilearn/nilearn/nilearn/input_data/__init__.py:23: DeprecationWarning: The import path 'nilearn.input_data' is deprecated in version 0.9. Importing from 'nilearn.input_data' will be possible at least until release 0.13.0. Please import from 'nilearn.maskers' instead.\r\n warnings.warn(message, DeprecationWarning)\r\n\r\n-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html\r\n\r\n---------- coverage: platform darwin, python 3.12.1-final-0 ----------\r\nCoverage XML written to file coverage.xml\r\n\r\n=========================== short test summary info ============================\r\nERROR nilearn/externals - pluggy._manager.PluginValidationError: Plugin '/Use...\r\n!!!!!!!!!!!!!!!!!!!! 
Interrupted: 1 error during collection !!!!!!!!!!!!!!!!!!!!\r\n========================= 1 warning, 1 error in 17.43s =========================\r\ntest_plotting: exit 2 (22.26 seconds) /Users/runner/work/nilearn/nilearn> pytest --cov=nilearn --cov-report=xml nilearn pid=7328\r\n.pkg: _exit> python /Library/Frameworks/Python.framework/Versions/3.12/lib/python3.12/site-packages/pyproject_api/_backend.py True hatchling.build\r\n test_plotting: FAIL code 2 (102.15=setup[79.89]+cmd[22.26] seconds)\r\n evaluation failed :( (102.25 seconds)\r\n```\r\n\r\nThe tests run fine locally:\r\n- with pytest<8.1.0\r\n- OR removing the conftest in nilearn/nilearn/externals/conftest.py (which suggest to ignore collecting tests, though there are no tests in that subfolder - EDIT: tests are in the __init__.py file).\n", "before_files": [{"content": "# Do not collect any tests in externals. This is more robust than using\n# --ignore because --ignore needs a path and it is not convenient to pass in\n# the externals path (very long install-dependent path in site-packages) when\n# using --pyargs\n\n\ndef pytest_ignore_collect(path, config):\n return True\n", "path": "nilearn/externals/conftest.py"}], "after_files": [{"content": null, "path": "nilearn/externals/conftest.py"}]} | 2,419 | 142 |
gh_patches_debug_20470 | rasdani/github-patches | git_diff | freedomofpress__securedrop-574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can delete and download submissions without having selected any
On any source page in the journalist interface, the journalist is allowed to "Download selected" and "Delete selected" without having selected any submissions. In the case of downloading, the journalist receives an empty zip file. In the case of deleting, nothing happens, but the journalist receives a flashed "Deleted 0 files" message.
We should instead display an error message if the journalist attempts such a nonsensical action.
--- END ISSUE ---
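One natural place for the guard is at the top of the `bulk()` view shown below, before dispatching to `bulk_download`/`bulk_delete`. A possible sketch (not necessarily the fix recorded for this entry), reusing the `flash`/`redirect`/`url_for` helpers already imported in `journalist.py`:

```python
# Sketch: inside bulk(), right after reading doc_names_selected
if not doc_names_selected:
    flash("No submissions selected to {}!".format(action), "warning")
    return redirect(url_for('col', sid=g.sid))
```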
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import config
3 import version
4 import crypto_util
5 import store
6 from db import db_session, Source, Submission, SourceStar, get_one_or_else
7
8 import os
9 from datetime import datetime
10 from flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)
11 from flask_wtf.csrf import CsrfProtect
12 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
13
14 import background
15
16 app = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)
17 app.config.from_object(config.FlaskConfig)
18 CsrfProtect(app)
19
20 app.jinja_env.globals['version'] = version.__version__
21 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
22 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
23 app.jinja_env.globals['use_custom_header_image'] = True
24 else:
25 app.jinja_env.globals['header_image'] = 'logo.png'
26 app.jinja_env.globals['use_custom_header_image'] = False
27
28
29 @app.template_filter('datetimeformat')
30 def _jinja2_datetimeformat(dt, fmt=None):
31 """Template filter for readable formatting of datetime.datetime"""
32 fmt = fmt or '%b %d, %Y %I:%M %p'
33 return dt.strftime(fmt)
34
35
36 @app.teardown_appcontext
37 def shutdown_session(exception=None):
38 """Automatically remove database sessions at the end of the request, or
39 when the application shuts down"""
40 db_session.remove()
41
42
43 def get_source(sid):
44 """Return a Source object, representing the database row, for the source
45 with id `sid`"""
46 source = None
47 query = Source.query.filter(Source.filesystem_id == sid)
48 source = get_one_or_else(query, app.logger, abort)
49
50 return source
51
52
53 @app.before_request
54 def setup_g():
55 """Store commonly used values in Flask's special g object"""
56 if request.method == 'POST':
57 sid = request.form.get('sid')
58 if sid:
59 g.sid = sid
60 g.source = get_source(sid)
61
62
63 def get_docs(sid):
64 """Get docs associated with source id `sid`, sorted by submission date"""
65 docs = []
66 for filename in os.listdir(store.path(sid)):
67 os_stat = os.stat(store.path(sid, filename))
68 docs.append(dict(
69 name=filename,
70 date=datetime.fromtimestamp(os_stat.st_mtime),
71 size=os_stat.st_size,
72 ))
73 # sort in chronological order
74 docs.sort(key=lambda x: int(x['name'].split('-')[0]))
75 return docs
76
77
78 def make_star_true(sid):
79 source = get_source(sid)
80 if source.star:
81 source.star.starred = True
82 else:
83 source_star = SourceStar(source)
84 db_session.add(source_star)
85
86
87 def make_star_false(sid):
88 source = get_source(sid)
89 source.star.starred = False
90
91
92 @app.route('/col/add_star/<sid>', methods=('POST',))
93 def add_star(sid):
94 make_star_true(sid)
95 db_session.commit()
96 return redirect(url_for('index'))
97
98
99 @app.route("/col/remove_star/<sid>", methods=('POST',))
100 def remove_star(sid):
101 make_star_false(sid)
102 db_session.commit()
103 return redirect(url_for('index'))
104
105
106 @app.route('/')
107 def index():
108 unstarred = []
109 starred = []
110 for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():
111 star = SourceStar.query.filter(SourceStar.source_id == source.id).first()
112 if star and star.starred:
113 starred.append(source)
114 else:
115 unstarred.append(source)
116 source.num_unread = len(
117 Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())
118
119 return render_template('index.html', unstarred=unstarred, starred=starred)
120
121
122 @app.route('/col/<sid>')
123 def col(sid):
124 source = get_source(sid)
125 docs = get_docs(sid)
126 haskey = crypto_util.getkey(sid)
127 return render_template("col.html", sid=sid,
128 codename=source.journalist_designation, docs=docs, haskey=haskey,
129 flagged=source.flagged)
130
131
132 def delete_collection(source_id):
133 # Delete the source's collection of submissions
134 store.delete_source_directory(source_id)
135
136 # Delete the source's reply keypair
137 crypto_util.delete_reply_keypair(source_id)
138
139 # Delete their entry in the db
140 source = get_source(source_id)
141 db_session.delete(source)
142 db_session.commit()
143
144
145 @app.route('/col/process', methods=('POST',))
146 def col_process():
147 actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}
148 if 'cols_selected' not in request.form:
149 return redirect(url_for('index'))
150
151 cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist
152 action = request.form['action']
153
154 if action not in actions:
155 return abort(500)
156
157 method = actions[action]
158 return method(cols_selected)
159
160
161 def col_star(cols_selected):
162 for sid in cols_selected:
163 make_star_true(sid)
164
165 db_session.commit()
166 return redirect(url_for('index'))
167
168
169 def col_un_star(cols_selected):
170 for source_id in cols_selected:
171 make_star_false(source_id)
172
173 db_session.commit()
174 return redirect(url_for('index'))
175
176
177 @app.route('/col/delete/<sid>', methods=('POST',))
178 def col_delete_single(sid):
179 """deleting a single collection from its /col page"""
180 source = get_source(sid)
181 delete_collection(sid)
182 flash("%s's collection deleted" % (source.journalist_designation,), "notification")
183 return redirect(url_for('index'))
184
185
186 def col_delete(cols_selected):
187 """deleting multiple collections from the index"""
188 if len(cols_selected) < 1:
189 flash("No collections selected to delete!", "warning")
190 else:
191 for source_id in cols_selected:
192 delete_collection(source_id)
193 flash("%s %s deleted" % (
194 len(cols_selected),
195 "collection" if len(cols_selected) == 1 else "collections"
196 ), "notification")
197
198 return redirect(url_for('index'))
199
200
201 @app.route('/col/<sid>/<fn>')
202 def doc(sid, fn):
203 if '..' in fn or fn.startswith('/'):
204 abort(404)
205 try:
206 Submission.query.filter(Submission.filename == fn).one().downloaded = True
207 except NoResultFound as e:
208 app.logger.error("Could not mark " + fn + " as downloaded: %s" % (e,))
209 db_session.commit()
210 return send_file(store.path(sid, fn), mimetype="application/pgp-encrypted")
211
212
213 @app.route('/reply', methods=('POST',))
214 def reply():
215 msg = request.form['msg']
216 g.source.interaction_count += 1
217 filename = "{0}-reply.gpg".format(g.source.interaction_count)
218
219 crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=
220 store.path(g.sid, filename))
221
222 db_session.commit()
223 return render_template('reply.html', sid=g.sid,
224 codename=g.source.journalist_designation)
225
226
227 @app.route('/regenerate-code', methods=('POST',))
228 def generate_code():
229 g.source.journalist_designation = crypto_util.display_id()
230 db_session.commit()
231 return redirect('/col/' + g.sid)
232
233
234 @app.route('/download_unread/<sid>')
235 def download_unread(sid):
236 id = Source.query.filter(Source.filesystem_id == sid).one().id
237 docs = [doc.filename for doc in
238 Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]
239 return bulk_download(sid, docs)
240
241
242 @app.route('/bulk', methods=('POST',))
243 def bulk():
244 action = request.form['action']
245
246 doc_names_selected = request.form.getlist('doc_names_selected')
247 docs_selected = [
248 doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]
249 filenames_selected = [
250 doc['name'] for doc in docs_selected]
251
252 if action == 'download':
253 return bulk_download(g.sid, filenames_selected)
254 elif action == 'delete':
255 return bulk_delete(g.sid, docs_selected)
256 else:
257 abort(400)
258
259
260 def bulk_delete(sid, docs_selected):
261 source = get_source(sid)
262 confirm_delete = bool(request.form.get('confirm_delete', False))
263 if confirm_delete:
264 for doc in docs_selected:
265 db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())
266 fn = store.path(sid, doc['name'])
267 store.secure_unlink(fn)
268 db_session.commit()
269 return render_template('delete.html', sid=sid,
270 codename=source.journalist_designation,
271 docs_selected=docs_selected, confirm_delete=confirm_delete)
272
273
274 def bulk_download(sid, docs_selected):
275 source = get_source(sid)
276 filenames = []
277 for doc in docs_selected:
278 filenames.append(store.path(sid, doc))
279 try:
280 Submission.query.filter(Submission.filename == doc).one().downloaded = True
281 except NoResultFound as e:
282 app.logger.error("Could not mark " + doc + " as downloaded: %s" % (e,))
283 db_session.commit()
284 zip = store.get_bulk_archive(filenames)
285 return send_file(zip.name, mimetype="application/zip",
286 attachment_filename=source.journalist_designation + ".zip",
287 as_attachment=True)
288
289
290 @app.route('/flag', methods=('POST',))
291 def flag():
292 g.source.flagged = True
293 db_session.commit()
294 return render_template('flag.html', sid=g.sid,
295 codename=g.source.journalist_designation)
296 def write_pidfile():
297 pid = str(os.getpid())
298 with open(config.JOURNALIST_PIDFILE, 'w') as fp:
299 fp.write(pid)
300
301 if __name__ == "__main__":
302 write_pidfile()
303 # TODO make sure debug=False in production
304 app.run(debug=True, host='0.0.0.0', port=8081)
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -186,7 +186,7 @@
def col_delete(cols_selected):
"""deleting multiple collections from the index"""
if len(cols_selected) < 1:
- flash("No collections selected to delete!", "warning")
+ flash("No collections selected to delete!", "error")
else:
for source_id in cols_selected:
delete_collection(source_id)
@@ -249,6 +249,13 @@
filenames_selected = [
doc['name'] for doc in docs_selected]
+ if not docs_selected:
+ if action == 'download':
+ flash("No collections selected to download!", "error")
+ elif action == 'delete':
+ flash("No collections selected to delete!", "error")
+ return redirect(url_for('col', sid=g.sid))
+
if action == 'download':
return bulk_download(g.sid, filenames_selected)
elif action == 'delete':
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -186,7 +186,7 @@\n def col_delete(cols_selected):\n \"\"\"deleting multiple collections from the index\"\"\"\n if len(cols_selected) < 1:\n- flash(\"No collections selected to delete!\", \"warning\")\n+ flash(\"No collections selected to delete!\", \"error\")\n else:\n for source_id in cols_selected:\n delete_collection(source_id)\n@@ -249,6 +249,13 @@\n filenames_selected = [\n doc['name'] for doc in docs_selected]\n \n+ if not docs_selected:\n+ if action == 'download':\n+ flash(\"No collections selected to download!\", \"error\")\n+ elif action == 'delete':\n+ flash(\"No collections selected to delete!\", \"error\")\n+ return redirect(url_for('col', sid=g.sid))\n+\n if action == 'download':\n return bulk_download(g.sid, filenames_selected)\n elif action == 'delete':\n", "issue": "Can delete and download submissions without having selected any\nOn any source page in the journalist interface, the journalist is allowed to \"Download selected\" and \"Delete selected\" without having selected any submissions. In the case of downloading, the journalist receives an empty zip file. In the case of deleting, nothing happens, but the journalist receives a flashed \"Deleted 0 files\" message.\n\nWe should instead display an error message if the journalist attempts such a nonsensical action.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport config\nimport version\nimport crypto_util\nimport store\nfrom db import db_session, Source, Submission, SourceStar, get_one_or_else\n\nimport os\nfrom datetime import datetime\nfrom flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)\nfrom flask_wtf.csrf import CsrfProtect\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport background\n\napp = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_filter('datetimeformat')\ndef _jinja2_datetimeformat(dt, fmt=None):\n \"\"\"Template filter for readable formatting of datetime.datetime\"\"\"\n fmt = fmt or '%b %d, %Y %I:%M %p'\n return dt.strftime(fmt)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef get_source(sid):\n \"\"\"Return a Source object, representing the database row, for the source\n with id `sid`\"\"\"\n source = None\n query = Source.query.filter(Source.filesystem_id == sid)\n source = get_one_or_else(query, app.logger, abort)\n\n return source\n\n\[email protected]_request\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n if request.method == 'POST':\n sid = request.form.get('sid')\n if sid:\n g.sid = sid\n g.source = get_source(sid)\n\n\ndef get_docs(sid):\n \"\"\"Get docs associated with source id `sid`, sorted by submission date\"\"\"\n docs = []\n for filename in os.listdir(store.path(sid)):\n os_stat = 
os.stat(store.path(sid, filename))\n docs.append(dict(\n name=filename,\n date=datetime.fromtimestamp(os_stat.st_mtime),\n size=os_stat.st_size,\n ))\n # sort in chronological order\n docs.sort(key=lambda x: int(x['name'].split('-')[0]))\n return docs\n\n\ndef make_star_true(sid):\n source = get_source(sid)\n if source.star:\n source.star.starred = True\n else:\n source_star = SourceStar(source)\n db_session.add(source_star)\n\n\ndef make_star_false(sid):\n source = get_source(sid)\n source.star.starred = False\n\n\[email protected]('/col/add_star/<sid>', methods=('POST',))\ndef add_star(sid):\n make_star_true(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected](\"/col/remove_star/<sid>\", methods=('POST',))\ndef remove_star(sid):\n make_star_false(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/')\ndef index():\n unstarred = []\n starred = []\n for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():\n star = SourceStar.query.filter(SourceStar.source_id == source.id).first()\n if star and star.starred:\n starred.append(source)\n else:\n unstarred.append(source)\n source.num_unread = len(\n Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())\n\n return render_template('index.html', unstarred=unstarred, starred=starred)\n\n\[email protected]('/col/<sid>')\ndef col(sid):\n source = get_source(sid)\n docs = get_docs(sid)\n haskey = crypto_util.getkey(sid)\n return render_template(\"col.html\", sid=sid,\n codename=source.journalist_designation, docs=docs, haskey=haskey,\n flagged=source.flagged)\n\n\ndef delete_collection(source_id):\n # Delete the source's collection of submissions\n store.delete_source_directory(source_id)\n\n # Delete the source's reply keypair\n crypto_util.delete_reply_keypair(source_id)\n\n # Delete their entry in the db\n source = get_source(source_id)\n db_session.delete(source)\n db_session.commit()\n\n\[email protected]('/col/process', methods=('POST',))\ndef col_process():\n actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}\n if 'cols_selected' not in request.form:\n return redirect(url_for('index'))\n\n cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n\ndef col_star(cols_selected):\n for sid in cols_selected:\n make_star_true(sid)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\ndef col_un_star(cols_selected):\n for source_id in cols_selected:\n make_star_false(source_id)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/col/delete/<sid>', methods=('POST',))\ndef col_delete_single(sid):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(sid)\n delete_collection(sid)\n flash(\"%s's collection deleted\" % (source.journalist_designation,), \"notification\")\n return redirect(url_for('index'))\n\n\ndef col_delete(cols_selected):\n \"\"\"deleting multiple collections from the index\"\"\"\n if len(cols_selected) < 1:\n flash(\"No collections selected to delete!\", \"warning\")\n else:\n for source_id in cols_selected:\n delete_collection(source_id)\n flash(\"%s %s deleted\" % (\n len(cols_selected),\n \"collection\" if len(cols_selected) == 1 else \"collections\"\n ), \"notification\")\n\n return 
redirect(url_for('index'))\n\n\[email protected]('/col/<sid>/<fn>')\ndef doc(sid, fn):\n if '..' in fn or fn.startswith('/'):\n abort(404)\n try:\n Submission.query.filter(Submission.filename == fn).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n db_session.commit()\n return send_file(store.path(sid, fn), mimetype=\"application/pgp-encrypted\")\n\n\[email protected]('/reply', methods=('POST',))\ndef reply():\n msg = request.form['msg']\n g.source.interaction_count += 1\n filename = \"{0}-reply.gpg\".format(g.source.interaction_count)\n\n crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=\n store.path(g.sid, filename))\n\n db_session.commit()\n return render_template('reply.html', sid=g.sid,\n codename=g.source.journalist_designation)\n\n\[email protected]('/regenerate-code', methods=('POST',))\ndef generate_code():\n g.source.journalist_designation = crypto_util.display_id()\n db_session.commit()\n return redirect('/col/' + g.sid)\n\n\[email protected]('/download_unread/<sid>')\ndef download_unread(sid):\n id = Source.query.filter(Source.filesystem_id == sid).one().id\n docs = [doc.filename for doc in\n Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]\n return bulk_download(sid, docs)\n\n\[email protected]('/bulk', methods=('POST',))\ndef bulk():\n action = request.form['action']\n\n doc_names_selected = request.form.getlist('doc_names_selected')\n docs_selected = [\n doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]\n filenames_selected = [\n doc['name'] for doc in docs_selected]\n\n if action == 'download':\n return bulk_download(g.sid, filenames_selected)\n elif action == 'delete':\n return bulk_delete(g.sid, docs_selected)\n else:\n abort(400)\n\n\ndef bulk_delete(sid, docs_selected):\n source = get_source(sid)\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())\n fn = store.path(sid, doc['name'])\n store.secure_unlink(fn)\n db_session.commit()\n return render_template('delete.html', sid=sid,\n codename=source.journalist_designation,\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n\n\ndef bulk_download(sid, docs_selected):\n source = get_source(sid)\n filenames = []\n for doc in docs_selected:\n filenames.append(store.path(sid, doc))\n try:\n Submission.query.filter(Submission.filename == doc).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + doc + \" as downloaded: %s\" % (e,))\n db_session.commit()\n zip = store.get_bulk_archive(filenames)\n return send_file(zip.name, mimetype=\"application/zip\",\n attachment_filename=source.journalist_designation + \".zip\",\n as_attachment=True)\n\n\[email protected]('/flag', methods=('POST',))\ndef flag():\n g.source.flagged = True\n db_session.commit()\n return render_template('flag.html', sid=g.sid,\n codename=g.source.journalist_designation)\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.JOURNALIST_PIDFILE, 'w') as fp:\n fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug=False in production\n app.run(debug=True, host='0.0.0.0', port=8081)\n", "path": "securedrop/journalist.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport config\nimport version\nimport crypto_util\nimport store\nfrom db import db_session, Source, 
Submission, SourceStar, get_one_or_else\n\nimport os\nfrom datetime import datetime\nfrom flask import (Flask, request, render_template, send_file, redirect, flash, url_for, g, abort)\nfrom flask_wtf.csrf import CsrfProtect\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport background\n\napp = Flask(__name__, template_folder=config.JOURNALIST_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'logo.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_filter('datetimeformat')\ndef _jinja2_datetimeformat(dt, fmt=None):\n \"\"\"Template filter for readable formatting of datetime.datetime\"\"\"\n fmt = fmt or '%b %d, %Y %I:%M %p'\n return dt.strftime(fmt)\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef get_source(sid):\n \"\"\"Return a Source object, representing the database row, for the source\n with id `sid`\"\"\"\n source = None\n query = Source.query.filter(Source.filesystem_id == sid)\n source = get_one_or_else(query, app.logger, abort)\n\n return source\n\n\[email protected]_request\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n if request.method == 'POST':\n sid = request.form.get('sid')\n if sid:\n g.sid = sid\n g.source = get_source(sid)\n\n\ndef get_docs(sid):\n \"\"\"Get docs associated with source id `sid`, sorted by submission date\"\"\"\n docs = []\n for filename in os.listdir(store.path(sid)):\n os_stat = os.stat(store.path(sid, filename))\n docs.append(dict(\n name=filename,\n date=datetime.fromtimestamp(os_stat.st_mtime),\n size=os_stat.st_size,\n ))\n # sort in chronological order\n docs.sort(key=lambda x: int(x['name'].split('-')[0]))\n return docs\n\n\ndef make_star_true(sid):\n source = get_source(sid)\n if source.star:\n source.star.starred = True\n else:\n source_star = SourceStar(source)\n db_session.add(source_star)\n\n\ndef make_star_false(sid):\n source = get_source(sid)\n source.star.starred = False\n\n\[email protected]('/col/add_star/<sid>', methods=('POST',))\ndef add_star(sid):\n make_star_true(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected](\"/col/remove_star/<sid>\", methods=('POST',))\ndef remove_star(sid):\n make_star_false(sid)\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/')\ndef index():\n unstarred = []\n starred = []\n for source in Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all():\n star = SourceStar.query.filter(SourceStar.source_id == source.id).first()\n if star and star.starred:\n starred.append(source)\n else:\n unstarred.append(source)\n source.num_unread = len(\n Submission.query.filter(Submission.source_id == source.id, Submission.downloaded == False).all())\n\n return render_template('index.html', unstarred=unstarred, starred=starred)\n\n\[email protected]('/col/<sid>')\ndef col(sid):\n source = get_source(sid)\n docs = get_docs(sid)\n haskey = crypto_util.getkey(sid)\n return render_template(\"col.html\", sid=sid,\n codename=source.journalist_designation, docs=docs, 
haskey=haskey,\n flagged=source.flagged)\n\n\ndef delete_collection(source_id):\n # Delete the source's collection of submissions\n store.delete_source_directory(source_id)\n\n # Delete the source's reply keypair\n crypto_util.delete_reply_keypair(source_id)\n\n # Delete their entry in the db\n source = get_source(source_id)\n db_session.delete(source)\n db_session.commit()\n\n\[email protected]('/col/process', methods=('POST',))\ndef col_process():\n actions = {'delete': col_delete, 'star': col_star, 'un-star': col_un_star}\n if 'cols_selected' not in request.form:\n return redirect(url_for('index'))\n\n cols_selected = request.form.getlist('cols_selected') # getlist is cgi.FieldStorage.getlist\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n\ndef col_star(cols_selected):\n for sid in cols_selected:\n make_star_true(sid)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\ndef col_un_star(cols_selected):\n for source_id in cols_selected:\n make_star_false(source_id)\n\n db_session.commit()\n return redirect(url_for('index'))\n\n\[email protected]('/col/delete/<sid>', methods=('POST',))\ndef col_delete_single(sid):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(sid)\n delete_collection(sid)\n flash(\"%s's collection deleted\" % (source.journalist_designation,), \"notification\")\n return redirect(url_for('index'))\n\n\ndef col_delete(cols_selected):\n \"\"\"deleting multiple collections from the index\"\"\"\n if len(cols_selected) < 1:\n flash(\"No collections selected to delete!\", \"error\")\n else:\n for source_id in cols_selected:\n delete_collection(source_id)\n flash(\"%s %s deleted\" % (\n len(cols_selected),\n \"collection\" if len(cols_selected) == 1 else \"collections\"\n ), \"notification\")\n\n return redirect(url_for('index'))\n\n\[email protected]('/col/<sid>/<fn>')\ndef doc(sid, fn):\n if '..' 
in fn or fn.startswith('/'):\n abort(404)\n try:\n Submission.query.filter(Submission.filename == fn).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n db_session.commit()\n return send_file(store.path(sid, fn), mimetype=\"application/pgp-encrypted\")\n\n\[email protected]('/reply', methods=('POST',))\ndef reply():\n msg = request.form['msg']\n g.source.interaction_count += 1\n filename = \"{0}-reply.gpg\".format(g.source.interaction_count)\n\n crypto_util.encrypt(crypto_util.getkey(g.sid), msg, output=\n store.path(g.sid, filename))\n\n db_session.commit()\n return render_template('reply.html', sid=g.sid,\n codename=g.source.journalist_designation)\n\n\[email protected]('/regenerate-code', methods=('POST',))\ndef generate_code():\n g.source.journalist_designation = crypto_util.display_id()\n db_session.commit()\n return redirect('/col/' + g.sid)\n\n\[email protected]('/download_unread/<sid>')\ndef download_unread(sid):\n id = Source.query.filter(Source.filesystem_id == sid).one().id\n docs = [doc.filename for doc in\n Submission.query.filter(Submission.source_id == id, Submission.downloaded == False).all()]\n return bulk_download(sid, docs)\n\n\[email protected]('/bulk', methods=('POST',))\ndef bulk():\n action = request.form['action']\n\n doc_names_selected = request.form.getlist('doc_names_selected')\n docs_selected = [\n doc for doc in get_docs(g.sid) if doc['name'] in doc_names_selected]\n filenames_selected = [\n doc['name'] for doc in docs_selected]\n\n if not docs_selected:\n if action == 'download':\n flash(\"No collections selected to download!\", \"error\")\n elif action == 'delete':\n flash(\"No collections selected to delete!\", \"error\")\n return redirect(url_for('col', sid=g.sid))\n\n if action == 'download':\n return bulk_download(g.sid, filenames_selected)\n elif action == 'delete':\n return bulk_delete(g.sid, docs_selected)\n else:\n abort(400)\n\n\ndef bulk_delete(sid, docs_selected):\n source = get_source(sid)\n confirm_delete = bool(request.form.get('confirm_delete', False))\n if confirm_delete:\n for doc in docs_selected:\n db_session.delete(Submission.query.filter(Submission.filename == doc['name']).one())\n fn = store.path(sid, doc['name'])\n store.secure_unlink(fn)\n db_session.commit()\n return render_template('delete.html', sid=sid,\n codename=source.journalist_designation,\n docs_selected=docs_selected, confirm_delete=confirm_delete)\n\n\ndef bulk_download(sid, docs_selected):\n source = get_source(sid)\n filenames = []\n for doc in docs_selected:\n filenames.append(store.path(sid, doc))\n try:\n Submission.query.filter(Submission.filename == doc).one().downloaded = True\n except NoResultFound as e:\n app.logger.error(\"Could not mark \" + doc + \" as downloaded: %s\" % (e,))\n db_session.commit()\n zip = store.get_bulk_archive(filenames)\n return send_file(zip.name, mimetype=\"application/zip\",\n attachment_filename=source.journalist_designation + \".zip\",\n as_attachment=True)\n\n\[email protected]('/flag', methods=('POST',))\ndef flag():\n g.source.flagged = True\n db_session.commit()\n return render_template('flag.html', sid=g.sid,\n codename=g.source.journalist_designation)\ndef write_pidfile():\n pid = str(os.getpid())\n with open(config.JOURNALIST_PIDFILE, 'w') as fp:\n fp.write(pid)\n\nif __name__ == \"__main__\":\n write_pidfile()\n # TODO make sure debug=False in production\n app.run(debug=True, host='0.0.0.0', port=8081)\n", "path": "securedrop/journalist.py"}]} | 
3,375 | 242 |
gh_patches_debug_8866 | rasdani/github-patches | git_diff | tensorflow__addons-618 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use package manager to pin TF dependency
Per https://github.com/tensorflow/community/pull/135 we should be using pip/conda to specify which Addons releases work with which TF versions.
This is blocked until the `tensorflow` and `tensorflow-gpu` consolidation scheduled for the 2.1 release.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 import os
32 import platform
33 import sys
34
35 from datetime import datetime
36 from setuptools import find_packages
37 from setuptools import setup
38 from setuptools.dist import Distribution
39 from setuptools import Extension
40
41 DOCLINES = __doc__.split('\n')
42
43 TFA_NIGHTLY = 'tfa-nightly'
44 TFA_RELEASE = 'tensorflow-addons'
45
46 if '--nightly' in sys.argv:
47 project_name = TFA_NIGHTLY
48 nightly_idx = sys.argv.index('--nightly')
49 sys.argv.pop(nightly_idx)
50 else:
51 project_name = TFA_RELEASE
52
53 # Version
54 version = {}
55 base_dir = os.path.dirname(os.path.abspath(__file__))
56 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
57 # yapf: disable
58 exec(fp.read(), version)
59 # yapf: enable
60
61 if project_name == TFA_NIGHTLY:
62 version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
63
64 # Dependencies
65 REQUIRED_PACKAGES = [
66 'six >= 1.10.0',
67 ]
68
69 if project_name == TFA_RELEASE:
70 # TODO: remove if-else condition when tf supports package consolidation.
71 if platform.system() == 'Linux':
72 REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
73 else:
74 REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
75 elif project_name == TFA_NIGHTLY:
76 REQUIRED_PACKAGES.append('tf-nightly')
77
78
79 class BinaryDistribution(Distribution):
80 """This class is needed in order to create OS specific wheels."""
81
82 def has_ext_modules(self):
83 return True
84
85
86 setup(
87 name=project_name,
88 version=version['__version__'],
89 description=DOCLINES[0],
90 long_description='\n'.join(DOCLINES[2:]),
91 author='Google Inc.',
92 author_email='[email protected]',
93 packages=find_packages(),
94 ext_modules=[Extension('_foo', ['stub.cc'])],
95 install_requires=REQUIRED_PACKAGES,
96 include_package_data=True,
97 zip_safe=False,
98 distclass=BinaryDistribution,
99 classifiers=[
100 'Development Status :: 4 - Beta',
101 'Intended Audience :: Developers',
102 'Intended Audience :: Education',
103 'Intended Audience :: Science/Research',
104 'License :: OSI Approved :: Apache Software License',
105 'Programming Language :: Python :: 2.7',
106 'Programming Language :: Python :: 3.5',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Topic :: Scientific/Engineering :: Mathematics',
110 'Topic :: Software Development :: Libraries :: Python Modules',
111 'Topic :: Software Development :: Libraries',
112 ],
113 license='Apache 2.0',
114 keywords='tensorflow addons machine learning',
115 )
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,9 +69,9 @@
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
+ REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0')
else:
- REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
+ REQUIRED_PACKAGES.append('tensorflow >= 2.0.0')
elif project_name == TFA_NIGHTLY:
REQUIRED_PACKAGES.append('tf-nightly')
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,9 +69,9 @@\n if project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n+ REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0')\n else:\n- REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\n+ REQUIRED_PACKAGES.append('tensorflow >= 2.0.0')\n elif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n", "issue": "Use package manager to pin TF dependency\nPer https://github.com/tensorflow/community/pull/135 we should be using pip/conda to specify which Addons releases work with which TF versions. \r\n\r\nThis is blocked until the `tensorflow` and `tensorflow-gpu` consolidation scheduled for 2.1 release\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\nelif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0')\n else:\n REQUIRED_PACKAGES.append('tensorflow >= 2.0.0')\nelif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 
'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]} | 1,434 | 155 |
gh_patches_debug_5765 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3871 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing 4543: changing picture size in CKeditor doesn't affect frontend
**URL:** https://meinberlin-dev.liqd.net/vorhaben/2021-00074/
**user:** any
**expected behaviour:** if I change the size of my picture in CKeditor, the size is also changed in the frontend
**behaviour:** the picture always has the same size; changes are not saved
**important screensize:**
**device & browser:** chrome, big sur
**Comment/Question:** Is that on purpose because we want the plan to look nice and not have screwed up pictures? I think we should allow it as we also do that for projects.
Screenshot?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/plans/models.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth.models import Group
3 from django.db import models
4 from django.db.models import Q
5 from django.urls import reverse
6 from django.utils.functional import cached_property
7 from django.utils.translation import ugettext_lazy as _
8
9 from adhocracy4 import transforms
10 from adhocracy4.administrative_districts.models import AdministrativeDistrict
11 from adhocracy4.ckeditor.fields import RichTextCollapsibleUploadingField
12 from adhocracy4.images.fields import ConfiguredImageField
13 from adhocracy4.images.fields import ImageCopyrightField
14 from adhocracy4.maps import fields as map_fields
15 from adhocracy4.models.base import UserGeneratedContentModel
16 from adhocracy4.phases.models import Phase
17 from adhocracy4.projects import models as project_models
18 from adhocracy4.projects.enums import Access
19 from adhocracy4.projects.fields import TopicField
20 from adhocracy4.projects.models import ProjectContactDetailMixin
21
22
23 class Plan(ProjectContactDetailMixin, UserGeneratedContentModel):
24
25 PARTICIPATION_YES = 0
26 PARTICIPATION_NO = 1
27 PARTICIPATION_UNDECIDED = 2
28 PARTICIPATION_CHOICES = (
29 (PARTICIPATION_YES, _('with')),
30 (PARTICIPATION_NO, _('without')),
31 (PARTICIPATION_UNDECIDED, _('undecided')),
32 )
33
34 STATUS_ONGOING = 0
35 STATUS_DONE = 1
36
37 STATUS_CHOICES = (
38 (STATUS_ONGOING, _('running')),
39 (STATUS_DONE, _('done'))
40 )
41
42 title = models.CharField(
43 max_length=120,
44 verbose_name=_('Title of your plan'),
45 help_text=_('Enter a meaningful title with a maximum '
46 'length of 120 characters. The title'
47 ' will appear in the project tile and on '
48 'top of the plan detail page.')
49 )
50 organisation = models.ForeignKey(
51 settings.A4_ORGANISATIONS_MODEL,
52 on_delete=models.CASCADE,
53 verbose_name=_('Organisation'))
54 projects = models.ManyToManyField(
55 project_models.Project,
56 related_name='plans',
57 blank=True
58 )
59 group = models.ForeignKey(
60 Group,
61 on_delete=models.SET_NULL,
62 blank=True,
63 null=True)
64 point = map_fields.PointField(
65 blank=True,
66 verbose_name=_('Can your plan be located on the map?'),
67 help_text=_('If you locate your plan, it will be shown '
68 'on the map in the project overview in addition '
69 'to the list. To set a pin, click inside the '
70 'highlighted area or enter an address. Once a '
71 'pin is set you can move it by dragging it.')
72 )
73 point_label = models.CharField(
74 blank=True,
75 default='',
76 max_length=255,
77 verbose_name=_('Name of the site'),
78 help_text=_('The name of the site (e.g. name of street, '
79 'building or park) makes it easier to locate '
80 'the plan. The maximum length is 255 characters.'),
81 )
82 district = models.ForeignKey(
83 AdministrativeDistrict,
84 verbose_name=_('District'),
85 help_text=_('Enter the district in which the plan is located or '
86 'whether it is a city-wide plan. In the project '
87 'overview projects can be filtered by district.'),
88 null=True,
89 blank=True,
90 on_delete=models.CASCADE
91 )
92 cost = models.CharField(
93 max_length=255,
94 verbose_name=_('Cost'),
95 help_text=_('Enter details of the estimated or actual costs '
96 'of the plan in no more than 255 characters.')
97 )
98 description = RichTextCollapsibleUploadingField(
99 config_name='collapsible-image-editor',
100 verbose_name=_('Description of your plan'),
101 help_text=_('Describe the key points of your plan. You can upload '
102 'PDFs and images, embed videos and link to external '
103 'URLs, among other things.')
104 )
105 description_image = ConfiguredImageField(
106 'plan_image',
107 verbose_name=_('Header image'),
108 upload_to='plan/description_image',
109 blank=True,
110 help_prefix=_(
111 'Visualize your plan with an image underneath the description.'
112 ),
113 )
114 description_image_copyright = models.CharField(
115 verbose_name=_('Header image copyright'),
116 blank=True,
117 max_length=120,
118 help_text=_('The name is displayed in the header image.')
119 )
120 tile_image = ConfiguredImageField(
121 'tileimage',
122 verbose_name=_('Tile image'),
123 help_prefix=_(
124 'The image will be shown in the project tile.'
125 ),
126 upload_to='plan/tile_images',
127 blank=True
128 )
129 tile_image_copyright = ImageCopyrightField(
130 verbose_name=_('Tile image copyright'),
131 help_text=_('The name is displayed in the tile image.')
132 )
133 topics = TopicField(
134 verbose_name=_('Topics'),
135 help_text=_('Assign your plan to 1 or 2 '
136 'topics. In the project '
137 'overview projects can be '
138 'filtered according to topics.')
139 )
140 status = models.SmallIntegerField(
141 choices=STATUS_CHOICES,
142 verbose_name=_('Status'),
143 help_text=_('In the project overview projects '
144 'can be filtered by status.')
145 )
146 participation = models.SmallIntegerField(
147 choices=PARTICIPATION_CHOICES,
148 verbose_name=_('Participation'),
149 help_text=_('In the project overview '
150 'projects can be filtered '
151 'according to participation '
152 'status.')
153 )
154 duration = models.CharField(
155 blank=True,
156 null=True,
157 max_length=255,
158 verbose_name=_('Duration'),
159 help_text=_('Provide information on the '
160 'expected duration of the plan in '
161 'no more than 255 characters.')
162 )
163 is_draft = models.BooleanField(default=True)
164
165 class Meta:
166 ordering = ['-created']
167
168 @property
169 def reference_number(self):
170 return '{:d}-{:05d}'.format(self.created.year, self.pk)
171
172 @property
173 def administrative_district(self):
174 return self.district
175
176 @property
177 def topic_names(self):
178 if hasattr(settings, 'A4_PROJECT_TOPICS'):
179 choices = dict(settings.A4_PROJECT_TOPICS)
180 return [choices.get(topic, topic) for topic in self.topics]
181 return []
182
183 @cached_property
184 def published_projects(self):
185 return self.projects.filter(
186 Q(access=Access.PUBLIC) | Q(access=Access.SEMIPUBLIC),
187 is_draft=False, is_archived=False)
188
189 @cached_property
190 def participation_string(self):
191 project_list = self.published_projects.values_list('id', flat=True)
192 phases_in_plan = Phase.objects\
193 .select_related('module__project')\
194 .filter(module__project_id__in=project_list)\
195 .order_by('-start_date')
196
197 if phases_in_plan.active_phases():
198 return _('running')
199
200 future_phases_with_start_date = phases_in_plan.future_phases()\
201 .exclude(start_date__isnull=True)
202
203 if future_phases_with_start_date:
204 future_phase = future_phases_with_start_date.first()
205 return _('starts at {}')\
206 .format(future_phase.start_date.strftime('%d.%m.%Y'))
207
208 def __str__(self):
209 return self.title
210
211 def get_absolute_url(self):
212 return reverse('meinberlin_plans:plan-detail',
213 kwargs=dict(pk='{:05d}'.format(self.pk),
214 year=self.created.year))
215
216 def save(self, *args, **kwargs):
217 self.description = transforms.clean_html_field(self.description)
218 super().save(*args, **kwargs)
219
220 def _get_group(self, user, organisation):
221 user_groups = user.groups.all()
222 org_groups = organisation.groups.all()
223 shared_groups = user_groups & org_groups
224 return shared_groups.distinct().first()
225
226 def is_group_member(self, user):
227 if self.group:
228 return user.groups.filter(id=self.group.id).exists()
229 return False
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py
--- a/meinberlin/apps/plans/models.py
+++ b/meinberlin/apps/plans/models.py
@@ -214,7 +214,8 @@
year=self.created.year))
def save(self, *args, **kwargs):
- self.description = transforms.clean_html_field(self.description)
+ self.description = transforms.clean_html_field(
+ self.description, 'collapsible-image-editor')
super().save(*args, **kwargs)
def _get_group(self, user, organisation):
| {"golden_diff": "diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py\n--- a/meinberlin/apps/plans/models.py\n+++ b/meinberlin/apps/plans/models.py\n@@ -214,7 +214,8 @@\n year=self.created.year))\n \n def save(self, *args, **kwargs):\n- self.description = transforms.clean_html_field(self.description)\n+ self.description = transforms.clean_html_field(\n+ self.description, 'collapsible-image-editor')\n super().save(*args, **kwargs)\n \n def _get_group(self, user, organisation):\n", "issue": "testing 4543: changing picture size in CKeditor doesn't effect frontend\n**URL:** https://meinberlin-dev.liqd.net/vorhaben/2021-00074/\r\n**user:** any\r\n**expected behaviour:** if I change the size of my picture in CKeditor the size is also changed in frontend\r\n**behaviour:** picture always has the same size, changes are not saved\r\n**important screensize:**\r\n**device & browser:** chrome, big sur\r\n**Comment/Question:** Is that on purpose because we want the plan to look nice and not have screwed up pictures? I think we should allow it as we also do that for projects.\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.ckeditor.fields import RichTextCollapsibleUploadingField\nfrom adhocracy4.images.fields import ConfiguredImageField\nfrom adhocracy4.images.fields import ImageCopyrightField\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.enums import Access\nfrom adhocracy4.projects.fields import TopicField\nfrom adhocracy4.projects.models import ProjectContactDetailMixin\n\n\nclass Plan(ProjectContactDetailMixin, UserGeneratedContentModel):\n\n PARTICIPATION_YES = 0\n PARTICIPATION_NO = 1\n PARTICIPATION_UNDECIDED = 2\n PARTICIPATION_CHOICES = (\n (PARTICIPATION_YES, _('with')),\n (PARTICIPATION_NO, _('without')),\n (PARTICIPATION_UNDECIDED, _('undecided')),\n )\n\n STATUS_ONGOING = 0\n STATUS_DONE = 1\n\n STATUS_CHOICES = (\n (STATUS_ONGOING, _('running')),\n (STATUS_DONE, _('done'))\n )\n\n title = models.CharField(\n max_length=120,\n verbose_name=_('Title of your plan'),\n help_text=_('Enter a meaningful title with a maximum '\n 'length of 120 characters. The title'\n ' will appear in the project tile and on '\n 'top of the plan detail page.')\n )\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_('Organisation'))\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='plans',\n blank=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n blank=True,\n null=True)\n point = map_fields.PointField(\n blank=True,\n verbose_name=_('Can your plan be located on the map?'),\n help_text=_('If you locate your plan, it will be shown '\n 'on the map in the project overview in addition '\n 'to the list. To set a pin, click inside the '\n 'highlighted area or enter an address. 
Once a '\n 'pin is set you can move it by dragging it.')\n )\n point_label = models.CharField(\n blank=True,\n default='',\n max_length=255,\n verbose_name=_('Name of the site'),\n help_text=_('The name of the site (e.g. name of street, '\n 'building or park) makes it easier to locate '\n 'the plan. The maximum length is 255 characters.'),\n )\n district = models.ForeignKey(\n AdministrativeDistrict,\n verbose_name=_('District'),\n help_text=_('Enter the district in which the plan is located or '\n 'whether it is a city-wide plan. In the project '\n 'overview projects can be filtered by district.'),\n null=True,\n blank=True,\n on_delete=models.CASCADE\n )\n cost = models.CharField(\n max_length=255,\n verbose_name=_('Cost'),\n help_text=_('Enter details of the estimated or actual costs '\n 'of the plan in no more than 255 characters.')\n )\n description = RichTextCollapsibleUploadingField(\n config_name='collapsible-image-editor',\n verbose_name=_('Description of your plan'),\n help_text=_('Describe the key points of your plan. You can upload '\n 'PDFs and images, embed videos and link to external '\n 'URLs, among other things.')\n )\n description_image = ConfiguredImageField(\n 'plan_image',\n verbose_name=_('Header image'),\n upload_to='plan/description_image',\n blank=True,\n help_prefix=_(\n 'Visualize your plan with an image underneath the description.'\n ),\n )\n description_image_copyright = models.CharField(\n verbose_name=_('Header image copyright'),\n blank=True,\n max_length=120,\n help_text=_('The name is displayed in the header image.')\n )\n tile_image = ConfiguredImageField(\n 'tileimage',\n verbose_name=_('Tile image'),\n help_prefix=_(\n 'The image will be shown in the project tile.'\n ),\n upload_to='plan/tile_images',\n blank=True\n )\n tile_image_copyright = ImageCopyrightField(\n verbose_name=_('Tile image copyright'),\n help_text=_('The name is displayed in the tile image.')\n )\n topics = TopicField(\n verbose_name=_('Topics'),\n help_text=_('Assign your plan to 1 or 2 '\n 'topics. 
In the project '\n 'overview projects can be '\n 'filtered according to topics.')\n )\n status = models.SmallIntegerField(\n choices=STATUS_CHOICES,\n verbose_name=_('Status'),\n help_text=_('In the project overview projects '\n 'can be filtered by status.')\n )\n participation = models.SmallIntegerField(\n choices=PARTICIPATION_CHOICES,\n verbose_name=_('Participation'),\n help_text=_('In the project overview '\n 'projects can be filtered '\n 'according to participation '\n 'status.')\n )\n duration = models.CharField(\n blank=True,\n null=True,\n max_length=255,\n verbose_name=_('Duration'),\n help_text=_('Provide information on the '\n 'expected duration of the plan in '\n 'no more than 255 characters.')\n )\n is_draft = models.BooleanField(default=True)\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n @property\n def administrative_district(self):\n return self.district\n\n @property\n def topic_names(self):\n if hasattr(settings, 'A4_PROJECT_TOPICS'):\n choices = dict(settings.A4_PROJECT_TOPICS)\n return [choices.get(topic, topic) for topic in self.topics]\n return []\n\n @cached_property\n def published_projects(self):\n return self.projects.filter(\n Q(access=Access.PUBLIC) | Q(access=Access.SEMIPUBLIC),\n is_draft=False, is_archived=False)\n\n @cached_property\n def participation_string(self):\n project_list = self.published_projects.values_list('id', flat=True)\n phases_in_plan = Phase.objects\\\n .select_related('module__project')\\\n .filter(module__project_id__in=project_list)\\\n .order_by('-start_date')\n\n if phases_in_plan.active_phases():\n return _('running')\n\n future_phases_with_start_date = phases_in_plan.future_phases()\\\n .exclude(start_date__isnull=True)\n\n if future_phases_with_start_date:\n future_phase = future_phases_with_start_date.first()\n return _('starts at {}')\\\n .format(future_phase.start_date.strftime('%d.%m.%Y'))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk='{:05d}'.format(self.pk),\n year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n super().save(*args, **kwargs)\n\n def _get_group(self, user, organisation):\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n return shared_groups.distinct().first()\n\n def is_group_member(self, user):\n if self.group:\n return user.groups.filter(id=self.group.id).exists()\n return False\n", "path": "meinberlin/apps/plans/models.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.ckeditor.fields import RichTextCollapsibleUploadingField\nfrom adhocracy4.images.fields import ConfiguredImageField\nfrom adhocracy4.images.fields import ImageCopyrightField\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects import models as project_models\nfrom 
adhocracy4.projects.enums import Access\nfrom adhocracy4.projects.fields import TopicField\nfrom adhocracy4.projects.models import ProjectContactDetailMixin\n\n\nclass Plan(ProjectContactDetailMixin, UserGeneratedContentModel):\n\n PARTICIPATION_YES = 0\n PARTICIPATION_NO = 1\n PARTICIPATION_UNDECIDED = 2\n PARTICIPATION_CHOICES = (\n (PARTICIPATION_YES, _('with')),\n (PARTICIPATION_NO, _('without')),\n (PARTICIPATION_UNDECIDED, _('undecided')),\n )\n\n STATUS_ONGOING = 0\n STATUS_DONE = 1\n\n STATUS_CHOICES = (\n (STATUS_ONGOING, _('running')),\n (STATUS_DONE, _('done'))\n )\n\n title = models.CharField(\n max_length=120,\n verbose_name=_('Title of your plan'),\n help_text=_('Enter a meaningful title with a maximum '\n 'length of 120 characters. The title'\n ' will appear in the project tile and on '\n 'top of the plan detail page.')\n )\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_('Organisation'))\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='plans',\n blank=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n blank=True,\n null=True)\n point = map_fields.PointField(\n blank=True,\n verbose_name=_('Can your plan be located on the map?'),\n help_text=_('If you locate your plan, it will be shown '\n 'on the map in the project overview in addition '\n 'to the list. To set a pin, click inside the '\n 'highlighted area or enter an address. Once a '\n 'pin is set you can move it by dragging it.')\n )\n point_label = models.CharField(\n blank=True,\n default='',\n max_length=255,\n verbose_name=_('Name of the site'),\n help_text=_('The name of the site (e.g. name of street, '\n 'building or park) makes it easier to locate '\n 'the plan. The maximum length is 255 characters.'),\n )\n district = models.ForeignKey(\n AdministrativeDistrict,\n verbose_name=_('District'),\n help_text=_('Enter the district in which the plan is located or '\n 'whether it is a city-wide plan. In the project '\n 'overview projects can be filtered by district.'),\n null=True,\n blank=True,\n on_delete=models.CASCADE\n )\n cost = models.CharField(\n max_length=255,\n verbose_name=_('Cost'),\n help_text=_('Enter details of the estimated or actual costs '\n 'of the plan in no more than 255 characters.')\n )\n description = RichTextCollapsibleUploadingField(\n config_name='collapsible-image-editor',\n verbose_name=_('Description of your plan'),\n help_text=_('Describe the key points of your plan. You can upload '\n 'PDFs and images, embed videos and link to external '\n 'URLs, among other things.')\n )\n description_image = ConfiguredImageField(\n 'plan_image',\n verbose_name=_('Header image'),\n upload_to='plan/description_image',\n blank=True,\n help_prefix=_(\n 'Visualize your plan with an image underneath the description.'\n ),\n )\n description_image_copyright = models.CharField(\n verbose_name=_('Header image copyright'),\n blank=True,\n max_length=120,\n help_text=_('The name is displayed in the header image.')\n )\n tile_image = ConfiguredImageField(\n 'tileimage',\n verbose_name=_('Tile image'),\n help_prefix=_(\n 'The image will be shown in the project tile.'\n ),\n upload_to='plan/tile_images',\n blank=True\n )\n tile_image_copyright = ImageCopyrightField(\n verbose_name=_('Tile image copyright'),\n help_text=_('The name is displayed in the tile image.')\n )\n topics = TopicField(\n verbose_name=_('Topics'),\n help_text=_('Assign your plan to 1 or 2 '\n 'topics. 
In the project '\n 'overview projects can be '\n 'filtered according to topics.')\n )\n status = models.SmallIntegerField(\n choices=STATUS_CHOICES,\n verbose_name=_('Status'),\n help_text=_('In the project overview projects '\n 'can be filtered by status.')\n )\n participation = models.SmallIntegerField(\n choices=PARTICIPATION_CHOICES,\n verbose_name=_('Participation'),\n help_text=_('In the project overview '\n 'projects can be filtered '\n 'according to participation '\n 'status.')\n )\n duration = models.CharField(\n blank=True,\n null=True,\n max_length=255,\n verbose_name=_('Duration'),\n help_text=_('Provide information on the '\n 'expected duration of the plan in '\n 'no more than 255 characters.')\n )\n is_draft = models.BooleanField(default=True)\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n @property\n def administrative_district(self):\n return self.district\n\n @property\n def topic_names(self):\n if hasattr(settings, 'A4_PROJECT_TOPICS'):\n choices = dict(settings.A4_PROJECT_TOPICS)\n return [choices.get(topic, topic) for topic in self.topics]\n return []\n\n @cached_property\n def published_projects(self):\n return self.projects.filter(\n Q(access=Access.PUBLIC) | Q(access=Access.SEMIPUBLIC),\n is_draft=False, is_archived=False)\n\n @cached_property\n def participation_string(self):\n project_list = self.published_projects.values_list('id', flat=True)\n phases_in_plan = Phase.objects\\\n .select_related('module__project')\\\n .filter(module__project_id__in=project_list)\\\n .order_by('-start_date')\n\n if phases_in_plan.active_phases():\n return _('running')\n\n future_phases_with_start_date = phases_in_plan.future_phases()\\\n .exclude(start_date__isnull=True)\n\n if future_phases_with_start_date:\n future_phase = future_phases_with_start_date.first()\n return _('starts at {}')\\\n .format(future_phase.start_date.strftime('%d.%m.%Y'))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk='{:05d}'.format(self.pk),\n year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(\n self.description, 'collapsible-image-editor')\n super().save(*args, **kwargs)\n\n def _get_group(self, user, organisation):\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n return shared_groups.distinct().first()\n\n def is_group_member(self, user):\n if self.group:\n return user.groups.filter(id=self.group.id).exists()\n return False\n", "path": "meinberlin/apps/plans/models.py"}]} | 2,712 | 135 |
gh_patches_debug_13654 | rasdani/github-patches | git_diff | psf__black-1761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Blackd Packaging
**Describe the bug**
The [blackd documenation states](https://black.readthedocs.io/en/stable/blackd.html):
> blackd is not packaged alongside Black by default because it has additional dependencies. You will need to execute pip install black[d] to install it.
But `blackd` is included in [packages](https://github.com/psf/black/blob/master/setup.py#L65) for black.
**To Reproduce** Steps to reproduce the behavior:
```bash
$ python3 -m venv venv; source venv/bin/activate
$ pip install black
$ which blackd
/path/to/venv/bin/blackd
```
**Expected behavior**
No binary named `blackd` is installed because its webserver dependencies are not installed.
**Environment (please complete the following information):**
- Version: master
- OS and Python version: Linux
**Does this bug also happen on master?**
Yes, see above
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/blackd/__init__.py`
Content:
```
1 import asyncio
2 from concurrent.futures import Executor, ProcessPoolExecutor
3 from datetime import datetime
4 from functools import partial
5 import logging
6 from multiprocessing import freeze_support
7 from typing import Set, Tuple
8
9 from aiohttp import web
10 import aiohttp_cors
11 import black
12 import click
13
14 from _black_version import version as __version__
15
16 # This is used internally by tests to shut down the server prematurely
17 _stop_signal = asyncio.Event()
18
19 # Request headers
20 PROTOCOL_VERSION_HEADER = "X-Protocol-Version"
21 LINE_LENGTH_HEADER = "X-Line-Length"
22 PYTHON_VARIANT_HEADER = "X-Python-Variant"
23 SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
24 FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
25 DIFF_HEADER = "X-Diff"
26
27 BLACK_HEADERS = [
28 PROTOCOL_VERSION_HEADER,
29 LINE_LENGTH_HEADER,
30 PYTHON_VARIANT_HEADER,
31 SKIP_STRING_NORMALIZATION_HEADER,
32 FAST_OR_SAFE_HEADER,
33 DIFF_HEADER,
34 ]
35
36 # Response headers
37 BLACK_VERSION_HEADER = "X-Black-Version"
38
39
40 class InvalidVariantHeader(Exception):
41 pass
42
43
44 @click.command(context_settings={"help_option_names": ["-h", "--help"]})
45 @click.option(
46 "--bind-host", type=str, help="Address to bind the server to.", default="localhost"
47 )
48 @click.option("--bind-port", type=int, help="Port to listen on", default=45484)
49 @click.version_option(version=black.__version__)
50 def main(bind_host: str, bind_port: int) -> None:
51 logging.basicConfig(level=logging.INFO)
52 app = make_app()
53 ver = black.__version__
54 black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
55 web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
56
57
58 def make_app() -> web.Application:
59 app = web.Application()
60 executor = ProcessPoolExecutor()
61
62 cors = aiohttp_cors.setup(app)
63 resource = cors.add(app.router.add_resource("/"))
64 cors.add(
65 resource.add_route("POST", partial(handle, executor=executor)),
66 {
67 "*": aiohttp_cors.ResourceOptions(
68 allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*"
69 )
70 },
71 )
72
73 return app
74
75
76 async def handle(request: web.Request, executor: Executor) -> web.Response:
77 headers = {BLACK_VERSION_HEADER: __version__}
78 try:
79 if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1":
80 return web.Response(
81 status=501, text="This server only supports protocol version 1"
82 )
83 try:
84 line_length = int(
85 request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
86 )
87 except ValueError:
88 return web.Response(status=400, text="Invalid line length header value")
89
90 if PYTHON_VARIANT_HEADER in request.headers:
91 value = request.headers[PYTHON_VARIANT_HEADER]
92 try:
93 pyi, versions = parse_python_variant_header(value)
94 except InvalidVariantHeader as e:
95 return web.Response(
96 status=400,
97 text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}",
98 )
99 else:
100 pyi = False
101 versions = set()
102
103 skip_string_normalization = bool(
104 request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
105 )
106 fast = False
107 if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
108 fast = True
109 mode = black.FileMode(
110 target_versions=versions,
111 is_pyi=pyi,
112 line_length=line_length,
113 string_normalization=not skip_string_normalization,
114 )
115 req_bytes = await request.content.read()
116 charset = request.charset if request.charset is not None else "utf8"
117 req_str = req_bytes.decode(charset)
118 then = datetime.utcnow()
119
120 loop = asyncio.get_event_loop()
121 formatted_str = await loop.run_in_executor(
122 executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
123 )
124
125 # Only output the diff in the HTTP response
126 only_diff = bool(request.headers.get(DIFF_HEADER, False))
127 if only_diff:
128 now = datetime.utcnow()
129 src_name = f"In\t{then} +0000"
130 dst_name = f"Out\t{now} +0000"
131 loop = asyncio.get_event_loop()
132 formatted_str = await loop.run_in_executor(
133 executor,
134 partial(black.diff, req_str, formatted_str, src_name, dst_name),
135 )
136
137 return web.Response(
138 content_type=request.content_type,
139 charset=charset,
140 headers=headers,
141 text=formatted_str,
142 )
143 except black.NothingChanged:
144 return web.Response(status=204, headers=headers)
145 except black.InvalidInput as e:
146 return web.Response(status=400, headers=headers, text=str(e))
147 except Exception as e:
148 logging.exception("Exception during handling a request")
149 return web.Response(status=500, headers=headers, text=str(e))
150
151
152 def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
153 if value == "pyi":
154 return True, set()
155 else:
156 versions = set()
157 for version in value.split(","):
158 if version.startswith("py"):
159 version = version[len("py") :]
160 if "." in version:
161 major_str, *rest = version.split(".")
162 else:
163 major_str = version[0]
164 rest = [version[1:]] if len(version) > 1 else []
165 try:
166 major = int(major_str)
167 if major not in (2, 3):
168 raise InvalidVariantHeader("major version must be 2 or 3")
169 if len(rest) > 0:
170 minor = int(rest[0])
171 if major == 2 and minor != 7:
172 raise InvalidVariantHeader(
173 "minor version must be 7 for Python 2"
174 )
175 else:
176 # Default to lowest supported minor version.
177 minor = 7 if major == 2 else 3
178 version_str = f"PY{major}{minor}"
179 if major == 3 and not hasattr(black.TargetVersion, version_str):
180 raise InvalidVariantHeader(f"3.{minor} is not supported")
181 versions.add(black.TargetVersion[version_str])
182 except (KeyError, ValueError):
183 raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'")
184 return False, versions
185
186
187 def patched_main() -> None:
188 freeze_support()
189 black.patch_click()
190 main()
191
192
193 if __name__ == "__main__":
194 patched_main()
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py
--- a/src/blackd/__init__.py
+++ b/src/blackd/__init__.py
@@ -1,13 +1,24 @@
import asyncio
+import logging
+import sys
from concurrent.futures import Executor, ProcessPoolExecutor
from datetime import datetime
from functools import partial
-import logging
from multiprocessing import freeze_support
from typing import Set, Tuple
-from aiohttp import web
-import aiohttp_cors
+try:
+ from aiohttp import web
+ import aiohttp_cors
+except ImportError as ie:
+ print(
+ f"aiohttp dependency is not installed: {ie}. "
+ + "Please re-install black with the '[d]' extra install "
+ + "to obtain aiohttp_cors: `pip install black[d]`",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
import black
import click
| {"golden_diff": "diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py\n--- a/src/blackd/__init__.py\n+++ b/src/blackd/__init__.py\n@@ -1,13 +1,24 @@\n import asyncio\n+import logging\n+import sys\n from concurrent.futures import Executor, ProcessPoolExecutor\n from datetime import datetime\n from functools import partial\n-import logging\n from multiprocessing import freeze_support\n from typing import Set, Tuple\n \n-from aiohttp import web\n-import aiohttp_cors\n+try:\n+ from aiohttp import web\n+ import aiohttp_cors\n+except ImportError as ie:\n+ print(\n+ f\"aiohttp dependency is not installed: {ie}. \"\n+ + \"Please re-install black with the '[d]' extra install \"\n+ + \"to obtain aiohttp_cors: `pip install black[d]`\",\n+ file=sys.stderr,\n+ )\n+ sys.exit(-1)\n+\n import black\n import click\n", "issue": "Blackd Packaging\n**Describe the bug**\r\n\r\nThe [blackd documenation states](https://black.readthedocs.io/en/stable/blackd.html):\r\n> blackd is not packaged alongside Black by default because it has additional dependencies. You will need to execute pip install black[d] to install it.\r\n\r\nBut `blackd` is included in [packages](https://github.com/psf/black/blob/master/setup.py#L65) for black.\r\n\r\n**To Reproduce** Steps to reproduce the behavior:\r\n\r\n```bash\r\n$ python3 -m venv venv; source venv/bin/activate\r\n$ pip install black\r\n$ which blackd\r\n/path/to/venv/bin/blackd\r\n```\r\n\r\n**Expected behavior**\r\n\r\nNo binary named `blackd` is installed because its webserver dependencies are not installed.\r\n\r\n**Environment (please complete the following information):**\r\n\r\n- Version: master\r\n- OS and Python version: Linux\r\n\r\n**Does this bug also happen on master?**\r\nYes, see above\r\n\n", "before_files": [{"content": "import asyncio\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nimport logging\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\nfrom aiohttp import web\nimport aiohttp_cors\nimport black\nimport click\n\nfrom _black_version import version as __version__\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application()\n 
executor = ProcessPoolExecutor()\n\n cors = aiohttp_cors.setup(app)\n resource = cors.add(app.router.add_resource(\"/\"))\n cors.add(\n resource.add_route(\"POST\", partial(handle, executor=executor)),\n {\n \"*\": aiohttp_cors.ResourceOptions(\n allow_headers=(*BLACK_HEADERS, \"Content-Type\"), expose_headers=\"*\"\n )\n },\n )\n\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2 and minor != 7:\n raise InvalidVariantHeader(\n \"minor version must be 7 for Python 2\"\n )\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major 
== 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\")\n return False, versions\n\n\ndef patched_main() -> None:\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport sys\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n import aiohttp_cors\nexcept ImportError as ie:\n print(\n f\"aiohttp dependency is not installed: {ie}. \"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\",\n file=sys.stderr,\n )\n sys.exit(-1)\n\nimport black\nimport click\n\nfrom _black_version import version as __version__\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application()\n executor = ProcessPoolExecutor()\n\n cors = aiohttp_cors.setup(app)\n resource = cors.add(app.router.add_resource(\"/\"))\n cors.add(\n resource.add_route(\"POST\", partial(handle, executor=executor)),\n {\n \"*\": aiohttp_cors.ResourceOptions(\n allow_headers=(*BLACK_HEADERS, \"Content-Type\"), expose_headers=\"*\"\n )\n },\n )\n\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n 
return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2 and minor != 7:\n raise InvalidVariantHeader(\n \"minor version must be 7 for Python 2\"\n )\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\")\n return False, versions\n\n\ndef patched_main() -> None:\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}]} | 2,406 | 214 |
gh_patches_debug_5541 | rasdani/github-patches | git_diff | conda__conda-build-3118 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't get conda index to work with channel_name
From the updated docs on 3.14.0, `channel_name` is now a positional argument. However, when I do
```bash
$ conda index <build_dir> <channel_name>
```
I get
```
(...)
FileNotFoundError: [Errno 2] No such file or directory: '<path_prefix>/<channel_name>'
```
instead of a custom channel name in the outputted html files.
Am I forgetting some special `argparse` magic or did #3091 introduce a bug?
I'm on conda 4.5.11 and conda-build 3.14.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/cli/main_index.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import logging
4 import os
5 import sys
6
7 from conda_build.conda_interface import ArgumentParser
8
9 from conda_build import api
10 from conda_build.index import DEFAULT_SUBDIRS, MAX_THREADS_DEFAULT
11
12 logging.basicConfig(level=logging.INFO)
13
14
15 def parse_args(args):
16 p = ArgumentParser(
17 description="Update package index metadata files in given directories.")
18
19 p.add_argument(
20 'dir',
21 help='Directory that contains an index to be updated.',
22 nargs='*',
23 default=[os.getcwd()],
24 )
25
26 p.add_argument(
27 '-c', "--check-md5",
28 action="store_true",
29 help="""Use hash values instead of file modification times for determining if a
30 package's metadata needs to be updated.""",
31 )
32 p.add_argument(
33 'channel_name',
34 help='Adding a channel name will create an index.html file within the subdir.',
35 nargs='?',
36 default=None,
37 )
38 p.add_argument(
39 '-s', '--subdir',
40 action='append',
41 help='Optional. The subdir to index. Can be given multiple times. If not provided, will '
42 'default to all of %s. If provided, will not create channeldata.json for the channel.'
43 '' % ', '.join(DEFAULT_SUBDIRS),
44 )
45 p.add_argument(
46 '-t', '--threads',
47 default=MAX_THREADS_DEFAULT,
48 type=int,
49 )
50 p.add_argument(
51 "-p", "--patch-generator",
52 help="Path to Python file that outputs metadata patch instructions"
53 )
54
55 args = p.parse_args(args)
56 return p, args
57
58
59 def execute(args):
60 _, args = parse_args(args)
61 api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,
62 threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator)
63
64
65 def main():
66 return execute(sys.argv[1:])
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/cli/main_index.py b/conda_build/cli/main_index.py
--- a/conda_build/cli/main_index.py
+++ b/conda_build/cli/main_index.py
@@ -30,10 +30,8 @@
package's metadata needs to be updated.""",
)
p.add_argument(
- 'channel_name',
- help='Adding a channel name will create an index.html file within the subdir.',
- nargs='?',
- default=None,
+ "-n", "--channel-name",
+ help="Customize the channel name listed in each channel's index.html.",
)
p.add_argument(
'-s', '--subdir',
| {"golden_diff": "diff --git a/conda_build/cli/main_index.py b/conda_build/cli/main_index.py\n--- a/conda_build/cli/main_index.py\n+++ b/conda_build/cli/main_index.py\n@@ -30,10 +30,8 @@\n package's metadata needs to be updated.\"\"\",\n )\n p.add_argument(\n- 'channel_name',\n- help='Adding a channel name will create an index.html file within the subdir.',\n- nargs='?',\n- default=None,\n+ \"-n\", \"--channel-name\",\n+ help=\"Customize the channel name listed in each channel's index.html.\",\n )\n p.add_argument(\n '-s', '--subdir',\n", "issue": "Can't get conda index to work with channel_name\nFrom the updated docs on 3.14.0, `channel_name` is now a positional argument. However, when I do\r\n```bash\r\n$ conda index <build_dir> <channel_name>\r\n```\r\nI get\r\n```\r\n(...)\r\nFileNotFoundError: [Errno 2] No such file or directory: '<path_prefix>/<channel_name>'\r\n```\r\ninstead of a custom channel name in the outputted html files. \r\n\r\nAm I forgetting some special `argparse` magic or did #3091 introduce a bug?\r\n\r\nI'm on conda 4.5.11 and conda-build 3.14.0\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport sys\n\nfrom conda_build.conda_interface import ArgumentParser\n\nfrom conda_build import api\nfrom conda_build.index import DEFAULT_SUBDIRS, MAX_THREADS_DEFAULT\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef parse_args(args):\n p = ArgumentParser(\n description=\"Update package index metadata files in given directories.\")\n\n p.add_argument(\n 'dir',\n help='Directory that contains an index to be updated.',\n nargs='*',\n default=[os.getcwd()],\n )\n\n p.add_argument(\n '-c', \"--check-md5\",\n action=\"store_true\",\n help=\"\"\"Use hash values instead of file modification times for determining if a\n package's metadata needs to be updated.\"\"\",\n )\n p.add_argument(\n 'channel_name',\n help='Adding a channel name will create an index.html file within the subdir.',\n nargs='?',\n default=None,\n )\n p.add_argument(\n '-s', '--subdir',\n action='append',\n help='Optional. The subdir to index. Can be given multiple times. If not provided, will '\n 'default to all of %s. 
If provided, will not create channeldata.json for the channel.'\n '' % ', '.join(DEFAULT_SUBDIRS),\n )\n p.add_argument(\n '-t', '--threads',\n default=MAX_THREADS_DEFAULT,\n type=int,\n )\n p.add_argument(\n \"-p\", \"--patch-generator\",\n help=\"Path to Python file that outputs metadata patch instructions\"\n )\n\n args = p.parse_args(args)\n return p, args\n\n\ndef execute(args):\n _, args = parse_args(args)\n api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,\n threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator)\n\n\ndef main():\n return execute(sys.argv[1:])\n", "path": "conda_build/cli/main_index.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport logging\nimport os\nimport sys\n\nfrom conda_build.conda_interface import ArgumentParser\n\nfrom conda_build import api\nfrom conda_build.index import DEFAULT_SUBDIRS, MAX_THREADS_DEFAULT\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef parse_args(args):\n p = ArgumentParser(\n description=\"Update package index metadata files in given directories.\")\n\n p.add_argument(\n 'dir',\n help='Directory that contains an index to be updated.',\n nargs='*',\n default=[os.getcwd()],\n )\n\n p.add_argument(\n '-c', \"--check-md5\",\n action=\"store_true\",\n help=\"\"\"Use hash values instead of file modification times for determining if a\n package's metadata needs to be updated.\"\"\",\n )\n p.add_argument(\n \"-n\", \"--channel-name\",\n help=\"Customize the channel name listed in each channel's index.html.\",\n )\n p.add_argument(\n '-s', '--subdir',\n action='append',\n help='Optional. The subdir to index. Can be given multiple times. If not provided, will '\n 'default to all of %s. If provided, will not create channeldata.json for the channel.'\n '' % ', '.join(DEFAULT_SUBDIRS),\n )\n p.add_argument(\n '-t', '--threads',\n default=MAX_THREADS_DEFAULT,\n type=int,\n )\n p.add_argument(\n \"-p\", \"--patch-generator\",\n help=\"Path to Python file that outputs metadata patch instructions\"\n )\n\n args = p.parse_args(args)\n return p, args\n\n\ndef execute(args):\n _, args = parse_args(args)\n api.update_index(args.dir, check_md5=args.check_md5, channel_name=args.channel_name,\n threads=args.threads, subdir=args.subdir, patch_generator=args.patch_generator)\n\n\ndef main():\n return execute(sys.argv[1:])\n", "path": "conda_build/cli/main_index.py"}]} | 953 | 145 |
gh_patches_debug_14395 | rasdani/github-patches | git_diff | ethereum__web3.py-3027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix logger for AsyncHTTPProvider
AsyncHTTPProvider is getting the logger for `web3.providers.HTTPProvider` but should instead use `web3.providers.AsyncHTTPProvider`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/providers/__init__.py`
Content:
```
1 from .async_base import ( # noqa: F401
2 AsyncBaseProvider,
3 )
4 from .base import ( # noqa: F401
5 BaseProvider,
6 JSONBaseProvider,
7 )
8 from .ipc import ( # noqa: F401,
9 IPCProvider,
10 )
11 from .rpc import ( # noqa: F401,
12 HTTPProvider,
13 )
14 from .websocket import ( # noqa: F401,
15 WebsocketProvider,
16 )
17 from .auto import ( # noqa: F401,
18 AutoProvider,
19 )
20
```
Path: `web3/providers/async_rpc.py`
Content:
```
1 import logging
2 from typing import (
3 Any,
4 Dict,
5 Iterable,
6 Optional,
7 Tuple,
8 Union,
9 )
10
11 from aiohttp import (
12 ClientSession,
13 )
14 from eth_typing import (
15 URI,
16 )
17 from eth_utils import (
18 to_dict,
19 )
20
21 from web3._utils.http import (
22 construct_user_agent,
23 )
24 from web3._utils.request import (
25 async_cache_and_return_session as _async_cache_and_return_session,
26 async_make_post_request,
27 get_default_http_endpoint,
28 )
29 from web3.types import (
30 AsyncMiddleware,
31 RPCEndpoint,
32 RPCResponse,
33 )
34
35 from ..datastructures import (
36 NamedElementOnion,
37 )
38 from ..middleware.exception_retry_request import (
39 async_http_retry_request_middleware,
40 )
41 from .async_base import (
42 AsyncJSONBaseProvider,
43 )
44
45
46 class AsyncHTTPProvider(AsyncJSONBaseProvider):
47 logger = logging.getLogger("web3.providers.HTTPProvider")
48 endpoint_uri = None
49 _request_kwargs = None
50 # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider
51 _middlewares: Tuple[AsyncMiddleware, ...] = NamedElementOnion([(async_http_retry_request_middleware, "http_retry_request")]) # type: ignore # noqa: E501
52
53 def __init__(
54 self,
55 endpoint_uri: Optional[Union[URI, str]] = None,
56 request_kwargs: Optional[Any] = None,
57 ) -> None:
58 if endpoint_uri is None:
59 self.endpoint_uri = get_default_http_endpoint()
60 else:
61 self.endpoint_uri = URI(endpoint_uri)
62
63 self._request_kwargs = request_kwargs or {}
64
65 super().__init__()
66
67 async def cache_async_session(self, session: ClientSession) -> ClientSession:
68 return await _async_cache_and_return_session(self.endpoint_uri, session)
69
70 def __str__(self) -> str:
71 return f"RPC connection {self.endpoint_uri}"
72
73 @to_dict
74 def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:
75 if "headers" not in self._request_kwargs:
76 yield "headers", self.get_request_headers()
77 for key, value in self._request_kwargs.items():
78 yield key, value
79
80 def get_request_headers(self) -> Dict[str, str]:
81 return {
82 "Content-Type": "application/json",
83 "User-Agent": construct_user_agent(str(type(self))),
84 }
85
86 async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
87 self.logger.debug(
88 f"Making request HTTP. URI: {self.endpoint_uri}, Method: {method}"
89 )
90 request_data = self.encode_rpc_request(method, params)
91 raw_response = await async_make_post_request(
92 self.endpoint_uri, request_data, **self.get_request_kwargs()
93 )
94 response = self.decode_rpc_response(raw_response)
95 self.logger.debug(
96 f"Getting response HTTP. URI: {self.endpoint_uri}, "
97 f"Method: {method}, Response: {response}"
98 )
99 return response
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/web3/providers/__init__.py b/web3/providers/__init__.py
--- a/web3/providers/__init__.py
+++ b/web3/providers/__init__.py
@@ -1,6 +1,9 @@
from .async_base import ( # noqa: F401
AsyncBaseProvider,
)
+from .async_rpc import ( # noqa: F401
+ AsyncHTTPProvider,
+)
from .base import ( # noqa: F401
BaseProvider,
JSONBaseProvider,
diff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py
--- a/web3/providers/async_rpc.py
+++ b/web3/providers/async_rpc.py
@@ -44,7 +44,7 @@
class AsyncHTTPProvider(AsyncJSONBaseProvider):
- logger = logging.getLogger("web3.providers.HTTPProvider")
+ logger = logging.getLogger("web3.providers.AsyncHTTPProvider")
endpoint_uri = None
_request_kwargs = None
# type ignored b/c conflict with _middlewares attr on AsyncBaseProvider
| {"golden_diff": "diff --git a/web3/providers/__init__.py b/web3/providers/__init__.py\n--- a/web3/providers/__init__.py\n+++ b/web3/providers/__init__.py\n@@ -1,6 +1,9 @@\n from .async_base import ( # noqa: F401\n AsyncBaseProvider,\n )\n+from .async_rpc import ( # noqa: F401\n+ AsyncHTTPProvider,\n+)\n from .base import ( # noqa: F401\n BaseProvider,\n JSONBaseProvider,\ndiff --git a/web3/providers/async_rpc.py b/web3/providers/async_rpc.py\n--- a/web3/providers/async_rpc.py\n+++ b/web3/providers/async_rpc.py\n@@ -44,7 +44,7 @@\n \n \n class AsyncHTTPProvider(AsyncJSONBaseProvider):\n- logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n+ logger = logging.getLogger(\"web3.providers.AsyncHTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider\n", "issue": "Fix logger for AsyncHTTPProvider\nAsyncHTTPProvider is getting the logger for `web3.providers.HTTPProvider` but should instead use `web3.providers.AsyncHTTPProvider`\n", "before_files": [{"content": "from .async_base import ( # noqa: F401\n AsyncBaseProvider,\n)\nfrom .base import ( # noqa: F401\n BaseProvider,\n JSONBaseProvider,\n)\nfrom .ipc import ( # noqa: F401,\n IPCProvider,\n)\nfrom .rpc import ( # noqa: F401,\n HTTPProvider,\n)\nfrom .websocket import ( # noqa: F401,\n WebsocketProvider,\n)\nfrom .auto import ( # noqa: F401,\n AutoProvider,\n)\n", "path": "web3/providers/__init__.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom aiohttp import (\n ClientSession,\n)\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_cache_and_return_session as _async_cache_and_return_session,\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n AsyncMiddleware,\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom ..datastructures import (\n NamedElementOnion,\n)\nfrom ..middleware.exception_retry_request import (\n async_http_retry_request_middleware,\n)\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.HTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider\n _middlewares: Tuple[AsyncMiddleware, ...] 
= NamedElementOnion([(async_http_retry_request_middleware, \"http_retry_request\")]) # type: ignore # noqa: E501\n\n def __init__(\n self,\n endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None,\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n async def cache_async_session(self, session: ClientSession) -> ClientSession:\n return await _async_cache_and_return_session(self.endpoint_uri, session)\n\n def __str__(self) -> str:\n return f\"RPC connection {self.endpoint_uri}\"\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if \"headers\" not in self._request_kwargs:\n yield \"headers\", self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def get_request_headers(self) -> Dict[str, str]:\n return {\n \"Content-Type\": \"application/json\",\n \"User-Agent\": construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request HTTP. URI: {self.endpoint_uri}, Method: {method}\"\n )\n request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri, request_data, **self.get_request_kwargs()\n )\n response = self.decode_rpc_response(raw_response)\n self.logger.debug(\n f\"Getting response HTTP. URI: {self.endpoint_uri}, \"\n f\"Method: {method}, Response: {response}\"\n )\n return response\n", "path": "web3/providers/async_rpc.py"}], "after_files": [{"content": "from .async_base import ( # noqa: F401\n AsyncBaseProvider,\n)\nfrom .async_rpc import ( # noqa: F401\n AsyncHTTPProvider,\n)\nfrom .base import ( # noqa: F401\n BaseProvider,\n JSONBaseProvider,\n)\nfrom .ipc import ( # noqa: F401,\n IPCProvider,\n)\nfrom .rpc import ( # noqa: F401,\n HTTPProvider,\n)\nfrom .websocket import ( # noqa: F401,\n WebsocketProvider,\n)\nfrom .auto import ( # noqa: F401,\n AutoProvider,\n)\n", "path": "web3/providers/__init__.py"}, {"content": "import logging\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Optional,\n Tuple,\n Union,\n)\n\nfrom aiohttp import (\n ClientSession,\n)\nfrom eth_typing import (\n URI,\n)\nfrom eth_utils import (\n to_dict,\n)\n\nfrom web3._utils.http import (\n construct_user_agent,\n)\nfrom web3._utils.request import (\n async_cache_and_return_session as _async_cache_and_return_session,\n async_make_post_request,\n get_default_http_endpoint,\n)\nfrom web3.types import (\n AsyncMiddleware,\n RPCEndpoint,\n RPCResponse,\n)\n\nfrom ..datastructures import (\n NamedElementOnion,\n)\nfrom ..middleware.exception_retry_request import (\n async_http_retry_request_middleware,\n)\nfrom .async_base import (\n AsyncJSONBaseProvider,\n)\n\n\nclass AsyncHTTPProvider(AsyncJSONBaseProvider):\n logger = logging.getLogger(\"web3.providers.AsyncHTTPProvider\")\n endpoint_uri = None\n _request_kwargs = None\n # type ignored b/c conflict with _middlewares attr on AsyncBaseProvider\n _middlewares: Tuple[AsyncMiddleware, ...] 
= NamedElementOnion([(async_http_retry_request_middleware, \"http_retry_request\")]) # type: ignore # noqa: E501\n\n def __init__(\n self,\n endpoint_uri: Optional[Union[URI, str]] = None,\n request_kwargs: Optional[Any] = None,\n ) -> None:\n if endpoint_uri is None:\n self.endpoint_uri = get_default_http_endpoint()\n else:\n self.endpoint_uri = URI(endpoint_uri)\n\n self._request_kwargs = request_kwargs or {}\n\n super().__init__()\n\n async def cache_async_session(self, session: ClientSession) -> ClientSession:\n return await _async_cache_and_return_session(self.endpoint_uri, session)\n\n def __str__(self) -> str:\n return f\"RPC connection {self.endpoint_uri}\"\n\n @to_dict\n def get_request_kwargs(self) -> Iterable[Tuple[str, Any]]:\n if \"headers\" not in self._request_kwargs:\n yield \"headers\", self.get_request_headers()\n for key, value in self._request_kwargs.items():\n yield key, value\n\n def get_request_headers(self) -> Dict[str, str]:\n return {\n \"Content-Type\": \"application/json\",\n \"User-Agent\": construct_user_agent(str(type(self))),\n }\n\n async def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:\n self.logger.debug(\n f\"Making request HTTP. URI: {self.endpoint_uri}, Method: {method}\"\n )\n request_data = self.encode_rpc_request(method, params)\n raw_response = await async_make_post_request(\n self.endpoint_uri, request_data, **self.get_request_kwargs()\n )\n response = self.decode_rpc_response(raw_response)\n self.logger.debug(\n f\"Getting response HTTP. URI: {self.endpoint_uri}, \"\n f\"Method: {method}, Response: {response}\"\n )\n return response\n", "path": "web3/providers/async_rpc.py"}]} | 1,303 | 237 |
gh_patches_debug_700 | rasdani/github-patches | git_diff | saulpw__visidata-1304 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[undo develop] undoing a reload blanks the entire sheet
Since v2.5 undo for reload has been removed, and replaced with quitguard+confirm! However, in that case an undo should not be set.
Current behaviour is that it blanks the sheet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/undo.py`
Content:
```
1 import itertools
2 from copy import copy
3
4 from visidata import vd, options, VisiData, BaseSheet, UNLOADED
5
6 BaseSheet.init('undone', list) # list of CommandLogRow for redo after undo
7
8 vd.option('undo', True, 'enable undo/redo')
9
10 nonUndo = '''commit open-file'''.split()
11
12 def isUndoableCommand(longname):
13 for n in nonUndo:
14 if longname.startswith(n):
15 return False
16 return True
17
18 @VisiData.api
19 def addUndo(vd, undofunc, *args, **kwargs):
20 'On undo of latest command, call ``undofunc(*args, **kwargs)``.'
21 if options.undo:
22 # occurs when VisiData is just starting up
23 if getattr(vd, 'activeCommand', UNLOADED) is UNLOADED:
24 return
25 r = vd.modifyCommand
26 # some special commands, like open-file, do not have an undofuncs set
27 if not r or not isUndoableCommand(r.longname):
28 return
29 if not r.undofuncs:
30 r.undofuncs = []
31 r.undofuncs.append((undofunc, args, kwargs))
32
33
34 @VisiData.api
35 def undo(vd, sheet):
36 if not options.undo:
37 vd.fail("options.undo not enabled")
38
39 # don't allow undo of first command on a sheet, which is always the command that created the sheet.
40 for cmdlogrow in sheet.cmdlog_sheet.rows[:0:-1]:
41 if cmdlogrow.undofuncs:
42 for undofunc, args, kwargs, in cmdlogrow.undofuncs[::-1]:
43 undofunc(*args, **kwargs)
44 sheet.undone.append(cmdlogrow)
45 sheet.cmdlog_sheet.rows.remove(cmdlogrow)
46
47 vd.clearCaches() # undofunc can invalidate the drawcache
48
49 vd.moveToReplayContext(cmdlogrow, sheet)
50 vd.status("%s undone" % cmdlogrow.longname)
51 return
52
53 vd.fail("nothing to undo on current sheet")
54
55
56 @VisiData.api
57 def redo(vd, sheet):
58 sheet.undone or vd.fail("nothing to redo")
59 cmdlogrow = sheet.undone.pop()
60 vd.replayOne(cmdlogrow)
61 vd.status("%s redone" % cmdlogrow.longname)
62
63 # undoers
64 def undoAttrFunc(objs, attrname):
65 'Return closure that sets attrname on each obj to its former value.'
66 oldvals = [(o, getattr(o, attrname)) for o in objs]
67 def _undofunc():
68 for o, v in oldvals:
69 setattr(o, attrname, v)
70 return _undofunc
71
72
73 class Fanout(list):
74 'Fan out attribute changes to every element in a list.'
75 def __getattr__(self, k):
76 return Fanout([getattr(o, k) for o in self])
77
78 def __setattr__(self, k, v):
79 vd.addUndo(undoAttrFunc(self, k))
80 for o in self:
81 setattr(o, k, v)
82
83 def __call__(self, *args, **kwargs):
84 return Fanout([o(*args, **kwargs) for o in self])
85
86
87 def undoAttrCopyFunc(objs, attrname):
88 'Return closure that sets attrname on each obj to its former value.'
89 oldvals = [(o, copy(getattr(o, attrname))) for o in objs]
90 def _undofunc():
91 for o, v in oldvals:
92 setattr(o, attrname, v)
93 return _undofunc
94
95
96 @VisiData.api
97 def addUndoSetValues(vd, cols, rows):
98 'Add undo function to reset values for *rows* in *cols*.'
99 oldvals = [(c, r, c.getValue(r)) for c,r in itertools.product(cols, vd.Progress(rows, gerund='doing'))]
100 def _undo():
101 for c, r, v in oldvals:
102 c.setValue(r, v)
103 vd.addUndo(_undo)
104
105 @VisiData.api
106 def addUndoColNames(vd, cols):
107 oldnames = [(c, c.name) for c in cols]
108 def _undo():
109 for c, name in oldnames:
110 c.name = name
111 vd.addUndo(_undo)
112
113
114 BaseSheet.addCommand('U', 'undo-last', 'vd.undo(sheet)', 'Undo the most recent change (options.undo must be enabled)')
115 BaseSheet.addCommand('R', 'redo-last', 'vd.redo(sheet)', 'Redo the most recent undo (options.undo must be enabled)')
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/undo.py b/visidata/undo.py
--- a/visidata/undo.py
+++ b/visidata/undo.py
@@ -7,7 +7,7 @@
vd.option('undo', True, 'enable undo/redo')
-nonUndo = '''commit open-file'''.split()
+nonUndo = '''commit open-file reload-sheet'''.split()
def isUndoableCommand(longname):
for n in nonUndo:
| {"golden_diff": "diff --git a/visidata/undo.py b/visidata/undo.py\n--- a/visidata/undo.py\n+++ b/visidata/undo.py\n@@ -7,7 +7,7 @@\n \n vd.option('undo', True, 'enable undo/redo')\n \n-nonUndo = '''commit open-file'''.split()\n+nonUndo = '''commit open-file reload-sheet'''.split()\n \n def isUndoableCommand(longname):\n for n in nonUndo:\n", "issue": "[undo develop] undoing a reload blanks the entire sheet\nSince v2.5 undo for reload has been removed, and replaced with quitguard+confirm! However, in that case an undo should not be set.\r\n\r\nCurrent behaviour is that it blanks the sheet.\n", "before_files": [{"content": "import itertools\nfrom copy import copy\n\nfrom visidata import vd, options, VisiData, BaseSheet, UNLOADED\n\nBaseSheet.init('undone', list) # list of CommandLogRow for redo after undo\n\nvd.option('undo', True, 'enable undo/redo')\n\nnonUndo = '''commit open-file'''.split()\n\ndef isUndoableCommand(longname):\n for n in nonUndo:\n if longname.startswith(n):\n return False\n return True\n\[email protected]\ndef addUndo(vd, undofunc, *args, **kwargs):\n 'On undo of latest command, call ``undofunc(*args, **kwargs)``.'\n if options.undo:\n # occurs when VisiData is just starting up\n if getattr(vd, 'activeCommand', UNLOADED) is UNLOADED:\n return\n r = vd.modifyCommand\n # some special commands, like open-file, do not have an undofuncs set\n if not r or not isUndoableCommand(r.longname):\n return\n if not r.undofuncs:\n r.undofuncs = []\n r.undofuncs.append((undofunc, args, kwargs))\n\n\[email protected]\ndef undo(vd, sheet):\n if not options.undo:\n vd.fail(\"options.undo not enabled\")\n\n # don't allow undo of first command on a sheet, which is always the command that created the sheet.\n for cmdlogrow in sheet.cmdlog_sheet.rows[:0:-1]:\n if cmdlogrow.undofuncs:\n for undofunc, args, kwargs, in cmdlogrow.undofuncs[::-1]:\n undofunc(*args, **kwargs)\n sheet.undone.append(cmdlogrow)\n sheet.cmdlog_sheet.rows.remove(cmdlogrow)\n\n vd.clearCaches() # undofunc can invalidate the drawcache\n\n vd.moveToReplayContext(cmdlogrow, sheet)\n vd.status(\"%s undone\" % cmdlogrow.longname)\n return\n\n vd.fail(\"nothing to undo on current sheet\")\n\n\[email protected]\ndef redo(vd, sheet):\n sheet.undone or vd.fail(\"nothing to redo\")\n cmdlogrow = sheet.undone.pop()\n vd.replayOne(cmdlogrow)\n vd.status(\"%s redone\" % cmdlogrow.longname)\n\n# undoers\ndef undoAttrFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, getattr(o, attrname)) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\nclass Fanout(list):\n 'Fan out attribute changes to every element in a list.'\n def __getattr__(self, k):\n return Fanout([getattr(o, k) for o in self])\n\n def __setattr__(self, k, v):\n vd.addUndo(undoAttrFunc(self, k))\n for o in self:\n setattr(o, k, v)\n\n def __call__(self, *args, **kwargs):\n return Fanout([o(*args, **kwargs) for o in self])\n\n\ndef undoAttrCopyFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, copy(getattr(o, attrname))) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\[email protected]\ndef addUndoSetValues(vd, cols, rows):\n 'Add undo function to reset values for *rows* in *cols*.'\n oldvals = [(c, r, c.getValue(r)) for c,r in itertools.product(cols, vd.Progress(rows, gerund='doing'))]\n def _undo():\n for c, r, v in oldvals:\n c.setValue(r, v)\n 
vd.addUndo(_undo)\n\[email protected]\ndef addUndoColNames(vd, cols):\n oldnames = [(c, c.name) for c in cols]\n def _undo():\n for c, name in oldnames:\n c.name = name\n vd.addUndo(_undo)\n\n\nBaseSheet.addCommand('U', 'undo-last', 'vd.undo(sheet)', 'Undo the most recent change (options.undo must be enabled)')\nBaseSheet.addCommand('R', 'redo-last', 'vd.redo(sheet)', 'Redo the most recent undo (options.undo must be enabled)')\n", "path": "visidata/undo.py"}], "after_files": [{"content": "import itertools\nfrom copy import copy\n\nfrom visidata import vd, options, VisiData, BaseSheet, UNLOADED\n\nBaseSheet.init('undone', list) # list of CommandLogRow for redo after undo\n\nvd.option('undo', True, 'enable undo/redo')\n\nnonUndo = '''commit open-file reload-sheet'''.split()\n\ndef isUndoableCommand(longname):\n for n in nonUndo:\n if longname.startswith(n):\n return False\n return True\n\[email protected]\ndef addUndo(vd, undofunc, *args, **kwargs):\n 'On undo of latest command, call ``undofunc(*args, **kwargs)``.'\n if options.undo:\n # occurs when VisiData is just starting up\n if getattr(vd, 'activeCommand', UNLOADED) is UNLOADED:\n return\n r = vd.modifyCommand\n # some special commands, like open-file, do not have an undofuncs set\n if not r or not isUndoableCommand(r.longname):\n return\n if not r.undofuncs:\n r.undofuncs = []\n r.undofuncs.append((undofunc, args, kwargs))\n\n\[email protected]\ndef undo(vd, sheet):\n if not options.undo:\n vd.fail(\"options.undo not enabled\")\n\n # don't allow undo of first command on a sheet, which is always the command that created the sheet.\n for cmdlogrow in sheet.cmdlog_sheet.rows[:0:-1]:\n if cmdlogrow.undofuncs:\n for undofunc, args, kwargs, in cmdlogrow.undofuncs[::-1]:\n undofunc(*args, **kwargs)\n sheet.undone.append(cmdlogrow)\n sheet.cmdlog_sheet.rows.remove(cmdlogrow)\n\n vd.clearCaches() # undofunc can invalidate the drawcache\n\n vd.moveToReplayContext(cmdlogrow, sheet)\n vd.status(\"%s undone\" % cmdlogrow.longname)\n return\n\n vd.fail(\"nothing to undo on current sheet\")\n\n\[email protected]\ndef redo(vd, sheet):\n sheet.undone or vd.fail(\"nothing to redo\")\n cmdlogrow = sheet.undone.pop()\n vd.replayOne(cmdlogrow)\n vd.status(\"%s redone\" % cmdlogrow.longname)\n\n# undoers\ndef undoAttrFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, getattr(o, attrname)) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\nclass Fanout(list):\n 'Fan out attribute changes to every element in a list.'\n def __getattr__(self, k):\n return Fanout([getattr(o, k) for o in self])\n\n def __setattr__(self, k, v):\n vd.addUndo(undoAttrFunc(self, k))\n for o in self:\n setattr(o, k, v)\n\n def __call__(self, *args, **kwargs):\n return Fanout([o(*args, **kwargs) for o in self])\n\n\ndef undoAttrCopyFunc(objs, attrname):\n 'Return closure that sets attrname on each obj to its former value.'\n oldvals = [(o, copy(getattr(o, attrname))) for o in objs]\n def _undofunc():\n for o, v in oldvals:\n setattr(o, attrname, v)\n return _undofunc\n\n\[email protected]\ndef addUndoSetValues(vd, cols, rows):\n 'Add undo function to reset values for *rows* in *cols*.'\n oldvals = [(c, r, c.getValue(r)) for c,r in itertools.product(cols, vd.Progress(rows, gerund='doing'))]\n def _undo():\n for c, r, v in oldvals:\n c.setValue(r, v)\n vd.addUndo(_undo)\n\[email protected]\ndef addUndoColNames(vd, cols):\n oldnames = [(c, c.name) for c in cols]\n def 
_undo():\n for c, name in oldnames:\n c.name = name\n vd.addUndo(_undo)\n\n\nBaseSheet.addCommand('U', 'undo-last', 'vd.undo(sheet)', 'Undo the most recent change (options.undo must be enabled)')\nBaseSheet.addCommand('R', 'redo-last', 'vd.redo(sheet)', 'Redo the most recent undo (options.undo must be enabled)')\n", "path": "visidata/undo.py"}]} | 1,574 | 102 |
gh_patches_debug_22422 | rasdani/github-patches | git_diff | pypi__warehouse-1335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle Translation of the <title> tag
Currently we can't really translate the `<title>` tag because our block doesn't allow a template to add a l20n ID to it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/i18n/l20n.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import jinja2
14
15 from markupsafe import Markup as M # noqa
16
17 from warehouse.filters import tojson
18
19
20 _L20N_TEMPLATE = jinja2.Template(
21 'data-l10n-id="{{ tid }}"'
22 '{% if data %} data-l10n-args="{{ data }}"{% endif %}',
23 autoescape=True,
24 )
25
26
27 def l20n(tid, **kwargs):
28 data = tojson(kwargs) if kwargs else None
29 return M(_L20N_TEMPLATE.render(tid=tid, data=data))
30
```
Path: `warehouse/i18n/__init__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from babel.core import Locale
14
15
16 def _locale(request):
17 """
18 Computes a babel.core:Locale() object for this request.
19 """
20 return Locale.parse(request.locale_name)
21
22
23 def includeme(config):
24 # Add the request attributes
25 config.add_request_method(_locale, name="locale", reify=True)
26
27 # Register our i18n/l10n filters for Jinja2
28 filters = config.get_settings().setdefault("jinja2.filters", {})
29 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
30 filters.setdefault(
31 "format_datetime",
32 "warehouse.i18n.filters:format_datetime",
33 )
34 filters.setdefault(
35 "format_rfc822_datetime",
36 "warehouse.i18n.filters:format_rfc822_datetime",
37 )
38
39 # Register our utility functions with Jinja2
40 jglobals = config.get_settings().setdefault("jinja2.globals", {})
41 jglobals.setdefault("l20n", "warehouse.i18n.l20n:l20n")
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -35,7 +35,3 @@
"format_rfc822_datetime",
"warehouse.i18n.filters:format_rfc822_datetime",
)
-
- # Register our utility functions with Jinja2
- jglobals = config.get_settings().setdefault("jinja2.globals", {})
- jglobals.setdefault("l20n", "warehouse.i18n.l20n:l20n")
diff --git a/warehouse/i18n/l20n.py b/warehouse/i18n/l20n.py
deleted file mode 100644
--- a/warehouse/i18n/l20n.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import jinja2
-
-from markupsafe import Markup as M # noqa
-
-from warehouse.filters import tojson
-
-
-_L20N_TEMPLATE = jinja2.Template(
- 'data-l10n-id="{{ tid }}"'
- '{% if data %} data-l10n-args="{{ data }}"{% endif %}',
- autoescape=True,
-)
-
-
-def l20n(tid, **kwargs):
- data = tojson(kwargs) if kwargs else None
- return M(_L20N_TEMPLATE.render(tid=tid, data=data))
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -35,7 +35,3 @@\n \"format_rfc822_datetime\",\n \"warehouse.i18n.filters:format_rfc822_datetime\",\n )\n-\n- # Register our utility functions with Jinja2\n- jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n- jglobals.setdefault(\"l20n\", \"warehouse.i18n.l20n:l20n\")\ndiff --git a/warehouse/i18n/l20n.py b/warehouse/i18n/l20n.py\ndeleted file mode 100644\n--- a/warehouse/i18n/l20n.py\n+++ /dev/null\n@@ -1,29 +0,0 @@\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import jinja2\n-\n-from markupsafe import Markup as M # noqa\n-\n-from warehouse.filters import tojson\n-\n-\n-_L20N_TEMPLATE = jinja2.Template(\n- 'data-l10n-id=\"{{ tid }}\"'\n- '{% if data %} data-l10n-args=\"{{ data }}\"{% endif %}',\n- autoescape=True,\n-)\n-\n-\n-def l20n(tid, **kwargs):\n- data = tojson(kwargs) if kwargs else None\n- return M(_L20N_TEMPLATE.render(tid=tid, data=data))\n", "issue": "Handle Translation of the <title> tag\nCurrently we can't really translate the `<title>` tag because our block doesn't allow a template to add a l20n ID to it.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jinja2\n\nfrom markupsafe import Markup as M # noqa\n\nfrom warehouse.filters import tojson\n\n\n_L20N_TEMPLATE = jinja2.Template(\n 'data-l10n-id=\"{{ tid }}\"'\n '{% if data %} data-l10n-args=\"{{ data }}\"{% endif %}',\n autoescape=True,\n)\n\n\ndef l20n(tid, **kwargs):\n data = tojson(kwargs) if kwargs else None\n return M(_L20N_TEMPLATE.render(tid=tid, data=data))\n", "path": "warehouse/i18n/l20n.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name)\n\n\ndef includeme(config):\n # Add the request 
attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\n \"format_datetime\",\n \"warehouse.i18n.filters:format_datetime\",\n )\n filters.setdefault(\n \"format_rfc822_datetime\",\n \"warehouse.i18n.filters:format_rfc822_datetime\",\n )\n\n # Register our utility functions with Jinja2\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"l20n\", \"warehouse.i18n.l20n:l20n\")\n", "path": "warehouse/i18n/__init__.py"}], "after_files": [{"content": null, "path": "warehouse/i18n/l20n.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", \"warehouse.i18n.filters:format_date\")\n filters.setdefault(\n \"format_datetime\",\n \"warehouse.i18n.filters:format_datetime\",\n )\n filters.setdefault(\n \"format_rfc822_datetime\",\n \"warehouse.i18n.filters:format_rfc822_datetime\",\n )\n", "path": "warehouse/i18n/__init__.py"}]} | 1,040 | 477 |
gh_patches_debug_13548 | rasdani/github-patches | git_diff | sosreport__sos-2628 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[pulp] no obfuscation of proxy_password in /etc/pulp/server/plugins.conf.d/*.json files
Reproducer: `/etc/pulp/server/plugins.conf.d/iso_importer.json` with:
"proxy_password": 'my_SECRET_password',
Then `sos report` does not obfuscate this password.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sos/report/plugins/pulp.py`
Content:
```
1 # Copyright (C) 2018 Red Hat, Inc., Jake Hunsaker <[email protected]>
2
3 # This file is part of the sos project: https://github.com/sosreport/sos
4 #
5 # This copyrighted material is made available to anyone wishing to use,
6 # modify, copy, or redistribute it subject to the terms and conditions of
7 # version 2 of the GNU General Public License.
8 #
9 # See the LICENSE file in the source distribution for further information.
10
11 from sos.report.plugins import Plugin, RedHatPlugin
12 from pipes import quote
13 from re import match
14
15
16 class Pulp(Plugin, RedHatPlugin):
17
18 short_desc = 'Pulp platform'
19
20 plugin_name = "pulp"
21 packages = ("pulp-server", "pulp-katello", "python3-pulpcore")
22 files = ("/etc/pulp/settings.py",)
23 option_list = [
24 ('tasks', 'number of tasks to collect from DB queries', 'fast', 200)
25 ]
26
27 def setup(self):
28
29 # get mongo DB host and port from line like:
30 # seeds: host1:27017,host2:27017
31 # take just the very first URI and ignore possible failover
32 # if no such config is present, default to localhost:27017
33 # further, take optional user credentials - here we assume the
34 # credentials dont contain a whitespace character (that would
35 # make the parsing more difficult)
36 #
37 # further, collect location of CA file for contacting qpid in section
38 # [messaging]
39 # certfile: /etc/pki/katello/qpid_client_striped.crt
40 self.dbhost = "localhost"
41 self.dbport = "27017"
42 self.dbuser = ""
43 self.dbpassword = ""
44 self.messaging_cert_file = ""
45 in_messaging_section = False
46 try:
47 for line in open("/etc/pulp/server.conf").read().splitlines():
48 if match(r"^\s*seeds:\s+\S+:\S+", line):
49 uri = line.split()[1].split(',')[0].split(':')
50 self.dbhost = uri[0]
51 self.dbport = uri[1]
52 if match(r"\s*username:\s+\S+", line):
53 self.dbuser = "-u %s" % line.split()[1]
54 if match(r"\s*password:\s+\S+", line):
55 self.dbpassword = "-p %s" % line.split()[1]
56 if line.startswith("[messaging]"):
57 in_messaging_section = True
58 if in_messaging_section and line.startswith("certfile:"):
59 self.messaging_cert_file = line.split()[1]
60 in_messaging_section = False
61 except IOError:
62 # fallback when the cfg file is not accessible
63 pass
64
65 self.add_file_tags({
66 '/etc/default/pulp_workers': 'pulp_worker_defaults'
67 })
68
69 self.add_copy_spec([
70 "/etc/pulp/*.conf",
71 "/etc/pulp/settings.py",
72 "/etc/pulp/server/plugins.conf.d/",
73 "/etc/default/pulp*",
74 "/var/log/httpd/pulp-http.log*",
75 "/var/log/httpd/pulp-https.log*",
76 "/var/log/httpd/pulp-http_access_ssl.log*",
77 "/var/log/httpd/pulp-https_access_ssl.log*",
78 "/var/log/httpd/pulp-http_error_ssl.log*",
79 "/var/log/httpd/pulp-https_error_ssl.log*"
80 ])
81
82 num_tasks = self.get_option('tasks')
83
84 mtasks = self.build_mongo_cmd(
85 '\"DBQuery.shellBatchSize=%s;; '
86 'db.task_status.find().sort({finish_time: -1})'
87 '.pretty().shellPrint()\"' % num_tasks
88 )
89
90 mres = self.build_mongo_cmd(
91 '\"DBQuery.shellBatchSize=%s;; '
92 'db.reserved_resources.find().pretty().shellPrint()\"' % num_tasks
93 )
94
95 prun = self.build_mongo_cmd(
96 r'"DBQuery.shellBatchSize=%s;; '
97 r'db.task_status.find({state:{\$ne: \"finished\"}}).pretty()'
98 r'.shellPrint()"' % num_tasks
99 )
100
101 # prints mongo collection sizes sorted from biggest and in human
102 # readable output
103 csizes = self.build_mongo_cmd(
104 '\"function humanReadable(bytes) {'
105 ' var i = -1;'
106 ' var byteUnits = [\'kB\', \'MB\', \'GB\', \'TB\', \'PB\', '
107 ' \'EB\', \'ZB\', \'YB\'];'
108 ' do {'
109 ' bytes = bytes / 1024;'
110 ' i++;'
111 ' } while (bytes > 1024);'
112 ' return Math.max(bytes, 0.1).toFixed(1) + \' \' + byteUnits[i];'
113 '};'
114 'var collectionNames = db.getCollectionNames(), stats = [];'
115 'collectionNames.forEach(function (n) {'
116 ' stats.push(db[n].stats());'
117 ' });'
118 'stats = stats.sort(function(a, b) {'
119 ' return b[\'size\'] - a[\'size\']; });'
120 'for (var c in stats) {'
121 ' print(stats[c][\'ns\'] + \': \' +'
122 ' humanReadable(stats[c][\'size\']) + \' (\' +'
123 ' humanReadable(stats[c][\'storageSize\']) + \')\'); }\"'
124 )
125
126 dbstats = self.build_mongo_cmd('\"db.stats()\"')
127
128 self.add_cmd_output(mtasks, suggest_filename="mongo-task_status")
129 self.add_cmd_output(mres, suggest_filename="mongo-reserved_resources")
130 self.add_cmd_output(prun, suggest_filename="pulp-running_tasks")
131 self.add_cmd_output(csizes, suggest_filename="mongo-collection_sizes")
132 self.add_cmd_output(dbstats, suggest_filename="mongo-db_stats")
133 self.add_cmd_output([
134 "qpid-stat -%s --ssl-certificate=%s -b amqps://localhost:5671" %
135 (opt, self.messaging_cert_file) for opt in "quc"
136 ])
137 self.add_cmd_output(
138 "sudo -u pulp PULP_SETTINGS='/etc/pulp/settings.py' "
139 "DJANGO_SETTINGS_MODULE='pulpcore.app.settings' dynaconf list",
140 suggest_filename="dynaconf_list"
141 )
142
143 def build_mongo_cmd(self, query):
144 _cmd = "bash -c %s"
145 _mondb = "--host %s --port %s %s %s" % (self.dbhost, self.dbport,
146 self.dbuser, self.dbpassword)
147 _moncmd = "mongo pulp_database %s --eval %s"
148 return _cmd % quote(_moncmd % (_mondb, query))
149
150 def postproc(self):
151
152 # Handle all ".conf" files under /etc/pulp - note that this includes
153 # files nested at several distinct directory levels. For this reason we
154 # use a regex that matches all these path components with ".*", and
155 # ensure that the path ends with ".conf".
156 etcreg = r"(([a-z].*(passw|token|cred|secret).*)\:(\s))(.*)"
157 repl = r"\1 ********"
158 self.do_path_regex_sub(r"/etc/pulp/(.*)\.conf$", etcreg, repl)
159
160 # Now handle JSON-formatted data in the same /etc/pulp directory
161 # structure. We use a different substitution string here to preserve
162 # the file's JSON syntax.
163 jreg = r"(\s*\".*(passw|cred|token|secret).*\"\s*:\s*\")(.*)(\")"
164 repl = r"\1********\4"
165 self.do_path_regex_sub("/etc/pulp(.*)(.json$)", jreg, repl)
166
167 # obfuscate SECRET_KEY = .. and 'PASSWORD': .. in dynaconf list output
168 # and also in settings.py
169 # count with option that PASSWORD is with(out) quotes or in capitals
170 key_pass_re = r"(SECRET_KEY\s*=|(password|PASSWORD)(\"|'|:)+)\s*(\S*)"
171 repl = r"\1 ********"
172 self.do_path_regex_sub("/etc/pulp/settings.py", key_pass_re, repl)
173 self.do_cmd_output_sub("dynaconf list", key_pass_re, repl)
174
175 # vim: set et ts=4 sw=4 :
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sos/report/plugins/pulp.py b/sos/report/plugins/pulp.py
--- a/sos/report/plugins/pulp.py
+++ b/sos/report/plugins/pulp.py
@@ -160,8 +160,8 @@
# Now handle JSON-formatted data in the same /etc/pulp directory
# structure. We use a different substitution string here to preserve
# the file's JSON syntax.
- jreg = r"(\s*\".*(passw|cred|token|secret).*\"\s*:\s*\")(.*)(\")"
- repl = r"\1********\4"
+ jreg = r"(\s*\".*(passw|cred|token|secret).*\"\s*:(\s))(.*)(\w+)"
+ repl = r"\1********"
self.do_path_regex_sub("/etc/pulp(.*)(.json$)", jreg, repl)
# obfuscate SECRET_KEY = .. and 'PASSWORD': .. in dynaconf list output
| {"golden_diff": "diff --git a/sos/report/plugins/pulp.py b/sos/report/plugins/pulp.py\n--- a/sos/report/plugins/pulp.py\n+++ b/sos/report/plugins/pulp.py\n@@ -160,8 +160,8 @@\n # Now handle JSON-formatted data in the same /etc/pulp directory\n # structure. We use a different substitution string here to preserve\n # the file's JSON syntax.\n- jreg = r\"(\\s*\\\".*(passw|cred|token|secret).*\\\"\\s*:\\s*\\\")(.*)(\\\")\"\n- repl = r\"\\1********\\4\"\n+ jreg = r\"(\\s*\\\".*(passw|cred|token|secret).*\\\"\\s*:(\\s))(.*)(\\w+)\"\n+ repl = r\"\\1********\"\n self.do_path_regex_sub(\"/etc/pulp(.*)(.json$)\", jreg, repl)\n \n # obfuscate SECRET_KEY = .. and 'PASSWORD': .. in dynaconf list output\n", "issue": "[pulp] no obfuscation of proxy_password in /etc/pulp/server/plugins.conf.d/*.json files\nReproducer: `/etc/pulp/server/plugins.conf.d/iso_importer.json` with:\r\n\r\n \"proxy_password\": 'my_SECRET_password',\r\n\r\nThen `sos report` does not obfuscate this password.\n", "before_files": [{"content": "# Copyright (C) 2018 Red Hat, Inc., Jake Hunsaker <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, RedHatPlugin\nfrom pipes import quote\nfrom re import match\n\n\nclass Pulp(Plugin, RedHatPlugin):\n\n short_desc = 'Pulp platform'\n\n plugin_name = \"pulp\"\n packages = (\"pulp-server\", \"pulp-katello\", \"python3-pulpcore\")\n files = (\"/etc/pulp/settings.py\",)\n option_list = [\n ('tasks', 'number of tasks to collect from DB queries', 'fast', 200)\n ]\n\n def setup(self):\n\n # get mongo DB host and port from line like:\n # seeds: host1:27017,host2:27017\n # take just the very first URI and ignore possible failover\n # if no such config is present, default to localhost:27017\n # further, take optional user credentials - here we assume the\n # credentials dont contain a whitespace character (that would\n # make the parsing more difficult)\n #\n # further, collect location of CA file for contacting qpid in section\n # [messaging]\n # certfile: /etc/pki/katello/qpid_client_striped.crt\n self.dbhost = \"localhost\"\n self.dbport = \"27017\"\n self.dbuser = \"\"\n self.dbpassword = \"\"\n self.messaging_cert_file = \"\"\n in_messaging_section = False\n try:\n for line in open(\"/etc/pulp/server.conf\").read().splitlines():\n if match(r\"^\\s*seeds:\\s+\\S+:\\S+\", line):\n uri = line.split()[1].split(',')[0].split(':')\n self.dbhost = uri[0]\n self.dbport = uri[1]\n if match(r\"\\s*username:\\s+\\S+\", line):\n self.dbuser = \"-u %s\" % line.split()[1]\n if match(r\"\\s*password:\\s+\\S+\", line):\n self.dbpassword = \"-p %s\" % line.split()[1]\n if line.startswith(\"[messaging]\"):\n in_messaging_section = True\n if in_messaging_section and line.startswith(\"certfile:\"):\n self.messaging_cert_file = line.split()[1]\n in_messaging_section = False\n except IOError:\n # fallback when the cfg file is not accessible\n pass\n\n self.add_file_tags({\n '/etc/default/pulp_workers': 'pulp_worker_defaults'\n })\n\n self.add_copy_spec([\n \"/etc/pulp/*.conf\",\n \"/etc/pulp/settings.py\",\n \"/etc/pulp/server/plugins.conf.d/\",\n \"/etc/default/pulp*\",\n \"/var/log/httpd/pulp-http.log*\",\n \"/var/log/httpd/pulp-https.log*\",\n 
\"/var/log/httpd/pulp-http_access_ssl.log*\",\n \"/var/log/httpd/pulp-https_access_ssl.log*\",\n \"/var/log/httpd/pulp-http_error_ssl.log*\",\n \"/var/log/httpd/pulp-https_error_ssl.log*\"\n ])\n\n num_tasks = self.get_option('tasks')\n\n mtasks = self.build_mongo_cmd(\n '\\\"DBQuery.shellBatchSize=%s;; '\n 'db.task_status.find().sort({finish_time: -1})'\n '.pretty().shellPrint()\\\"' % num_tasks\n )\n\n mres = self.build_mongo_cmd(\n '\\\"DBQuery.shellBatchSize=%s;; '\n 'db.reserved_resources.find().pretty().shellPrint()\\\"' % num_tasks\n )\n\n prun = self.build_mongo_cmd(\n r'\"DBQuery.shellBatchSize=%s;; '\n r'db.task_status.find({state:{\\$ne: \\\"finished\\\"}}).pretty()'\n r'.shellPrint()\"' % num_tasks\n )\n\n # prints mongo collection sizes sorted from biggest and in human\n # readable output\n csizes = self.build_mongo_cmd(\n '\\\"function humanReadable(bytes) {'\n ' var i = -1;'\n ' var byteUnits = [\\'kB\\', \\'MB\\', \\'GB\\', \\'TB\\', \\'PB\\', '\n ' \\'EB\\', \\'ZB\\', \\'YB\\'];'\n ' do {'\n ' bytes = bytes / 1024;'\n ' i++;'\n ' } while (bytes > 1024);'\n ' return Math.max(bytes, 0.1).toFixed(1) + \\' \\' + byteUnits[i];'\n '};'\n 'var collectionNames = db.getCollectionNames(), stats = [];'\n 'collectionNames.forEach(function (n) {'\n ' stats.push(db[n].stats());'\n ' });'\n 'stats = stats.sort(function(a, b) {'\n ' return b[\\'size\\'] - a[\\'size\\']; });'\n 'for (var c in stats) {'\n ' print(stats[c][\\'ns\\'] + \\': \\' +'\n ' humanReadable(stats[c][\\'size\\']) + \\' (\\' +'\n ' humanReadable(stats[c][\\'storageSize\\']) + \\')\\'); }\\\"'\n )\n\n dbstats = self.build_mongo_cmd('\\\"db.stats()\\\"')\n\n self.add_cmd_output(mtasks, suggest_filename=\"mongo-task_status\")\n self.add_cmd_output(mres, suggest_filename=\"mongo-reserved_resources\")\n self.add_cmd_output(prun, suggest_filename=\"pulp-running_tasks\")\n self.add_cmd_output(csizes, suggest_filename=\"mongo-collection_sizes\")\n self.add_cmd_output(dbstats, suggest_filename=\"mongo-db_stats\")\n self.add_cmd_output([\n \"qpid-stat -%s --ssl-certificate=%s -b amqps://localhost:5671\" %\n (opt, self.messaging_cert_file) for opt in \"quc\"\n ])\n self.add_cmd_output(\n \"sudo -u pulp PULP_SETTINGS='/etc/pulp/settings.py' \"\n \"DJANGO_SETTINGS_MODULE='pulpcore.app.settings' dynaconf list\",\n suggest_filename=\"dynaconf_list\"\n )\n\n def build_mongo_cmd(self, query):\n _cmd = \"bash -c %s\"\n _mondb = \"--host %s --port %s %s %s\" % (self.dbhost, self.dbport,\n self.dbuser, self.dbpassword)\n _moncmd = \"mongo pulp_database %s --eval %s\"\n return _cmd % quote(_moncmd % (_mondb, query))\n\n def postproc(self):\n\n # Handle all \".conf\" files under /etc/pulp - note that this includes\n # files nested at several distinct directory levels. For this reason we\n # use a regex that matches all these path components with \".*\", and\n # ensure that the path ends with \".conf\".\n etcreg = r\"(([a-z].*(passw|token|cred|secret).*)\\:(\\s))(.*)\"\n repl = r\"\\1 ********\"\n self.do_path_regex_sub(r\"/etc/pulp/(.*)\\.conf$\", etcreg, repl)\n\n # Now handle JSON-formatted data in the same /etc/pulp directory\n # structure. We use a different substitution string here to preserve\n # the file's JSON syntax.\n jreg = r\"(\\s*\\\".*(passw|cred|token|secret).*\\\"\\s*:\\s*\\\")(.*)(\\\")\"\n repl = r\"\\1********\\4\"\n self.do_path_regex_sub(\"/etc/pulp(.*)(.json$)\", jreg, repl)\n\n # obfuscate SECRET_KEY = .. and 'PASSWORD': .. 
in dynaconf list output\n # and also in settings.py\n # count with option that PASSWORD is with(out) quotes or in capitals\n key_pass_re = r\"(SECRET_KEY\\s*=|(password|PASSWORD)(\\\"|'|:)+)\\s*(\\S*)\"\n repl = r\"\\1 ********\"\n self.do_path_regex_sub(\"/etc/pulp/settings.py\", key_pass_re, repl)\n self.do_cmd_output_sub(\"dynaconf list\", key_pass_re, repl)\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/pulp.py"}], "after_files": [{"content": "# Copyright (C) 2018 Red Hat, Inc., Jake Hunsaker <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, RedHatPlugin\nfrom pipes import quote\nfrom re import match\n\n\nclass Pulp(Plugin, RedHatPlugin):\n\n short_desc = 'Pulp platform'\n\n plugin_name = \"pulp\"\n packages = (\"pulp-server\", \"pulp-katello\", \"python3-pulpcore\")\n files = (\"/etc/pulp/settings.py\",)\n option_list = [\n ('tasks', 'number of tasks to collect from DB queries', 'fast', 200)\n ]\n\n def setup(self):\n\n # get mongo DB host and port from line like:\n # seeds: host1:27017,host2:27017\n # take just the very first URI and ignore possible failover\n # if no such config is present, default to localhost:27017\n # further, take optional user credentials - here we assume the\n # credentials dont contain a whitespace character (that would\n # make the parsing more difficult)\n #\n # further, collect location of CA file for contacting qpid in section\n # [messaging]\n # certfile: /etc/pki/katello/qpid_client_striped.crt\n self.dbhost = \"localhost\"\n self.dbport = \"27017\"\n self.dbuser = \"\"\n self.dbpassword = \"\"\n self.messaging_cert_file = \"\"\n in_messaging_section = False\n try:\n for line in open(\"/etc/pulp/server.conf\").read().splitlines():\n if match(r\"^\\s*seeds:\\s+\\S+:\\S+\", line):\n uri = line.split()[1].split(',')[0].split(':')\n self.dbhost = uri[0]\n self.dbport = uri[1]\n if match(r\"\\s*username:\\s+\\S+\", line):\n self.dbuser = \"-u %s\" % line.split()[1]\n if match(r\"\\s*password:\\s+\\S+\", line):\n self.dbpassword = \"-p %s\" % line.split()[1]\n if line.startswith(\"[messaging]\"):\n in_messaging_section = True\n if in_messaging_section and line.startswith(\"certfile:\"):\n self.messaging_cert_file = line.split()[1]\n in_messaging_section = False\n except IOError:\n # fallback when the cfg file is not accessible\n pass\n\n self.add_file_tags({\n '/etc/default/pulp_workers': 'pulp_worker_defaults'\n })\n\n self.add_copy_spec([\n \"/etc/pulp/*.conf\",\n \"/etc/pulp/settings.py\",\n \"/etc/pulp/server/plugins.conf.d/\",\n \"/etc/default/pulp*\",\n \"/var/log/httpd/pulp-http.log*\",\n \"/var/log/httpd/pulp-https.log*\",\n \"/var/log/httpd/pulp-http_access_ssl.log*\",\n \"/var/log/httpd/pulp-https_access_ssl.log*\",\n \"/var/log/httpd/pulp-http_error_ssl.log*\",\n \"/var/log/httpd/pulp-https_error_ssl.log*\"\n ])\n\n num_tasks = self.get_option('tasks')\n\n mtasks = self.build_mongo_cmd(\n '\\\"DBQuery.shellBatchSize=%s;; '\n 'db.task_status.find().sort({finish_time: -1})'\n '.pretty().shellPrint()\\\"' % num_tasks\n )\n\n mres = self.build_mongo_cmd(\n '\\\"DBQuery.shellBatchSize=%s;; '\n 'db.reserved_resources.find().pretty().shellPrint()\\\"' % num_tasks\n )\n\n prun = 
self.build_mongo_cmd(\n r'\"DBQuery.shellBatchSize=%s;; '\n r'db.task_status.find({state:{\\$ne: \\\"finished\\\"}}).pretty()'\n r'.shellPrint()\"' % num_tasks\n )\n\n # prints mongo collection sizes sorted from biggest and in human\n # readable output\n csizes = self.build_mongo_cmd(\n '\\\"function humanReadable(bytes) {'\n ' var i = -1;'\n ' var byteUnits = [\\'kB\\', \\'MB\\', \\'GB\\', \\'TB\\', \\'PB\\', '\n ' \\'EB\\', \\'ZB\\', \\'YB\\'];'\n ' do {'\n ' bytes = bytes / 1024;'\n ' i++;'\n ' } while (bytes > 1024);'\n ' return Math.max(bytes, 0.1).toFixed(1) + \\' \\' + byteUnits[i];'\n '};'\n 'var collectionNames = db.getCollectionNames(), stats = [];'\n 'collectionNames.forEach(function (n) {'\n ' stats.push(db[n].stats());'\n ' });'\n 'stats = stats.sort(function(a, b) {'\n ' return b[\\'size\\'] - a[\\'size\\']; });'\n 'for (var c in stats) {'\n ' print(stats[c][\\'ns\\'] + \\': \\' +'\n ' humanReadable(stats[c][\\'size\\']) + \\' (\\' +'\n ' humanReadable(stats[c][\\'storageSize\\']) + \\')\\'); }\\\"'\n )\n\n dbstats = self.build_mongo_cmd('\\\"db.stats()\\\"')\n\n self.add_cmd_output(mtasks, suggest_filename=\"mongo-task_status\")\n self.add_cmd_output(mres, suggest_filename=\"mongo-reserved_resources\")\n self.add_cmd_output(prun, suggest_filename=\"pulp-running_tasks\")\n self.add_cmd_output(csizes, suggest_filename=\"mongo-collection_sizes\")\n self.add_cmd_output(dbstats, suggest_filename=\"mongo-db_stats\")\n self.add_cmd_output([\n \"qpid-stat -%s --ssl-certificate=%s -b amqps://localhost:5671\" %\n (opt, self.messaging_cert_file) for opt in \"quc\"\n ])\n self.add_cmd_output(\n \"sudo -u pulp PULP_SETTINGS='/etc/pulp/settings.py' \"\n \"DJANGO_SETTINGS_MODULE='pulpcore.app.settings' dynaconf list\",\n suggest_filename=\"dynaconf_list\"\n )\n\n def build_mongo_cmd(self, query):\n _cmd = \"bash -c %s\"\n _mondb = \"--host %s --port %s %s %s\" % (self.dbhost, self.dbport,\n self.dbuser, self.dbpassword)\n _moncmd = \"mongo pulp_database %s --eval %s\"\n return _cmd % quote(_moncmd % (_mondb, query))\n\n def postproc(self):\n\n # Handle all \".conf\" files under /etc/pulp - note that this includes\n # files nested at several distinct directory levels. For this reason we\n # use a regex that matches all these path components with \".*\", and\n # ensure that the path ends with \".conf\".\n etcreg = r\"(([a-z].*(passw|token|cred|secret).*)\\:(\\s))(.*)\"\n repl = r\"\\1 ********\"\n self.do_path_regex_sub(r\"/etc/pulp/(.*)\\.conf$\", etcreg, repl)\n\n # Now handle JSON-formatted data in the same /etc/pulp directory\n # structure. We use a different substitution string here to preserve\n # the file's JSON syntax.\n jreg = r\"(\\s*\\\".*(passw|cred|token|secret).*\\\"\\s*:(\\s))(.*)(\\w+)\"\n repl = r\"\\1********\"\n self.do_path_regex_sub(\"/etc/pulp(.*)(.json$)\", jreg, repl)\n\n # obfuscate SECRET_KEY = .. and 'PASSWORD': .. in dynaconf list output\n # and also in settings.py\n # count with option that PASSWORD is with(out) quotes or in capitals\n key_pass_re = r\"(SECRET_KEY\\s*=|(password|PASSWORD)(\\\"|'|:)+)\\s*(\\S*)\"\n repl = r\"\\1 ********\"\n self.do_path_regex_sub(\"/etc/pulp/settings.py\", key_pass_re, repl)\n self.do_cmd_output_sub(\"dynaconf list\", key_pass_re, repl)\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/pulp.py"}]} | 2,637 | 223 |
gh_patches_debug_28516 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid sending typing activity when bot is invoked as skill
We should port this once the C# PR is merged.
See [parent](https://github.com/microsoft/botframework-sdk/issues/6049)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 import asyncio
4 from typing import Awaitable, Callable
5
6 from botbuilder.schema import Activity, ActivityTypes
7
8 from .middleware_set import Middleware
9 from .turn_context import TurnContext
10
11
12 class Timer:
13 clear_timer = False
14
15 def set_timeout(self, func, span):
16 async def some_fn(): # pylint: disable=function-redefined
17 await asyncio.sleep(span)
18 if not self.clear_timer:
19 await func()
20
21 asyncio.ensure_future(some_fn())
22
23 def set_clear_timer(self):
24 self.clear_timer = True
25
26
27 class ShowTypingMiddleware(Middleware):
28 """
29 When added, this middleware will send typing activities back to the user when a Message activity
30 is received to let them know that the bot has received the message and is working on the response.
31 You can specify a delay before the first typing activity is sent and then a frequency, which
32 determines how often another typing activity is sent. Typing activities will continue to be sent
33 until your bot sends another message back to the user.
34 """
35
36 def __init__(self, delay: float = 0.5, period: float = 2.0):
37 """
38 Initializes the middleware.
39
40 :param delay: Delay in seconds for the first typing indicator to be sent.
41 :param period: Delay in seconds for subsequent typing indicators.
42 """
43
44 if delay < 0:
45 raise ValueError("Delay must be greater than or equal to zero")
46
47 if period <= 0:
48 raise ValueError("Repeat period must be greater than zero")
49
50 self._delay = delay
51 self._period = period
52
53 async def on_turn(
54 self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
55 ):
56 timer = Timer()
57
58 def start_interval(context: TurnContext, delay, period):
59 async def aux():
60 typing_activity = Activity(
61 type=ActivityTypes.typing, relates_to=context.activity.relates_to,
62 )
63
64 conversation_reference = TurnContext.get_conversation_reference(
65 context.activity
66 )
67
68 typing_activity = TurnContext.apply_conversation_reference(
69 typing_activity, conversation_reference
70 )
71
72 asyncio.ensure_future(
73 context.adapter.send_activities(context, [typing_activity])
74 )
75
76 # restart the timer, with the 'period' value for the delay
77 timer.set_timeout(aux, period)
78
79 # first time through we use the 'delay' value for the timer.
80 timer.set_timeout(aux, delay)
81
82 def stop_interval():
83 timer.set_clear_timer()
84
85 # if it's a message, start sending typing activities until the
86 # bot logic is done.
87 if context.activity.type == ActivityTypes.message:
88 start_interval(context, self._delay, self._period)
89
90 # call the bot logic
91 result = await logic()
92
93 stop_interval()
94
95 return result
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py
@@ -4,7 +4,9 @@
from typing import Awaitable, Callable
from botbuilder.schema import Activity, ActivityTypes
+from botframework.connector.auth import ClaimsIdentity, SkillValidation
+from .bot_adapter import BotAdapter
from .middleware_set import Middleware
from .turn_context import TurnContext
@@ -82,9 +84,12 @@
def stop_interval():
timer.set_clear_timer()
- # if it's a message, start sending typing activities until the
- # bot logic is done.
- if context.activity.type == ActivityTypes.message:
+ # Start a timer to periodically send the typing activity
+ # (bots running as skills should not send typing activity)
+ if (
+ context.activity.type == ActivityTypes.message
+ and not ShowTypingMiddleware._is_skill_bot(context)
+ ):
start_interval(context, self._delay, self._period)
# call the bot logic
@@ -93,3 +98,10 @@
stop_interval()
return result
+
+ @staticmethod
+ def _is_skill_bot(context: TurnContext) -> bool:
+ claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)
+ return isinstance(
+ claims_identity, ClaimsIdentity
+ ) and SkillValidation.is_skill_claim(claims_identity.claims)
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py\n@@ -4,7 +4,9 @@\n from typing import Awaitable, Callable\r\n \r\n from botbuilder.schema import Activity, ActivityTypes\r\n+from botframework.connector.auth import ClaimsIdentity, SkillValidation\r\n \r\n+from .bot_adapter import BotAdapter\r\n from .middleware_set import Middleware\r\n from .turn_context import TurnContext\r\n \r\n@@ -82,9 +84,12 @@\n def stop_interval():\r\n timer.set_clear_timer()\r\n \r\n- # if it's a message, start sending typing activities until the\r\n- # bot logic is done.\r\n- if context.activity.type == ActivityTypes.message:\r\n+ # Start a timer to periodically send the typing activity\r\n+ # (bots running as skills should not send typing activity)\r\n+ if (\r\n+ context.activity.type == ActivityTypes.message\r\n+ and not ShowTypingMiddleware._is_skill_bot(context)\r\n+ ):\r\n start_interval(context, self._delay, self._period)\r\n \r\n # call the bot logic\r\n@@ -93,3 +98,10 @@\n stop_interval()\r\n \r\n return result\r\n+\r\n+ @staticmethod\r\n+ def _is_skill_bot(context: TurnContext) -> bool:\r\n+ claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)\r\n+ return isinstance(\r\n+ claims_identity, ClaimsIdentity\r\n+ ) and SkillValidation.is_skill_claim(claims_identity.claims)\n", "issue": "Avoid sending typing activity when bot is invoked as skill\nWe should port this once the C# PR is merged.\r\nSee [parent](https://github.com/microsoft/botframework-sdk/issues/6049)\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\nimport asyncio\r\nfrom typing import Awaitable, Callable\r\n\r\nfrom botbuilder.schema import Activity, ActivityTypes\r\n\r\nfrom .middleware_set import Middleware\r\nfrom .turn_context import TurnContext\r\n\r\n\r\nclass Timer:\r\n clear_timer = False\r\n\r\n def set_timeout(self, func, span):\r\n async def some_fn(): # pylint: disable=function-redefined\r\n await asyncio.sleep(span)\r\n if not self.clear_timer:\r\n await func()\r\n\r\n asyncio.ensure_future(some_fn())\r\n\r\n def set_clear_timer(self):\r\n self.clear_timer = True\r\n\r\n\r\nclass ShowTypingMiddleware(Middleware):\r\n \"\"\"\r\n When added, this middleware will send typing activities back to the user when a Message activity\r\n is received to let them know that the bot has received the message and is working on the response.\r\n You can specify a delay before the first typing activity is sent and then a frequency, which\r\n determines how often another typing activity is sent. 
Typing activities will continue to be sent\r\n until your bot sends another message back to the user.\r\n \"\"\"\r\n\r\n def __init__(self, delay: float = 0.5, period: float = 2.0):\r\n \"\"\"\r\n Initializes the middleware.\r\n\r\n :param delay: Delay in seconds for the first typing indicator to be sent.\r\n :param period: Delay in seconds for subsequent typing indicators.\r\n \"\"\"\r\n\r\n if delay < 0:\r\n raise ValueError(\"Delay must be greater than or equal to zero\")\r\n\r\n if period <= 0:\r\n raise ValueError(\"Repeat period must be greater than zero\")\r\n\r\n self._delay = delay\r\n self._period = period\r\n\r\n async def on_turn(\r\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\r\n ):\r\n timer = Timer()\r\n\r\n def start_interval(context: TurnContext, delay, period):\r\n async def aux():\r\n typing_activity = Activity(\r\n type=ActivityTypes.typing, relates_to=context.activity.relates_to,\r\n )\r\n\r\n conversation_reference = TurnContext.get_conversation_reference(\r\n context.activity\r\n )\r\n\r\n typing_activity = TurnContext.apply_conversation_reference(\r\n typing_activity, conversation_reference\r\n )\r\n\r\n asyncio.ensure_future(\r\n context.adapter.send_activities(context, [typing_activity])\r\n )\r\n\r\n # restart the timer, with the 'period' value for the delay\r\n timer.set_timeout(aux, period)\r\n\r\n # first time through we use the 'delay' value for the timer.\r\n timer.set_timeout(aux, delay)\r\n\r\n def stop_interval():\r\n timer.set_clear_timer()\r\n\r\n # if it's a message, start sending typing activities until the\r\n # bot logic is done.\r\n if context.activity.type == ActivityTypes.message:\r\n start_interval(context, self._delay, self._period)\r\n\r\n # call the bot logic\r\n result = await logic()\r\n\r\n stop_interval()\r\n\r\n return result\r\n", "path": "libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\nimport asyncio\r\nfrom typing import Awaitable, Callable\r\n\r\nfrom botbuilder.schema import Activity, ActivityTypes\r\nfrom botframework.connector.auth import ClaimsIdentity, SkillValidation\r\n\r\nfrom .bot_adapter import BotAdapter\r\nfrom .middleware_set import Middleware\r\nfrom .turn_context import TurnContext\r\n\r\n\r\nclass Timer:\r\n clear_timer = False\r\n\r\n def set_timeout(self, func, span):\r\n async def some_fn(): # pylint: disable=function-redefined\r\n await asyncio.sleep(span)\r\n if not self.clear_timer:\r\n await func()\r\n\r\n asyncio.ensure_future(some_fn())\r\n\r\n def set_clear_timer(self):\r\n self.clear_timer = True\r\n\r\n\r\nclass ShowTypingMiddleware(Middleware):\r\n \"\"\"\r\n When added, this middleware will send typing activities back to the user when a Message activity\r\n is received to let them know that the bot has received the message and is working on the response.\r\n You can specify a delay before the first typing activity is sent and then a frequency, which\r\n determines how often another typing activity is sent. 
Typing activities will continue to be sent\r\n until your bot sends another message back to the user.\r\n \"\"\"\r\n\r\n def __init__(self, delay: float = 0.5, period: float = 2.0):\r\n \"\"\"\r\n Initializes the middleware.\r\n\r\n :param delay: Delay in seconds for the first typing indicator to be sent.\r\n :param period: Delay in seconds for subsequent typing indicators.\r\n \"\"\"\r\n\r\n if delay < 0:\r\n raise ValueError(\"Delay must be greater than or equal to zero\")\r\n\r\n if period <= 0:\r\n raise ValueError(\"Repeat period must be greater than zero\")\r\n\r\n self._delay = delay\r\n self._period = period\r\n\r\n async def on_turn(\r\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\r\n ):\r\n timer = Timer()\r\n\r\n def start_interval(context: TurnContext, delay, period):\r\n async def aux():\r\n typing_activity = Activity(\r\n type=ActivityTypes.typing, relates_to=context.activity.relates_to,\r\n )\r\n\r\n conversation_reference = TurnContext.get_conversation_reference(\r\n context.activity\r\n )\r\n\r\n typing_activity = TurnContext.apply_conversation_reference(\r\n typing_activity, conversation_reference\r\n )\r\n\r\n asyncio.ensure_future(\r\n context.adapter.send_activities(context, [typing_activity])\r\n )\r\n\r\n # restart the timer, with the 'period' value for the delay\r\n timer.set_timeout(aux, period)\r\n\r\n # first time through we use the 'delay' value for the timer.\r\n timer.set_timeout(aux, delay)\r\n\r\n def stop_interval():\r\n timer.set_clear_timer()\r\n\r\n # Start a timer to periodically send the typing activity\r\n # (bots running as skills should not send typing activity)\r\n if (\r\n context.activity.type == ActivityTypes.message\r\n and not ShowTypingMiddleware._is_skill_bot(context)\r\n ):\r\n start_interval(context, self._delay, self._period)\r\n\r\n # call the bot logic\r\n result = await logic()\r\n\r\n stop_interval()\r\n\r\n return result\r\n\r\n @staticmethod\r\n def _is_skill_bot(context: TurnContext) -> bool:\r\n claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY)\r\n return isinstance(\r\n claims_identity, ClaimsIdentity\r\n ) and SkillValidation.is_skill_claim(claims_identity.claims)\r\n", "path": "libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py"}]} | 1,125 | 374 |