problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_11283 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ignoring ${} parameters from Fn::Sub checks
*cfn-lint version: 0.44.1*
*Description of issue.*
I am using the `AWS::ApiGatewayV2::Api` resource and referencing the [`RouteSelectionExpression`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-api.html#cfn-apigatewayv2-api-routeselectionexpression) property, which takes values like `${request.body.action}`. CloudFormation accepts `RouteSelectionExpression: "${request.body.action}"` as perfectly valid, but cfn-lint triggers `E1029: Found an embedded parameter "${request.body.action}" outside of an "Fn::Sub" at Resources/agwApi/Properties/RouteSelectionExpression`. I'm not sure if there's a way to escape the ${} characters that makes cfn-lint happy as well as CFN.
Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)
--- END ISSUE ---
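For context, the false positive is reproducible with the rule's own regular expression (the same pattern compiled in `SubNeeded.py` below). The trimmed `excludes` list and hand-built `path` in this sketch are illustrative assumptions, not the real rule invocation:

```python
import re

# Same pattern SubNeeded.py compiles as self.subParameterRegex (see the file below).
sub_parameter_regex = re.compile(r'(\$\{[A-Za-z0-9_:\.]+\})')

value = "${request.body.action}"  # RouteSelectionExpression value from the issue
print(re.findall(sub_parameter_regex, value))   # ['${request.body.action}'] -> E1029 fires

# The rule walks the path to each matched leaf; if any step of that path is in
# `excludes`, the match is treated as acceptable and no error is reported.
path = ['Resources', 'agwApi', 'Properties', 'RouteSelectionExpression', value]
excludes = ['UserData', 'ZipFile', 'RouteSelectionExpression']  # trimmed for illustration
print(any(step in excludes for step in path))   # True -> E1029 suppressed
```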
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNeeded.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from functools import reduce # pylint: disable=redefined-builtin
6 import re
7 import six
8 from cfnlint.rules import CloudFormationLintRule
9 from cfnlint.rules import RuleMatch
10
11
12 class SubNeeded(CloudFormationLintRule):
13 """Check if a substitution string exists without a substitution function"""
14 id = 'E1029'
15 shortdesc = 'Sub is required if a variable is used in a string'
16 description = 'If a substitution variable exists in a string but isn\'t wrapped with the Fn::Sub function the deployment will fail.'
17 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
18 tags = ['functions', 'sub']
19
20 # Free-form text properties to exclude from this rule
21 excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
22 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
23 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate']
24 api_excludes = ['Uri', 'Body', 'ConnectionId']
25
26
27 # IAM Policy has special variables that don't require !Sub, Check for these
28 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html
29 # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html
30 # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html
31 # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down
32 # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html
33 resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',
34 '${aws:TokenIssueTime}', '${aws:principaltype}',
35 '${aws:SecureTransport}', '${aws:SourceIp}',
36 '${aws:UserAgent}', '${aws:userid}',
37 '${aws:username}', '${ec2:SourceInstanceARN}',
38 '${iot:Connection.Thing.ThingName}',
39 '${iot:Connection.Thing.ThingTypeName}',
40 '${iot:Connection.Thing.IsAttached}',
41 '${iot:ClientId}', '${transfer:HomeBucket}',
42 '${transfer:HomeDirectory}', '${transfer:HomeFolder}',
43 '${transfer:UserName}', '${redshift:DbUser}',
44 '${cognito-identity.amazonaws.com:aud}',
45 '${cognito-identity.amazonaws.com:sub}',
46 '${cognito-identity.amazonaws.com:amr}']
47
48 # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html
49 condition_excludes = [
50 '${redshift:DbUser}',
51 ]
52
53 def __init__(self):
54 """Init"""
55 super(SubNeeded, self).__init__()
56 self.config_definition = {
57 'custom_excludes': {
58 'default': '',
59 'type': 'string'
60 }
61 }
62 self.configure()
63 self.subParameterRegex = re.compile(r'(\$\{[A-Za-z0-9_:\.]+\})')
64
65 def _match_values(self, cfnelem, path):
66 """Recursively search for values matching the searchRegex"""
67 values = []
68 if isinstance(cfnelem, dict):
69 for key in cfnelem:
70 pathprop = path[:]
71 pathprop.append(key)
72 values.extend(self._match_values(cfnelem[key], pathprop))
73 elif isinstance(cfnelem, list):
74 for index, item in enumerate(cfnelem):
75 pathprop = path[:]
76 pathprop.append(index)
77 values.extend(self._match_values(item, pathprop))
78 else:
79 # Leaf node
80 if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):
81 for variable in re.findall(self.subParameterRegex, cfnelem):
82 values.append(path + [variable])
83
84 return values
85
86 def match_values(self, cfn):
87 """
88 Search for values in all parts of the templates that match the searchRegex
89 """
90 results = []
91 results.extend(self._match_values(cfn.template, []))
92 # Globals are removed during a transform. They need to be checked manually
93 results.extend(self._match_values(cfn.template.get('Globals', {}), []))
94 return results
95
96 def _api_exceptions(self, value):
97 """ Key value exceptions """
98 parameter_search = re.compile(r'^\$\{stageVariables\..*\}$')
99 return re.match(parameter_search, value)
100
101 def _variable_custom_excluded(self, value):
102 """ User-defined exceptions for variables, anywhere in the file """
103 custom_excludes = self.config['custom_excludes']
104 if custom_excludes:
105 custom_search = re.compile(custom_excludes)
106 return re.match(custom_search, value)
107 return False
108
109 def match(self, cfn):
110 matches = []
111
112 # Get a list of paths to every leaf node string containing at least one ${parameter}
113 parameter_string_paths = self.match_values(cfn)
114 # We want to search all of the paths to check if each one contains an 'Fn::Sub'
115 for parameter_string_path in parameter_string_paths:
116 if parameter_string_path[0] in ['Parameters']:
117 continue
118 # Exclude the special IAM variables
119 variable = parameter_string_path[-1]
120
121 if 'Resource' in parameter_string_path:
122 if variable in self.resource_excludes:
123 continue
124 if 'NotResource' in parameter_string_path:
125 if variable in self.resource_excludes:
126 continue
127 if 'Condition' in parameter_string_path:
128 if variable in self.condition_excludes:
129 continue
130
131 # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub
132 # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html
133
134 if 'DefinitionString' in parameter_string_path:
135 modified_parameter_string_path = parameter_string_path
136 index = parameter_string_path.index('DefinitionString')
137 modified_parameter_string_path[index] = 'DefinitionSubstitutions'
138 modified_parameter_string_path = modified_parameter_string_path[:index+1]
139 modified_parameter_string_path.append(variable[2:-1])
140 if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):
141 continue
142
143 # Exclude variables that match custom exclude filters, if configured
144 # (for third-party tools that pre-process templates before uploading them to AWS)
145 if self._variable_custom_excluded(variable):
146 continue
147
148 # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)
149 if variable.startswith('${!'):
150 continue
151
152 found_sub = False
153 # Does the path contain an 'Fn::Sub'?
154 for step in parameter_string_path:
155 if step in self.api_excludes:
156 if self._api_exceptions(parameter_string_path[-1]):
157 found_sub = True
158 elif step == 'Fn::Sub' or step in self.excludes:
159 found_sub = True
160
161 # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly
162 if not found_sub:
163 # Remove the last item (the variable) to prevent multiple errors on 1 line errors
164 path = parameter_string_path[:-1]
165 message = 'Found an embedded parameter "{}" outside of an "Fn::Sub" at {}'.format(
166 variable, '/'.join(map(str, path)))
167 matches.append(RuleMatch(path, message))
168
169 return matches
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py
--- a/src/cfnlint/rules/functions/SubNeeded.py
+++ b/src/cfnlint/rules/functions/SubNeeded.py
@@ -20,7 +20,8 @@
# Free-form text properties to exclude from this rule
excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',
'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',
- 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate']
+ 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate',
+ 'RouteSelectionExpression']
api_excludes = ['Uri', 'Body', 'ConnectionId']
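A quick way to confirm the patch took effect is to check the rule's exclusion list directly. This is a hypothetical snippet, assuming the patched cfn-lint source tree is importable:

```python
from cfnlint.rules.functions.SubNeeded import SubNeeded

# `excludes` is a class attribute, so no rule or template setup is required.
assert 'RouteSelectionExpression' in SubNeeded.excludes
print("RouteSelectionExpression is excluded from E1029")
```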
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNeeded.py b/src/cfnlint/rules/functions/SubNeeded.py\n--- a/src/cfnlint/rules/functions/SubNeeded.py\n+++ b/src/cfnlint/rules/functions/SubNeeded.py\n@@ -20,7 +20,8 @@\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n- 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate']\n+ 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate',\n+ 'RouteSelectionExpression']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n", "issue": "Ignoring ${} parameters from Fn::Sub checks\n*cfn-lint version: 0.44.1*\r\n\r\n*Description of issue.* \r\nI using the `AWS::ApiGatewayV2::Api` clause and referencing the [`RouteSelectionExpression`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-api.html#cfn-apigatewayv2-api-routeselectionexpression) property which takes values like `${request.body.action}`. Cloudformation accepts `RouteSelectionExpression: \"${request.body.action}\"` as perfectly valid but cfn-lint is triggering `E1029: Found an embedded parameter \"${request.body.action}\" outside of an \"Fn::Sub\" at Resources/agwApi/Properties/RouteSelectionExpression`. I'm not sure if there's a way to escape the ${} characters which makes cfn-lint happy as well as CFN.\r\n\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n results.extend(self._match_values(cfn.template, []))\n # 
Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'NotResource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = parameter_string_path\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(variable[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(variable):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n variable, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom functools import reduce # pylint: disable=redefined-builtin\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass SubNeeded(CloudFormationLintRule):\n \"\"\"Check if a substitution string exists without a substitution function\"\"\"\n id = 'E1029'\n shortdesc = 'Sub is required if a variable is used in a string'\n description = 'If a substitution variable exists in a string but isn\\'t wrapped with the Fn::Sub function the deployment will fail.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub']\n\n # Free-form text properties to exclude from this rule\n excludes = ['UserData', 'ZipFile', 'Condition', 'AWS::CloudFormation::Init',\n 'CloudWatchAlarmDefinition', 'TopicRulePayload', 'BuildSpec',\n 'RequestMappingTemplate', 'LogFormat', 'TemplateBody', 'ResponseMappingTemplate',\n 'RouteSelectionExpression']\n api_excludes = ['Uri', 'Body', 'ConnectionId']\n\n\n # IAM Policy has special variables that don't require !Sub, Check for these\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/basic-policy-variables.html\n # https://docs.aws.amazon.com/iot/latest/developerguide/thing-policy-variables.html\n # https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down\n # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html\n resource_excludes = ['${aws:CurrentTime}', '${aws:EpochTime}',\n '${aws:TokenIssueTime}', '${aws:principaltype}',\n '${aws:SecureTransport}', '${aws:SourceIp}',\n '${aws:UserAgent}', '${aws:userid}',\n '${aws:username}', '${ec2:SourceInstanceARN}',\n '${iot:Connection.Thing.ThingName}',\n '${iot:Connection.Thing.ThingTypeName}',\n '${iot:Connection.Thing.IsAttached}',\n '${iot:ClientId}', '${transfer:HomeBucket}',\n '${transfer:HomeDirectory}', '${transfer:HomeFolder}',\n '${transfer:UserName}', '${redshift:DbUser}',\n '${cognito-identity.amazonaws.com:aud}',\n '${cognito-identity.amazonaws.com:sub}',\n '${cognito-identity.amazonaws.com:amr}']\n\n # https://docs.aws.amazon.com/redshift/latest/mgmt/redshift-iam-access-control-identity-based.html\n condition_excludes = [\n '${redshift:DbUser}',\n ]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(SubNeeded, self).__init__()\n self.config_definition = {\n 'custom_excludes': {\n 'default': '',\n 'type': 'string'\n }\n }\n self.configure()\n self.subParameterRegex = re.compile(r'(\\$\\{[A-Za-z0-9_:\\.]+\\})')\n\n def _match_values(self, cfnelem, path):\n \"\"\"Recursively search for values matching the searchRegex\"\"\"\n values = []\n if isinstance(cfnelem, dict):\n for key in cfnelem:\n pathprop = path[:]\n pathprop.append(key)\n values.extend(self._match_values(cfnelem[key], pathprop))\n elif isinstance(cfnelem, list):\n for index, item in enumerate(cfnelem):\n pathprop = path[:]\n pathprop.append(index)\n values.extend(self._match_values(item, pathprop))\n else:\n # Leaf node\n if isinstance(cfnelem, six.string_types): # and re.match(searchRegex, cfnelem):\n for variable in re.findall(self.subParameterRegex, cfnelem):\n values.append(path + [variable])\n\n return values\n\n def match_values(self, cfn):\n \"\"\"\n Search for values in all parts of the templates that match the searchRegex\n \"\"\"\n results = []\n 
results.extend(self._match_values(cfn.template, []))\n # Globals are removed during a transform. They need to be checked manually\n results.extend(self._match_values(cfn.template.get('Globals', {}), []))\n return results\n\n def _api_exceptions(self, value):\n \"\"\" Key value exceptions \"\"\"\n parameter_search = re.compile(r'^\\$\\{stageVariables\\..*\\}$')\n return re.match(parameter_search, value)\n\n def _variable_custom_excluded(self, value):\n \"\"\" User-defined exceptions for variables, anywhere in the file \"\"\"\n custom_excludes = self.config['custom_excludes']\n if custom_excludes:\n custom_search = re.compile(custom_excludes)\n return re.match(custom_search, value)\n return False\n\n def match(self, cfn):\n matches = []\n\n # Get a list of paths to every leaf node string containing at least one ${parameter}\n parameter_string_paths = self.match_values(cfn)\n # We want to search all of the paths to check if each one contains an 'Fn::Sub'\n for parameter_string_path in parameter_string_paths:\n if parameter_string_path[0] in ['Parameters']:\n continue\n # Exclude the special IAM variables\n variable = parameter_string_path[-1]\n\n if 'Resource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'NotResource' in parameter_string_path:\n if variable in self.resource_excludes:\n continue\n if 'Condition' in parameter_string_path:\n if variable in self.condition_excludes:\n continue\n\n # Step Function State Machine has a Definition Substitution that allows usage of special variables outside of a !Sub\n # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-stepfunctions-statemachine-definitionsubstitutions.html\n\n if 'DefinitionString' in parameter_string_path:\n modified_parameter_string_path = parameter_string_path\n index = parameter_string_path.index('DefinitionString')\n modified_parameter_string_path[index] = 'DefinitionSubstitutions'\n modified_parameter_string_path = modified_parameter_string_path[:index+1]\n modified_parameter_string_path.append(variable[2:-1])\n if reduce(lambda c, k: c.get(k, {}), modified_parameter_string_path, cfn.template):\n continue\n\n # Exclude variables that match custom exclude filters, if configured\n # (for third-party tools that pre-process templates before uploading them to AWS)\n if self._variable_custom_excluded(variable):\n continue\n\n # Exclude literals (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html)\n if variable.startswith('${!'):\n continue\n\n found_sub = False\n # Does the path contain an 'Fn::Sub'?\n for step in parameter_string_path:\n if step in self.api_excludes:\n if self._api_exceptions(parameter_string_path[-1]):\n found_sub = True\n elif step == 'Fn::Sub' or step in self.excludes:\n found_sub = True\n\n # If we didn't find an 'Fn::Sub' it means a string containing a ${parameter} may not be evaluated correctly\n if not found_sub:\n # Remove the last item (the variable) to prevent multiple errors on 1 line errors\n path = parameter_string_path[:-1]\n message = 'Found an embedded parameter \"{}\" outside of an \"Fn::Sub\" at {}'.format(\n variable, '/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n", "path": "src/cfnlint/rules/functions/SubNeeded.py"}]} | 2,584 | 169 |
gh_patches_debug_10770 | rasdani/github-patches | git_diff | conda__conda-build-825 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda-build 1.19.1 breaks C compilation
I am using conda build on [this recipe](https://github.com/mmp2/megaman/tree/master/conda_recipes/pyamg)
It works on 1.19.0, but on 1.19.1 fails with this message:
```
creating build/temp.macosx-10.5-x86_64-3.5
creating build/temp.macosx-10.5-x86_64-3.5/pyamg
creating build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core
compile options: '-D__STDC_FORMAT_MACROS=1 -I/Users/jakevdp/anaconda/envs/_build/lib/python3.5/site-packages/numpy/core/include -I/Users/jakevdp/anaconda/envs/_build/include/python3.5m -c'
g++: pyamg/amg_core/amg_core_wrap.cxx
g++ -bundle -undefined dynamic_lookup -L/Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -arch x86_64 build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core/amg_core_wrap.o -L/Users/jakevdp/anaconda/envs/_build/lib -o build/lib.macosx-10.5-x86_64-3.5/pyamg/amg_core/_amg_core.cpython-35m-darwin.so
g++: error: unrecognized command line option ‘-rpath’
g++: error: unrecognized command line option ‘-rpath’
g++: error: unrecognized command line option ‘-rpath’
g++: error: unrecognized command line option ‘-rpath’
error: Command "g++ -bundle -undefined dynamic_lookup -L/Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -arch x86_64 build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core/amg_core_wrap.o -L/Users/jakevdp/anaconda/envs/_build/lib -o build/lib.macosx-10.5-x86_64-3.5/pyamg/amg_core/_amg_core.cpython-35m-darwin.so" failed with exit status 1
Command failed: /bin/bash -x -e /Users/jakevdp/Opensource/megaman/conda_recipes/pyamg/build.sh
```
--- END ISSUE ---
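For orientation: on macOS these flags come from the darwin branch of `conda_build/environ.py` (shown below). Apple's clang driver understands a bare `-rpath`, but GNU g++ only forwards it to the linker when written as `-Wl,-rpath,<dir>`. A minimal sketch of the two LDFLAGS strings; the prefix is a placeholder taken from the log above:

```python
# Placeholder prefix taken from the build log above.
prefix = "/Users/jakevdp/anaconda/envs/_build"

broken_ldflags = " -rpath {0}/lib -arch x86_64".format(prefix)      # conda-build 1.19.1
fixed_ldflags = " -Wl,-rpath,{0}/lib -arch x86_64".format(prefix)   # patched behaviour

print(broken_ldflags)   # g++ rejects the bare -rpath
print(fixed_ldflags)    # -Wl, forwards the option to the linker for both clang and g++
```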
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/environ.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import join, normpath, isabs
6 from subprocess import STDOUT, check_output, CalledProcessError, Popen, PIPE
7 import multiprocessing
8 import warnings
9
10 import conda.config as cc
11
12 from conda_build.config import config
13
14 from conda_build import source
15 from conda_build.scripts import prepend_bin_path
16
17
18 def get_perl_ver():
19 return str(config.CONDA_PERL)
20
21
22 def get_py_ver():
23 return '.'.join(str(config.CONDA_PY))
24
25
26 def get_npy_ver():
27 if config.CONDA_NPY:
28 # Convert int -> string, e.g.
29 # 17 -> '1.7'
30 # 110 -> '1.10'
31 conda_npy = str(config.CONDA_NPY)
32 return conda_npy[0] + '.' + conda_npy[1:]
33 return ''
34
35
36 def get_stdlib_dir():
37 return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else
38 'lib/python%s' % get_py_ver())
39
40
41 def get_sp_dir():
42 return join(get_stdlib_dir(), 'site-packages')
43
44
45 def get_git_build_info(src_dir, git_url, expected_rev):
46 expected_rev = expected_rev or 'HEAD'
47 env = os.environ.copy()
48 d = {}
49 git_dir = join(src_dir, '.git')
50 if not os.path.exists(git_dir):
51 return d
52
53 env['GIT_DIR'] = git_dir
54 try:
55 # Verify current commit matches expected commit
56 current_commit = check_output(["git", "log", "-n1", "--format=%H"],
57 env=env, stderr=STDOUT)
58 current_commit = current_commit.decode('utf-8')
59 expected_tag_commit = check_output(["git", "log", "-n1", "--format=%H",
60 expected_rev],
61 env=env, stderr=STDOUT)
62 expected_tag_commit = expected_tag_commit.decode('utf-8')
63
64 # Verify correct remote url. Need to find the git cache directory,
65 # and check the remote from there.
66 cache_details = check_output(["git", "remote", "-v"], env=env,
67 stderr=STDOUT)
68 cache_details = cache_details.decode('utf-8')
69 cache_dir = cache_details.split('\n')[0].split()[1]
70 assert "conda-bld/git_cache" in cache_dir
71
72 env['GIT_DIR'] = cache_dir
73 remote_details = check_output(["git", "remote", "-v"], env=env,
74 stderr=STDOUT)
75 remote_details = remote_details.decode('utf-8')
76 remote_url = remote_details.split('\n')[0].split()[1]
77 if '://' not in remote_url:
78 # Local filepaths are allowed, but make sure we normalize them
79 remote_url = normpath(remote_url)
80
81 # If the current source directory in conda-bld/work doesn't match the
82 # user's metadata git_url or git_rev, then we aren't looking at the
83 # right source.
84 if remote_url != git_url or current_commit != expected_tag_commit:
85 return d
86 except CalledProcessError:
87 return d
88
89 env['GIT_DIR'] = git_dir
90
91 # grab information from describe
92 key_name = lambda a: "GIT_DESCRIBE_{}".format(a)
93 keys = [key_name("TAG"), key_name("NUMBER"), key_name("HASH")]
94 env = {str(key): str(value) for key, value in env.items()}
95 process = Popen(["git", "describe", "--tags", "--long", "HEAD"],
96 stdout=PIPE, stderr=PIPE,
97 env=env)
98 output = process.communicate()[0].strip()
99 output = output.decode('utf-8')
100 parts = output.rsplit('-', 2)
101 parts_length = len(parts)
102 if parts_length == 3:
103 d.update(dict(zip(keys, parts)))
104 # get the _full_ hash of the current HEAD
105 process = Popen(["git", "rev-parse", "HEAD"],
106 stdout=PIPE, stderr=PIPE, env=env)
107 output = process.communicate()[0].strip()
108 output = output.decode('utf-8')
109 d['GIT_FULL_HASH'] = output
110 # set up the build string
111 if key_name('NUMBER') in d and key_name('HASH') in d:
112 d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],
113 d[key_name('HASH')])
114
115 return d
116
117
118 def get_dict(m=None, prefix=None):
119 if not prefix:
120 prefix = config.build_prefix
121
122 python = config.build_python
123 d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}
124 d['CONDA_DEFAULT_ENV'] = config.build_prefix
125 d['ARCH'] = str(cc.bits)
126 d['PREFIX'] = prefix
127 d['PYTHON'] = python
128 d['PY3K'] = str(config.PY3K)
129 d['STDLIB_DIR'] = get_stdlib_dir()
130 d['SP_DIR'] = get_sp_dir()
131 d['SYS_PREFIX'] = sys.prefix
132 d['SYS_PYTHON'] = sys.executable
133 d['PERL_VER'] = get_perl_ver()
134 d['PY_VER'] = get_py_ver()
135 if get_npy_ver():
136 d['NPY_VER'] = get_npy_ver()
137 d['SRC_DIR'] = source.get_dir()
138 if "LANG" in os.environ:
139 d['LANG'] = os.environ['LANG']
140 if "HTTPS_PROXY" in os.environ:
141 d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']
142 if "HTTP_PROXY" in os.environ:
143 d['HTTP_PROXY'] = os.environ['HTTP_PROXY']
144
145 if m:
146 for var_name in m.get_value('build/script_env', []):
147 value = os.getenv(var_name)
148 if value is None:
149 warnings.warn(
150 "The environment variable '%s' is undefined." % var_name,
151 UserWarning
152 )
153 else:
154 d[var_name] = value
155
156 if sys.platform == "darwin":
157 # multiprocessing.cpu_count() is not reliable on OSX
158 # See issue #645 on github.com/conda/conda-build
159 out, err = Popen('sysctl -n hw.logicalcpu', shell=True,
160 stdout=PIPE).communicate()
161 d['CPU_COUNT'] = out.decode('utf-8').strip()
162 else:
163 try:
164 d['CPU_COUNT'] = str(multiprocessing.cpu_count())
165 except NotImplementedError:
166 d['CPU_COUNT'] = "1"
167
168 if m and m.get_value('source/git_url'):
169 git_url = m.get_value('source/git_url')
170 if '://' not in git_url:
171 # If git_url is a relative path instead of a url, convert it to an
172 # abspath
173 if not isabs(git_url):
174 git_url = join(m.path, git_url)
175 git_url = normpath(join(m.path, git_url))
176 d.update(**get_git_build_info(d['SRC_DIR'],
177 git_url,
178 m.get_value('source/git_rev')))
179
180 d['PATH'] = dict(os.environ)['PATH']
181 d = prepend_bin_path(d, prefix)
182
183 if sys.platform == 'win32':
184 # -------- Windows
185 d['SCRIPTS'] = join(prefix, 'Scripts')
186 d['LIBRARY_PREFIX'] = join(prefix, 'Library')
187 d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')
188 d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')
189 d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')
190
191 drive, tail = prefix.split(':')
192 d['CYGWIN_PREFIX'] = ''.join(['/cygdrive/', drive.lower(),
193 tail.replace('\\', '/')])
194
195 d['R'] = join(prefix, 'Scripts', 'R.exe')
196 else:
197 # -------- Unix
198 d['HOME'] = os.getenv('HOME', 'UNKNOWN')
199 d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')
200 d['R'] = join(prefix, 'bin', 'R')
201
202 # in case CFLAGS was added in the `script_env` section above
203 cflags = d.get('CFLAGS', '')
204 cxxflags = d.get('CXXFLAGS', '')
205 ldflags = d.get('LDFLAGS', '')
206
207 if sys.platform == 'darwin':
208 # -------- OSX
209 d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'
210 d['CFLAGS'] = cflags + ' -arch %(OSX_ARCH)s' % d
211 d['CXXFLAGS'] = cxxflags + ' -arch %(OSX_ARCH)s' % d
212 rpath = ' -rpath %(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.
213 d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d
214 d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
215
216 elif sys.platform.startswith('linux'):
217 # -------- Linux
218 d['LD_RUN_PATH'] = prefix + '/lib'
219 if cc.bits == 32:
220 d['CFLAGS'] = cflags + ' -m32'
221 d['CXXFLAGS'] = cxxflags + ' -m32'
222
223 if m:
224 d['PKG_NAME'] = m.name()
225 d['PKG_VERSION'] = m.version()
226 d['PKG_BUILDNUM'] = str(m.build_number())
227 d['PKG_BUILD_STRING'] = str(m.build_id())
228 d['RECIPE_DIR'] = m.path
229
230 return d
231
232
233 if __name__ == '__main__':
234 e = get_dict()
235 for k in sorted(e):
236 assert isinstance(e[k], str), k
237 print('%s=%s' % (k, e[k]))
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_build/environ.py b/conda_build/environ.py
--- a/conda_build/environ.py
+++ b/conda_build/environ.py
@@ -209,7 +209,7 @@
d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'
d['CFLAGS'] = cflags + ' -arch %(OSX_ARCH)s' % d
d['CXXFLAGS'] = cxxflags + ' -arch %(OSX_ARCH)s' % d
- rpath = ' -rpath %(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.
+ rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.
d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d
d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
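The behavioural change is small enough to check in isolation; the helper below mirrors the patched darwin branch (a simplified stand-in, not the real `get_dict()`, with a placeholder prefix):

```python
def darwin_ldflags(prefix, ldflags="", bits=64):
    # Simplified mirror of the patched darwin branch of get_dict(); not the real code path.
    osx_arch = 'i386' if bits == 32 else 'x86_64'
    rpath = ' -Wl,-rpath,%s/lib' % prefix
    return ldflags + rpath + ' -arch %s' % osx_arch

flags = darwin_ldflags("/opt/conda/envs/_build")  # placeholder prefix
assert "-Wl,-rpath," in flags and " -rpath " not in flags
print(flags)
```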
| {"golden_diff": "diff --git a/conda_build/environ.py b/conda_build/environ.py\n--- a/conda_build/environ.py\n+++ b/conda_build/environ.py\n@@ -209,7 +209,7 @@\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = cflags + ' -arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = cxxflags + ' -arch %(OSX_ARCH)s' % d\n- rpath = ' -rpath %(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n+ rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n", "issue": "conda-build 1.19.1 breaks C compilation\nI am using conda build on [this recipe](https://github.com/mmp2/megaman/tree/master/conda_recipes/pyamg)\n\nIt works on 1.19.0, but on 1.19.1 fails with this message:\n\n```\ncreating build/temp.macosx-10.5-x86_64-3.5\ncreating build/temp.macosx-10.5-x86_64-3.5/pyamg\ncreating build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core\ncompile options: '-D__STDC_FORMAT_MACROS=1 -I/Users/jakevdp/anaconda/envs/_build/lib/python3.5/site-packages/numpy/core/include -I/Users/jakevdp/anaconda/envs/_build/include/python3.5m -c'\ng++: pyamg/amg_core/amg_core_wrap.cxx\ng++ -bundle -undefined dynamic_lookup -L/Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -arch x86_64 build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core/amg_core_wrap.o -L/Users/jakevdp/anaconda/envs/_build/lib -o build/lib.macosx-10.5-x86_64-3.5/pyamg/amg_core/_amg_core.cpython-35m-darwin.so\ng++: error: unrecognized command line option \u2018-rpath\u2019\ng++: error: unrecognized command line option \u2018-rpath\u2019\ng++: error: unrecognized command line option \u00e2\u0080\u0098-rpath\u00e2\u0080\u0099\ng++: error: unrecognized command line option \u00e2\u0080\u0098-rpath\u00e2\u0080\u0099\nerror: Command \"g++ -bundle -undefined dynamic_lookup -L/Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -rpath /Users/jakevdp/anaconda/envs/_build/lib -arch x86_64 build/temp.macosx-10.5-x86_64-3.5/pyamg/amg_core/amg_core_wrap.o -L/Users/jakevdp/anaconda/envs/_build/lib -o build/lib.macosx-10.5-x86_64-3.5/pyamg/amg_core/_amg_core.cpython-35m-darwin.so\" failed with exit status 1\nCommand failed: /bin/bash -x -e /Users/jakevdp/Opensource/megaman/conda_recipes/pyamg/build.sh\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join, normpath, isabs\nfrom subprocess import STDOUT, check_output, CalledProcessError, Popen, PIPE\nimport multiprocessing\nimport warnings\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\nfrom conda_build.scripts import prepend_bin_path\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\n\ndef get_npy_ver():\n if config.CONDA_NPY:\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n conda_npy = str(config.CONDA_NPY)\n return conda_npy[0] + '.' 
+ conda_npy[1:]\n return ''\n\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\n\ndef get_git_build_info(src_dir, git_url, expected_rev):\n expected_rev = expected_rev or 'HEAD'\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if not os.path.exists(git_dir):\n return d\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit matches expected commit\n current_commit = check_output([\"git\", \"log\", \"-n1\", \"--format=%H\"],\n env=env, stderr=STDOUT)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = check_output([\"git\", \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=STDOUT)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n # Verify correct remote url. Need to find the git cache directory,\n # and check the remote from there.\n cache_details = check_output([\"git\", \"remote\", \"-v\"], env=env,\n stderr=STDOUT)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n assert \"conda-bld/git_cache\" in cache_dir\n\n env['GIT_DIR'] = cache_dir\n remote_details = check_output([\"git\", \"remote\", \"-v\"], env=env,\n stderr=STDOUT)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n if '://' not in remote_url:\n # Local filepaths are allowed, but make sure we normalize them\n remote_url = normpath(remote_url)\n\n # If the current source directory in conda-bld/work doesn't match the\n # user's metadata git_url or git_rev, then we aren't looking at the\n # right source.\n if remote_url != git_url or current_commit != expected_tag_commit:\n return d\n except CalledProcessError:\n return d\n\n env['GIT_DIR'] = git_dir\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=PIPE, stderr=PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=PIPE, stderr=PIPE, env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n if get_npy_ver():\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n 
d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, err = Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=PIPE).communicate()\n d['CPU_COUNT'] = out.decode('utf-8').strip()\n else:\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n if m and m.get_value('source/git_url'):\n git_url = m.get_value('source/git_url')\n if '://' not in git_url:\n # If git_url is a relative path instead of a url, convert it to an\n # abspath\n if not isabs(git_url):\n git_url = join(m.path, git_url)\n git_url = normpath(join(m.path, git_url))\n d.update(**get_git_build_info(d['SRC_DIR'],\n git_url,\n m.get_value('source/git_rev')))\n\n d['PATH'] = dict(os.environ)['PATH']\n d = prepend_bin_path(d, prefix)\n\n if sys.platform == 'win32':\n # -------- Windows\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n\n drive, tail = prefix.split(':')\n d['CYGWIN_PREFIX'] = ''.join(['/cygdrive/', drive.lower(),\n tail.replace('\\\\', '/')])\n\n d['R'] = join(prefix, 'Scripts', 'R.exe')\n else:\n # -------- Unix\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n # in case CFLAGS was added in the `script_env` section above\n cflags = d.get('CFLAGS', '')\n cxxflags = d.get('CXXFLAGS', '')\n ldflags = d.get('LDFLAGS', '')\n\n if sys.platform == 'darwin':\n # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = cflags + ' -arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = cxxflags + ' -arch %(OSX_ARCH)s' % d\n rpath = ' -rpath %(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'):\n # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n if cc.bits == 32:\n d['CFLAGS'] = cflags + ' -m32'\n d['CXXFLAGS'] = cxxflags + ' -m32'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['PKG_BUILD_STRING'] = str(m.build_id())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n", "path": "conda_build/environ.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join, normpath, isabs\nfrom subprocess import STDOUT, check_output, CalledProcessError, Popen, PIPE\nimport multiprocessing\nimport warnings\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\nfrom conda_build.scripts import prepend_bin_path\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\n\ndef 
get_npy_ver():\n if config.CONDA_NPY:\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n conda_npy = str(config.CONDA_NPY)\n return conda_npy[0] + '.' + conda_npy[1:]\n return ''\n\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\n\ndef get_git_build_info(src_dir, git_url, expected_rev):\n expected_rev = expected_rev or 'HEAD'\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if not os.path.exists(git_dir):\n return d\n\n env['GIT_DIR'] = git_dir\n try:\n # Verify current commit matches expected commit\n current_commit = check_output([\"git\", \"log\", \"-n1\", \"--format=%H\"],\n env=env, stderr=STDOUT)\n current_commit = current_commit.decode('utf-8')\n expected_tag_commit = check_output([\"git\", \"log\", \"-n1\", \"--format=%H\",\n expected_rev],\n env=env, stderr=STDOUT)\n expected_tag_commit = expected_tag_commit.decode('utf-8')\n\n # Verify correct remote url. Need to find the git cache directory,\n # and check the remote from there.\n cache_details = check_output([\"git\", \"remote\", \"-v\"], env=env,\n stderr=STDOUT)\n cache_details = cache_details.decode('utf-8')\n cache_dir = cache_details.split('\\n')[0].split()[1]\n assert \"conda-bld/git_cache\" in cache_dir\n\n env['GIT_DIR'] = cache_dir\n remote_details = check_output([\"git\", \"remote\", \"-v\"], env=env,\n stderr=STDOUT)\n remote_details = remote_details.decode('utf-8')\n remote_url = remote_details.split('\\n')[0].split()[1]\n if '://' not in remote_url:\n # Local filepaths are allowed, but make sure we normalize them\n remote_url = normpath(remote_url)\n\n # If the current source directory in conda-bld/work doesn't match the\n # user's metadata git_url or git_rev, then we aren't looking at the\n # right source.\n if remote_url != git_url or current_commit != expected_tag_commit:\n return d\n except CalledProcessError:\n return d\n\n env['GIT_DIR'] = git_dir\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=PIPE, stderr=PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=PIPE, stderr=PIPE, env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n if get_npy_ver():\n 
d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n warnings.warn(\n \"The environment variable '%s' is undefined.\" % var_name,\n UserWarning\n )\n else:\n d[var_name] = value\n\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, err = Popen('sysctl -n hw.logicalcpu', shell=True,\n stdout=PIPE).communicate()\n d['CPU_COUNT'] = out.decode('utf-8').strip()\n else:\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n if m and m.get_value('source/git_url'):\n git_url = m.get_value('source/git_url')\n if '://' not in git_url:\n # If git_url is a relative path instead of a url, convert it to an\n # abspath\n if not isabs(git_url):\n git_url = join(m.path, git_url)\n git_url = normpath(join(m.path, git_url))\n d.update(**get_git_build_info(d['SRC_DIR'],\n git_url,\n m.get_value('source/git_rev')))\n\n d['PATH'] = dict(os.environ)['PATH']\n d = prepend_bin_path(d, prefix)\n\n if sys.platform == 'win32':\n # -------- Windows\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n\n drive, tail = prefix.split(':')\n d['CYGWIN_PREFIX'] = ''.join(['/cygdrive/', drive.lower(),\n tail.replace('\\\\', '/')])\n\n d['R'] = join(prefix, 'Scripts', 'R.exe')\n else:\n # -------- Unix\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n # in case CFLAGS was added in the `script_env` section above\n cflags = d.get('CFLAGS', '')\n cxxflags = d.get('CXXFLAGS', '')\n ldflags = d.get('LDFLAGS', '')\n\n if sys.platform == 'darwin':\n # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = cflags + ' -arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = cxxflags + ' -arch %(OSX_ARCH)s' % d\n rpath = ' -Wl,-rpath,%(PREFIX)s/lib' % d # SIP workaround, DYLD_* no longer works.\n d['LDFLAGS'] = ldflags + rpath + ' -arch %(OSX_ARCH)s' % d\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'):\n # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n if cc.bits == 32:\n d['CFLAGS'] = cflags + ' -m32'\n d['CXXFLAGS'] = cxxflags + ' -m32'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['PKG_BUILD_STRING'] = str(m.build_id())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n", "path": "conda_build/environ.py"}]} | 3,708 | 229 |
gh_patches_debug_7600 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Deepspeed elastic launcher is incompatible with torch 2.0
**Describe the bug**
If I run `deepspeed` with `--elastic_training`, I got the following error:
```
Traceback (most recent call last):
File "./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py", line 129, in wrapper
result = f(*args, **kwargs)
File "./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py", line 723, in run
result = self._invoke_run(role)
File "./lib/python3.9/site-packages/deepspeed/elasticity/elastic_agent.py", line 126, in _invoke_run
self._initialize_workers(self._worker_group)
File "./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py", line 129, in wrapper
result = f(*args, **kwargs)
File "./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py", line 692, in _initialize_workers
self._rendezvous(worker_group)
File "./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py", line 129, in wrapper
result = f(*args, **kwargs)
File "./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py", line 556, in _rendezvous
self._set_master_addr_port(
TypeError: _set_master_addr_port() takes 3 positional arguments but 4 were given
```
The torch repository's pull request [#88922](https://github.com/pytorch/pytorch/pull/88922) introduces an additional parameter called `local_addr`. You can find the specific code changes related to this parameter in [this section](https://github.com/pytorch/pytorch/commit/5d37890b8eb952560f22b1b8ff661a024bf4b6d9#diff-0332dff4db45fd26518da1eddd4a93eb965a006de743edddba2316b64e2b1f6cR507) of the commit. However, it appears that the deepspeed project is still using the outdated method and has not incorporated this new parameter yet.
**To Reproduce**
Run any script with `--elastic_training`
**System info (please complete the following information):**
- Python 3.9
- torch 2.0.1
**Launcher context**
`deepspeed` launcher
--- END ISSUE ---
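For orientation: torch 2.0's agent now calls `_set_master_addr_port` with an extra `local_addr` argument, while the override in `deepspeed/elasticity/elastic_agent.py` (shown below) still accepts three. The sketch below is a signature-compatible override, assuming the fix is simply to accept (and optionally honour) the new argument; `_PatchedDSElasticAgent` is a placeholder name, not the merged fix:

```python
import subprocess
from contextlib import closing
from typing import Optional

from torch.distributed.elastic.agent.server.api import _get_socket_with_port


class _PatchedDSElasticAgent:
    """Hypothetical stand-in for DSElasticAgent (shown below), carrying only the
    method whose call signature changed in torch >= 2.0."""

    @staticmethod
    def _set_master_addr_port(store,
                              master_addr: Optional[str],
                              master_port: Optional[int],
                              local_addr: Optional[str] = None):
        # torch 2.0 (pytorch/pytorch#88922) passes a fourth `local_addr` argument;
        # accepting it with a default keeps older torch versions working as well.
        if master_port is None:
            sock = _get_socket_with_port()
            with closing(sock):
                master_port = sock.getsockname()[1]

        if master_addr is None:
            if local_addr:
                # Prefer the rendezvous-provided local address when torch supplies one.
                master_addr = local_addr
            else:
                result = subprocess.check_output("hostname -I", shell=True)
                master_addr = result.decode('utf-8').split()[0]

        store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
        store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
```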
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepspeed/elasticity/elastic_agent.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 from torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent
7 from typing import Any, Dict, Optional, Tuple
8 from datetime import datetime
9 from torch.distributed.elastic.agent.server.api import log, _get_socket_with_port
10 from torch.distributed.elastic.metrics import put_metric
11 from torch.distributed.elastic.agent.server.api import (
12 RunResult,
13 WorkerGroup,
14 WorkerSpec,
15 WorkerState,
16 )
17 from torch.distributed import Store
18 import time
19 import os
20 from torch.distributed.elastic.multiprocessing import start_processes
21 from torch.distributed.elastic.utils import macros
22 import shutil
23 import copy
24 from contextlib import closing
25 import subprocess
26
27
28 class DSElasticAgent(LocalElasticAgent):
29
30 def __init__(
31 self,
32 spec: WorkerSpec,
33 env: Dict,
34 start_method="spawn",
35 exit_barrier_timeout: float = 300,
36 log_dir: Optional[str] = None,
37 ):
38 super().__init__(spec, start_method, exit_barrier_timeout, log_dir)
39 self.ds_env = env
40
41 @staticmethod
42 def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):
43 if master_port is None:
44 sock = _get_socket_with_port()
45 with closing(sock):
46 master_port = sock.getsockname()[1]
47
48 if master_addr is None:
49 # master_addr = _get_fq_hostname()
50 result = subprocess.check_output("hostname -I", shell=True)
51 master_addr = result.decode('utf-8').split()[0]
52
53 store.set("MASTER_ADDR", master_addr.encode(encoding="UTF-8"))
54 store.set("MASTER_PORT", str(master_port).encode(encoding="UTF-8"))
55
56 def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:
57 spec = worker_group.spec
58 store = worker_group.store
59 assert store is not None
60 master_addr, master_port = super()._get_master_addr_port(store)
61 restart_count = spec.max_restarts - self._remaining_restarts
62
63 use_agent_store = spec.rdzv_handler.get_backend() == "static"
64
65 args: Dict[int, Tuple] = {}
66 envs: Dict[int, Dict[str, str]] = {}
67 for worker in worker_group.workers:
68 local_rank = worker.local_rank
69
70 worker_env_ds = copy.deepcopy(self.ds_env)
71 worker_env_elastic = {
72 "LOCAL_RANK": str(local_rank),
73 "RANK": str(worker.global_rank),
74 "GROUP_RANK": str(worker_group.group_rank),
75 "ROLE_RANK": str(worker.role_rank),
76 "ROLE_NAME": spec.role,
77 "LOCAL_WORLD_SIZE": str(spec.local_world_size),
78 "WORLD_SIZE": str(worker.world_size),
79 "GROUP_WORLD_SIZE": str(worker_group.group_world_size),
80 "ROLE_WORLD_SIZE": str(worker.role_world_size),
81 "MASTER_ADDR": master_addr,
82 "MASTER_PORT": str(master_port),
83 "TORCHELASTIC_RESTART_COUNT": str(restart_count),
84 "TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
85 "TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
86 "TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
87 "NCCL_ASYNC_ERROR_HANDLING": os.getenv("NCCL_ASYNC_ERROR_HANDLING", str(1)),
88 }
89 worker_env_ds.update(worker_env_elastic)
90 if "OMP_NUM_THREADS" in os.environ:
91 worker_env_ds["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
92
93 envs[local_rank] = worker_env_ds
94 worker_args = list(spec.args)
95 worker_args = macros.substitute(worker_args, str(local_rank))
96 args[local_rank] = tuple(worker_args)
97
98 # scaling events do not count towards restarts (gets same attempt #)
99 # remove existing log dir if this restart is due to a scaling event
100 attempt_log_dir = os.path.join(self._log_dir, f"attempt_{restart_count}")
101 shutil.rmtree(attempt_log_dir, ignore_errors=True)
102 os.makedirs(attempt_log_dir)
103
104 assert spec.entrypoint is not None
105 self._pcontext = start_processes(
106 name=spec.role,
107 entrypoint=spec.entrypoint,
108 args=args,
109 envs=envs,
110 log_dir=attempt_log_dir,
111 start_method=self._start_method,
112 redirects=spec.redirects,
113 tee=spec.tee,
114 )
115
116 return self._pcontext.pids()
117
118 def _invoke_run(self, role: str = "default") -> RunResult:
119 # NOTE: currently only works for a single role
120
121 spec = self._worker_group.spec
122 role = spec.role
123
124 log.info(f"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}")
125
126 self._initialize_workers(self._worker_group)
127 monitor_interval = spec.monitor_interval
128 rdzv_handler = spec.rdzv_handler
129
130 participants = rdzv_handler._state_holder.state.participants
131
132 while True:
133 assert self._worker_group.state != WorkerState.INIT
134 time.sleep(monitor_interval)
135 run_result = self._monitor_workers(self._worker_group)
136 state = run_result.state
137 self._worker_group.state = state
138
139 expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *
140 rdzv_handler._settings.keep_alive_max_attempt)
141 _dead_nodes = [
142 node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()
143 if last_heartbeat < expire_time
144 ]
145
146 put_metric(f"workers.{role}.remaining_restarts", self._remaining_restarts)
147 put_metric(f"workers.{role}.{state.name.lower()}", 1)
148
149 if state == WorkerState.SUCCEEDED:
150 log.info(f"[{role}] worker group successfully finished."
151 f" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.")
152 self._exit_barrier()
153 return run_result
154 elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED
155 } or len(participants) > len(rdzv_handler._state_holder.state.participants):
156 if self._remaining_restarts > 0:
157 log.info(f"[{role}] Worker group {state.name}. "
158 f"{self._remaining_restarts}/{spec.max_restarts} attempts left;"
159 f" will restart worker group")
160 self._remaining_restarts -= 1
161 # rdzv_handler._state_holder.state.restart = False
162 self._restart_workers(self._worker_group)
163 participants = rdzv_handler._state_holder.state.participants
164
165 else:
166 self._stop_workers(self._worker_group)
167 self._worker_group.state = WorkerState.FAILED
168 self._exit_barrier()
169 return run_result
170 elif state == WorkerState.HEALTHY:
171 # membership changes do not count as retries
172 num_nodes_waiting = rdzv_handler.num_nodes_waiting()
173 group_rank = self._worker_group.group_rank
174 if num_nodes_waiting > 0:
175 log.info(f"[{role}] Detected {num_nodes_waiting} "
176 f"new nodes from group_rank={group_rank}; "
177 f"will restart worker group")
178 self._restart_workers(self._worker_group)
179 participants = rdzv_handler._state_holder.state.participants
180 else:
181 raise Exception(f"[{role}] Worker group in {state.name} state")
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepspeed/elasticity/elastic_agent.py b/deepspeed/elasticity/elastic_agent.py
--- a/deepspeed/elasticity/elastic_agent.py
+++ b/deepspeed/elasticity/elastic_agent.py
@@ -39,7 +39,10 @@
self.ds_env = env
@staticmethod
- def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):
+ def _set_master_addr_port(store: Store,
+ master_addr: Optional[str],
+ master_port: Optional[int],
+ local_addr: Optional[str] = None):
if master_port is None:
sock = _get_socket_with_port()
with closing(sock):
| {"golden_diff": "diff --git a/deepspeed/elasticity/elastic_agent.py b/deepspeed/elasticity/elastic_agent.py\n--- a/deepspeed/elasticity/elastic_agent.py\n+++ b/deepspeed/elasticity/elastic_agent.py\n@@ -39,7 +39,10 @@\n self.ds_env = env\n \n @staticmethod\n- def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):\n+ def _set_master_addr_port(store: Store,\n+ master_addr: Optional[str],\n+ master_port: Optional[int],\n+ local_addr: Optional[str] = None):\n if master_port is None:\n sock = _get_socket_with_port()\n with closing(sock):\n", "issue": "[BUG] Deepspeed elastic launcher is incompatible with torch 2.0\n**Describe the bug**\r\n\r\nIf I run `deepspeed` with `--elastic_training`, I got the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py\", line 129, in wrapper\r\n result = f(*args, **kwargs)\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py\", line 723, in run\r\n result = self._invoke_run(role)\r\n File \"./lib/python3.9/site-packages/deepspeed/elasticity/elastic_agent.py\", line 126, in _invoke_run\r\n self._initialize_workers(self._worker_group)\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py\", line 129, in wrapper\r\n result = f(*args, **kwargs)\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py\", line 692, in _initialize_workers\r\n self._rendezvous(worker_group)\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/metrics/api.py\", line 129, in wrapper\r\n result = f(*args, **kwargs)\r\n File \"./lib/python3.9/site-packages/torch/distributed/elastic/agent/server/api.py\", line 556, in _rendezvous\r\n self._set_master_addr_port(\r\nTypeError: _set_master_addr_port() takes 3 positional arguments but 4 were given\r\n```\r\n\r\nThe torch repository's pull request [#88922](https://github.com/pytorch/pytorch/pull/88922) introduces an additional parameter called `local_addr`. You can find the specific code changes related to this parameter in [this section](https://github.com/pytorch/pytorch/commit/5d37890b8eb952560f22b1b8ff661a024bf4b6d9#diff-0332dff4db45fd26518da1eddd4a93eb965a006de743edddba2316b64e2b1f6cR507) of the commit. 
However, it appears that the deepspeed project is still using the outdated method and has not incorporated this new parameter yet.\r\n\r\n**To Reproduce**\r\n\r\nRun any script with `--elastic_training`\r\n\r\n**System info (please complete the following information):**\r\n - Python 3.9\r\n - torch 2.0.1\r\n\r\n**Launcher context**\r\n`deepspeed` launcher\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent\nfrom typing import Any, Dict, Optional, Tuple\nfrom datetime import datetime\nfrom torch.distributed.elastic.agent.server.api import log, _get_socket_with_port\nfrom torch.distributed.elastic.metrics import put_metric\nfrom torch.distributed.elastic.agent.server.api import (\n RunResult,\n WorkerGroup,\n WorkerSpec,\n WorkerState,\n)\nfrom torch.distributed import Store\nimport time\nimport os\nfrom torch.distributed.elastic.multiprocessing import start_processes\nfrom torch.distributed.elastic.utils import macros\nimport shutil\nimport copy\nfrom contextlib import closing\nimport subprocess\n\n\nclass DSElasticAgent(LocalElasticAgent):\n\n def __init__(\n self,\n spec: WorkerSpec,\n env: Dict,\n start_method=\"spawn\",\n exit_barrier_timeout: float = 300,\n log_dir: Optional[str] = None,\n ):\n super().__init__(spec, start_method, exit_barrier_timeout, log_dir)\n self.ds_env = env\n\n @staticmethod\n def _set_master_addr_port(store: Store, master_addr: Optional[str], master_port: Optional[int]):\n if master_port is None:\n sock = _get_socket_with_port()\n with closing(sock):\n master_port = sock.getsockname()[1]\n\n if master_addr is None:\n # master_addr = _get_fq_hostname()\n result = subprocess.check_output(\"hostname -I\", shell=True)\n master_addr = result.decode('utf-8').split()[0]\n\n store.set(\"MASTER_ADDR\", master_addr.encode(encoding=\"UTF-8\"))\n store.set(\"MASTER_PORT\", str(master_port).encode(encoding=\"UTF-8\"))\n\n def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:\n spec = worker_group.spec\n store = worker_group.store\n assert store is not None\n master_addr, master_port = super()._get_master_addr_port(store)\n restart_count = spec.max_restarts - self._remaining_restarts\n\n use_agent_store = spec.rdzv_handler.get_backend() == \"static\"\n\n args: Dict[int, Tuple] = {}\n envs: Dict[int, Dict[str, str]] = {}\n for worker in worker_group.workers:\n local_rank = worker.local_rank\n\n worker_env_ds = copy.deepcopy(self.ds_env)\n worker_env_elastic = {\n \"LOCAL_RANK\": str(local_rank),\n \"RANK\": str(worker.global_rank),\n \"GROUP_RANK\": str(worker_group.group_rank),\n \"ROLE_RANK\": str(worker.role_rank),\n \"ROLE_NAME\": spec.role,\n \"LOCAL_WORLD_SIZE\": str(spec.local_world_size),\n \"WORLD_SIZE\": str(worker.world_size),\n \"GROUP_WORLD_SIZE\": str(worker_group.group_world_size),\n \"ROLE_WORLD_SIZE\": str(worker.role_world_size),\n \"MASTER_ADDR\": master_addr,\n \"MASTER_PORT\": str(master_port),\n \"TORCHELASTIC_RESTART_COUNT\": str(restart_count),\n \"TORCHELASTIC_MAX_RESTARTS\": str(spec.max_restarts),\n \"TORCHELASTIC_RUN_ID\": spec.rdzv_handler.get_run_id(),\n \"TORCHELASTIC_USE_AGENT_STORE\": str(use_agent_store),\n \"NCCL_ASYNC_ERROR_HANDLING\": os.getenv(\"NCCL_ASYNC_ERROR_HANDLING\", str(1)),\n }\n worker_env_ds.update(worker_env_elastic)\n if \"OMP_NUM_THREADS\" in os.environ:\n worker_env_ds[\"OMP_NUM_THREADS\"] = os.environ[\"OMP_NUM_THREADS\"]\n\n 
envs[local_rank] = worker_env_ds\n worker_args = list(spec.args)\n worker_args = macros.substitute(worker_args, str(local_rank))\n args[local_rank] = tuple(worker_args)\n\n # scaling events do not count towards restarts (gets same attempt #)\n # remove existing log dir if this restart is due to a scaling event\n attempt_log_dir = os.path.join(self._log_dir, f\"attempt_{restart_count}\")\n shutil.rmtree(attempt_log_dir, ignore_errors=True)\n os.makedirs(attempt_log_dir)\n\n assert spec.entrypoint is not None\n self._pcontext = start_processes(\n name=spec.role,\n entrypoint=spec.entrypoint,\n args=args,\n envs=envs,\n log_dir=attempt_log_dir,\n start_method=self._start_method,\n redirects=spec.redirects,\n tee=spec.tee,\n )\n\n return self._pcontext.pids()\n\n def _invoke_run(self, role: str = \"default\") -> RunResult:\n # NOTE: currently only works for a single role\n\n spec = self._worker_group.spec\n role = spec.role\n\n log.info(f\"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}\")\n\n self._initialize_workers(self._worker_group)\n monitor_interval = spec.monitor_interval\n rdzv_handler = spec.rdzv_handler\n\n participants = rdzv_handler._state_holder.state.participants\n\n while True:\n assert self._worker_group.state != WorkerState.INIT\n time.sleep(monitor_interval)\n run_result = self._monitor_workers(self._worker_group)\n state = run_result.state\n self._worker_group.state = state\n\n expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *\n rdzv_handler._settings.keep_alive_max_attempt)\n _dead_nodes = [\n node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()\n if last_heartbeat < expire_time\n ]\n\n put_metric(f\"workers.{role}.remaining_restarts\", self._remaining_restarts)\n put_metric(f\"workers.{role}.{state.name.lower()}\", 1)\n\n if state == WorkerState.SUCCEEDED:\n log.info(f\"[{role}] worker group successfully finished.\"\n f\" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.\")\n self._exit_barrier()\n return run_result\n elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED\n } or len(participants) > len(rdzv_handler._state_holder.state.participants):\n if self._remaining_restarts > 0:\n log.info(f\"[{role}] Worker group {state.name}. 
\"\n f\"{self._remaining_restarts}/{spec.max_restarts} attempts left;\"\n f\" will restart worker group\")\n self._remaining_restarts -= 1\n # rdzv_handler._state_holder.state.restart = False\n self._restart_workers(self._worker_group)\n participants = rdzv_handler._state_holder.state.participants\n\n else:\n self._stop_workers(self._worker_group)\n self._worker_group.state = WorkerState.FAILED\n self._exit_barrier()\n return run_result\n elif state == WorkerState.HEALTHY:\n # membership changes do not count as retries\n num_nodes_waiting = rdzv_handler.num_nodes_waiting()\n group_rank = self._worker_group.group_rank\n if num_nodes_waiting > 0:\n log.info(f\"[{role}] Detected {num_nodes_waiting} \"\n f\"new nodes from group_rank={group_rank}; \"\n f\"will restart worker group\")\n self._restart_workers(self._worker_group)\n participants = rdzv_handler._state_holder.state.participants\n else:\n raise Exception(f\"[{role}] Worker group in {state.name} state\")\n", "path": "deepspeed/elasticity/elastic_agent.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom torch.distributed.elastic.agent.server.local_elastic_agent import LocalElasticAgent\nfrom typing import Any, Dict, Optional, Tuple\nfrom datetime import datetime\nfrom torch.distributed.elastic.agent.server.api import log, _get_socket_with_port\nfrom torch.distributed.elastic.metrics import put_metric\nfrom torch.distributed.elastic.agent.server.api import (\n RunResult,\n WorkerGroup,\n WorkerSpec,\n WorkerState,\n)\nfrom torch.distributed import Store\nimport time\nimport os\nfrom torch.distributed.elastic.multiprocessing import start_processes\nfrom torch.distributed.elastic.utils import macros\nimport shutil\nimport copy\nfrom contextlib import closing\nimport subprocess\n\n\nclass DSElasticAgent(LocalElasticAgent):\n\n def __init__(\n self,\n spec: WorkerSpec,\n env: Dict,\n start_method=\"spawn\",\n exit_barrier_timeout: float = 300,\n log_dir: Optional[str] = None,\n ):\n super().__init__(spec, start_method, exit_barrier_timeout, log_dir)\n self.ds_env = env\n\n @staticmethod\n def _set_master_addr_port(store: Store,\n master_addr: Optional[str],\n master_port: Optional[int],\n local_addr: Optional[str] = None):\n if master_port is None:\n sock = _get_socket_with_port()\n with closing(sock):\n master_port = sock.getsockname()[1]\n\n if master_addr is None:\n # master_addr = _get_fq_hostname()\n result = subprocess.check_output(\"hostname -I\", shell=True)\n master_addr = result.decode('utf-8').split()[0]\n\n store.set(\"MASTER_ADDR\", master_addr.encode(encoding=\"UTF-8\"))\n store.set(\"MASTER_PORT\", str(master_port).encode(encoding=\"UTF-8\"))\n\n def _start_workers(self, worker_group: WorkerGroup) -> Dict[int, Any]:\n spec = worker_group.spec\n store = worker_group.store\n assert store is not None\n master_addr, master_port = super()._get_master_addr_port(store)\n restart_count = spec.max_restarts - self._remaining_restarts\n\n use_agent_store = spec.rdzv_handler.get_backend() == \"static\"\n\n args: Dict[int, Tuple] = {}\n envs: Dict[int, Dict[str, str]] = {}\n for worker in worker_group.workers:\n local_rank = worker.local_rank\n\n worker_env_ds = copy.deepcopy(self.ds_env)\n worker_env_elastic = {\n \"LOCAL_RANK\": str(local_rank),\n \"RANK\": str(worker.global_rank),\n \"GROUP_RANK\": str(worker_group.group_rank),\n \"ROLE_RANK\": str(worker.role_rank),\n \"ROLE_NAME\": spec.role,\n \"LOCAL_WORLD_SIZE\": str(spec.local_world_size),\n 
\"WORLD_SIZE\": str(worker.world_size),\n \"GROUP_WORLD_SIZE\": str(worker_group.group_world_size),\n \"ROLE_WORLD_SIZE\": str(worker.role_world_size),\n \"MASTER_ADDR\": master_addr,\n \"MASTER_PORT\": str(master_port),\n \"TORCHELASTIC_RESTART_COUNT\": str(restart_count),\n \"TORCHELASTIC_MAX_RESTARTS\": str(spec.max_restarts),\n \"TORCHELASTIC_RUN_ID\": spec.rdzv_handler.get_run_id(),\n \"TORCHELASTIC_USE_AGENT_STORE\": str(use_agent_store),\n \"NCCL_ASYNC_ERROR_HANDLING\": os.getenv(\"NCCL_ASYNC_ERROR_HANDLING\", str(1)),\n }\n worker_env_ds.update(worker_env_elastic)\n if \"OMP_NUM_THREADS\" in os.environ:\n worker_env_ds[\"OMP_NUM_THREADS\"] = os.environ[\"OMP_NUM_THREADS\"]\n\n envs[local_rank] = worker_env_ds\n worker_args = list(spec.args)\n worker_args = macros.substitute(worker_args, str(local_rank))\n args[local_rank] = tuple(worker_args)\n\n # scaling events do not count towards restarts (gets same attempt #)\n # remove existing log dir if this restart is due to a scaling event\n attempt_log_dir = os.path.join(self._log_dir, f\"attempt_{restart_count}\")\n shutil.rmtree(attempt_log_dir, ignore_errors=True)\n os.makedirs(attempt_log_dir)\n\n assert spec.entrypoint is not None\n self._pcontext = start_processes(\n name=spec.role,\n entrypoint=spec.entrypoint,\n args=args,\n envs=envs,\n log_dir=attempt_log_dir,\n start_method=self._start_method,\n redirects=spec.redirects,\n tee=spec.tee,\n )\n\n return self._pcontext.pids()\n\n def _invoke_run(self, role: str = \"default\") -> RunResult:\n # NOTE: currently only works for a single role\n\n spec = self._worker_group.spec\n role = spec.role\n\n log.info(f\"[{role}] starting workers for entrypoint: {spec.get_entrypoint_name()}\")\n\n self._initialize_workers(self._worker_group)\n monitor_interval = spec.monitor_interval\n rdzv_handler = spec.rdzv_handler\n\n participants = rdzv_handler._state_holder.state.participants\n\n while True:\n assert self._worker_group.state != WorkerState.INIT\n time.sleep(monitor_interval)\n run_result = self._monitor_workers(self._worker_group)\n state = run_result.state\n self._worker_group.state = state\n\n expire_time = datetime.utcnow() - (rdzv_handler._settings.keep_alive_interval *\n rdzv_handler._settings.keep_alive_max_attempt)\n _dead_nodes = [\n node for node, last_heartbeat in rdzv_handler._state_holder.state.last_heartbeats.items()\n if last_heartbeat < expire_time\n ]\n\n put_metric(f\"workers.{role}.remaining_restarts\", self._remaining_restarts)\n put_metric(f\"workers.{role}.{state.name.lower()}\", 1)\n\n if state == WorkerState.SUCCEEDED:\n log.info(f\"[{role}] worker group successfully finished.\"\n f\" Waiting {self._exit_barrier_timeout} seconds for other agents to finish.\")\n self._exit_barrier()\n return run_result\n elif state in {WorkerState.UNHEALTHY, WorkerState.FAILED\n } or len(participants) > len(rdzv_handler._state_holder.state.participants):\n if self._remaining_restarts > 0:\n log.info(f\"[{role}] Worker group {state.name}. 
\"\n f\"{self._remaining_restarts}/{spec.max_restarts} attempts left;\"\n f\" will restart worker group\")\n self._remaining_restarts -= 1\n # rdzv_handler._state_holder.state.restart = False\n self._restart_workers(self._worker_group)\n participants = rdzv_handler._state_holder.state.participants\n\n else:\n self._stop_workers(self._worker_group)\n self._worker_group.state = WorkerState.FAILED\n self._exit_barrier()\n return run_result\n elif state == WorkerState.HEALTHY:\n # membership changes do not count as retries\n num_nodes_waiting = rdzv_handler.num_nodes_waiting()\n group_rank = self._worker_group.group_rank\n if num_nodes_waiting > 0:\n log.info(f\"[{role}] Detected {num_nodes_waiting} \"\n f\"new nodes from group_rank={group_rank}; \"\n f\"will restart worker group\")\n self._restart_workers(self._worker_group)\n participants = rdzv_handler._state_holder.state.participants\n else:\n raise Exception(f\"[{role}] Worker group in {state.name} state\")\n", "path": "deepspeed/elasticity/elastic_agent.py"}]} | 2,942 | 160 |
gh_patches_debug_38330 | rasdani/github-patches | git_diff | piskvorky__gensim-463 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dict.iteritems() usage breaks "keywords" function in python 3
Calling `gensim.summarization.keywords` in python3 results in `AttributeError: 'dict' object has no attribute 'iteritems'`.
Looks like there are offenders [here](https://github.com/piskvorky/gensim/blob/485d024e119d0ff818ea6651042748a3bff21081/gensim/summarization/keywords.py#L35) and [here](https://github.com/piskvorky/gensim/blob/485d024e119d0ff818ea6651042748a3bff21081/gensim/summarization/keywords.py#L114). I assume these should be fixed with six.iteritems?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/summarization/keywords.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
5
6 from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
7 from gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word
8 from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
9 from gensim.summarization.commons import build_graph as _build_graph
10 from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
11 from itertools import combinations as _combinations
12 from six.moves.queue import Queue as _Queue
13 from six.moves import xrange
14
15
16 WINDOW_SIZE = 2
17
18 """Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters
19 Example: filter for nouns and adjectives:
20 INCLUDING_FILTER = ['NN', 'JJ']"""
21 INCLUDING_FILTER = ['NN', 'JJ']
22 EXCLUDING_FILTER = []
23
24
25 def _get_pos_filters():
26 return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)
27
28
29 def _get_words_for_graph(tokens):
30 include_filters, exclude_filters = _get_pos_filters()
31 if include_filters and exclude_filters:
32 raise ValueError("Can't use both include and exclude filters, should use only one")
33
34 result = []
35 for word, unit in tokens.iteritems():
36 if exclude_filters and unit.tag in exclude_filters:
37 continue
38 if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:
39 result.append(unit.token)
40 return result
41
42
43 def _get_first_window(split_text):
44 return split_text[:WINDOW_SIZE]
45
46
47 def _set_graph_edge(graph, tokens, word_a, word_b):
48 if word_a in tokens and word_b in tokens:
49 lemma_a = tokens[word_a].token
50 lemma_b = tokens[word_b].token
51 edge = (lemma_a, lemma_b)
52
53 if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):
54 graph.add_edge(edge)
55
56
57 def _process_first_window(graph, tokens, split_text):
58 first_window = _get_first_window(split_text)
59 for word_a, word_b in _combinations(first_window, 2):
60 _set_graph_edge(graph, tokens, word_a, word_b)
61
62
63 def _init_queue(split_text):
64 queue = _Queue()
65 first_window = _get_first_window(split_text)
66 for word in first_window[1:]:
67 queue.put(word)
68 return queue
69
70
71 def _process_word(graph, tokens, queue, word):
72 for word_to_compare in _queue_iterator(queue):
73 _set_graph_edge(graph, tokens, word, word_to_compare)
74
75
76 def _update_queue(queue, word):
77 queue.get()
78 queue.put(word)
79 assert queue.qsize() == (WINDOW_SIZE - 1)
80
81
82 def _process_text(graph, tokens, split_text):
83 queue = _init_queue(split_text)
84 for i in xrange(WINDOW_SIZE, len(split_text)):
85 word = split_text[i]
86 _process_word(graph, tokens, queue, word)
87 _update_queue(queue, word)
88
89
90 def _queue_iterator(queue):
91 iterations = queue.qsize()
92 for i in xrange(iterations):
93 var = queue.get()
94 yield var
95 queue.put(var)
96
97
98 def _set_graph_edges(graph, tokens, split_text):
99 _process_first_window(graph, tokens, split_text)
100 _process_text(graph, tokens, split_text)
101
102
103 def _extract_tokens(lemmas, scores, ratio, words):
104 lemmas.sort(key=lambda s: scores[s], reverse=True)
105
106 # If no "words" option is selected, the number of sentences is
107 # reduced by the provided ratio, else, the ratio is ignored.
108 length = len(lemmas) * ratio if words is None else words
109 return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
110
111
112 def _lemmas_to_words(tokens):
113 lemma_to_word = {}
114 for word, unit in tokens.iteritems():
115 lemma = unit.token
116 if lemma in lemma_to_word:
117 lemma_to_word[lemma].append(word)
118 else:
119 lemma_to_word[lemma] = [word]
120 return lemma_to_word
121
122
123 def _get_keywords_with_score(extracted_lemmas, lemma_to_word):
124 """
125 :param extracted_lemmas:list of tuples
126 :param lemma_to_word: dict of {lemma:list of words}
127 :return: dict of {keyword:score}
128 """
129 keywords = {}
130 for score, lemma in extracted_lemmas:
131 keyword_list = lemma_to_word[lemma]
132 for keyword in keyword_list:
133 keywords[keyword] = score
134 return keywords
135
136
137 def _strip_word(word):
138 stripped_word_list = list(_tokenize_by_word(word))
139 return stripped_word_list[0] if stripped_word_list else ""
140
141
142 def _get_combined_keywords(_keywords, split_text):
143 """
144 :param keywords:dict of keywords:scores
145 :param split_text: list of strings
146 :return: combined_keywords:list
147 """
148 result = []
149 _keywords = _keywords.copy()
150 len_text = len(split_text)
151 for i in xrange(len_text):
152 word = _strip_word(split_text[i])
153 if word in _keywords:
154 combined_word = [word]
155 if i + 1 == len_text:
156 result.append(word) # appends last word if keyword and doesn't iterate
157 for j in xrange(i + 1, len_text):
158 other_word = _strip_word(split_text[j])
159 if other_word in _keywords and other_word == split_text[j].decode("utf-8"):
160 combined_word.append(other_word)
161 else:
162 for keyword in combined_word:
163 _keywords.pop(keyword)
164 result.append(" ".join(combined_word))
165 break
166 return result
167
168
169 def _get_average_score(concept, _keywords):
170 word_list = concept.split()
171 word_counter = 0
172 total = 0
173 for word in word_list:
174 total += _keywords[word]
175 word_counter += 1
176 return total / word_counter
177
178
179 def _format_results(_keywords, combined_keywords, split, scores):
180 """
181 :param keywords:dict of keywords:scores
182 :param combined_keywords:list of word/s
183 """
184 combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)
185 if scores:
186 return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]
187 if split:
188 return combined_keywords
189 return "\n".join(combined_keywords)
190
191
192 def keywords(text, ratio=0.2, words=None, split=False, scores=False):
193 # Gets a dict of word -> lemma
194 tokens = _clean_text_by_word(text)
195 split_text = list(_tokenize_by_word(text))
196
197 # Creates the graph and adds the edges
198 graph = _build_graph(_get_words_for_graph(tokens))
199 _set_graph_edges(graph, tokens, split_text)
200 del split_text # It's no longer used
201
202 _remove_unreachable_nodes(graph)
203
204 # Ranks the tokens using the PageRank algorithm. Returns dict of lemma -> score
205 pagerank_scores = _pagerank(graph)
206
207 extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)
208
209 lemmas_to_word = _lemmas_to_words(tokens)
210 keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)
211
212 # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined
213 combined_keywords = _get_combined_keywords(keywords, text.split())
214
215 return _format_results(keywords, combined_keywords, split, scores)
216
217
218 def get_graph(text):
219 tokens = _clean_text_by_word(text)
220 split_text = list(_tokenize_by_word(text))
221
222 graph = _build_graph(_get_words_for_graph(tokens))
223 _set_graph_edges(graph, tokens, split_text)
224
225 return graph
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/summarization/keywords.py b/gensim/summarization/keywords.py
--- a/gensim/summarization/keywords.py
+++ b/gensim/summarization/keywords.py
@@ -8,9 +8,11 @@
from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word
from gensim.summarization.commons import build_graph as _build_graph
from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
+from gensim.utils import to_unicode
from itertools import combinations as _combinations
from six.moves.queue import Queue as _Queue
from six.moves import xrange
+from six import iteritems
WINDOW_SIZE = 2
@@ -32,7 +34,7 @@
raise ValueError("Can't use both include and exclude filters, should use only one")
result = []
- for word, unit in tokens.iteritems():
+ for word, unit in iteritems(tokens):
if exclude_filters and unit.tag in exclude_filters:
continue
if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:
@@ -111,7 +113,7 @@
def _lemmas_to_words(tokens):
lemma_to_word = {}
- for word, unit in tokens.iteritems():
+ for word, unit in iteritems(tokens):
lemma = unit.token
if lemma in lemma_to_word:
lemma_to_word[lemma].append(word)
@@ -156,7 +158,7 @@
result.append(word) # appends last word if keyword and doesn't iterate
for j in xrange(i + 1, len_text):
other_word = _strip_word(split_text[j])
- if other_word in _keywords and other_word == split_text[j].decode("utf-8"):
+ if other_word in _keywords and other_word == split_text[j]:
combined_word.append(other_word)
else:
for keyword in combined_word:
@@ -191,6 +193,7 @@
def keywords(text, ratio=0.2, words=None, split=False, scores=False):
# Gets a dict of word -> lemma
+ text = to_unicode(text)
tokens = _clean_text_by_word(text)
split_text = list(_tokenize_by_word(text))
| {"golden_diff": "diff --git a/gensim/summarization/keywords.py b/gensim/summarization/keywords.py\n--- a/gensim/summarization/keywords.py\n+++ b/gensim/summarization/keywords.py\n@@ -8,9 +8,11 @@\n from gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\n from gensim.summarization.commons import build_graph as _build_graph\n from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\n+from gensim.utils import to_unicode\n from itertools import combinations as _combinations\n from six.moves.queue import Queue as _Queue\n from six.moves import xrange\n+from six import iteritems\n \n \n WINDOW_SIZE = 2\n@@ -32,7 +34,7 @@\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n \n result = []\n- for word, unit in tokens.iteritems():\n+ for word, unit in iteritems(tokens):\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n@@ -111,7 +113,7 @@\n \n def _lemmas_to_words(tokens):\n lemma_to_word = {}\n- for word, unit in tokens.iteritems():\n+ for word, unit in iteritems(tokens):\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n@@ -156,7 +158,7 @@\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n- if other_word in _keywords and other_word == split_text[j].decode(\"utf-8\"):\n+ if other_word in _keywords and other_word == split_text[j]:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n@@ -191,6 +193,7 @@\n \n def keywords(text, ratio=0.2, words=None, split=False, scores=False):\n # Gets a dict of word -> lemma\n+ text = to_unicode(text)\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n", "issue": "dict.iteritems() usage breaks \"keywords\" function in python 3\nCalling `gensim.summarization.keywords` in python3 results in `AttributeError: 'dict' object has no attribute 'iteritems'`.\n\nLooks like there are offenders [here](https://github.com/piskvorky/gensim/blob/485d024e119d0ff818ea6651042748a3bff21081/gensim/summarization/keywords.py#L35) and [here](https://github.com/piskvorky/gensim/blob/485d024e119d0ff818ea6651042748a3bff21081/gensim/summarization/keywords.py#L114). 
I assume these should be fixed with six.iteritems?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom itertools import combinations as _combinations\nfrom six.moves.queue import Queue as _Queue\nfrom six.moves import xrange\n\n\nWINDOW_SIZE = 2\n\n\"\"\"Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters\nExample: filter for nouns and adjectives:\nINCLUDING_FILTER = ['NN', 'JJ']\"\"\"\nINCLUDING_FILTER = ['NN', 'JJ']\nEXCLUDING_FILTER = []\n\n\ndef _get_pos_filters():\n return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)\n\n\ndef _get_words_for_graph(tokens):\n include_filters, exclude_filters = _get_pos_filters()\n if include_filters and exclude_filters:\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n\n result = []\n for word, unit in tokens.iteritems():\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n result.append(unit.token)\n return result\n\n\ndef _get_first_window(split_text):\n return split_text[:WINDOW_SIZE]\n\n\ndef _set_graph_edge(graph, tokens, word_a, word_b):\n if word_a in tokens and word_b in tokens:\n lemma_a = tokens[word_a].token\n lemma_b = tokens[word_b].token\n edge = (lemma_a, lemma_b)\n\n if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):\n graph.add_edge(edge)\n\n\ndef _process_first_window(graph, tokens, split_text):\n first_window = _get_first_window(split_text)\n for word_a, word_b in _combinations(first_window, 2):\n _set_graph_edge(graph, tokens, word_a, word_b)\n\n\ndef _init_queue(split_text):\n queue = _Queue()\n first_window = _get_first_window(split_text)\n for word in first_window[1:]:\n queue.put(word)\n return queue\n\n\ndef _process_word(graph, tokens, queue, word):\n for word_to_compare in _queue_iterator(queue):\n _set_graph_edge(graph, tokens, word, word_to_compare)\n\n\ndef _update_queue(queue, word):\n queue.get()\n queue.put(word)\n assert queue.qsize() == (WINDOW_SIZE - 1)\n\n\ndef _process_text(graph, tokens, split_text):\n queue = _init_queue(split_text)\n for i in xrange(WINDOW_SIZE, len(split_text)):\n word = split_text[i]\n _process_word(graph, tokens, queue, word)\n _update_queue(queue, word)\n\n\ndef _queue_iterator(queue):\n iterations = queue.qsize()\n for i in xrange(iterations):\n var = queue.get()\n yield var\n queue.put(var)\n\n\ndef _set_graph_edges(graph, tokens, split_text):\n _process_first_window(graph, tokens, split_text)\n _process_text(graph, tokens, split_text)\n\n\ndef _extract_tokens(lemmas, scores, ratio, words):\n lemmas.sort(key=lambda s: scores[s], reverse=True)\n\n # If no \"words\" option is selected, the number of sentences is\n # reduced by the provided ratio, else, the ratio is ignored.\n length = len(lemmas) * ratio if words is None else words\n return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]\n\n\ndef _lemmas_to_words(tokens):\n 
lemma_to_word = {}\n for word, unit in tokens.iteritems():\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n else:\n lemma_to_word[lemma] = [word]\n return lemma_to_word\n\n\ndef _get_keywords_with_score(extracted_lemmas, lemma_to_word):\n \"\"\"\n :param extracted_lemmas:list of tuples\n :param lemma_to_word: dict of {lemma:list of words}\n :return: dict of {keyword:score}\n \"\"\"\n keywords = {}\n for score, lemma in extracted_lemmas:\n keyword_list = lemma_to_word[lemma]\n for keyword in keyword_list:\n keywords[keyword] = score\n return keywords\n\n\ndef _strip_word(word):\n stripped_word_list = list(_tokenize_by_word(word))\n return stripped_word_list[0] if stripped_word_list else \"\"\n\n\ndef _get_combined_keywords(_keywords, split_text):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param split_text: list of strings\n :return: combined_keywords:list\n \"\"\"\n result = []\n _keywords = _keywords.copy()\n len_text = len(split_text)\n for i in xrange(len_text):\n word = _strip_word(split_text[i])\n if word in _keywords:\n combined_word = [word]\n if i + 1 == len_text:\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n if other_word in _keywords and other_word == split_text[j].decode(\"utf-8\"):\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n _keywords.pop(keyword)\n result.append(\" \".join(combined_word))\n break\n return result\n\n\ndef _get_average_score(concept, _keywords):\n word_list = concept.split()\n word_counter = 0\n total = 0\n for word in word_list:\n total += _keywords[word]\n word_counter += 1\n return total / word_counter\n\n\ndef _format_results(_keywords, combined_keywords, split, scores):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param combined_keywords:list of word/s\n \"\"\"\n combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)\n if scores:\n return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]\n if split:\n return combined_keywords\n return \"\\n\".join(combined_keywords)\n\n\ndef keywords(text, ratio=0.2, words=None, split=False, scores=False):\n # Gets a dict of word -> lemma\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n # Creates the graph and adds the edges\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n del split_text # It's no longer used\n\n _remove_unreachable_nodes(graph)\n\n # Ranks the tokens using the PageRank algorithm. 
Returns dict of lemma -> score\n pagerank_scores = _pagerank(graph)\n\n extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)\n\n lemmas_to_word = _lemmas_to_words(tokens)\n keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)\n\n # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined\n combined_keywords = _get_combined_keywords(keywords, text.split())\n\n return _format_results(keywords, combined_keywords, split, scores)\n\n\ndef get_graph(text):\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n\n return graph\n", "path": "gensim/summarization/keywords.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_word as _clean_text_by_word\nfrom gensim.summarization.textcleaner import tokenize_by_word as _tokenize_by_word\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.utils import to_unicode\nfrom itertools import combinations as _combinations\nfrom six.moves.queue import Queue as _Queue\nfrom six.moves import xrange\nfrom six import iteritems\n\n\nWINDOW_SIZE = 2\n\n\"\"\"Check tags in http://www.clips.ua.ac.be/pages/mbsp-tags and use only first two letters\nExample: filter for nouns and adjectives:\nINCLUDING_FILTER = ['NN', 'JJ']\"\"\"\nINCLUDING_FILTER = ['NN', 'JJ']\nEXCLUDING_FILTER = []\n\n\ndef _get_pos_filters():\n return frozenset(INCLUDING_FILTER), frozenset(EXCLUDING_FILTER)\n\n\ndef _get_words_for_graph(tokens):\n include_filters, exclude_filters = _get_pos_filters()\n if include_filters and exclude_filters:\n raise ValueError(\"Can't use both include and exclude filters, should use only one\")\n\n result = []\n for word, unit in iteritems(tokens):\n if exclude_filters and unit.tag in exclude_filters:\n continue\n if (include_filters and unit.tag in include_filters) or not include_filters or not unit.tag:\n result.append(unit.token)\n return result\n\n\ndef _get_first_window(split_text):\n return split_text[:WINDOW_SIZE]\n\n\ndef _set_graph_edge(graph, tokens, word_a, word_b):\n if word_a in tokens and word_b in tokens:\n lemma_a = tokens[word_a].token\n lemma_b = tokens[word_b].token\n edge = (lemma_a, lemma_b)\n\n if graph.has_node(lemma_a) and graph.has_node(lemma_b) and not graph.has_edge(edge):\n graph.add_edge(edge)\n\n\ndef _process_first_window(graph, tokens, split_text):\n first_window = _get_first_window(split_text)\n for word_a, word_b in _combinations(first_window, 2):\n _set_graph_edge(graph, tokens, word_a, word_b)\n\n\ndef _init_queue(split_text):\n queue = _Queue()\n first_window = _get_first_window(split_text)\n for word in first_window[1:]:\n queue.put(word)\n return queue\n\n\ndef _process_word(graph, tokens, queue, word):\n for word_to_compare in _queue_iterator(queue):\n _set_graph_edge(graph, tokens, word, word_to_compare)\n\n\ndef _update_queue(queue, word):\n queue.get()\n queue.put(word)\n assert queue.qsize() == (WINDOW_SIZE - 1)\n\n\ndef _process_text(graph, tokens, split_text):\n queue = _init_queue(split_text)\n for i in xrange(WINDOW_SIZE, len(split_text)):\n 
word = split_text[i]\n _process_word(graph, tokens, queue, word)\n _update_queue(queue, word)\n\n\ndef _queue_iterator(queue):\n iterations = queue.qsize()\n for i in xrange(iterations):\n var = queue.get()\n yield var\n queue.put(var)\n\n\ndef _set_graph_edges(graph, tokens, split_text):\n _process_first_window(graph, tokens, split_text)\n _process_text(graph, tokens, split_text)\n\n\ndef _extract_tokens(lemmas, scores, ratio, words):\n lemmas.sort(key=lambda s: scores[s], reverse=True)\n\n # If no \"words\" option is selected, the number of sentences is\n # reduced by the provided ratio, else, the ratio is ignored.\n length = len(lemmas) * ratio if words is None else words\n return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]\n\n\ndef _lemmas_to_words(tokens):\n lemma_to_word = {}\n for word, unit in iteritems(tokens):\n lemma = unit.token\n if lemma in lemma_to_word:\n lemma_to_word[lemma].append(word)\n else:\n lemma_to_word[lemma] = [word]\n return lemma_to_word\n\n\ndef _get_keywords_with_score(extracted_lemmas, lemma_to_word):\n \"\"\"\n :param extracted_lemmas:list of tuples\n :param lemma_to_word: dict of {lemma:list of words}\n :return: dict of {keyword:score}\n \"\"\"\n keywords = {}\n for score, lemma in extracted_lemmas:\n keyword_list = lemma_to_word[lemma]\n for keyword in keyword_list:\n keywords[keyword] = score\n return keywords\n\n\ndef _strip_word(word):\n stripped_word_list = list(_tokenize_by_word(word))\n return stripped_word_list[0] if stripped_word_list else \"\"\n\n\ndef _get_combined_keywords(_keywords, split_text):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param split_text: list of strings\n :return: combined_keywords:list\n \"\"\"\n result = []\n _keywords = _keywords.copy()\n len_text = len(split_text)\n for i in xrange(len_text):\n word = _strip_word(split_text[i])\n if word in _keywords:\n combined_word = [word]\n if i + 1 == len_text:\n result.append(word) # appends last word if keyword and doesn't iterate\n for j in xrange(i + 1, len_text):\n other_word = _strip_word(split_text[j])\n if other_word in _keywords and other_word == split_text[j]:\n combined_word.append(other_word)\n else:\n for keyword in combined_word:\n _keywords.pop(keyword)\n result.append(\" \".join(combined_word))\n break\n return result\n\n\ndef _get_average_score(concept, _keywords):\n word_list = concept.split()\n word_counter = 0\n total = 0\n for word in word_list:\n total += _keywords[word]\n word_counter += 1\n return total / word_counter\n\n\ndef _format_results(_keywords, combined_keywords, split, scores):\n \"\"\"\n :param keywords:dict of keywords:scores\n :param combined_keywords:list of word/s\n \"\"\"\n combined_keywords.sort(key=lambda w: _get_average_score(w, _keywords), reverse=True)\n if scores:\n return [(word, _get_average_score(word, _keywords)) for word in combined_keywords]\n if split:\n return combined_keywords\n return \"\\n\".join(combined_keywords)\n\n\ndef keywords(text, ratio=0.2, words=None, split=False, scores=False):\n # Gets a dict of word -> lemma\n text = to_unicode(text)\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n # Creates the graph and adds the edges\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n del split_text # It's no longer used\n\n _remove_unreachable_nodes(graph)\n\n # Ranks the tokens using the PageRank algorithm. 
Returns dict of lemma -> score\n pagerank_scores = _pagerank(graph)\n\n extracted_lemmas = _extract_tokens(graph.nodes(), pagerank_scores, ratio, words)\n\n lemmas_to_word = _lemmas_to_words(tokens)\n keywords = _get_keywords_with_score(extracted_lemmas, lemmas_to_word)\n\n # text.split() to keep numbers and punctuation marks, so separeted concepts are not combined\n combined_keywords = _get_combined_keywords(keywords, text.split())\n\n return _format_results(keywords, combined_keywords, split, scores)\n\n\ndef get_graph(text):\n tokens = _clean_text_by_word(text)\n split_text = list(_tokenize_by_word(text))\n\n graph = _build_graph(_get_words_for_graph(tokens))\n _set_graph_edges(graph, tokens, split_text)\n\n return graph\n", "path": "gensim/summarization/keywords.py"}]} | 2,792 | 515 |
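The gensim patch above swaps `dict.iteritems()` (Python 2 only) for `six.iteritems(...)`, normalizes the input with `to_unicode`, and drops a now-unneeded `.decode("utf-8")`. A minimal, self-contained illustration of the iteration idiom follows; `Unit` is a stand-in record invented for this example only, not gensim's actual class.

```python
from collections import namedtuple

from six import iteritems

# `Unit` is a hypothetical stand-in for gensim's internal token record;
# the point is only the Python-2/3-safe dict iteration used in the golden diff.
Unit = namedtuple("Unit", ["token", "tag"])
tokens = {
    "cats": Unit(token="cat", tag="NNS"),
    "running": Unit(token="run", tag="VBG"),
}

# Equivalent of the patched `_get_words_for_graph` / `_lemmas_to_words` loops:
for word, unit in iteritems(tokens):  # works on both Python 2 and Python 3
    print(word, unit.token, unit.tag)
```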
gh_patches_debug_14237 | rasdani/github-patches | git_diff | mne-tools__mne-python-10739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
montage_sgskip.py example doesn't run
When trying to run the example in `examples/visualization/montage_sgskip.py`, I get an exception:
```python
ValueError: A head<->mri transformation matrix (trans) is required to plot head surfaces in head coordinates, `trans=None` is not allowed
```
It runs if I pass `trans='fsaverage'` but I'm not sure this is how this example intended to behave (as the BEM is based on a sphere here).
@agramfort care to take a look?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/visualization/montage_sgskip.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 .. _plot_montage:
4
5 Plotting sensor layouts of EEG systems
6 ======================================
7
8 This example illustrates how to load all the EEG system montages
9 shipped in MNE-python, and display it on the fsaverage template subject.
10 """ # noqa: D205, D400
11 # Authors: Alexandre Gramfort <[email protected]>
12 # Joan Massich <[email protected]>
13 #
14 # License: BSD-3-Clause
15
16 # %%
17
18 import os.path as op
19
20 import mne
21 from mne.channels.montage import get_builtin_montages
22 from mne.datasets import fetch_fsaverage
23 from mne.viz import set_3d_title, set_3d_view
24
25
26 # %%
27 # Check all montages against a sphere
28
29 for current_montage in get_builtin_montages():
30 montage = mne.channels.make_standard_montage(current_montage)
31 info = mne.create_info(
32 ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
33 info.set_montage(montage)
34 sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)
35 fig = mne.viz.plot_alignment(
36 # Plot options
37 show_axes=True, dig='fiducials', surfaces='head',
38 bem=sphere, info=info)
39 set_3d_view(figure=fig, azimuth=135, elevation=80)
40 set_3d_title(figure=fig, title=current_montage)
41
42
43 # %%
44 # Check all montages against fsaverage
45
46 subjects_dir = op.dirname(fetch_fsaverage())
47
48 for current_montage in get_builtin_montages():
49 montage = mne.channels.make_standard_montage(current_montage)
50 # Create dummy info
51 info = mne.create_info(
52 ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
53 info.set_montage(montage)
54 fig = mne.viz.plot_alignment(
55 # Plot options
56 show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,
57 subject='fsaverage', subjects_dir=subjects_dir, info=info,
58 coord_frame='mri',
59 trans='fsaverage', # transform from head coords to fsaverage's MRI
60 )
61 set_3d_view(figure=fig, azimuth=135, elevation=80)
62 set_3d_title(figure=fig, title=current_montage)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/visualization/montage_sgskip.py b/examples/visualization/montage_sgskip.py
--- a/examples/visualization/montage_sgskip.py
+++ b/examples/visualization/montage_sgskip.py
@@ -16,6 +16,7 @@
# %%
import os.path as op
+import numpy as np
import mne
from mne.channels.montage import get_builtin_montages
@@ -35,6 +36,7 @@
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head',
+ trans=mne.Transform("head", "mri", trans=np.eye(4)), # identity
bem=sphere, info=info)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
| {"golden_diff": "diff --git a/examples/visualization/montage_sgskip.py b/examples/visualization/montage_sgskip.py\n--- a/examples/visualization/montage_sgskip.py\n+++ b/examples/visualization/montage_sgskip.py\n@@ -16,6 +16,7 @@\n # %%\n \n import os.path as op\n+import numpy as np\n \n import mne\n from mne.channels.montage import get_builtin_montages\n@@ -35,6 +36,7 @@\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head',\n+ trans=mne.Transform(\"head\", \"mri\", trans=np.eye(4)), # identity\n bem=sphere, info=info)\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n", "issue": "montage_sgskip.py example doesn't run\nWhen trying to run the example in `examples/visualization/montage_sgskip.py`, I get an exception:\r\n\r\n```python\r\nValueError: A head<->mri transformation matrix (trans) is required to plot head surfaces in head coordinates, `trans=None` is not allowed\r\n```\r\nIt runs if I pass `trans='fsaverage'` but I'm not sure this is how this example intended to behave (as the BEM is based on a sphere here).\r\n\r\n@agramfort care to take a look?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. _plot_montage:\n\nPlotting sensor layouts of EEG systems\n======================================\n\nThis example illustrates how to load all the EEG system montages\nshipped in MNE-python, and display it on the fsaverage template subject.\n\"\"\" # noqa: D205, D400\n# Authors: Alexandre Gramfort <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\n\nimport mne\nfrom mne.channels.montage import get_builtin_montages\nfrom mne.datasets import fetch_fsaverage\nfrom mne.viz import set_3d_title, set_3d_view\n\n\n# %%\n# Check all montages against a sphere\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head',\n bem=sphere, info=info)\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n\n\n# %%\n# Check all montages against fsaverage\n\nsubjects_dir = op.dirname(fetch_fsaverage())\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n # Create dummy info\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,\n subject='fsaverage', subjects_dir=subjects_dir, info=info,\n coord_frame='mri',\n trans='fsaverage', # transform from head coords to fsaverage's MRI\n )\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n", "path": "examples/visualization/montage_sgskip.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n.. 
_plot_montage:\n\nPlotting sensor layouts of EEG systems\n======================================\n\nThis example illustrates how to load all the EEG system montages\nshipped in MNE-python, and display it on the fsaverage template subject.\n\"\"\" # noqa: D205, D400\n# Authors: Alexandre Gramfort <[email protected]>\n# Joan Massich <[email protected]>\n#\n# License: BSD-3-Clause\n\n# %%\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne.channels.montage import get_builtin_montages\nfrom mne.datasets import fetch_fsaverage\nfrom mne.viz import set_3d_title, set_3d_view\n\n\n# %%\n# Check all montages against a sphere\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head',\n trans=mne.Transform(\"head\", \"mri\", trans=np.eye(4)), # identity\n bem=sphere, info=info)\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n\n\n# %%\n# Check all montages against fsaverage\n\nsubjects_dir = op.dirname(fetch_fsaverage())\n\nfor current_montage in get_builtin_montages():\n montage = mne.channels.make_standard_montage(current_montage)\n # Create dummy info\n info = mne.create_info(\n ch_names=montage.ch_names, sfreq=100., ch_types='eeg')\n info.set_montage(montage)\n fig = mne.viz.plot_alignment(\n # Plot options\n show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,\n subject='fsaverage', subjects_dir=subjects_dir, info=info,\n coord_frame='mri',\n trans='fsaverage', # transform from head coords to fsaverage's MRI\n )\n set_3d_view(figure=fig, azimuth=135, elevation=80)\n set_3d_title(figure=fig, title=current_montage)\n", "path": "examples/visualization/montage_sgskip.py"}]} | 1,062 | 206 |
gh_patches_debug_27163 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-844 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`if` after `else` in `try` raises false positive WPS513
# Bug report
## What's wrong
Code:
```python
try:
return_value = ast.literal_eval(node.body)
except ValueError:
# The value was not literal, it's ok.
return
else:
if return_value in self._primitive_values:
self.add_violation(ImplicitPrimitiveViolation(node))
```
Output:
```
329:13 WPS513 Found implicit `elif` condition
if return_value in self._primitive_values:
```
## How it should be
It should not raise this violation: the `if` here is not part of an `if`/`else` chain, so there is no implicit `elif`.
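For reference, here is a minimal, self-contained reproduction of the shape that gets flagged; the helper name and the primitive-value set below are made up purely for illustration:
```python
import ast

_PRIMITIVE_VALUES = frozenset((0, 1, -1))  # hypothetical set, for illustration only


def parse_primitive(source: str):
    """A try/except/else whose else block legitimately starts with `if`."""
    try:
        return_value = ast.literal_eval(source)
    except ValueError:
        # The value was not a literal, that's ok.
        return None
    else:
        # Reported as WPS513, although it cannot be rewritten as `elif`:
        # there is no preceding `if` branch here, only `try`/`except`.
        if return_value in _PRIMITIVE_VALUES:
            return return_value
    return None


print(parse_primitive("1"))      # 1
print(parse_primitive("a + b"))  # None (not a literal)
```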
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/tokenize/conditions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import tokenize
4 from typing import ClassVar, FrozenSet
5
6 from typing_extensions import final
7
8 from wemake_python_styleguide.violations.refactoring import (
9 ImplicitElifViolation,
10 )
11 from wemake_python_styleguide.visitors.base import BaseTokenVisitor
12
13
14 @final
15 class IfElseVisitor(BaseTokenVisitor):
16 """
17 Checks if tokens tokens.
18
19 We use ``tokenize`` instead of ``ast`` because
20
21 .. code:: python
22
23 if some:
24 ...
25 else:
26 if other:
27 ...
28
29 has the same ``ast`` representation as:
30
31 .. code:: python
32
33 if some:
34 ...
35 elif other:
36 ...
37
38 That's why we have to use ``tokenize`` to find
39 the raw tokens inside the text.
40
41 """
42
43 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((
44 tokenize.NEWLINE,
45 tokenize.NL,
46 tokenize.COLON,
47 tokenize.INDENT,
48 ))
49
50 def visit_name(self, token: tokenize.TokenInfo) -> None:
51 """
52 Checks that ``if`` nodes are defined correctly.
53
54 Raises:
55 ImplicitElifViolation
56
57 """
58 self._check_implicit_elif(token)
59
60 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
61 if token.string != 'else':
62 return
63
64 index = self.file_tokens.index(token)
65 # There's a bug in coverage, I am not sure how to make it work.
66 for next_token in self.file_tokens[index + 1:]: # pragma: no cover
67 if next_token.exact_type in self._allowed_token_types:
68 continue
69 elif next_token.string == 'if':
70 self.add_violation(ImplicitElifViolation(next_token))
71 return
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py
--- a/wemake_python_styleguide/visitors/tokenize/conditions.py
+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py
@@ -57,11 +57,36 @@
"""
self._check_implicit_elif(token)
+ def _does_else_belong_to_if(self, start_index: int) -> bool:
+ previous_token = self.file_tokens[start_index - 1]
+
+ if previous_token.type != tokenize.DEDENT:
+ # This is not the first token on the line, which means that it can
+ # also be "embedded" else: x if A else B
+ return False
+
+ for token in reversed(self.file_tokens[:start_index - 1]):
+ if token.type != tokenize.NAME:
+ continue
+
+ # Here we rely upon an intuition that in Python else have to be
+ # on the same level (same indentation) as parent statement.
+ if token.start[1] == previous_token.start[1]:
+ return token.string in {'if', 'elif'}
+
+ return False
+
def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
if token.string != 'else':
return
index = self.file_tokens.index(token)
+
+ # `else` token can belong also to `for` and `try/except` statement,
+ # which can trigger false positive for that violation.
+ if not self._does_else_belong_to_if(index):
+ return
+
# There's a bug in coverage, I am not sure how to make it work.
for next_token in self.file_tokens[index + 1:]: # pragma: no cover
if next_token.exact_type in self._allowed_token_types:
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -57,11 +57,36 @@\n \"\"\"\n self._check_implicit_elif(token)\n \n+ def _does_else_belong_to_if(self, start_index: int) -> bool:\n+ previous_token = self.file_tokens[start_index - 1]\n+\n+ if previous_token.type != tokenize.DEDENT:\n+ # This is not the first token on the line, which means that it can\n+ # also be \"embedded\" else: x if A else B\n+ return False\n+\n+ for token in reversed(self.file_tokens[:start_index - 1]):\n+ if token.type != tokenize.NAME:\n+ continue\n+\n+ # Here we rely upon an intuition that in Python else have to be\n+ # on the same level (same indentation) as parent statement.\n+ if token.start[1] == previous_token.start[1]:\n+ return token.string in {'if', 'elif'}\n+\n+ return False\n+\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n \n index = self.file_tokens.index(token)\n+\n+ # `else` token can belong also to `for` and `try/except` statement,\n+ # which can trigger false positive for that violation.\n+ if not self._does_else_belong_to_if(index):\n+ return\n+\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n", "issue": "`if` after `else` in `try` raises false positive WPS513\n# Bug report\r\n\r\n<!--\r\nHi, thanks for submitting a bug. We appreciate that.\r\n\r\nBut, we will need some information about what's wrong to help you.\r\n-->\r\n\r\n## What's wrong\r\n\r\nCode:\r\n\r\n```python\r\n try:\r\n return_value = ast.literal_eval(node.body)\r\n except ValueError:\r\n # The value was not literal, it's ok.\r\n return\r\n else:\r\n if return_value in self._primitive_values:\r\n self.add_violation(ImplicitPrimitiveViolation(node))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\n329:13 WPS513 Found implicit `elif` condition\r\n if return_value in self._primitive_values:\r\n```\r\n\r\n<!-- Describe what is not working. Please, attach a traceback. -->\r\n\r\n## How is that should be\r\n\r\nIt should not raise this violation. It is not a part of `if` / `else` case.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. 
code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n if not self._does_else_belong_to_if(index):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]} | 1,009 | 426 |
gh_patches_debug_10618 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1854 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User confirmation
**Describe the bug**
When a new user has not yet confirmed their email address and the site settings are then changed to disable email confirmation, the user's account stays inactive and their confirmation link/code no longer works.
**To Reproduce**
1. Site settings: email confirmation ON
2. Register new user
3. Do not confirm their email with `confirmation_link` or confirmation code
4. Site settings: email confirmation OFF
5. user is inactive
6. `confirmation_link` or confirmation code seem not to work
**Expected behavior**
User confirmation should work even when the setting for email confirmation is off
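One possible remedy, sketched below as Django model code, is to activate any still-pending accounts whenever the settings are saved with confirmation disabled. This is only a sketch, not drop-in code: it assumes bookwyrm's existing `SiteSettings` and `User` models, and that unconfirmed accounts are stored as inactive with `deactivation_reason == "pending"`.
```python
# Sketch of a save() override on the existing SiteSettings model
# (bookwyrm/models/site.py); other fields are elided.
from django.db import models

from .user import User


class SiteSettings(models.Model):
    require_confirm_email = models.BooleanField(default=True)

    def save(self, *args, **kwargs):
        """If email confirmation is disabled, unblock users still waiting on it."""
        if not self.require_confirm_email:
            User.objects.filter(is_active=False, deactivation_reason="pending").update(
                is_active=True, deactivation_reason=None
            )
        super().save(*args, **kwargs)
```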
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/models/site.py`
Content:
```
1 """ the particulars for this instance of BookWyrm """
2 import datetime
3 from urllib.parse import urljoin
4
5 from django.db import models, IntegrityError
6 from django.dispatch import receiver
7 from django.utils import timezone
8 from model_utils import FieldTracker
9
10 from bookwyrm.preview_images import generate_site_preview_image_task
11 from bookwyrm.settings import DOMAIN, ENABLE_PREVIEW_IMAGES, STATIC_FULL_URL
12 from .base_model import BookWyrmModel, new_access_code
13 from .user import User
14 from .fields import get_absolute_url
15
16
17 class SiteSettings(models.Model):
18 """customized settings for this instance"""
19
20 name = models.CharField(default="BookWyrm", max_length=100)
21 instance_tagline = models.CharField(
22 max_length=150, default="Social Reading and Reviewing"
23 )
24 instance_description = models.TextField(default="This instance has no description.")
25 instance_short_description = models.CharField(max_length=255, blank=True, null=True)
26
27 # about page
28 registration_closed_text = models.TextField(
29 default="We aren't taking new users at this time. You can find an open "
30 'instance at <a href="https://joinbookwyrm.com/instances">'
31 "joinbookwyrm.com/instances</a>."
32 )
33 invite_request_text = models.TextField(
34 default="If your request is approved, you will receive an email with a "
35 "registration link."
36 )
37 code_of_conduct = models.TextField(default="Add a code of conduct here.")
38 privacy_policy = models.TextField(default="Add a privacy policy here.")
39
40 # registration
41 allow_registration = models.BooleanField(default=True)
42 allow_invite_requests = models.BooleanField(default=True)
43 require_confirm_email = models.BooleanField(default=True)
44
45 # images
46 logo = models.ImageField(upload_to="logos/", null=True, blank=True)
47 logo_small = models.ImageField(upload_to="logos/", null=True, blank=True)
48 favicon = models.ImageField(upload_to="logos/", null=True, blank=True)
49 preview_image = models.ImageField(
50 upload_to="previews/logos/", null=True, blank=True
51 )
52
53 # footer
54 support_link = models.CharField(max_length=255, null=True, blank=True)
55 support_title = models.CharField(max_length=100, null=True, blank=True)
56 admin_email = models.EmailField(max_length=255, null=True, blank=True)
57 footer_item = models.TextField(null=True, blank=True)
58
59 field_tracker = FieldTracker(fields=["name", "instance_tagline", "logo"])
60
61 @classmethod
62 def get(cls):
63 """gets the site settings db entry or defaults"""
64 try:
65 return cls.objects.get(id=1)
66 except cls.DoesNotExist:
67 default_settings = SiteSettings(id=1)
68 default_settings.save()
69 return default_settings
70
71 @property
72 def logo_url(self):
73 """helper to build the logo url"""
74 return self.get_url("logo", "images/logo.png")
75
76 @property
77 def logo_small_url(self):
78 """helper to build the logo url"""
79 return self.get_url("logo_small", "images/logo-small.png")
80
81 @property
82 def favicon_url(self):
83 """helper to build the logo url"""
84 return self.get_url("favicon", "images/favicon.png")
85
86 def get_url(self, field, default_path):
87 """get a media url or a default static path"""
88 uploaded = getattr(self, field, None)
89 if uploaded:
90 return get_absolute_url(uploaded)
91 return urljoin(STATIC_FULL_URL, default_path)
92
93
94 class SiteInvite(models.Model):
95 """gives someone access to create an account on the instance"""
96
97 created_date = models.DateTimeField(auto_now_add=True)
98 code = models.CharField(max_length=32, default=new_access_code)
99 expiry = models.DateTimeField(blank=True, null=True)
100 use_limit = models.IntegerField(blank=True, null=True)
101 times_used = models.IntegerField(default=0)
102 user = models.ForeignKey(User, on_delete=models.CASCADE)
103 invitees = models.ManyToManyField(User, related_name="invitees")
104
105 def valid(self):
106 """make sure it hasn't expired or been used"""
107 return (self.expiry is None or self.expiry > timezone.now()) and (
108 self.use_limit is None or self.times_used < self.use_limit
109 )
110
111 @property
112 def link(self):
113 """formats the invite link"""
114 return f"https://{DOMAIN}/invite/{self.code}"
115
116
117 class InviteRequest(BookWyrmModel):
118 """prospective users can request an invite"""
119
120 email = models.EmailField(max_length=255, unique=True)
121 invite = models.ForeignKey(
122 SiteInvite, on_delete=models.SET_NULL, null=True, blank=True
123 )
124 invite_sent = models.BooleanField(default=False)
125 ignored = models.BooleanField(default=False)
126
127 def save(self, *args, **kwargs):
128 """don't create a request for a registered email"""
129 if not self.id and User.objects.filter(email=self.email).exists():
130 raise IntegrityError()
131 super().save(*args, **kwargs)
132
133
134 def get_passowrd_reset_expiry():
135 """give people a limited time to use the link"""
136 now = timezone.now()
137 return now + datetime.timedelta(days=1)
138
139
140 class PasswordReset(models.Model):
141 """gives someone access to create an account on the instance"""
142
143 code = models.CharField(max_length=32, default=new_access_code)
144 expiry = models.DateTimeField(default=get_passowrd_reset_expiry)
145 user = models.OneToOneField(User, on_delete=models.CASCADE)
146
147 def valid(self):
148 """make sure it hasn't expired or been used"""
149 return self.expiry > timezone.now()
150
151 @property
152 def link(self):
153 """formats the invite link"""
154 return f"https://{DOMAIN}/password-reset/{self.code}"
155
156
157 # pylint: disable=unused-argument
158 @receiver(models.signals.post_save, sender=SiteSettings)
159 def preview_image(instance, *args, **kwargs):
160 """Update image preview for the default site image"""
161 if not ENABLE_PREVIEW_IMAGES:
162 return
163 changed_fields = instance.field_tracker.changed()
164
165 if len(changed_fields) > 0:
166 generate_site_preview_image_task.delay()
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/models/site.py b/bookwyrm/models/site.py
--- a/bookwyrm/models/site.py
+++ b/bookwyrm/models/site.py
@@ -90,6 +90,14 @@
return get_absolute_url(uploaded)
return urljoin(STATIC_FULL_URL, default_path)
+ def save(self, *args, **kwargs):
+ """if require_confirm_email is disabled, make sure no users are pending"""
+ if not self.require_confirm_email:
+ User.objects.filter(is_active=False, deactivation_reason="pending").update(
+ is_active=True, deactivation_reason=None
+ )
+ super().save(*args, **kwargs)
+
class SiteInvite(models.Model):
"""gives someone access to create an account on the instance"""
| {"golden_diff": "diff --git a/bookwyrm/models/site.py b/bookwyrm/models/site.py\n--- a/bookwyrm/models/site.py\n+++ b/bookwyrm/models/site.py\n@@ -90,6 +90,14 @@\n return get_absolute_url(uploaded)\n return urljoin(STATIC_FULL_URL, default_path)\n \n+ def save(self, *args, **kwargs):\n+ \"\"\"if require_confirm_email is disabled, make sure no users are pending\"\"\"\n+ if not self.require_confirm_email:\n+ User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n+ is_active=True, deactivation_reason=None\n+ )\n+ super().save(*args, **kwargs)\n+\n \n class SiteInvite(models.Model):\n \"\"\"gives someone access to create an account on the instance\"\"\"\n", "issue": "User confirmation\n**Describe the bug**\r\n\r\nWhen a new user has not yet confirmed their email address, if the site settings change to disable email address confirmation, the user's status stays inactive and the code doesn't work.\r\n\r\n**To Reproduce**\r\n\r\n1. Site settings: email confirmation ON\r\n2. Register new user\r\n3. Do not confirm their email with `confirmation_link` or confirmation code\r\n4. Site settings: email confirmation OFF\r\n5. user is inactive\r\n6. `confirmation_link` or confirmation code seem not to work\r\n\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\nUser confirmation should work even when the setting for email confirmation is off\r\n\n", "before_files": [{"content": "\"\"\" the particulars for this instance of BookWyrm \"\"\"\nimport datetime\nfrom urllib.parse import urljoin\n\nfrom django.db import models, IntegrityError\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom model_utils import FieldTracker\n\nfrom bookwyrm.preview_images import generate_site_preview_image_task\nfrom bookwyrm.settings import DOMAIN, ENABLE_PREVIEW_IMAGES, STATIC_FULL_URL\nfrom .base_model import BookWyrmModel, new_access_code\nfrom .user import User\nfrom .fields import get_absolute_url\n\n\nclass SiteSettings(models.Model):\n \"\"\"customized settings for this instance\"\"\"\n\n name = models.CharField(default=\"BookWyrm\", max_length=100)\n instance_tagline = models.CharField(\n max_length=150, default=\"Social Reading and Reviewing\"\n )\n instance_description = models.TextField(default=\"This instance has no description.\")\n instance_short_description = models.CharField(max_length=255, blank=True, null=True)\n\n # about page\n registration_closed_text = models.TextField(\n default=\"We aren't taking new users at this time. 
You can find an open \"\n 'instance at <a href=\"https://joinbookwyrm.com/instances\">'\n \"joinbookwyrm.com/instances</a>.\"\n )\n invite_request_text = models.TextField(\n default=\"If your request is approved, you will receive an email with a \"\n \"registration link.\"\n )\n code_of_conduct = models.TextField(default=\"Add a code of conduct here.\")\n privacy_policy = models.TextField(default=\"Add a privacy policy here.\")\n\n # registration\n allow_registration = models.BooleanField(default=True)\n allow_invite_requests = models.BooleanField(default=True)\n require_confirm_email = models.BooleanField(default=True)\n\n # images\n logo = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n logo_small = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n favicon = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n preview_image = models.ImageField(\n upload_to=\"previews/logos/\", null=True, blank=True\n )\n\n # footer\n support_link = models.CharField(max_length=255, null=True, blank=True)\n support_title = models.CharField(max_length=100, null=True, blank=True)\n admin_email = models.EmailField(max_length=255, null=True, blank=True)\n footer_item = models.TextField(null=True, blank=True)\n\n field_tracker = FieldTracker(fields=[\"name\", \"instance_tagline\", \"logo\"])\n\n @classmethod\n def get(cls):\n \"\"\"gets the site settings db entry or defaults\"\"\"\n try:\n return cls.objects.get(id=1)\n except cls.DoesNotExist:\n default_settings = SiteSettings(id=1)\n default_settings.save()\n return default_settings\n\n @property\n def logo_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"logo\", \"images/logo.png\")\n\n @property\n def logo_small_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"logo_small\", \"images/logo-small.png\")\n\n @property\n def favicon_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"favicon\", \"images/favicon.png\")\n\n def get_url(self, field, default_path):\n \"\"\"get a media url or a default static path\"\"\"\n uploaded = getattr(self, field, None)\n if uploaded:\n return get_absolute_url(uploaded)\n return urljoin(STATIC_FULL_URL, default_path)\n\n\nclass SiteInvite(models.Model):\n \"\"\"gives someone access to create an account on the instance\"\"\"\n\n created_date = models.DateTimeField(auto_now_add=True)\n code = models.CharField(max_length=32, default=new_access_code)\n expiry = models.DateTimeField(blank=True, null=True)\n use_limit = models.IntegerField(blank=True, null=True)\n times_used = models.IntegerField(default=0)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n invitees = models.ManyToManyField(User, related_name=\"invitees\")\n\n def valid(self):\n \"\"\"make sure it hasn't expired or been used\"\"\"\n return (self.expiry is None or self.expiry > timezone.now()) and (\n self.use_limit is None or self.times_used < self.use_limit\n )\n\n @property\n def link(self):\n \"\"\"formats the invite link\"\"\"\n return f\"https://{DOMAIN}/invite/{self.code}\"\n\n\nclass InviteRequest(BookWyrmModel):\n \"\"\"prospective users can request an invite\"\"\"\n\n email = models.EmailField(max_length=255, unique=True)\n invite = models.ForeignKey(\n SiteInvite, on_delete=models.SET_NULL, null=True, blank=True\n )\n invite_sent = models.BooleanField(default=False)\n ignored = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n \"\"\"don't create a request for a registered email\"\"\"\n if not 
self.id and User.objects.filter(email=self.email).exists():\n raise IntegrityError()\n super().save(*args, **kwargs)\n\n\ndef get_passowrd_reset_expiry():\n \"\"\"give people a limited time to use the link\"\"\"\n now = timezone.now()\n return now + datetime.timedelta(days=1)\n\n\nclass PasswordReset(models.Model):\n \"\"\"gives someone access to create an account on the instance\"\"\"\n\n code = models.CharField(max_length=32, default=new_access_code)\n expiry = models.DateTimeField(default=get_passowrd_reset_expiry)\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n def valid(self):\n \"\"\"make sure it hasn't expired or been used\"\"\"\n return self.expiry > timezone.now()\n\n @property\n def link(self):\n \"\"\"formats the invite link\"\"\"\n return f\"https://{DOMAIN}/password-reset/{self.code}\"\n\n\n# pylint: disable=unused-argument\n@receiver(models.signals.post_save, sender=SiteSettings)\ndef preview_image(instance, *args, **kwargs):\n \"\"\"Update image preview for the default site image\"\"\"\n if not ENABLE_PREVIEW_IMAGES:\n return\n changed_fields = instance.field_tracker.changed()\n\n if len(changed_fields) > 0:\n generate_site_preview_image_task.delay()\n", "path": "bookwyrm/models/site.py"}], "after_files": [{"content": "\"\"\" the particulars for this instance of BookWyrm \"\"\"\nimport datetime\nfrom urllib.parse import urljoin\n\nfrom django.db import models, IntegrityError\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom model_utils import FieldTracker\n\nfrom bookwyrm.preview_images import generate_site_preview_image_task\nfrom bookwyrm.settings import DOMAIN, ENABLE_PREVIEW_IMAGES, STATIC_FULL_URL\nfrom .base_model import BookWyrmModel, new_access_code\nfrom .user import User\nfrom .fields import get_absolute_url\n\n\nclass SiteSettings(models.Model):\n \"\"\"customized settings for this instance\"\"\"\n\n name = models.CharField(default=\"BookWyrm\", max_length=100)\n instance_tagline = models.CharField(\n max_length=150, default=\"Social Reading and Reviewing\"\n )\n instance_description = models.TextField(default=\"This instance has no description.\")\n instance_short_description = models.CharField(max_length=255, blank=True, null=True)\n\n # about page\n registration_closed_text = models.TextField(\n default=\"We aren't taking new users at this time. 
You can find an open \"\n 'instance at <a href=\"https://joinbookwyrm.com/instances\">'\n \"joinbookwyrm.com/instances</a>.\"\n )\n invite_request_text = models.TextField(\n default=\"If your request is approved, you will receive an email with a \"\n \"registration link.\"\n )\n code_of_conduct = models.TextField(default=\"Add a code of conduct here.\")\n privacy_policy = models.TextField(default=\"Add a privacy policy here.\")\n\n # registration\n allow_registration = models.BooleanField(default=True)\n allow_invite_requests = models.BooleanField(default=True)\n require_confirm_email = models.BooleanField(default=True)\n\n # images\n logo = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n logo_small = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n favicon = models.ImageField(upload_to=\"logos/\", null=True, blank=True)\n preview_image = models.ImageField(\n upload_to=\"previews/logos/\", null=True, blank=True\n )\n\n # footer\n support_link = models.CharField(max_length=255, null=True, blank=True)\n support_title = models.CharField(max_length=100, null=True, blank=True)\n admin_email = models.EmailField(max_length=255, null=True, blank=True)\n footer_item = models.TextField(null=True, blank=True)\n\n field_tracker = FieldTracker(fields=[\"name\", \"instance_tagline\", \"logo\"])\n\n @classmethod\n def get(cls):\n \"\"\"gets the site settings db entry or defaults\"\"\"\n try:\n return cls.objects.get(id=1)\n except cls.DoesNotExist:\n default_settings = SiteSettings(id=1)\n default_settings.save()\n return default_settings\n\n @property\n def logo_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"logo\", \"images/logo.png\")\n\n @property\n def logo_small_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"logo_small\", \"images/logo-small.png\")\n\n @property\n def favicon_url(self):\n \"\"\"helper to build the logo url\"\"\"\n return self.get_url(\"favicon\", \"images/favicon.png\")\n\n def get_url(self, field, default_path):\n \"\"\"get a media url or a default static path\"\"\"\n uploaded = getattr(self, field, None)\n if uploaded:\n return get_absolute_url(uploaded)\n return urljoin(STATIC_FULL_URL, default_path)\n\n def save(self, *args, **kwargs):\n \"\"\"if require_confirm_email is disabled, make sure no users are pending\"\"\"\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n super().save(*args, **kwargs)\n\n\nclass SiteInvite(models.Model):\n \"\"\"gives someone access to create an account on the instance\"\"\"\n\n created_date = models.DateTimeField(auto_now_add=True)\n code = models.CharField(max_length=32, default=new_access_code)\n expiry = models.DateTimeField(blank=True, null=True)\n use_limit = models.IntegerField(blank=True, null=True)\n times_used = models.IntegerField(default=0)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n invitees = models.ManyToManyField(User, related_name=\"invitees\")\n\n def valid(self):\n \"\"\"make sure it hasn't expired or been used\"\"\"\n return (self.expiry is None or self.expiry > timezone.now()) and (\n self.use_limit is None or self.times_used < self.use_limit\n )\n\n @property\n def link(self):\n \"\"\"formats the invite link\"\"\"\n return f\"https://{DOMAIN}/invite/{self.code}\"\n\n\nclass InviteRequest(BookWyrmModel):\n \"\"\"prospective users can request an invite\"\"\"\n\n email = models.EmailField(max_length=255, 
unique=True)\n invite = models.ForeignKey(\n SiteInvite, on_delete=models.SET_NULL, null=True, blank=True\n )\n invite_sent = models.BooleanField(default=False)\n ignored = models.BooleanField(default=False)\n\n def save(self, *args, **kwargs):\n \"\"\"don't create a request for a registered email\"\"\"\n if not self.id and User.objects.filter(email=self.email).exists():\n raise IntegrityError()\n super().save(*args, **kwargs)\n\n\ndef get_passowrd_reset_expiry():\n \"\"\"give people a limited time to use the link\"\"\"\n now = timezone.now()\n return now + datetime.timedelta(days=1)\n\n\nclass PasswordReset(models.Model):\n \"\"\"gives someone access to create an account on the instance\"\"\"\n\n code = models.CharField(max_length=32, default=new_access_code)\n expiry = models.DateTimeField(default=get_passowrd_reset_expiry)\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n def valid(self):\n \"\"\"make sure it hasn't expired or been used\"\"\"\n return self.expiry > timezone.now()\n\n @property\n def link(self):\n \"\"\"formats the invite link\"\"\"\n return f\"https://{DOMAIN}/password-reset/{self.code}\"\n\n\n# pylint: disable=unused-argument\n@receiver(models.signals.post_save, sender=SiteSettings)\ndef preview_image(instance, *args, **kwargs):\n \"\"\"Update image preview for the default site image\"\"\"\n if not ENABLE_PREVIEW_IMAGES:\n return\n changed_fields = instance.field_tracker.changed()\n\n if len(changed_fields) > 0:\n generate_site_preview_image_task.delay()\n", "path": "bookwyrm/models/site.py"}]} | 2,123 | 172 |
gh_patches_debug_1607 | rasdani/github-patches | git_diff | rotki__rotki-5059 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode logging entries in Windows
## Problem Definition
While checking the logs of a Windows user, I noticed the following error repeating multiple times inside `rotki-electron.log`:
```
--- Logging error ---
Traceback (most recent call last):
File "logging\__init__.py", line 1086, in emit
File "encodings\cp1252.py", line 19, in encode
UnicodeEncodeError: 'charmap' codec can't encode character '\u2728' in position 101: character maps to <undefined>
Call stack:
File "rotkehlchen\api\rest.py", line 298, in _do_query_async
File "rotkehlchen\api\rest.py", line 664, in _query_blockchain_balances
File "rotkehlchen\utils\mixins\lockable.py", line 46, in wrapper
File "rotkehlchen\utils\mixins\cacheable.py", line 97, in wrapper
File "rotkehlchen\chain\manager.py", line 616, in query_balances
File "rotkehlchen\utils\mixins\lockable.py", line 46, in wrapper
File "rotkehlchen\utils\mixins\cacheable.py", line 97, in wrapper
File "rotkehlchen\chain\manager.py", line 1150, in query_ethereum_balances
File "rotkehlchen\chain\manager.py", line 1083, in query_ethereum_tokens
File "rotkehlchen\chain\evm\tokens.py", line 341, in query_tokens_for_addresses
File "rotkehlchen\chain\evm\tokens.py", line 181, in _get_multicall_token_balances
File "logging\__init__.py", line 1800, in debug
File "logging\__init__.py", line 1844, in log
File "logging\__init__.py", line 1512, in log
File "logging\__init__.py", line 1589, in _log
File "logging\__init__.py", line 1599, in handle
File "logging\__init__.py", line 1661, in callHandlers
File "logging\__init__.py", line 952, in handle
File "logging\handlers.py", line 75, in emit
File "logging\__init__.py", line 1187, in emit
File "logging\__init__.py", line 1091, in emit
Message: 'Greenlet-4: Found \u2728(0x714a91578624d76A209E9aA4aF9a1F55d78f9190) token balance for 0x<redacted> and balance 6.434434324'
Arguments: ()
```
I suspect there is an issue writing Unicode log entries to the log file on Windows: the traceback shows the handler encoding with `cp1252`, which cannot represent characters such as `\u2728`.
## Task
Reproduce and fix
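A minimal script that reproduces the encoding failure and shows one way around it (passing an explicit `encoding='utf-8'` to the rotating file handler) is below; the logger name and log path are arbitrary:
```python
import logging
import logging.handlers
import tempfile
from pathlib import Path

message = "Found \u2728 token balance"  # same sparkles character as in the traceback

# cp1252 (the locale encoding shown in the traceback) cannot encode U+2728:
try:
    message.encode("cp1252")
except UnicodeEncodeError as error:
    print(f"cp1252 fails just like in the log: {error}")

# Giving the handler an explicit UTF-8 encoding avoids the problem:
logfile = Path(tempfile.gettempdir()) / "rotki-utf8-logging-test.log"
handler = logging.handlers.RotatingFileHandler(logfile, encoding="utf-8")
logger = logging.getLogger("unicode-logging-repro")
logger.addHandler(handler)
logger.warning(message)  # written to the file without raising UnicodeEncodeError
print(logfile.read_text(encoding="utf-8"))
```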
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/logging.py`
Content:
```
1 import argparse
2 import logging.config
3 import re
4 import sys
5 from pathlib import Path
6 from typing import TYPE_CHECKING, Any, Dict, MutableMapping, Optional, Tuple
7
8 import gevent
9
10 from rotkehlchen.utils.misc import timestamp_to_date, ts_now
11
12 PYWSGI_RE = re.compile(r'\[(.*)\] ')
13
14 TRACE = logging.DEBUG - 5
15
16
17 def add_logging_level(
18 level_name: str,
19 level_num: int,
20 method_name: Optional[str] = None,
21 ) -> None:
22 """
23 Comprehensively adds a new logging level to the `logging` module and the
24 currently configured logging class.
25
26 `level_name` becomes an attribute of the `logging` module with the value
27 `level_num`. `method_name` becomes a convenience method for both `logging`
28 itself and the class returned by `logging.getLoggerClass()` (usually just
29 `logging.Logger`). If `method_name` is not specified, `level_name.lower()` is
30 used.
31
32 To avoid accidental clobberings of existing attributes, this method will
33 raise an `AttributeError` if the level name is already an attribute of the
34 `logging` module or if the method name is already present
35
36 Example
37 -------
38 >>> add_logging_level('TRACE', logging.DEBUG - 5)
39 >>> logging.getLogger(__name__).setLevel("TRACE")
40 >>> logging.getLogger(__name__).trace('that worked')
41 >>> logging.trace('so did this')
42 >>> logging.TRACE
43 5
44
45 taken from: https://stackoverflow.com/a/35804945/110395
46 """
47 if not method_name:
48 method_name = level_name.lower()
49
50 if hasattr(logging, level_name):
51 raise AttributeError('{} already defined in logging module'.format(level_name))
52 if hasattr(logging, method_name):
53 raise AttributeError('{} already defined in logging module'.format(method_name))
54 if hasattr(logging.getLoggerClass(), method_name):
55 raise AttributeError('{} already defined in logger class'.format(method_name))
56
57 # This method was inspired by the answers to Stack Overflow post
58 # http://stackoverflow.com/q/2183233/2988730, especially
59 # http://stackoverflow.com/a/13638084/2988730
60 def log_for_level(self: logging.Logger, message: str, *args: Any, **kwargs: Any) -> None:
61 if self.isEnabledFor(level_num):
62 self._log(level_num, message, args, **kwargs) # pylint:disable=protected-access
63
64 def log_to_root(message: str, *args: Any, **kwargs: Any) -> None:
65 logging.log(level_num, message, *args, **kwargs)
66
67 logging.addLevelName(level_num, level_name)
68 setattr(logging, level_name, level_num)
69 setattr(logging.getLoggerClass(), method_name, log_for_level)
70 setattr(logging, method_name, log_to_root)
71
72
73 if TYPE_CHECKING:
74 class RotkehlchenLogger(logging.Logger):
75 """Just for typing. Have not found another way to do correct type checking
76 for custom log level loggers"""
77
78 def trace(self, msg: str, *args: Any, **kwargs: Any) -> None: # pylint: disable=unused-argument # noqa: E501
79 ...
80
81
82 class RotkehlchenLogsAdapter(logging.LoggerAdapter):
83
84 def __init__(self, logger: logging.Logger):
85 super().__init__(logger, extra={})
86
87 def process(self, given_msg: Any, kwargs: MutableMapping[str, Any]) -> Tuple[str, Dict]:
88 """
89 This is the main post-processing function for rotki logs
90
91 This function:
92 - appends all kwargs to the final message
93 - appends the greenlet id in the log message
94 """
95 msg = str(given_msg)
96 greenlet = gevent.getcurrent()
97 if greenlet.parent is None:
98 greenlet_name = 'Main Greenlet'
99 else:
100 try:
101 greenlet_name = greenlet.name
102 except AttributeError: # means it's a raw greenlet
103 greenlet_name = f'Greenlet with id {id(greenlet)}'
104
105 msg = greenlet_name + ': ' + msg + ','.join(' {}={}'.format(a[0], a[1]) for a in kwargs.items()) # noqa: E501
106 return msg, {}
107
108 def trace(self, msg: str, *args: Any, **kwargs: Any) -> None:
109 """
110 Delegate a trace call to the underlying logger.
111 """
112 self.log(TRACE, msg, *args, **kwargs)
113
114
115 class PywsgiFilter(logging.Filter):
116 def filter(self, record: logging.LogRecord) -> bool:
117 """Filter out the additional timestamp put in by pywsgi
118
119 This is really a hack to fix https://github.com/rotki/rotki/issues/1192
120
121 It seems that the way they do the logging in pywsgi they create the log
122 entry completely on their own. So the %message part of the entry contains
123 everything and is hence not properly customizale via normal python logging.
124
125 Other options apart from using this filter would be:
126 - Ignore it and just have the timestamp two times in the logs
127 - Completely disable pywsgi logging and perhaps move it all to the
128 rest api.
129 """
130 record.msg = PYWSGI_RE.sub('', record.msg)
131 return True
132
133
134 def configure_logging(args: argparse.Namespace) -> None:
135 loglevel = args.loglevel.upper()
136 formatters = {
137 'default': {
138 'format': '[%(asctime)s] %(levelname)s %(name)s %(message)s',
139 'datefmt': '%d/%m/%Y %H:%M:%S %Z',
140 },
141 }
142 handlers = {
143 'console': {
144 'class': 'logging.StreamHandler',
145 'level': loglevel,
146 'formatter': 'default',
147 },
148 }
149
150 if args.max_logfiles_num < 0:
151 backups_num = 0
152 else:
153 backups_num = args.max_logfiles_num - 1
154
155 if args.logtarget == 'file':
156 given_filepath = Path(args.logfile)
157 filepath = given_filepath
158 if not getattr(sys, 'frozen', False):
159 # not packaged -- must be in develop mode. Append date to each file
160 date = timestamp_to_date(
161 ts=ts_now(),
162 formatstr='%Y%m%d_%H%M%S',
163 treat_as_local=True,
164 )
165 filepath = given_filepath.parent / f'{date}_{given_filepath.name}'
166
167 selected_handlers = ['file']
168 single_log_max_bytes = int(
169 (args.max_size_in_mb_all_logs * 1024 * 1000) / args.max_logfiles_num,
170 )
171 handlers['file'] = {
172 'class': 'logging.handlers.RotatingFileHandler',
173 'filename': filepath,
174 'mode': 'a',
175 'maxBytes': single_log_max_bytes,
176 'backupCount': backups_num,
177 'level': loglevel,
178 'formatter': 'default',
179 }
180 else:
181 selected_handlers = ['console']
182
183 filters = {
184 'pywsgi': {
185 '()': PywsgiFilter,
186 },
187 }
188 loggers: Dict[str, Any] = {
189 '': { # root logger
190 'level': loglevel,
191 'handlers': selected_handlers,
192 },
193 'rotkehlchen.api.server.pywsgi': {
194 'level': loglevel,
195 'handlers': selected_handlers,
196 'filters': ['pywsgi'],
197 'propagate': False,
198 },
199 }
200 logging.config.dictConfig({
201 'version': 1,
202 'disable_existing_loggers': False,
203 'filters': filters,
204 'formatters': formatters,
205 'handlers': handlers,
206 'loggers': loggers,
207 })
208
209 if not args.logfromothermodules:
210 logging.getLogger('urllib3').setLevel(logging.CRITICAL)
211 logging.getLogger('urllib3.connectionpool').setLevel(logging.CRITICAL)
212 logging.getLogger('substrateinterface.base').setLevel(logging.CRITICAL)
213 logging.getLogger('eth_hash').setLevel(logging.CRITICAL)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/logging.py b/rotkehlchen/logging.py
--- a/rotkehlchen/logging.py
+++ b/rotkehlchen/logging.py
@@ -176,6 +176,7 @@
'backupCount': backups_num,
'level': loglevel,
'formatter': 'default',
+ 'encoding': 'utf-8',
}
else:
selected_handlers = ['console']
| {"golden_diff": "diff --git a/rotkehlchen/logging.py b/rotkehlchen/logging.py\n--- a/rotkehlchen/logging.py\n+++ b/rotkehlchen/logging.py\n@@ -176,6 +176,7 @@\n 'backupCount': backups_num,\n 'level': loglevel,\n 'formatter': 'default',\n+ 'encoding': 'utf-8',\n }\n else:\n selected_handlers = ['console']\n", "issue": "Unicode logging entries in Windows\n## Problem Definition\r\n\r\nWhile checking the logs of a Windows user I noticed the following repeating multiple times inside rotki-electron.log\r\n\r\n```--- Logging error ---\r\nTraceback (most recent call last):\r\n File \"logging\\__init__.py\", line 1086, in emit\r\n File \"encodings\\cp1252.py\", line 19, in encode\r\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u2728' in position 101: character maps to <undefined>\r\nCall stack:\r\n File \"rotkehlchen\\api\\rest.py\", line 298, in _do_query_async\r\n File \"rotkehlchen\\api\\rest.py\", line 664, in _query_blockchain_balances\r\n File \"rotkehlchen\\utils\\mixins\\lockable.py\", line 46, in wrapper\r\n File \"rotkehlchen\\utils\\mixins\\cacheable.py\", line 97, in wrapper\r\n File \"rotkehlchen\\chain\\manager.py\", line 616, in query_balances\r\n File \"rotkehlchen\\utils\\mixins\\lockable.py\", line 46, in wrapper\r\n File \"rotkehlchen\\utils\\mixins\\cacheable.py\", line 97, in wrapper\r\n File \"rotkehlchen\\chain\\manager.py\", line 1150, in query_ethereum_balances\r\n File \"rotkehlchen\\chain\\manager.py\", line 1083, in query_ethereum_tokens\r\n File \"rotkehlchen\\chain\\evm\\tokens.py\", line 341, in query_tokens_for_addresses\r\n File \"rotkehlchen\\chain\\evm\\tokens.py\", line 181, in _get_multicall_token_balances\r\n File \"logging\\__init__.py\", line 1800, in debug\r\n File \"logging\\__init__.py\", line 1844, in log\r\n File \"logging\\__init__.py\", line 1512, in log\r\n File \"logging\\__init__.py\", line 1589, in _log\r\n File \"logging\\__init__.py\", line 1599, in handle\r\n File \"logging\\__init__.py\", line 1661, in callHandlers\r\n File \"logging\\__init__.py\", line 952, in handle\r\n File \"logging\\handlers.py\", line 75, in emit\r\n File \"logging\\__init__.py\", line 1187, in emit\r\n File \"logging\\__init__.py\", line 1091, in emit\r\nMessage: 'Greenlet-4: Found \\u2728(0x714a91578624d76A209E9aA4aF9a1F55d78f9190) token balance for 0x<redacted> and balance 6.434434324'\r\nArguments: ()\r\n```\r\n\r\nI suspect there may be an issue making unicode log entries to a windows file\r\n\r\n## Task\r\n\r\nReproduce and fix\n", "before_files": [{"content": "import argparse\nimport logging.config\nimport re\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, MutableMapping, Optional, Tuple\n\nimport gevent\n\nfrom rotkehlchen.utils.misc import timestamp_to_date, ts_now\n\nPYWSGI_RE = re.compile(r'\\[(.*)\\] ')\n\nTRACE = logging.DEBUG - 5\n\n\ndef add_logging_level(\n level_name: str,\n level_num: int,\n method_name: Optional[str] = None,\n) -> None:\n \"\"\"\n Comprehensively adds a new logging level to the `logging` module and the\n currently configured logging class.\n\n `level_name` becomes an attribute of the `logging` module with the value\n `level_num`. `method_name` becomes a convenience method for both `logging`\n itself and the class returned by `logging.getLoggerClass()` (usually just\n `logging.Logger`). 
If `method_name` is not specified, `level_name.lower()` is\n used.\n\n To avoid accidental clobberings of existing attributes, this method will\n raise an `AttributeError` if the level name is already an attribute of the\n `logging` module or if the method name is already present\n\n Example\n -------\n >>> add_logging_level('TRACE', logging.DEBUG - 5)\n >>> logging.getLogger(__name__).setLevel(\"TRACE\")\n >>> logging.getLogger(__name__).trace('that worked')\n >>> logging.trace('so did this')\n >>> logging.TRACE\n 5\n\n taken from: https://stackoverflow.com/a/35804945/110395\n \"\"\"\n if not method_name:\n method_name = level_name.lower()\n\n if hasattr(logging, level_name):\n raise AttributeError('{} already defined in logging module'.format(level_name))\n if hasattr(logging, method_name):\n raise AttributeError('{} already defined in logging module'.format(method_name))\n if hasattr(logging.getLoggerClass(), method_name):\n raise AttributeError('{} already defined in logger class'.format(method_name))\n\n # This method was inspired by the answers to Stack Overflow post\n # http://stackoverflow.com/q/2183233/2988730, especially\n # http://stackoverflow.com/a/13638084/2988730\n def log_for_level(self: logging.Logger, message: str, *args: Any, **kwargs: Any) -> None:\n if self.isEnabledFor(level_num):\n self._log(level_num, message, args, **kwargs) # pylint:disable=protected-access\n\n def log_to_root(message: str, *args: Any, **kwargs: Any) -> None:\n logging.log(level_num, message, *args, **kwargs)\n\n logging.addLevelName(level_num, level_name)\n setattr(logging, level_name, level_num)\n setattr(logging.getLoggerClass(), method_name, log_for_level)\n setattr(logging, method_name, log_to_root)\n\n\nif TYPE_CHECKING:\n class RotkehlchenLogger(logging.Logger):\n \"\"\"Just for typing. Have not found another way to do correct type checking\n for custom log level loggers\"\"\"\n\n def trace(self, msg: str, *args: Any, **kwargs: Any) -> None: # pylint: disable=unused-argument # noqa: E501\n ...\n\n\nclass RotkehlchenLogsAdapter(logging.LoggerAdapter):\n\n def __init__(self, logger: logging.Logger):\n super().__init__(logger, extra={})\n\n def process(self, given_msg: Any, kwargs: MutableMapping[str, Any]) -> Tuple[str, Dict]:\n \"\"\"\n This is the main post-processing function for rotki logs\n\n This function:\n - appends all kwargs to the final message\n - appends the greenlet id in the log message\n \"\"\"\n msg = str(given_msg)\n greenlet = gevent.getcurrent()\n if greenlet.parent is None:\n greenlet_name = 'Main Greenlet'\n else:\n try:\n greenlet_name = greenlet.name\n except AttributeError: # means it's a raw greenlet\n greenlet_name = f'Greenlet with id {id(greenlet)}'\n\n msg = greenlet_name + ': ' + msg + ','.join(' {}={}'.format(a[0], a[1]) for a in kwargs.items()) # noqa: E501\n return msg, {}\n\n def trace(self, msg: str, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n Delegate a trace call to the underlying logger.\n \"\"\"\n self.log(TRACE, msg, *args, **kwargs)\n\n\nclass PywsgiFilter(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n \"\"\"Filter out the additional timestamp put in by pywsgi\n\n This is really a hack to fix https://github.com/rotki/rotki/issues/1192\n\n It seems that the way they do the logging in pywsgi they create the log\n entry completely on their own. 
So the %message part of the entry contains\n everything and is hence not properly customizale via normal python logging.\n\n Other options apart from using this filter would be:\n - Ignore it and just have the timestamp two times in the logs\n - Completely disable pywsgi logging and perhaps move it all to the\n rest api.\n \"\"\"\n record.msg = PYWSGI_RE.sub('', record.msg)\n return True\n\n\ndef configure_logging(args: argparse.Namespace) -> None:\n loglevel = args.loglevel.upper()\n formatters = {\n 'default': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s %(message)s',\n 'datefmt': '%d/%m/%Y %H:%M:%S %Z',\n },\n }\n handlers = {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': loglevel,\n 'formatter': 'default',\n },\n }\n\n if args.max_logfiles_num < 0:\n backups_num = 0\n else:\n backups_num = args.max_logfiles_num - 1\n\n if args.logtarget == 'file':\n given_filepath = Path(args.logfile)\n filepath = given_filepath\n if not getattr(sys, 'frozen', False):\n # not packaged -- must be in develop mode. Append date to each file\n date = timestamp_to_date(\n ts=ts_now(),\n formatstr='%Y%m%d_%H%M%S',\n treat_as_local=True,\n )\n filepath = given_filepath.parent / f'{date}_{given_filepath.name}'\n\n selected_handlers = ['file']\n single_log_max_bytes = int(\n (args.max_size_in_mb_all_logs * 1024 * 1000) / args.max_logfiles_num,\n )\n handlers['file'] = {\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': filepath,\n 'mode': 'a',\n 'maxBytes': single_log_max_bytes,\n 'backupCount': backups_num,\n 'level': loglevel,\n 'formatter': 'default',\n }\n else:\n selected_handlers = ['console']\n\n filters = {\n 'pywsgi': {\n '()': PywsgiFilter,\n },\n }\n loggers: Dict[str, Any] = {\n '': { # root logger\n 'level': loglevel,\n 'handlers': selected_handlers,\n },\n 'rotkehlchen.api.server.pywsgi': {\n 'level': loglevel,\n 'handlers': selected_handlers,\n 'filters': ['pywsgi'],\n 'propagate': False,\n },\n }\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': filters,\n 'formatters': formatters,\n 'handlers': handlers,\n 'loggers': loggers,\n })\n\n if not args.logfromothermodules:\n logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.CRITICAL)\n logging.getLogger('substrateinterface.base').setLevel(logging.CRITICAL)\n logging.getLogger('eth_hash').setLevel(logging.CRITICAL)\n", "path": "rotkehlchen/logging.py"}], "after_files": [{"content": "import argparse\nimport logging.config\nimport re\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, MutableMapping, Optional, Tuple\n\nimport gevent\n\nfrom rotkehlchen.utils.misc import timestamp_to_date, ts_now\n\nPYWSGI_RE = re.compile(r'\\[(.*)\\] ')\n\nTRACE = logging.DEBUG - 5\n\n\ndef add_logging_level(\n level_name: str,\n level_num: int,\n method_name: Optional[str] = None,\n) -> None:\n \"\"\"\n Comprehensively adds a new logging level to the `logging` module and the\n currently configured logging class.\n\n `level_name` becomes an attribute of the `logging` module with the value\n `level_num`. `method_name` becomes a convenience method for both `logging`\n itself and the class returned by `logging.getLoggerClass()` (usually just\n `logging.Logger`). 
If `method_name` is not specified, `level_name.lower()` is\n used.\n\n To avoid accidental clobberings of existing attributes, this method will\n raise an `AttributeError` if the level name is already an attribute of the\n `logging` module or if the method name is already present\n\n Example\n -------\n >>> add_logging_level('TRACE', logging.DEBUG - 5)\n >>> logging.getLogger(__name__).setLevel(\"TRACE\")\n >>> logging.getLogger(__name__).trace('that worked')\n >>> logging.trace('so did this')\n >>> logging.TRACE\n 5\n\n taken from: https://stackoverflow.com/a/35804945/110395\n \"\"\"\n if not method_name:\n method_name = level_name.lower()\n\n if hasattr(logging, level_name):\n raise AttributeError('{} already defined in logging module'.format(level_name))\n if hasattr(logging, method_name):\n raise AttributeError('{} already defined in logging module'.format(method_name))\n if hasattr(logging.getLoggerClass(), method_name):\n raise AttributeError('{} already defined in logger class'.format(method_name))\n\n # This method was inspired by the answers to Stack Overflow post\n # http://stackoverflow.com/q/2183233/2988730, especially\n # http://stackoverflow.com/a/13638084/2988730\n def log_for_level(self: logging.Logger, message: str, *args: Any, **kwargs: Any) -> None:\n if self.isEnabledFor(level_num):\n self._log(level_num, message, args, **kwargs) # pylint:disable=protected-access\n\n def log_to_root(message: str, *args: Any, **kwargs: Any) -> None:\n logging.log(level_num, message, *args, **kwargs)\n\n logging.addLevelName(level_num, level_name)\n setattr(logging, level_name, level_num)\n setattr(logging.getLoggerClass(), method_name, log_for_level)\n setattr(logging, method_name, log_to_root)\n\n\nif TYPE_CHECKING:\n class RotkehlchenLogger(logging.Logger):\n \"\"\"Just for typing. Have not found another way to do correct type checking\n for custom log level loggers\"\"\"\n\n def trace(self, msg: str, *args: Any, **kwargs: Any) -> None: # pylint: disable=unused-argument # noqa: E501\n ...\n\n\nclass RotkehlchenLogsAdapter(logging.LoggerAdapter):\n\n def __init__(self, logger: logging.Logger):\n super().__init__(logger, extra={})\n\n def process(self, given_msg: Any, kwargs: MutableMapping[str, Any]) -> Tuple[str, Dict]:\n \"\"\"\n This is the main post-processing function for rotki logs\n\n This function:\n - appends all kwargs to the final message\n - appends the greenlet id in the log message\n \"\"\"\n msg = str(given_msg)\n greenlet = gevent.getcurrent()\n if greenlet.parent is None:\n greenlet_name = 'Main Greenlet'\n else:\n try:\n greenlet_name = greenlet.name\n except AttributeError: # means it's a raw greenlet\n greenlet_name = f'Greenlet with id {id(greenlet)}'\n\n msg = greenlet_name + ': ' + msg + ','.join(' {}={}'.format(a[0], a[1]) for a in kwargs.items()) # noqa: E501\n return msg, {}\n\n def trace(self, msg: str, *args: Any, **kwargs: Any) -> None:\n \"\"\"\n Delegate a trace call to the underlying logger.\n \"\"\"\n self.log(TRACE, msg, *args, **kwargs)\n\n\nclass PywsgiFilter(logging.Filter):\n def filter(self, record: logging.LogRecord) -> bool:\n \"\"\"Filter out the additional timestamp put in by pywsgi\n\n This is really a hack to fix https://github.com/rotki/rotki/issues/1192\n\n It seems that the way they do the logging in pywsgi they create the log\n entry completely on their own. 
So the %message part of the entry contains\n everything and is hence not properly customizale via normal python logging.\n\n Other options apart from using this filter would be:\n - Ignore it and just have the timestamp two times in the logs\n - Completely disable pywsgi logging and perhaps move it all to the\n rest api.\n \"\"\"\n record.msg = PYWSGI_RE.sub('', record.msg)\n return True\n\n\ndef configure_logging(args: argparse.Namespace) -> None:\n loglevel = args.loglevel.upper()\n formatters = {\n 'default': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s %(message)s',\n 'datefmt': '%d/%m/%Y %H:%M:%S %Z',\n },\n }\n handlers = {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': loglevel,\n 'formatter': 'default',\n },\n }\n\n if args.max_logfiles_num < 0:\n backups_num = 0\n else:\n backups_num = args.max_logfiles_num - 1\n\n if args.logtarget == 'file':\n given_filepath = Path(args.logfile)\n filepath = given_filepath\n if not getattr(sys, 'frozen', False):\n # not packaged -- must be in develop mode. Append date to each file\n date = timestamp_to_date(\n ts=ts_now(),\n formatstr='%Y%m%d_%H%M%S',\n treat_as_local=True,\n )\n filepath = given_filepath.parent / f'{date}_{given_filepath.name}'\n\n selected_handlers = ['file']\n single_log_max_bytes = int(\n (args.max_size_in_mb_all_logs * 1024 * 1000) / args.max_logfiles_num,\n )\n handlers['file'] = {\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': filepath,\n 'mode': 'a',\n 'maxBytes': single_log_max_bytes,\n 'backupCount': backups_num,\n 'level': loglevel,\n 'formatter': 'default',\n 'encoding': 'utf-8',\n }\n else:\n selected_handlers = ['console']\n\n filters = {\n 'pywsgi': {\n '()': PywsgiFilter,\n },\n }\n loggers: Dict[str, Any] = {\n '': { # root logger\n 'level': loglevel,\n 'handlers': selected_handlers,\n },\n 'rotkehlchen.api.server.pywsgi': {\n 'level': loglevel,\n 'handlers': selected_handlers,\n 'filters': ['pywsgi'],\n 'propagate': False,\n },\n }\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': filters,\n 'formatters': formatters,\n 'handlers': handlers,\n 'loggers': loggers,\n })\n\n if not args.logfromothermodules:\n logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.CRITICAL)\n logging.getLogger('substrateinterface.base').setLevel(logging.CRITICAL)\n logging.getLogger('eth_hash').setLevel(logging.CRITICAL)\n", "path": "rotkehlchen/logging.py"}]} | 3,317 | 98 |
gh_patches_debug_23971 | rasdani/github-patches | git_diff | mne-tools__mne-bids-pipeline-813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging output is much more verbose when running with n_jobs > 1 vs n_jobs=1
After #799, I still see lots of output when Reports are created – but only if `n_jobs > 1`. For `n_jobs = 1`, things are working as expected.
```
❯ mne_bids_pipeline --config=./bids_pipeline_config.py --n_jobs=1
┌────────┬ Welcome aboard MNE-BIDS-Pipeline! 👋 ──────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:26│ 📝 Using configuration: ./bids_pipeline_config.py
│16:53:26│ ❌ Overriding config.n_jobs = 1
└────────┴
┌────────┬ init/_01_init_derivatives_dir ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:26│ ⏳️ Initializing output directories.
└────────┴ done (1s)
┌────────┬ init/_02_find_empty_room ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:26│ ⏩ Skipping, empty-room data only relevant for MEG …
└────────┴ done (1s)
┌────────┬ preprocessing/_01_data_quality ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:26│ ⏳️ sub-005 run-01 Initializing report HDF5 file
│16:53:26│ ⏳️ sub-005 run-01 Adding original raw data to report
│16:53:28│ ⏳️ sub-005 run-01 Adding config and sys info to report
```
```
❯ mne_bids_pipeline --config=./bids_pipeline_config.py --n_jobs=8
┌────────┬ Welcome aboard MNE-BIDS-Pipeline! 👋 ──────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:50│ 📝 Using configuration: ./bids_pipeline_config.py
│16:53:50│ ❌ Overriding config.n_jobs = 8
└────────┴
┌────────┬ init/_01_init_derivatives_dir ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:50│ ✅ Output directories already exist …
└────────┴ done (1s)
┌────────┬ init/_02_find_empty_room ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
│16:53:50│ ⏩ Skipping, empty-room data only relevant for MEG …
└────────┴ done (1s)
┌────────┬ preprocessing/_01_data_quality ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Embedding : jquery-3.6.0.min.js
Embedding : bootstrap.bundle.min.js
Embedding : bootstrap.min.css
Embedding : bootstrap-table/bootstrap-table.min.js
Embedding : bootstrap-table/bootstrap-table.min.css
Embedding : bootstrap-table/bootstrap-table-copy-rows.min.js
Embedding : bootstrap-table/bootstrap-table-export.min.js
Embedding : bootstrap-table/tableExport.min.js
Embedding : bootstrap-icons/bootstrap-icons.mne.min.css
Embedding : highlightjs/highlight.min.js
Embedding : highlightjs/atom-one-dark-reasonable.min.css
│16:53:51│ ⏳️ sub-005 run-01 Adding original raw data to report
│16:53:53│ ⏳️ sub-005 run-01 Adding config and sys info to report
│16:53:54│ ⏳️ sub-005 run-01 Saving report: /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.html
Overwriting existing file.
Saving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.h5
Saving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.html
│16:53:54│ ⏳️ sub-102 run-01 Initializing report HDF5 file
Saving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.h5
Embedding : jquery-3.6.0.min.js
Embedding : bootstrap.bundle.min.js
Embedding : bootstrap.min.css
Embedding : bootstrap-table/bootstrap-table.min.js
Embedding : bootstrap-table/bootstrap-table.min.css
Embedding : bootstrap-table/bootstrap-table-copy-rows.min.js
Embedding : bootstrap-table/bootstrap-table-export.min.js
Embedding : bootstrap-table/tableExport.min.js
Embedding : bootstrap-icons/bootstrap-icons.mne.min.css
Embedding : highlightjs/highlight.min.js
Embedding : highlightjs/atom-one-dark-reasonable.min.css
Embedding : jquery-3.6.0.min.js
Embedding : bootstrap.bundle.min.js
Embedding : bootstrap.min.css
Embedding : bootstrap-table/bootstrap-table.min.js
Embedding : bootstrap-table/bootstrap-table.min.css
Embedding : bootstrap-table/bootstrap-table-copy-rows.min.js
Embedding : bootstrap-table/bootstrap-table-export.min.js
Embedding : bootstrap-table/tableExport.min.js
Embedding : bootstrap-icons/bootstrap-icons.mne.min.css
Embedding : highlightjs/highlight.min.js
Embedding : highlightjs/atom-one-dark-reasonable.min.css
│16:53:54│ ⏳️ sub-102 run-01 Adding original raw data to report
│16:53:54│ ⏳️ sub-005 run-04 Adding original raw data to report
│16:53:54│ ⏳️ sub-005 run-04 Adding config and sys info to report
│16:53:54│ ⏳️ sub-102 run-01 Adding config and sys info to report
│16:53:54│ ⏳️ sub-102 run-01 Saving report: /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.html
Overwriting existing file.
Saving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.h5
Saving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.html
Embedding : jquery-3.6.0.min.js
Embedding : bootstrap.bundle.min.js
Embedding : bootstrap.min.css
Embedding : bootstrap-table/bootstrap-table.min.js
Embedding : bootstrap-table/bootstrap-table.min.css
Embedding : bootstrap-table/bootstrap-table-copy-rows.min.js
Embedding : bootstrap-table/bootstrap-table-export.min.js
Embedding : bootstrap-table/tableExport.min.js
Embedding : bootstrap-icons/bootstrap-icons.mne.min.css
Embedding : highlightjs/highlight.min.js
Embedding : highlightjs/atom-one-dark-reasonable.min.css
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mne_bids_pipeline/_parallel.py`
Content:
```
1 """Parallelization."""
2
3 from typing import Literal, Callable
4 from types import SimpleNamespace
5
6 import joblib
7
8 from ._logging import logger, gen_log_kwargs, _is_testing
9
10
11 def get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:
12 n_jobs = exec_params.n_jobs
13 if n_jobs < 0:
14 n_cores = joblib.cpu_count()
15 n_jobs = min(n_cores + n_jobs + 1, n_cores)
16
17 # Shim to allow overriding n_jobs for specific steps
18 if _is_testing() and hasattr(exec_params, "_n_jobs"):
19 from ._run import _get_step_path, _short_step_path
20
21 step_path = _short_step_path(_get_step_path())
22 orig_n_jobs = n_jobs
23 n_jobs = exec_params._n_jobs.get(step_path, n_jobs)
24 if log_override and n_jobs != orig_n_jobs:
25 msg = f"Overriding n_jobs: {orig_n_jobs}→{n_jobs}"
26 logger.info(**gen_log_kwargs(message=msg, emoji="override"))
27 return n_jobs
28
29
30 dask_client = None
31
32
33 def setup_dask_client(*, exec_params: SimpleNamespace) -> None:
34 global dask_client
35
36 import dask
37 from dask.distributed import Client
38
39 if dask_client is not None:
40 return
41
42 n_workers = get_n_jobs(exec_params=exec_params)
43 msg = f"Dask initializing with {n_workers} workers …"
44 logger.info(**gen_log_kwargs(message=msg, emoji="👾"))
45
46 if exec_params.dask_temp_dir is None:
47 this_dask_temp_dir = exec_params.deriv_root / ".dask-worker-space"
48 else:
49 this_dask_temp_dir = exec_params.dask_temp_dir
50
51 msg = f"Dask temporary directory: {this_dask_temp_dir}"
52 logger.info(**gen_log_kwargs(message=msg, emoji="📂"))
53 dask.config.set(
54 {
55 "temporary-directory": this_dask_temp_dir,
56 "distributed.worker.memory.pause": 0.8,
57 # fraction of memory that can be utilized before the nanny
58 # process will terminate the worker
59 "distributed.worker.memory.terminate": 1.0,
60 # TODO spilling to disk currently doesn't work reliably for us,
61 # as Dask cannot spill "unmanaged" memory – and most of what we
62 # see currently is, in fact, "unmanaged". Needs thorough
63 # investigation.
64 "distributed.worker.memory.spill": False,
65 }
66 )
67 client = Client( # noqa: F841
68 memory_limit=exec_params.dask_worker_memory_limit,
69 n_workers=n_workers,
70 threads_per_worker=1,
71 name="mne-bids-pipeline",
72 )
73 client.auto_restart = False # don't restart killed workers
74
75 dashboard_url = client.dashboard_link
76 msg = "Dask client dashboard: " f"[link={dashboard_url}]{dashboard_url}[/link]"
77 logger.info(**gen_log_kwargs(message=msg, emoji="🌎"))
78
79 if exec_params.dask_open_dashboard:
80 import webbrowser
81
82 webbrowser.open(url=dashboard_url, autoraise=True)
83
84 # Update global variable
85 dask_client = client
86
87
88 def get_parallel_backend_name(
89 *,
90 exec_params: SimpleNamespace,
91 ) -> Literal["dask", "loky"]:
92 if (
93 exec_params.parallel_backend == "loky"
94 or get_n_jobs(exec_params=exec_params) == 1
95 ):
96 backend = "loky"
97 elif exec_params.parallel_backend == "dask":
98 # Disable interactive plotting backend
99 import matplotlib
100
101 matplotlib.use("Agg")
102 backend = "dask"
103 else:
104 # TODO: Move to value validation step
105 raise ValueError(f"Unknown parallel backend: {exec_params.parallel_backend}")
106
107 return backend
108
109
110 def get_parallel_backend(exec_params: SimpleNamespace) -> joblib.parallel_backend:
111 import joblib
112
113 backend = get_parallel_backend_name(exec_params=exec_params)
114 kwargs = {
115 "n_jobs": get_n_jobs(
116 exec_params=exec_params,
117 log_override=True,
118 )
119 }
120
121 if backend == "loky":
122 kwargs["inner_max_num_threads"] = 1
123 else:
124 setup_dask_client(exec_params=exec_params)
125
126 return joblib.parallel_backend(backend, **kwargs)
127
128
129 def parallel_func(func: Callable, *, exec_params: SimpleNamespace):
130 if get_parallel_backend_name(exec_params=exec_params) == "loky":
131 if get_n_jobs(exec_params=exec_params) == 1:
132 my_func = func
133 parallel = list
134 else:
135 from joblib import Parallel, delayed
136
137 parallel = Parallel()
138 my_func = delayed(func)
139 else: # Dask
140 from joblib import Parallel, delayed
141
142 parallel = Parallel()
143 my_func = delayed(func)
144
145 return parallel, my_func
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py
--- a/mne_bids_pipeline/_parallel.py
+++ b/mne_bids_pipeline/_parallel.py
@@ -4,6 +4,7 @@
from types import SimpleNamespace
import joblib
+from mne.utils import use_log_level, logger as mne_logger
from ._logging import logger, gen_log_kwargs, _is_testing
@@ -127,19 +128,21 @@
def parallel_func(func: Callable, *, exec_params: SimpleNamespace):
- if get_parallel_backend_name(exec_params=exec_params) == "loky":
- if get_n_jobs(exec_params=exec_params) == 1:
- my_func = func
- parallel = list
- else:
- from joblib import Parallel, delayed
-
- parallel = Parallel()
- my_func = delayed(func)
- else: # Dask
+ if (
+ get_parallel_backend_name(exec_params=exec_params) == "loky"
+ and get_n_jobs(exec_params=exec_params) == 1
+ ):
+ my_func = func
+ parallel = list
+ else: # Dask or n_jobs > 1
from joblib import Parallel, delayed
parallel = Parallel()
- my_func = delayed(func)
+
+ def run_verbose(*args, verbose=mne_logger.level, **kwargs):
+ with use_log_level(verbose=verbose):
+ return func(*args, **kwargs)
+
+ my_func = delayed(run_verbose)
return parallel, my_func
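The patch works by binding the parent process's MNE log level as a default argument and re-applying it inside each delayed call, because joblib's loky workers start fresh and do not inherit the pipeline's verbosity setting. A minimal standalone sketch of the same mechanism (the per-subject function and subject IDs here are placeholders, not pipeline code):

```python
# Capture the parent's MNE log level as a default argument (evaluated in the
# parent when the function is defined), then re-apply it inside the worker.
from joblib import Parallel, delayed
from mne.utils import use_log_level, logger as mne_logger

def build_report(subject):                              # placeholder step
    ...

def run_quietly(subject, verbose=mne_logger.level):     # level bound in the parent
    with use_log_level(verbose=verbose):                # silences MNE output in the worker
        return build_report(subject)

# Parallel(n_jobs=8)(delayed(run_quietly)(s) for s in ["sub-005", "sub-102"])
```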
| {"golden_diff": "diff --git a/mne_bids_pipeline/_parallel.py b/mne_bids_pipeline/_parallel.py\n--- a/mne_bids_pipeline/_parallel.py\n+++ b/mne_bids_pipeline/_parallel.py\n@@ -4,6 +4,7 @@\n from types import SimpleNamespace\n \n import joblib\n+from mne.utils import use_log_level, logger as mne_logger\n \n from ._logging import logger, gen_log_kwargs, _is_testing\n \n@@ -127,19 +128,21 @@\n \n \n def parallel_func(func: Callable, *, exec_params: SimpleNamespace):\n- if get_parallel_backend_name(exec_params=exec_params) == \"loky\":\n- if get_n_jobs(exec_params=exec_params) == 1:\n- my_func = func\n- parallel = list\n- else:\n- from joblib import Parallel, delayed\n-\n- parallel = Parallel()\n- my_func = delayed(func)\n- else: # Dask\n+ if (\n+ get_parallel_backend_name(exec_params=exec_params) == \"loky\"\n+ and get_n_jobs(exec_params=exec_params) == 1\n+ ):\n+ my_func = func\n+ parallel = list\n+ else: # Dask or n_jobs > 1\n from joblib import Parallel, delayed\n \n parallel = Parallel()\n- my_func = delayed(func)\n+\n+ def run_verbose(*args, verbose=mne_logger.level, **kwargs):\n+ with use_log_level(verbose=verbose):\n+ return func(*args, **kwargs)\n+\n+ my_func = delayed(run_verbose)\n \n return parallel, my_func\n", "issue": "Logging output is much more verbose when running with n_jobs > 1 vs n_jobs=1\nAfter #799, I still see lots of output when Reports are created \u2013\u00a0but only if `n_jobs > 1`. For `n_jobs = 1`, things are working as expected.\r\n\r\n```\r\n\u276f mne_bids_pipeline --config=./bids_pipeline_config.py --n_jobs=1\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c Welcome aboard MNE-BIDS-Pipeline! \ud83d\udc4b \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:26\u2502 \ud83d\udcdd Using configuration: ./bids_pipeline_config.py\r\n\u250216:53:26\u2502 \u274c Overriding config.n_jobs = 1\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 \r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c init/_01_init_derivatives_dir \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:26\u2502 \u23f3\ufe0f Initializing output directories.\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 done (1s)\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c 
init/_02_find_empty_room \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:26\u2502 \u23e9 Skipping, empty-room data only relevant for MEG \u2026\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 done (1s)\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c preprocessing/_01_data_quality \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:26\u2502 \u23f3\ufe0f sub-005 run-01 Initializing report HDF5 file\r\n\u250216:53:26\u2502 \u23f3\ufe0f sub-005 run-01 Adding original raw data to report\r\n\u250216:53:28\u2502 \u23f3\ufe0f sub-005 run-01 Adding config and sys info to report\r\n```\r\n\r\n```\r\n\u276f mne_bids_pipeline --config=./bids_pipeline_config.py --n_jobs=8\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c Welcome aboard MNE-BIDS-Pipeline! 
\ud83d\udc4b \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:50\u2502 \ud83d\udcdd Using configuration: ./bids_pipeline_config.py\r\n\u250216:53:50\u2502 \u274c Overriding config.n_jobs = 8\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 \r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c init/_01_init_derivatives_dir \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:50\u2502 \u2705 Output directories already exist \u2026\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 done (1s)\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c init/_02_find_empty_room \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\n\u250216:53:50\u2502 \u23e9 Skipping, empty-room data only relevant for MEG \u2026\r\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534 done (1s)\r\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c preprocessing/_01_data_quality 
\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\r\nEmbedding : jquery-3.6.0.min.js\r\nEmbedding : bootstrap.bundle.min.js\r\nEmbedding : bootstrap.min.css\r\nEmbedding : bootstrap-table/bootstrap-table.min.js\r\nEmbedding : bootstrap-table/bootstrap-table.min.css\r\nEmbedding : bootstrap-table/bootstrap-table-copy-rows.min.js\r\nEmbedding : bootstrap-table/bootstrap-table-export.min.js\r\nEmbedding : bootstrap-table/tableExport.min.js\r\nEmbedding : bootstrap-icons/bootstrap-icons.mne.min.css\r\nEmbedding : highlightjs/highlight.min.js\r\nEmbedding : highlightjs/atom-one-dark-reasonable.min.css\r\n\u250216:53:51\u2502 \u23f3\ufe0f sub-005 run-01 Adding original raw data to report\r\n\u250216:53:53\u2502 \u23f3\ufe0f sub-005 run-01 Adding config and sys info to report\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-005 run-01 Saving report: /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.html\r\nOverwriting existing file.\r\nSaving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.h5\r\nSaving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-005/eeg/sub-005_task-tasin_report.html\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-102 run-01 Initializing report HDF5 file\r\nSaving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.h5\r\nEmbedding : jquery-3.6.0.min.js\r\nEmbedding : bootstrap.bundle.min.js\r\nEmbedding : bootstrap.min.css\r\nEmbedding : bootstrap-table/bootstrap-table.min.js\r\nEmbedding : bootstrap-table/bootstrap-table.min.css\r\nEmbedding : bootstrap-table/bootstrap-table-copy-rows.min.js\r\nEmbedding : bootstrap-table/bootstrap-table-export.min.js\r\nEmbedding : bootstrap-table/tableExport.min.js\r\nEmbedding : bootstrap-icons/bootstrap-icons.mne.min.css\r\nEmbedding : highlightjs/highlight.min.js\r\nEmbedding : highlightjs/atom-one-dark-reasonable.min.css\r\nEmbedding : jquery-3.6.0.min.js\r\nEmbedding : bootstrap.bundle.min.js\r\nEmbedding : bootstrap.min.css\r\nEmbedding : bootstrap-table/bootstrap-table.min.js\r\nEmbedding : bootstrap-table/bootstrap-table.min.css\r\nEmbedding : bootstrap-table/bootstrap-table-copy-rows.min.js\r\nEmbedding : bootstrap-table/bootstrap-table-export.min.js\r\nEmbedding : bootstrap-table/tableExport.min.js\r\nEmbedding : bootstrap-icons/bootstrap-icons.mne.min.css\r\nEmbedding : highlightjs/highlight.min.js\r\nEmbedding : highlightjs/atom-one-dark-reasonable.min.css\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-102 run-01 Adding original raw data to report\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-005 run-04 Adding original raw data to report\r\n\u250216:53:54\u2502 
\u23f3\ufe0f sub-005 run-04 Adding config and sys info to report\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-102 run-01 Adding config and sys info to report\r\n\u250216:53:54\u2502 \u23f3\ufe0f sub-102 run-01 Saving report: /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.html\r\nOverwriting existing file.\r\nSaving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.h5\r\nSaving report to : /Users/richardhochenberger/Library/CloudStorage/OneDrive-FirmenichSA/Documents/TasIn/data/derivatives/sub-102/eeg/sub-102_task-tasin_report.html\r\nEmbedding : jquery-3.6.0.min.js\r\nEmbedding : bootstrap.bundle.min.js\r\nEmbedding : bootstrap.min.css\r\nEmbedding : bootstrap-table/bootstrap-table.min.js\r\nEmbedding : bootstrap-table/bootstrap-table.min.css\r\nEmbedding : bootstrap-table/bootstrap-table-copy-rows.min.js\r\nEmbedding : bootstrap-table/bootstrap-table-export.min.js\r\nEmbedding : bootstrap-table/tableExport.min.js\r\nEmbedding : bootstrap-icons/bootstrap-icons.mne.min.css\r\nEmbedding : highlightjs/highlight.min.js\r\nEmbedding : highlightjs/atom-one-dark-reasonable.min.css\r\n```\n", "before_files": [{"content": "\"\"\"Parallelization.\"\"\"\n\nfrom typing import Literal, Callable\nfrom types import SimpleNamespace\n\nimport joblib\n\nfrom ._logging import logger, gen_log_kwargs, _is_testing\n\n\ndef get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:\n n_jobs = exec_params.n_jobs\n if n_jobs < 0:\n n_cores = joblib.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n\n # Shim to allow overriding n_jobs for specific steps\n if _is_testing() and hasattr(exec_params, \"_n_jobs\"):\n from ._run import _get_step_path, _short_step_path\n\n step_path = _short_step_path(_get_step_path())\n orig_n_jobs = n_jobs\n n_jobs = exec_params._n_jobs.get(step_path, n_jobs)\n if log_override and n_jobs != orig_n_jobs:\n msg = f\"Overriding n_jobs: {orig_n_jobs}\u2192{n_jobs}\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"override\"))\n return n_jobs\n\n\ndask_client = None\n\n\ndef setup_dask_client(*, exec_params: SimpleNamespace) -> None:\n global dask_client\n\n import dask\n from dask.distributed import Client\n\n if dask_client is not None:\n return\n\n n_workers = get_n_jobs(exec_params=exec_params)\n msg = f\"Dask initializing with {n_workers} workers \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udc7e\"))\n\n if exec_params.dask_temp_dir is None:\n this_dask_temp_dir = exec_params.deriv_root / \".dask-worker-space\"\n else:\n this_dask_temp_dir = exec_params.dask_temp_dir\n\n msg = f\"Dask temporary directory: {this_dask_temp_dir}\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udcc2\"))\n dask.config.set(\n {\n \"temporary-directory\": this_dask_temp_dir,\n \"distributed.worker.memory.pause\": 0.8,\n # fraction of memory that can be utilized before the nanny\n # process will terminate the worker\n \"distributed.worker.memory.terminate\": 1.0,\n # TODO spilling to disk currently doesn't work reliably for us,\n # as Dask cannot spill \"unmanaged\" memory \u2013\u00a0and most of what we\n # see currently is, in fact, \"unmanaged\". 
Needs thorough\n # investigation.\n \"distributed.worker.memory.spill\": False,\n }\n )\n client = Client( # noqa: F841\n memory_limit=exec_params.dask_worker_memory_limit,\n n_workers=n_workers,\n threads_per_worker=1,\n name=\"mne-bids-pipeline\",\n )\n client.auto_restart = False # don't restart killed workers\n\n dashboard_url = client.dashboard_link\n msg = \"Dask client dashboard: \" f\"[link={dashboard_url}]{dashboard_url}[/link]\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83c\udf0e\"))\n\n if exec_params.dask_open_dashboard:\n import webbrowser\n\n webbrowser.open(url=dashboard_url, autoraise=True)\n\n # Update global variable\n dask_client = client\n\n\ndef get_parallel_backend_name(\n *,\n exec_params: SimpleNamespace,\n) -> Literal[\"dask\", \"loky\"]:\n if (\n exec_params.parallel_backend == \"loky\"\n or get_n_jobs(exec_params=exec_params) == 1\n ):\n backend = \"loky\"\n elif exec_params.parallel_backend == \"dask\":\n # Disable interactive plotting backend\n import matplotlib\n\n matplotlib.use(\"Agg\")\n backend = \"dask\"\n else:\n # TODO: Move to value validation step\n raise ValueError(f\"Unknown parallel backend: {exec_params.parallel_backend}\")\n\n return backend\n\n\ndef get_parallel_backend(exec_params: SimpleNamespace) -> joblib.parallel_backend:\n import joblib\n\n backend = get_parallel_backend_name(exec_params=exec_params)\n kwargs = {\n \"n_jobs\": get_n_jobs(\n exec_params=exec_params,\n log_override=True,\n )\n }\n\n if backend == \"loky\":\n kwargs[\"inner_max_num_threads\"] = 1\n else:\n setup_dask_client(exec_params=exec_params)\n\n return joblib.parallel_backend(backend, **kwargs)\n\n\ndef parallel_func(func: Callable, *, exec_params: SimpleNamespace):\n if get_parallel_backend_name(exec_params=exec_params) == \"loky\":\n if get_n_jobs(exec_params=exec_params) == 1:\n my_func = func\n parallel = list\n else:\n from joblib import Parallel, delayed\n\n parallel = Parallel()\n my_func = delayed(func)\n else: # Dask\n from joblib import Parallel, delayed\n\n parallel = Parallel()\n my_func = delayed(func)\n\n return parallel, my_func\n", "path": "mne_bids_pipeline/_parallel.py"}], "after_files": [{"content": "\"\"\"Parallelization.\"\"\"\n\nfrom typing import Literal, Callable\nfrom types import SimpleNamespace\n\nimport joblib\nfrom mne.utils import use_log_level, logger as mne_logger\n\nfrom ._logging import logger, gen_log_kwargs, _is_testing\n\n\ndef get_n_jobs(*, exec_params: SimpleNamespace, log_override: bool = False) -> int:\n n_jobs = exec_params.n_jobs\n if n_jobs < 0:\n n_cores = joblib.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n\n # Shim to allow overriding n_jobs for specific steps\n if _is_testing() and hasattr(exec_params, \"_n_jobs\"):\n from ._run import _get_step_path, _short_step_path\n\n step_path = _short_step_path(_get_step_path())\n orig_n_jobs = n_jobs\n n_jobs = exec_params._n_jobs.get(step_path, n_jobs)\n if log_override and n_jobs != orig_n_jobs:\n msg = f\"Overriding n_jobs: {orig_n_jobs}\u2192{n_jobs}\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"override\"))\n return n_jobs\n\n\ndask_client = None\n\n\ndef setup_dask_client(*, exec_params: SimpleNamespace) -> None:\n global dask_client\n\n import dask\n from dask.distributed import Client\n\n if dask_client is not None:\n return\n\n n_workers = get_n_jobs(exec_params=exec_params)\n msg = f\"Dask initializing with {n_workers} workers \u2026\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udc7e\"))\n\n if exec_params.dask_temp_dir 
is None:\n this_dask_temp_dir = exec_params.deriv_root / \".dask-worker-space\"\n else:\n this_dask_temp_dir = exec_params.dask_temp_dir\n\n msg = f\"Dask temporary directory: {this_dask_temp_dir}\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83d\udcc2\"))\n dask.config.set(\n {\n \"temporary-directory\": this_dask_temp_dir,\n \"distributed.worker.memory.pause\": 0.8,\n # fraction of memory that can be utilized before the nanny\n # process will terminate the worker\n \"distributed.worker.memory.terminate\": 1.0,\n # TODO spilling to disk currently doesn't work reliably for us,\n # as Dask cannot spill \"unmanaged\" memory \u2013\u00a0and most of what we\n # see currently is, in fact, \"unmanaged\". Needs thorough\n # investigation.\n \"distributed.worker.memory.spill\": False,\n }\n )\n client = Client( # noqa: F841\n memory_limit=exec_params.dask_worker_memory_limit,\n n_workers=n_workers,\n threads_per_worker=1,\n name=\"mne-bids-pipeline\",\n )\n client.auto_restart = False # don't restart killed workers\n\n dashboard_url = client.dashboard_link\n msg = \"Dask client dashboard: \" f\"[link={dashboard_url}]{dashboard_url}[/link]\"\n logger.info(**gen_log_kwargs(message=msg, emoji=\"\ud83c\udf0e\"))\n\n if exec_params.dask_open_dashboard:\n import webbrowser\n\n webbrowser.open(url=dashboard_url, autoraise=True)\n\n # Update global variable\n dask_client = client\n\n\ndef get_parallel_backend_name(\n *,\n exec_params: SimpleNamespace,\n) -> Literal[\"dask\", \"loky\"]:\n if (\n exec_params.parallel_backend == \"loky\"\n or get_n_jobs(exec_params=exec_params) == 1\n ):\n backend = \"loky\"\n elif exec_params.parallel_backend == \"dask\":\n # Disable interactive plotting backend\n import matplotlib\n\n matplotlib.use(\"Agg\")\n backend = \"dask\"\n else:\n # TODO: Move to value validation step\n raise ValueError(f\"Unknown parallel backend: {exec_params.parallel_backend}\")\n\n return backend\n\n\ndef get_parallel_backend(exec_params: SimpleNamespace) -> joblib.parallel_backend:\n import joblib\n\n backend = get_parallel_backend_name(exec_params=exec_params)\n kwargs = {\n \"n_jobs\": get_n_jobs(\n exec_params=exec_params,\n log_override=True,\n )\n }\n\n if backend == \"loky\":\n kwargs[\"inner_max_num_threads\"] = 1\n else:\n setup_dask_client(exec_params=exec_params)\n\n return joblib.parallel_backend(backend, **kwargs)\n\n\ndef parallel_func(func: Callable, *, exec_params: SimpleNamespace):\n if (\n get_parallel_backend_name(exec_params=exec_params) == \"loky\"\n and get_n_jobs(exec_params=exec_params) == 1\n ):\n my_func = func\n parallel = list\n else: # Dask or n_jobs > 1\n from joblib import Parallel, delayed\n\n parallel = Parallel()\n\n def run_verbose(*args, verbose=mne_logger.level, **kwargs):\n with use_log_level(verbose=verbose):\n return func(*args, **kwargs)\n\n my_func = delayed(run_verbose)\n\n return parallel, my_func\n", "path": "mne_bids_pipeline/_parallel.py"}]} | 3,415 | 356 |
gh_patches_debug_19529 | rasdani/github-patches | git_diff | Parsl__parsl-127 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not raise `NotImplemented`
In a few places we raise `NotImplemented`, which is a sentinel value rather than an exception class and therefore fails at runtime with a `TypeError`; these call sites should raise `NotImplementedError` instead.
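A minimal demonstration of the difference (the `Base` class is invented purely for illustration):

```python
# NotImplemented is a sentinel value, not an exception class, so `raise
# NotImplemented` fails with TypeError; NotImplementedError is the exception
# intended for "subclass must override this" situations.
class Base:
    def run(self):
        raise NotImplementedError("subclasses must implement run()")

try:
    raise NotImplemented
except TypeError as exc:
    print(exc)   # exceptions must derive from BaseException
```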
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/app/app.py`
Content:
```
1 '''
2 Parsl Apps
3 ==========
4
5 Here lies the definitions for the @App decorator and the APP classes.
6 The APP class encapsulates a generic leaf task that can be executed asynchronously.
7
8 '''
9 import logging
10 from inspect import signature, Parameter
11
12 # Logging moved here in the PEP8 conformance fixes.
13 logger = logging.getLogger(__name__)
14
15
16 class AppBase (object):
17 """
18 This is the base class that defines the two external facing functions that an App must define.
19 The __init__ () which is called when the interpretor sees the definition of the decorated
20 function, and the __call__ () which is invoked when a decorated function is called by the user.
21
22 """
23
24 def __init__(self, func, executor, walltime=60, sites='all', cache=False, exec_type="bash"):
25 ''' Constructor for the APP object.
26
27 Args:
28 - func (function): Takes the function to be made into an App
29 - executor (executor): Executor for the execution resource
30
31 Kwargs:
32 - walltime (int) : Walltime in seconds for the app execution
33 - sites (str|list) : List of site names that this app could execute over. default is 'all'
34 - exec_type (string) : App type (bash|python)
35 - cache (Bool) : Enable caching of this app ?
36
37 Returns:
38 - APP object.
39
40 '''
41 self.__name__ = func.__name__
42 self.func = func
43 self.executor = executor
44 self.exec_type = exec_type
45 self.status = 'created'
46 self.sites = sites
47 self.cache = cache
48
49 sig = signature(func)
50 self.kwargs = {}
51 for s in sig.parameters:
52 if sig.parameters[s].default != Parameter.empty:
53 self.kwargs[s] = sig.parameters[s].default
54
55 self.stdout = sig.parameters['stdout'].default if 'stdout' in sig.parameters else None
56 self.stderr = sig.parameters['stderr'].default if 'stderr' in sig.parameters else None
57 self.inputs = sig.parameters['inputs'].default if 'inputs' in sig.parameters else []
58 self.outputs = sig.parameters['outputs'].default if 'outputs' in sig.parameters else []
59
60 def __call__(self, *args, **kwargs):
61 ''' The __call__ function must be implemented in the subclasses
62 '''
63 raise NotImplemented
64
65
66 def app_wrapper(func):
67
68 def wrapper(*args, **kwargs):
69 logger.debug("App wrapper begins")
70 x = func(*args, **kwargs)
71 logger.debug("App wrapper ends")
72 return x
73
74 return wrapper
75
76
77 def App(apptype, executor, walltime=60, cache=False, sites='all'):
78 ''' The App decorator function
79
80 Args:
81 - apptype (string) : Apptype can be bash|python
82 - executor (Executor) : Executor object wrapping threads/process pools etc.
83
84 Kwargs:
85 - walltime (int) : Walltime for app in seconds,
86 default=60
87 - sites (str|List) : List of site names on which the app could execute
88 default='all'
89 - cache (Bool) : Enable caching of the app call
90 default=False
91
92 Returns:
93 An AppFactory object, which when called runs the apps through the executor.
94 '''
95
96 from parsl import APP_FACTORY_FACTORY
97
98 def Exec(f):
99 return APP_FACTORY_FACTORY.make(apptype, executor, f,
100 sites=sites,
101 cache=cache,
102 walltime=walltime)
103
104 return Exec
105
```
Path: `parsl/executors/threads.py`
Content:
```
1 import logging
2 import sys
3 import concurrent.futures as cf
4 from parsl.executors.base import ParslExecutor
5
6 logger = logging.getLogger(__name__)
7
8
9 class ThreadPoolExecutor(ParslExecutor):
10 ''' The thread pool executor
11 '''
12
13 def __init__(self, max_workers=2, thread_name_prefix='',
14 execution_provider=None, config=None, **kwargs):
15 ''' Initialize the thread pool
16 Config options that are really used are :
17
18 config.sites.site.execution.options = {"maxThreads" : <int>,
19 "threadNamePrefix" : <string>}
20
21 Kwargs:
22 - max_workers (int) : Number of threads (Default=2) (keeping name workers/threads for backward compatibility)
23 - thread_name_prefix (string) : Thread name prefix (Only supported in python v3.6+
24 - execution_provider (ep object) : This is ignored here
25 - config (dict): The config dict object for the site:
26
27
28 '''
29
30 self._scaling_enabled = False
31 if not config:
32 config = {"execution": {}}
33 if "maxThreads" not in config["execution"]:
34 config["execution"]["maxThreads"] = max_workers
35 if "threadNamePrefix" not in config["execution"]:
36 config["execution"]["threadNamePrefix"] = thread_name_prefix
37
38 self.config = config
39
40 if sys.version_info > (3, 6):
41 self.executor = cf.ThreadPoolExecutor(max_workers=config["execution"]["maxThreads"],
42 thread_name_prefix=config["execution"]["threadNamePrefix"])
43 else:
44 self.executor = cf.ThreadPoolExecutor(max_workers=config["execution"]["maxThreads"])
45
46 @property
47 def scaling_enabled(self):
48 return self._scaling_enabled
49
50 def submit(self, *args, **kwargs):
51 ''' Submits work to the thread pool
52 This method is simply pass through and behaves like a submit call as described
53 here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
54
55 Returns:
56 Future
57 '''
58
59 return self.executor.submit(*args, **kwargs)
60
61 def scale_out(self, workers=1):
62 ''' Scales out the number of active workers by 1
63 This method is notImplemented for threads and will raise the error if called.
64
65 Raises:
66 NotImplemented exception
67 '''
68
69 raise NotImplemented
70
71 def scale_in(self, workers=1):
72 ''' Scale in the number of active workers by 1
73 This method is notImplemented for threads and will raise the error if called.
74
75 Raises:
76 NotImplemented exception
77 '''
78
79 raise NotImplemented
80
81 def shutdown(self, block=False):
82 ''' Shutdown the ThreadPool
83 This method is notImplemented for threads and will raise the error if called.
84 The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_
85
86 Kwargs:
87 - block (Bool): To block for confirmations or not
88
89 '''
90 x = self.executor.shutdown(wait=block)
91 logger.debug("Done with executor shutdown")
92 return x
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/app/app.py b/parsl/app/app.py
--- a/parsl/app/app.py
+++ b/parsl/app/app.py
@@ -60,7 +60,7 @@
def __call__(self, *args, **kwargs):
''' The __call__ function must be implemented in the subclasses
'''
- raise NotImplemented
+ raise NotImplementedError
def app_wrapper(func):
diff --git a/parsl/executors/threads.py b/parsl/executors/threads.py
--- a/parsl/executors/threads.py
+++ b/parsl/executors/threads.py
@@ -66,7 +66,7 @@
NotImplemented exception
'''
- raise NotImplemented
+ raise NotImplementedError
def scale_in(self, workers=1):
''' Scale in the number of active workers by 1
@@ -76,7 +76,7 @@
NotImplemented exception
'''
- raise NotImplemented
+ raise NotImplementedError
def shutdown(self, block=False):
''' Shutdown the ThreadPool
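The patch keeps the raise-at-call-time pattern and only corrects the exception class. An alternative design, sketched below with an invented class name rather than Parsl's actual base class, is to declare the methods abstract so that an incomplete subclass cannot even be instantiated:

```python
# Alternative sketch (not what the patch does): abc.abstractmethod moves the
# failure from call time to instantiation time.
import abc

class ScalableExecutor(abc.ABC):            # invented name for illustration
    @abc.abstractmethod
    def scale_out(self, workers: int = 1) -> None:
        """Add workers to the pool."""

    @abc.abstractmethod
    def scale_in(self, workers: int = 1) -> None:
        """Remove workers from the pool."""
```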
| {"golden_diff": "diff --git a/parsl/app/app.py b/parsl/app/app.py\n--- a/parsl/app/app.py\n+++ b/parsl/app/app.py\n@@ -60,7 +60,7 @@\n def __call__(self, *args, **kwargs):\n ''' The __call__ function must be implemented in the subclasses\n '''\n- raise NotImplemented\n+ raise NotImplementedError\n \n \n def app_wrapper(func):\ndiff --git a/parsl/executors/threads.py b/parsl/executors/threads.py\n--- a/parsl/executors/threads.py\n+++ b/parsl/executors/threads.py\n@@ -66,7 +66,7 @@\n NotImplemented exception\n '''\n \n- raise NotImplemented\n+ raise NotImplementedError\n \n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n@@ -76,7 +76,7 @@\n NotImplemented exception\n '''\n \n- raise NotImplemented\n+ raise NotImplementedError\n \n def shutdown(self, block=False):\n ''' Shutdown the ThreadPool\n", "issue": "Do not raise `NotImplemented`\nIn a few places we raise `NotImplemented`, which itself raises a `SyntaxError`; this should be `NotImplementedError`.\n", "before_files": [{"content": "'''\nParsl Apps\n==========\n\nHere lies the definitions for the @App decorator and the APP classes.\nThe APP class encapsulates a generic leaf task that can be executed asynchronously.\n\n'''\nimport logging\nfrom inspect import signature, Parameter\n\n# Logging moved here in the PEP8 conformance fixes.\nlogger = logging.getLogger(__name__)\n\n\nclass AppBase (object):\n \"\"\"\n This is the base class that defines the two external facing functions that an App must define.\n The __init__ () which is called when the interpretor sees the definition of the decorated\n function, and the __call__ () which is invoked when a decorated function is called by the user.\n\n \"\"\"\n\n def __init__(self, func, executor, walltime=60, sites='all', cache=False, exec_type=\"bash\"):\n ''' Constructor for the APP object.\n\n Args:\n - func (function): Takes the function to be made into an App\n - executor (executor): Executor for the execution resource\n\n Kwargs:\n - walltime (int) : Walltime in seconds for the app execution\n - sites (str|list) : List of site names that this app could execute over. 
default is 'all'\n - exec_type (string) : App type (bash|python)\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - APP object.\n\n '''\n self.__name__ = func.__name__\n self.func = func\n self.executor = executor\n self.exec_type = exec_type\n self.status = 'created'\n self.sites = sites\n self.cache = cache\n\n sig = signature(func)\n self.kwargs = {}\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n self.stdout = sig.parameters['stdout'].default if 'stdout' in sig.parameters else None\n self.stderr = sig.parameters['stderr'].default if 'stderr' in sig.parameters else None\n self.inputs = sig.parameters['inputs'].default if 'inputs' in sig.parameters else []\n self.outputs = sig.parameters['outputs'].default if 'outputs' in sig.parameters else []\n\n def __call__(self, *args, **kwargs):\n ''' The __call__ function must be implemented in the subclasses\n '''\n raise NotImplemented\n\n\ndef app_wrapper(func):\n\n def wrapper(*args, **kwargs):\n logger.debug(\"App wrapper begins\")\n x = func(*args, **kwargs)\n logger.debug(\"App wrapper ends\")\n return x\n\n return wrapper\n\n\ndef App(apptype, executor, walltime=60, cache=False, sites='all'):\n ''' The App decorator function\n\n Args:\n - apptype (string) : Apptype can be bash|python\n - executor (Executor) : Executor object wrapping threads/process pools etc.\n\n Kwargs:\n - walltime (int) : Walltime for app in seconds,\n default=60\n - sites (str|List) : List of site names on which the app could execute\n default='all'\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n An AppFactory object, which when called runs the apps through the executor.\n '''\n\n from parsl import APP_FACTORY_FACTORY\n\n def Exec(f):\n return APP_FACTORY_FACTORY.make(apptype, executor, f,\n sites=sites,\n cache=cache,\n walltime=walltime)\n\n return Exec\n", "path": "parsl/app/app.py"}, {"content": "import logging\nimport sys\nimport concurrent.futures as cf\nfrom parsl.executors.base import ParslExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass ThreadPoolExecutor(ParslExecutor):\n ''' The thread pool executor\n '''\n\n def __init__(self, max_workers=2, thread_name_prefix='',\n execution_provider=None, config=None, **kwargs):\n ''' Initialize the thread pool\n Config options that are really used are :\n\n config.sites.site.execution.options = {\"maxThreads\" : <int>,\n \"threadNamePrefix\" : <string>}\n\n Kwargs:\n - max_workers (int) : Number of threads (Default=2) (keeping name workers/threads for backward compatibility)\n - thread_name_prefix (string) : Thread name prefix (Only supported in python v3.6+\n - execution_provider (ep object) : This is ignored here\n - config (dict): The config dict object for the site:\n\n\n '''\n\n self._scaling_enabled = False\n if not config:\n config = {\"execution\": {}}\n if \"maxThreads\" not in config[\"execution\"]:\n config[\"execution\"][\"maxThreads\"] = max_workers\n if \"threadNamePrefix\" not in config[\"execution\"]:\n config[\"execution\"][\"threadNamePrefix\"] = thread_name_prefix\n\n self.config = config\n\n if sys.version_info > (3, 6):\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"],\n thread_name_prefix=config[\"execution\"][\"threadNamePrefix\"])\n else:\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"])\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def submit(self, 
*args, **kwargs):\n ''' Submits work to the thread pool\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Returns:\n Future\n '''\n\n return self.executor.submit(*args, **kwargs)\n\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplemented\n\n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplemented\n\n def shutdown(self, block=False):\n ''' Shutdown the ThreadPool\n This method is notImplemented for threads and will raise the error if called.\n The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_\n\n Kwargs:\n - block (Bool): To block for confirmations or not\n\n '''\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n", "path": "parsl/executors/threads.py"}], "after_files": [{"content": "'''\nParsl Apps\n==========\n\nHere lies the definitions for the @App decorator and the APP classes.\nThe APP class encapsulates a generic leaf task that can be executed asynchronously.\n\n'''\nimport logging\nfrom inspect import signature, Parameter\n\n# Logging moved here in the PEP8 conformance fixes.\nlogger = logging.getLogger(__name__)\n\n\nclass AppBase (object):\n \"\"\"\n This is the base class that defines the two external facing functions that an App must define.\n The __init__ () which is called when the interpretor sees the definition of the decorated\n function, and the __call__ () which is invoked when a decorated function is called by the user.\n\n \"\"\"\n\n def __init__(self, func, executor, walltime=60, sites='all', cache=False, exec_type=\"bash\"):\n ''' Constructor for the APP object.\n\n Args:\n - func (function): Takes the function to be made into an App\n - executor (executor): Executor for the execution resource\n\n Kwargs:\n - walltime (int) : Walltime in seconds for the app execution\n - sites (str|list) : List of site names that this app could execute over. 
default is 'all'\n - exec_type (string) : App type (bash|python)\n - cache (Bool) : Enable caching of this app ?\n\n Returns:\n - APP object.\n\n '''\n self.__name__ = func.__name__\n self.func = func\n self.executor = executor\n self.exec_type = exec_type\n self.status = 'created'\n self.sites = sites\n self.cache = cache\n\n sig = signature(func)\n self.kwargs = {}\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n self.stdout = sig.parameters['stdout'].default if 'stdout' in sig.parameters else None\n self.stderr = sig.parameters['stderr'].default if 'stderr' in sig.parameters else None\n self.inputs = sig.parameters['inputs'].default if 'inputs' in sig.parameters else []\n self.outputs = sig.parameters['outputs'].default if 'outputs' in sig.parameters else []\n\n def __call__(self, *args, **kwargs):\n ''' The __call__ function must be implemented in the subclasses\n '''\n raise NotImplementedError\n\n\ndef app_wrapper(func):\n\n def wrapper(*args, **kwargs):\n logger.debug(\"App wrapper begins\")\n x = func(*args, **kwargs)\n logger.debug(\"App wrapper ends\")\n return x\n\n return wrapper\n\n\ndef App(apptype, executor, walltime=60, cache=False, sites='all'):\n ''' The App decorator function\n\n Args:\n - apptype (string) : Apptype can be bash|python\n - executor (Executor) : Executor object wrapping threads/process pools etc.\n\n Kwargs:\n - walltime (int) : Walltime for app in seconds,\n default=60\n - sites (str|List) : List of site names on which the app could execute\n default='all'\n - cache (Bool) : Enable caching of the app call\n default=False\n\n Returns:\n An AppFactory object, which when called runs the apps through the executor.\n '''\n\n from parsl import APP_FACTORY_FACTORY\n\n def Exec(f):\n return APP_FACTORY_FACTORY.make(apptype, executor, f,\n sites=sites,\n cache=cache,\n walltime=walltime)\n\n return Exec\n", "path": "parsl/app/app.py"}, {"content": "import logging\nimport sys\nimport concurrent.futures as cf\nfrom parsl.executors.base import ParslExecutor\n\nlogger = logging.getLogger(__name__)\n\n\nclass ThreadPoolExecutor(ParslExecutor):\n ''' The thread pool executor\n '''\n\n def __init__(self, max_workers=2, thread_name_prefix='',\n execution_provider=None, config=None, **kwargs):\n ''' Initialize the thread pool\n Config options that are really used are :\n\n config.sites.site.execution.options = {\"maxThreads\" : <int>,\n \"threadNamePrefix\" : <string>}\n\n Kwargs:\n - max_workers (int) : Number of threads (Default=2) (keeping name workers/threads for backward compatibility)\n - thread_name_prefix (string) : Thread name prefix (Only supported in python v3.6+\n - execution_provider (ep object) : This is ignored here\n - config (dict): The config dict object for the site:\n\n\n '''\n\n self._scaling_enabled = False\n if not config:\n config = {\"execution\": {}}\n if \"maxThreads\" not in config[\"execution\"]:\n config[\"execution\"][\"maxThreads\"] = max_workers\n if \"threadNamePrefix\" not in config[\"execution\"]:\n config[\"execution\"][\"threadNamePrefix\"] = thread_name_prefix\n\n self.config = config\n\n if sys.version_info > (3, 6):\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"],\n thread_name_prefix=config[\"execution\"][\"threadNamePrefix\"])\n else:\n self.executor = cf.ThreadPoolExecutor(max_workers=config[\"execution\"][\"maxThreads\"])\n\n @property\n def scaling_enabled(self):\n return self._scaling_enabled\n\n def 
submit(self, *args, **kwargs):\n ''' Submits work to the thread pool\n This method is simply pass through and behaves like a submit call as described\n here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_\n\n Returns:\n Future\n '''\n\n return self.executor.submit(*args, **kwargs)\n\n def scale_out(self, workers=1):\n ''' Scales out the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n def scale_in(self, workers=1):\n ''' Scale in the number of active workers by 1\n This method is notImplemented for threads and will raise the error if called.\n\n Raises:\n NotImplemented exception\n '''\n\n raise NotImplementedError\n\n def shutdown(self, block=False):\n ''' Shutdown the ThreadPool\n This method is notImplemented for threads and will raise the error if called.\n The interface documentation for IPP is `here <http://ipyparallel.readthedocs.io/en/latest/api/ipyparallel.html#ipyparallel.Client.shutdown>`_\n\n Kwargs:\n - block (Bool): To block for confirmations or not\n\n '''\n x = self.executor.shutdown(wait=block)\n logger.debug(\"Done with executor shutdown\")\n return x\n", "path": "parsl/executors/threads.py"}]} | 2,139 | 232 |
gh_patches_debug_32571 | rasdani/github-patches | git_diff | encode__httpx-1138 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document the exception hierarchy
We've put a stack of great work into this for 0.14 - let's also show it off.
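As a rough illustration (assuming the 0.14 classes shown below are re-exported at the package level), the hierarchy is meant to let callers branch like this:

```python
import httpx

try:
    response = httpx.get("https://www.example.com")
    response.raise_for_status()
except httpx.RequestError as exc:
    # Anything that went wrong while sending: transport, decoding, redirect errors.
    print(f"An error occurred while requesting {exc.request.url!r}.")
except httpx.HTTPStatusError as exc:
    # Raised by raise_for_status() for 4xx/5xx responses.
    print(f"Error response {exc.response.status_code} for {exc.request.url!r}.")
```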
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_exceptions.py`
Content:
```
1 """
2 Our exception hierarchy:
3
4 * HTTPError
5 x RequestError
6 + TransportError
7 - TimeoutException
8 · ConnectTimeout
9 · ReadTimeout
10 · WriteTimeout
11 · PoolTimeout
12 - NetworkError
13 · ConnectError
14 · ReadError
15 · WriteError
16 · CloseError
17 - ProtocolError
18 · LocalProtocolError
19 · RemoteProtocolError
20 - ProxyError
21 - UnsupportedProtocol
22 + DecodingError
23 + TooManyRedirects
24 + RequestBodyUnavailable
25 x HTTPStatusError
26 * NotRedirectResponse
27 * CookieConflict
28 * StreamError
29 x StreamConsumed
30 x ResponseNotRead
31 x RequestNotRead
32 x ResponseClosed
33 """
34 import contextlib
35 import typing
36
37 import httpcore
38
39 if typing.TYPE_CHECKING:
40 from ._models import Request, Response # pragma: nocover
41
42
43 class HTTPError(Exception):
44 """
45 Base class for `RequestError` and `HTTPStatusError`.
46
47 Useful for `try...except` blocks when issuing a request,
48 and then calling .raise_for_status().
49
50 For example:
51
52 try:
53 response = httpx.get("https://www.example.com")
54 response.raise_for_status()
55 except httpx.HTTPError as exc:
56 print(f"HTTP Exception for {exc.request.url} - {exc.message}")
57 """
58
59 def __init__(self, message: str, *, request: "Request") -> None:
60 super().__init__(message)
61 self.request = request
62
63
64 class RequestError(HTTPError):
65 """
66 Base class for all exceptions that may occur when issuing a `.request()`.
67 """
68
69 def __init__(self, message: str, *, request: "Request") -> None:
70 super().__init__(message, request=request)
71
72
73 class TransportError(RequestError):
74 """
75 Base class for all exceptions that are mapped from the httpcore API.
76 """
77
78
79 # Timeout exceptions...
80
81
82 class TimeoutException(TransportError):
83 """
84 The base class for timeout errors.
85
86 An operation has timed out.
87 """
88
89
90 class ConnectTimeout(TimeoutException):
91 """
92 Timed out while connecting to the host.
93 """
94
95
96 class ReadTimeout(TimeoutException):
97 """
98 Timed out while receiving data from the host.
99 """
100
101
102 class WriteTimeout(TimeoutException):
103 """
104 Timed out while sending data to the host.
105 """
106
107
108 class PoolTimeout(TimeoutException):
109 """
110 Timed out waiting to acquire a connection from the pool.
111 """
112
113
114 # Core networking exceptions...
115
116
117 class NetworkError(TransportError):
118 """
119 The base class for network-related errors.
120
121 An error occurred while interacting with the network.
122 """
123
124
125 class ReadError(NetworkError):
126 """
127 Failed to receive data from the network.
128 """
129
130
131 class WriteError(NetworkError):
132 """
133 Failed to send data through the network.
134 """
135
136
137 class ConnectError(NetworkError):
138 """
139 Failed to establish a connection.
140 """
141
142
143 class CloseError(NetworkError):
144 """
145 Failed to close a connection.
146 """
147
148
149 # Other transport exceptions...
150
151
152 class ProxyError(TransportError):
153 """
154 An error occurred while proxying a request.
155 """
156
157
158 class UnsupportedProtocol(TransportError):
159 """
160 Attempted to make a request to an unsupported protocol.
161
162 For example issuing a request to `ftp://www.example.com`.
163 """
164
165
166 class ProtocolError(TransportError):
167 """
168 The protocol was violated.
169 """
170
171
172 class LocalProtocolError(ProtocolError):
173 """
174 A protocol was violated by the client.
175
176 For example if the user instantiated a `Request` instance explicitly,
177 failed to include the mandatory `Host:` header, and then issued it directly
178 using `client.send()`.
179 """
180
181
182 class RemoteProtocolError(ProtocolError):
183 """
184 The protocol was violated by the server.
185
186 For exaample, returning malformed HTTP.
187 """
188
189
190 # Other request exceptions...
191
192
193 class DecodingError(RequestError):
194 """
195 Decoding of the response failed.
196 """
197
198
199 class TooManyRedirects(RequestError):
200 """
201 Too many redirects.
202 """
203
204
205 class RequestBodyUnavailable(RequestError):
206 """
207 Had to send the request again, but the request body was streaming, and is
208 no longer available.
209 """
210
211
212 # Client errors
213
214
215 class HTTPStatusError(HTTPError):
216 """
217 Response sent an error HTTP status.
218
219 May be raised when calling `response.raise_for_status()`
220 """
221
222 def __init__(
223 self, message: str, *, request: "Request", response: "Response"
224 ) -> None:
225 super().__init__(message, request=request)
226 self.response = response
227
228
229 class NotRedirectResponse(Exception):
230 """
231 Response was not a redirect response.
232
233 May be raised if `response.next()` is called without first
234 properly checking `response.is_redirect`.
235 """
236
237 def __init__(self, message: str) -> None:
238 super().__init__(message)
239
240
241 class CookieConflict(Exception):
242 """
243 Attempted to lookup a cookie by name, but multiple cookies existed.
244
245 Can occur when calling `response.cookies.get(...)`.
246 """
247
248 def __init__(self, message: str) -> None:
249 super().__init__(message)
250
251
252 # Stream exceptions...
253
254 # These may occur as the result of a programming error, by accessing
255 # the request/response stream in an invalid manner.
256
257
258 class StreamError(Exception):
259 """
260 The base class for stream exceptions.
261
262 The developer made an error in accessing the request stream in
263 an invalid way.
264 """
265
266 def __init__(self, message: str) -> None:
267 super().__init__(message)
268
269
270 class StreamConsumed(StreamError):
271 """
272 Attempted to read or stream response content, but the content has already
273 been streamed.
274 """
275
276 def __init__(self) -> None:
277 message = (
278 "Attempted to read or stream response content, but the content has "
279 "already been streamed."
280 )
281 super().__init__(message)
282
283
284 class ResponseNotRead(StreamError):
285 """
286 Attempted to access response content, without having called `read()`
287 after a streaming response.
288 """
289
290 def __init__(self) -> None:
291 message = (
292 "Attempted to access response content, without having called `read()` "
293 "after a streaming response."
294 )
295 super().__init__(message)
296
297
298 class RequestNotRead(StreamError):
299 """
300 Attempted to access request content, without having called `read()`.
301 """
302
303 def __init__(self) -> None:
304 message = "Attempted to access request content, without having called `read()`."
305 super().__init__(message)
306
307
308 class ResponseClosed(StreamError):
309 """
310 Attempted to read or stream response content, but the request has been
311 closed.
312 """
313
314 def __init__(self) -> None:
315 message = (
316 "Attempted to read or stream response content, but the request has "
317 "been closed."
318 )
319 super().__init__(message)
320
321
322 # The `InvalidURL` class is no longer required. It was being used to enforce only
323 # 'http'/'https' URLs being requested, but is now treated instead at the
324 # transport layer using `UnsupportedProtocol()`.`
325
326 # We are currently still exposing this class, but it will be removed in 1.0.
327 InvalidURL = UnsupportedProtocol
328
329
330 @contextlib.contextmanager
331 def map_exceptions(
332 mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],
333 **kwargs: typing.Any,
334 ) -> typing.Iterator[None]:
335 try:
336 yield
337 except Exception as exc:
338 mapped_exc = None
339
340 for from_exc, to_exc in mapping.items():
341 if not isinstance(exc, from_exc):
342 continue
343 # We want to map to the most specific exception we can find.
344 # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to
345 # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.
346 if mapped_exc is None or issubclass(to_exc, mapped_exc):
347 mapped_exc = to_exc
348
349 if mapped_exc is None:
350 raise
351
352 message = str(exc)
353 raise mapped_exc(message, **kwargs) from None # type: ignore
354
355
356 HTTPCORE_EXC_MAP = {
357 httpcore.TimeoutException: TimeoutException,
358 httpcore.ConnectTimeout: ConnectTimeout,
359 httpcore.ReadTimeout: ReadTimeout,
360 httpcore.WriteTimeout: WriteTimeout,
361 httpcore.PoolTimeout: PoolTimeout,
362 httpcore.NetworkError: NetworkError,
363 httpcore.ConnectError: ConnectError,
364 httpcore.ReadError: ReadError,
365 httpcore.WriteError: WriteError,
366 httpcore.CloseError: CloseError,
367 httpcore.ProxyError: ProxyError,
368 httpcore.UnsupportedProtocol: UnsupportedProtocol,
369 httpcore.ProtocolError: ProtocolError,
370 httpcore.LocalProtocolError: LocalProtocolError,
371 httpcore.RemoteProtocolError: RemoteProtocolError,
372 }
373
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py
--- a/httpx/_exceptions.py
+++ b/httpx/_exceptions.py
@@ -45,15 +45,17 @@
Base class for `RequestError` and `HTTPStatusError`.
Useful for `try...except` blocks when issuing a request,
- and then calling .raise_for_status().
+ and then calling `.raise_for_status()`.
For example:
+ ```
try:
response = httpx.get("https://www.example.com")
response.raise_for_status()
except httpx.HTTPError as exc:
print(f"HTTP Exception for {exc.request.url} - {exc.message}")
+ ```
"""
def __init__(self, message: str, *, request: "Request") -> None:
@@ -72,7 +74,9 @@
class TransportError(RequestError):
"""
- Base class for all exceptions that are mapped from the httpcore API.
+ Base class for all exceptions that occur at the level of the Transport API.
+
+ All of these exceptions also have an equivelent mapping in `httpcore`.
"""
@@ -151,7 +155,7 @@
class ProxyError(TransportError):
"""
- An error occurred while proxying a request.
+ An error occurred while establishing a proxy connection.
"""
@@ -192,7 +196,7 @@
class DecodingError(RequestError):
"""
- Decoding of the response failed.
+ Decoding of the response failed, due to a malformed encoding.
"""
@@ -214,7 +218,7 @@
class HTTPStatusError(HTTPError):
"""
- Response sent an error HTTP status.
+ The response had an error HTTP status of 4xx or 5xx.
May be raised when calling `response.raise_for_status()`
"""
| {"golden_diff": "diff --git a/httpx/_exceptions.py b/httpx/_exceptions.py\n--- a/httpx/_exceptions.py\n+++ b/httpx/_exceptions.py\n@@ -45,15 +45,17 @@\n Base class for `RequestError` and `HTTPStatusError`.\n \n Useful for `try...except` blocks when issuing a request,\n- and then calling .raise_for_status().\n+ and then calling `.raise_for_status()`.\n \n For example:\n \n+ ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n+ ```\n \"\"\"\n \n def __init__(self, message: str, *, request: \"Request\") -> None:\n@@ -72,7 +74,9 @@\n \n class TransportError(RequestError):\n \"\"\"\n- Base class for all exceptions that are mapped from the httpcore API.\n+ Base class for all exceptions that occur at the level of the Transport API.\n+\n+ All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n \n \n@@ -151,7 +155,7 @@\n \n class ProxyError(TransportError):\n \"\"\"\n- An error occurred while proxying a request.\n+ An error occurred while establishing a proxy connection.\n \"\"\"\n \n \n@@ -192,7 +196,7 @@\n \n class DecodingError(RequestError):\n \"\"\"\n- Decoding of the response failed.\n+ Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n \n \n@@ -214,7 +218,7 @@\n \n class HTTPStatusError(HTTPError):\n \"\"\"\n- Response sent an error HTTP status.\n+ The response had an error HTTP status of 4xx or 5xx.\n \n May be raised when calling `response.raise_for_status()`\n \"\"\"\n", "issue": "Document the exception hierarchy \nWe've put a stack of great work into this for 0.14 - let's also show it off.\n", "before_files": [{"content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n \u00b7 ConnectTimeout\n \u00b7 ReadTimeout\n \u00b7 WriteTimeout\n \u00b7 PoolTimeout\n - NetworkError\n \u00b7 ConnectError\n \u00b7 ReadError\n \u00b7 WriteError\n \u00b7 CloseError\n - ProtocolError\n \u00b7 LocalProtocolError\n \u00b7 RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling .raise_for_status().\n\n For example:\n\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that are mapped from the httpcore API.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n 
An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while proxying a request.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n Response sent an error HTTP status.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n 
super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\n# The `InvalidURL` class is no longer required. It was being used to enforce only\n# 'http'/'https' URLs being requested, but is now treated instead at the\n# transport layer using `UnsupportedProtocol()`.`\n\n# We are currently still exposing this class, but it will be removed in 1.0.\nInvalidURL = UnsupportedProtocol\n\n\[email protected]\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: typing.Any,\n) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from None # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py"}], "after_files": [{"content": "\"\"\"\nOur exception hierarchy:\n\n* HTTPError\n x RequestError\n + TransportError\n - TimeoutException\n \u00b7 ConnectTimeout\n \u00b7 ReadTimeout\n \u00b7 WriteTimeout\n \u00b7 PoolTimeout\n - NetworkError\n \u00b7 ConnectError\n \u00b7 ReadError\n \u00b7 WriteError\n \u00b7 CloseError\n - ProtocolError\n \u00b7 LocalProtocolError\n \u00b7 RemoteProtocolError\n - ProxyError\n - UnsupportedProtocol\n + DecodingError\n + TooManyRedirects\n + RequestBodyUnavailable\n x HTTPStatusError\n* 
NotRedirectResponse\n* CookieConflict\n* StreamError\n x StreamConsumed\n x ResponseNotRead\n x RequestNotRead\n x ResponseClosed\n\"\"\"\nimport contextlib\nimport typing\n\nimport httpcore\n\nif typing.TYPE_CHECKING:\n from ._models import Request, Response # pragma: nocover\n\n\nclass HTTPError(Exception):\n \"\"\"\n Base class for `RequestError` and `HTTPStatusError`.\n\n Useful for `try...except` blocks when issuing a request,\n and then calling `.raise_for_status()`.\n\n For example:\n\n ```\n try:\n response = httpx.get(\"https://www.example.com\")\n response.raise_for_status()\n except httpx.HTTPError as exc:\n print(f\"HTTP Exception for {exc.request.url} - {exc.message}\")\n ```\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message)\n self.request = request\n\n\nclass RequestError(HTTPError):\n \"\"\"\n Base class for all exceptions that may occur when issuing a `.request()`.\n \"\"\"\n\n def __init__(self, message: str, *, request: \"Request\") -> None:\n super().__init__(message, request=request)\n\n\nclass TransportError(RequestError):\n \"\"\"\n Base class for all exceptions that occur at the level of the Transport API.\n\n All of these exceptions also have an equivelent mapping in `httpcore`.\n \"\"\"\n\n\n# Timeout exceptions...\n\n\nclass TimeoutException(TransportError):\n \"\"\"\n The base class for timeout errors.\n\n An operation has timed out.\n \"\"\"\n\n\nclass ConnectTimeout(TimeoutException):\n \"\"\"\n Timed out while connecting to the host.\n \"\"\"\n\n\nclass ReadTimeout(TimeoutException):\n \"\"\"\n Timed out while receiving data from the host.\n \"\"\"\n\n\nclass WriteTimeout(TimeoutException):\n \"\"\"\n Timed out while sending data to the host.\n \"\"\"\n\n\nclass PoolTimeout(TimeoutException):\n \"\"\"\n Timed out waiting to acquire a connection from the pool.\n \"\"\"\n\n\n# Core networking exceptions...\n\n\nclass NetworkError(TransportError):\n \"\"\"\n The base class for network-related errors.\n\n An error occurred while interacting with the network.\n \"\"\"\n\n\nclass ReadError(NetworkError):\n \"\"\"\n Failed to receive data from the network.\n \"\"\"\n\n\nclass WriteError(NetworkError):\n \"\"\"\n Failed to send data through the network.\n \"\"\"\n\n\nclass ConnectError(NetworkError):\n \"\"\"\n Failed to establish a connection.\n \"\"\"\n\n\nclass CloseError(NetworkError):\n \"\"\"\n Failed to close a connection.\n \"\"\"\n\n\n# Other transport exceptions...\n\n\nclass ProxyError(TransportError):\n \"\"\"\n An error occurred while establishing a proxy connection.\n \"\"\"\n\n\nclass UnsupportedProtocol(TransportError):\n \"\"\"\n Attempted to make a request to an unsupported protocol.\n\n For example issuing a request to `ftp://www.example.com`.\n \"\"\"\n\n\nclass ProtocolError(TransportError):\n \"\"\"\n The protocol was violated.\n \"\"\"\n\n\nclass LocalProtocolError(ProtocolError):\n \"\"\"\n A protocol was violated by the client.\n\n For example if the user instantiated a `Request` instance explicitly,\n failed to include the mandatory `Host:` header, and then issued it directly\n using `client.send()`.\n \"\"\"\n\n\nclass RemoteProtocolError(ProtocolError):\n \"\"\"\n The protocol was violated by the server.\n\n For exaample, returning malformed HTTP.\n \"\"\"\n\n\n# Other request exceptions...\n\n\nclass DecodingError(RequestError):\n \"\"\"\n Decoding of the response failed, due to a malformed encoding.\n \"\"\"\n\n\nclass TooManyRedirects(RequestError):\n \"\"\"\n Too many 
redirects.\n \"\"\"\n\n\nclass RequestBodyUnavailable(RequestError):\n \"\"\"\n Had to send the request again, but the request body was streaming, and is\n no longer available.\n \"\"\"\n\n\n# Client errors\n\n\nclass HTTPStatusError(HTTPError):\n \"\"\"\n The response had an error HTTP status of 4xx or 5xx.\n\n May be raised when calling `response.raise_for_status()`\n \"\"\"\n\n def __init__(\n self, message: str, *, request: \"Request\", response: \"Response\"\n ) -> None:\n super().__init__(message, request=request)\n self.response = response\n\n\nclass NotRedirectResponse(Exception):\n \"\"\"\n Response was not a redirect response.\n\n May be raised if `response.next()` is called without first\n properly checking `response.is_redirect`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass CookieConflict(Exception):\n \"\"\"\n Attempted to lookup a cookie by name, but multiple cookies existed.\n\n Can occur when calling `response.cookies.get(...)`.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\n# Stream exceptions...\n\n# These may occur as the result of a programming error, by accessing\n# the request/response stream in an invalid manner.\n\n\nclass StreamError(Exception):\n \"\"\"\n The base class for stream exceptions.\n\n The developer made an error in accessing the request stream in\n an invalid way.\n \"\"\"\n\n def __init__(self, message: str) -> None:\n super().__init__(message)\n\n\nclass StreamConsumed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the content has already\n been streamed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the content has \"\n \"already been streamed.\"\n )\n super().__init__(message)\n\n\nclass ResponseNotRead(StreamError):\n \"\"\"\n Attempted to access response content, without having called `read()`\n after a streaming response.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to access response content, without having called `read()` \"\n \"after a streaming response.\"\n )\n super().__init__(message)\n\n\nclass RequestNotRead(StreamError):\n \"\"\"\n Attempted to access request content, without having called `read()`.\n \"\"\"\n\n def __init__(self) -> None:\n message = \"Attempted to access request content, without having called `read()`.\"\n super().__init__(message)\n\n\nclass ResponseClosed(StreamError):\n \"\"\"\n Attempted to read or stream response content, but the request has been\n closed.\n \"\"\"\n\n def __init__(self) -> None:\n message = (\n \"Attempted to read or stream response content, but the request has \"\n \"been closed.\"\n )\n super().__init__(message)\n\n\n# The `InvalidURL` class is no longer required. 
It was being used to enforce only\n# 'http'/'https' URLs being requested, but is now treated instead at the\n# transport layer using `UnsupportedProtocol()`.`\n\n# We are currently still exposing this class, but it will be removed in 1.0.\nInvalidURL = UnsupportedProtocol\n\n\[email protected]\ndef map_exceptions(\n mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],\n **kwargs: typing.Any,\n) -> typing.Iterator[None]:\n try:\n yield\n except Exception as exc:\n mapped_exc = None\n\n for from_exc, to_exc in mapping.items():\n if not isinstance(exc, from_exc):\n continue\n # We want to map to the most specific exception we can find.\n # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to\n # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.\n if mapped_exc is None or issubclass(to_exc, mapped_exc):\n mapped_exc = to_exc\n\n if mapped_exc is None:\n raise\n\n message = str(exc)\n raise mapped_exc(message, **kwargs) from None # type: ignore\n\n\nHTTPCORE_EXC_MAP = {\n httpcore.TimeoutException: TimeoutException,\n httpcore.ConnectTimeout: ConnectTimeout,\n httpcore.ReadTimeout: ReadTimeout,\n httpcore.WriteTimeout: WriteTimeout,\n httpcore.PoolTimeout: PoolTimeout,\n httpcore.NetworkError: NetworkError,\n httpcore.ConnectError: ConnectError,\n httpcore.ReadError: ReadError,\n httpcore.WriteError: WriteError,\n httpcore.CloseError: CloseError,\n httpcore.ProxyError: ProxyError,\n httpcore.UnsupportedProtocol: UnsupportedProtocol,\n httpcore.ProtocolError: ProtocolError,\n httpcore.LocalProtocolError: LocalProtocolError,\n httpcore.RemoteProtocolError: RemoteProtocolError,\n}\n", "path": "httpx/_exceptions.py"}]} | 3,258 | 422 |
gh_patches_debug_15149 | rasdani/github-patches | git_diff | apache__airflow-29518 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KubernetesExecutor leaves failed pods due to deepcopy issue with Google providers
### Apache Airflow version
Other Airflow 2 version (please specify below)
### What happened
With Airflow 2.3 and 2.4 there appears to be a bug in the KubernetesExecutor when used in conjunction with the Google airflow providers. This bug does not affect Airflow 2.2 due to the pip version requirements.
The bug specifically presents itself when using nearly any Google provider operator. During the pod lifecycle, all is well until the executor in the pod starts to clean up following a successful run. Airflow itself still sees the task marked as a success, but in Kubernetes, while the task is finishing up after reporting status, it actually crashes and puts the pod into a Failed state silently:
```
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 8, in <module>
sys.exit(main())
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py", line 39, in main
args.func(args)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py", line 52, in command
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py", line 103, in wrapper
return f(*args, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 382, in task_run
_run_task_by_selected_method(args, dag, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 189, in _run_task_by_selected_method
_run_task_by_local_task_job(args, ti)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py", line 247, in _run_task_by_local_task_job
run_job.run()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py", line 247, in run
self._execute()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 137, in _execute
self.handle_task_exit(return_code)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 168, in handle_task_exit
self._run_mini_scheduler_on_child_tasks()
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py", line 75, in wrapper
return func(*args, session=session, **kwargs)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py", line 253, in _run_mini_scheduler_on_child_tasks
partial_dag = task.dag.partial_subset(
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2188, in partial_subset
dag.task_dict = {
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2189, in <dictcomp>
t.task_id: _deepcopy_task(t)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py", line 2186, in _deepcopy_task
return copy.deepcopy(t, memo)
File "/usr/local/lib/python3.9/copy.py", line 153, in deepcopy
y = copier(memo)
File "/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py", line 1163, in __deepcopy__
setattr(result, k, copy.deepcopy(v, memo))
File "/usr/local/lib/python3.9/copy.py", line 172, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/lib/python3.9/copy.py", line 264, in _reconstruct
y = func(*args)
File "/usr/local/lib/python3.9/enum.py", line 384, in __call__
return cls.__new__(cls, value)
File "/usr/local/lib/python3.9/enum.py", line 702, in __new__
raise ve_exc
ValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault
```
Based on a quick look, it appears to be related to the default argument that Google uses in its operators, which happens to be an Enum and causes the deepcopy at the end of the task to fail.
Example operator that is affected: https://github.com/apache/airflow/blob/403ed7163f3431deb7fc21108e1743385e139907/airflow/providers/google/cloud/hooks/dataproc.py#L753
Reference to the Google Python API core which has the Enum causing the problem: https://github.com/googleapis/python-api-core/blob/main/google/api_core/gapic_v1/method.py#L31
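For illustration, a minimal self-contained sketch (the `_MethodDefault` stand-in mirrors the linked `google-api-core` code, and the surrounding names are made up) of why the deepcopy fails on Python 3.9 and how pre-seeding deepcopy's `memo` dict with the sentinel sidesteps it:

```python
import copy
import enum


class _MethodDefault(enum.Enum):
    # Simplified stand-in for google.api_core.gapic_v1.method._MethodDefault:
    # a single member whose value is a bare object() sentinel.
    _DEFAULT_VALUE = object()


DEFAULT = _MethodDefault._DEFAULT_VALUE
operator_kwargs = {"retry": DEFAULT, "timeout": 60.0}

# On Python 3.9 deepcopy rebuilds the enum member as _MethodDefault(<copied object()>),
# and the by-value lookup of the freshly copied object() raises the ValueError above.
try:
    copy.deepcopy(operator_kwargs)
except ValueError as exc:
    print(f"deepcopy failed: {exc}")

# Mapping the sentinel to itself in deepcopy's memo makes deepcopy reuse the very same
# DEFAULT object instead of trying to reconstruct it, so the copy succeeds.
memo = {id(DEFAULT): DEFAULT}
copied = copy.deepcopy(operator_kwargs, memo)
assert copied["retry"] is DEFAULT
```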
### What you think should happen instead
Kubernetes pods should succeed, be marked as `Completed`, and then be gracefully terminated.
### How to reproduce
Use any `apache-airflow-providers-google` >= 7.0.0 which includes `google-api-core` >= 2.2.2. Run a DAG with a task which uses any of the Google operators which have `_MethodDefault` as a default argument.
### Operating System
Debian GNU/Linux 11 (bullseye)
### Versions of Apache Airflow Providers
apache-airflow-providers-amazon==6.0.0
apache-airflow-providers-apache-hive==5.0.0
apache-airflow-providers-celery==3.0.0
apache-airflow-providers-cncf-kubernetes==4.4.0
apache-airflow-providers-common-sql==1.3.1
apache-airflow-providers-docker==3.2.0
apache-airflow-providers-elasticsearch==4.2.1
apache-airflow-providers-ftp==3.1.0
apache-airflow-providers-google==8.4.0
apache-airflow-providers-grpc==3.0.0
apache-airflow-providers-hashicorp==3.1.0
apache-airflow-providers-http==4.0.0
apache-airflow-providers-imap==3.0.0
apache-airflow-providers-microsoft-azure==4.3.0
apache-airflow-providers-mysql==3.2.1
apache-airflow-providers-odbc==3.1.2
apache-airflow-providers-postgres==5.2.2
apache-airflow-providers-presto==4.2.0
apache-airflow-providers-redis==3.0.0
apache-airflow-providers-sendgrid==3.0.0
apache-airflow-providers-sftp==4.1.0
apache-airflow-providers-slack==6.0.0
apache-airflow-providers-sqlite==3.2.1
apache-airflow-providers-ssh==3.2.0
### Deployment
Other 3rd-party Helm chart
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/providers/google/cloud/operators/cloud_base.py`
Content:
```
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18 """This module contains a Google API base operator."""
19 from __future__ import annotations
20
21 from airflow.models import BaseOperator
22
23
24 class GoogleCloudBaseOperator(BaseOperator):
25 """
26 Abstract base class that takes care of common specifics of the operators built
27 on top of Google API client libraries.
28 """
29
30 pass
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/providers/google/cloud/operators/cloud_base.py b/airflow/providers/google/cloud/operators/cloud_base.py
--- a/airflow/providers/google/cloud/operators/cloud_base.py
+++ b/airflow/providers/google/cloud/operators/cloud_base.py
@@ -18,6 +18,8 @@
"""This module contains a Google API base operator."""
from __future__ import annotations
+from google.api_core.gapic_v1.method import DEFAULT
+
from airflow.models import BaseOperator
@@ -27,4 +29,11 @@
on top of Google API client libraries.
"""
- pass
+ def __deepcopy__(self, memo):
+ """
+ Updating the memo to fix the non-copyable global constant.
+ This constant can be specified in operator parameters as a retry configuration to indicate a default.
+ See https://github.com/apache/airflow/issues/28751 for details.
+ """
+ memo[id(DEFAULT)] = DEFAULT
+ return super().__deepcopy__(memo)
| {"golden_diff": "diff --git a/airflow/providers/google/cloud/operators/cloud_base.py b/airflow/providers/google/cloud/operators/cloud_base.py\n--- a/airflow/providers/google/cloud/operators/cloud_base.py\n+++ b/airflow/providers/google/cloud/operators/cloud_base.py\n@@ -18,6 +18,8 @@\n \"\"\"This module contains a Google API base operator.\"\"\"\n from __future__ import annotations\n \n+from google.api_core.gapic_v1.method import DEFAULT\n+\n from airflow.models import BaseOperator\n \n \n@@ -27,4 +29,11 @@\n on top of Google API client libraries.\n \"\"\"\n \n- pass\n+ def __deepcopy__(self, memo):\n+ \"\"\"\n+ Updating the memo to fix the non-copyable global constant.\n+ This constant can be specified in operator parameters as a retry configuration to indicate a default.\n+ See https://github.com/apache/airflow/issues/28751 for details.\n+ \"\"\"\n+ memo[id(DEFAULT)] = DEFAULT\n+ return super().__deepcopy__(memo)\n", "issue": "KubernetesExecutor leaves failed pods due to deepcopy issue with Google providers\n### Apache Airflow version\r\n\r\nOther Airflow 2 version (please specify below)\r\n\r\n### What happened\r\n\r\nWith Airflow 2.3 and 2.4 there appears to be a bug in the KubernetesExecutor when used in conjunction with the Google airflow providers. This bug does not affect Airflow 2.2 due to the pip version requirements.\r\n\r\nThe bug specifically presents itself when using nearly any Google provider operator. During the pod lifecycle, all is well until the executor in the pod starts to clean up following a successful run. Airflow itself still see's the task marked as a success, but in Kubernetes, while the task is finishing up after reporting status, it actually crashes and puts the pod into a Failed state silently:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/__main__.py\", line 39, in main\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/cli_parser.py\", line 52, in command\r\n return func(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/cli.py\", line 103, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 382, in task_run\r\n _run_task_by_selected_method(args, dag, ti)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 189, in _run_task_by_selected_method\r\n _run_task_by_local_task_job(args, ti)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/cli/commands/task_command.py\", line 247, in _run_task_by_local_task_job\r\n run_job.run()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/base_job.py\", line 247, in run\r\n self._execute()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 137, in _execute\r\n self.handle_task_exit(return_code)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 168, in handle_task_exit\r\n self._run_mini_scheduler_on_child_tasks()\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/utils/session.py\", line 75, in wrapper\r\n return func(*args, session=session, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/jobs/local_task_job.py\", line 253, in _run_mini_scheduler_on_child_tasks\r\n 
partial_dag = task.dag.partial_subset(\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2188, in partial_subset\r\n dag.task_dict = {\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2189, in <dictcomp>\r\n t.task_id: _deepcopy_task(t)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/dag.py\", line 2186, in _deepcopy_task\r\n return copy.deepcopy(t, memo)\r\n File \"/usr/local/lib/python3.9/copy.py\", line 153, in deepcopy\r\n y = copier(memo)\r\n File \"/home/airflow/.local/lib/python3.9/site-packages/airflow/models/baseoperator.py\", line 1163, in __deepcopy__\r\n setattr(result, k, copy.deepcopy(v, memo))\r\n File \"/usr/local/lib/python3.9/copy.py\", line 172, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/local/lib/python3.9/copy.py\", line 264, in _reconstruct\r\n y = func(*args)\r\n File \"/usr/local/lib/python3.9/enum.py\", line 384, in __call__\r\n return cls.__new__(cls, value)\r\n File \"/usr/local/lib/python3.9/enum.py\", line 702, in __new__\r\n raise ve_exc\r\nValueError: <object object at 0x7f570181a3c0> is not a valid _MethodDefault\r\n```\r\n\r\nBased on a quick look, it appears to be related to the default argument that Google is using in its operators which happens to be an Enum, and fails during a deepcopy at the end of the task.\r\n\r\nExample operator that is affected: https://github.com/apache/airflow/blob/403ed7163f3431deb7fc21108e1743385e139907/airflow/providers/google/cloud/hooks/dataproc.py#L753\r\nReference to the Google Python API core which has the Enum causing the problem: https://github.com/googleapis/python-api-core/blob/main/google/api_core/gapic_v1/method.py#L31\r\n\r\n### What you think should happen instead\r\n\r\nKubernetes pods should succeed, be marked as `Completed`, and then be gracefully terminated.\r\n\r\n### How to reproduce\r\n\r\nUse any `apache-airflow-providers-google` >= 7.0.0 which includes `google-api-core` >= 2.2.2. 
Run a DAG with a task which uses any of the Google operators which have `_MethodDefault` as a default argument.\r\n\r\n### Operating System\r\n\r\nDebian GNU/Linux 11 (bullseye)\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\napache-airflow-providers-amazon==6.0.0\r\napache-airflow-providers-apache-hive==5.0.0\r\napache-airflow-providers-celery==3.0.0\r\napache-airflow-providers-cncf-kubernetes==4.4.0\r\napache-airflow-providers-common-sql==1.3.1\r\napache-airflow-providers-docker==3.2.0\r\napache-airflow-providers-elasticsearch==4.2.1\r\napache-airflow-providers-ftp==3.1.0\r\napache-airflow-providers-google==8.4.0\r\napache-airflow-providers-grpc==3.0.0\r\napache-airflow-providers-hashicorp==3.1.0\r\napache-airflow-providers-http==4.0.0\r\napache-airflow-providers-imap==3.0.0\r\napache-airflow-providers-microsoft-azure==4.3.0\r\napache-airflow-providers-mysql==3.2.1\r\napache-airflow-providers-odbc==3.1.2\r\napache-airflow-providers-postgres==5.2.2\r\napache-airflow-providers-presto==4.2.0\r\napache-airflow-providers-redis==3.0.0\r\napache-airflow-providers-sendgrid==3.0.0\r\napache-airflow-providers-sftp==4.1.0\r\napache-airflow-providers-slack==6.0.0\r\napache-airflow-providers-sqlite==3.2.1\r\napache-airflow-providers-ssh==3.2.0\r\n\r\n### Deployment\r\n\r\nOther 3rd-party Helm chart\r\n\r\n### Deployment details\r\n\r\n_No response_\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains a Google API base operator.\"\"\"\nfrom __future__ import annotations\n\nfrom airflow.models import BaseOperator\n\n\nclass GoogleCloudBaseOperator(BaseOperator):\n \"\"\"\n Abstract base class that takes care of common specifics of the operators built\n on top of Google API client libraries.\n \"\"\"\n\n pass\n", "path": "airflow/providers/google/cloud/operators/cloud_base.py"}], "after_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"This module contains a Google API base operator.\"\"\"\nfrom __future__ import annotations\n\nfrom google.api_core.gapic_v1.method import DEFAULT\n\nfrom airflow.models import BaseOperator\n\n\nclass GoogleCloudBaseOperator(BaseOperator):\n \"\"\"\n Abstract base class that takes care of common specifics of the operators built\n on top of Google API client libraries.\n \"\"\"\n\n def __deepcopy__(self, memo):\n \"\"\"\n Updating the memo to fix the non-copyable global constant.\n This constant can be specified in operator parameters as a retry configuration to indicate a default.\n See https://github.com/apache/airflow/issues/28751 for details.\n \"\"\"\n memo[id(DEFAULT)] = DEFAULT\n return super().__deepcopy__(memo)\n", "path": "airflow/providers/google/cloud/operators/cloud_base.py"}]} | 2,319 | 223 |
gh_patches_debug_19372 | rasdani/github-patches | git_diff | aws-powertools__powertools-lambda-python-1534 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int
### Static type checker used
mypy (project's standard)
### AWS Lambda function runtime
3.9
### AWS Lambda Powertools for Python version
latest
### Static type checker info
```
$ mypy repro.py
repro.py:5: error: Argument "log_level" to "copy_config_to_registered_loggers" has incompatible type "int"; expected "Optional[str]"
Found 1 error in 1 file (checked 1 source file)
```
```
mypy --version
mypy 0.971 (compiled: yes)
```
### Code snippet
```python
from aws_lambda_powertools.logging import utils
from aws_lambda_powertools import Logger
logger = Logger()
utils.copy_config_to_registered_loggers(source_logger=logger, log_level=30)
```
### Possible Solution
Update signature to accept `Union[str, int]`
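For context, a small illustrative sketch (the logger name is arbitrary) showing that the standard library already treats levels as ints and accepts either form:

```python
import logging

# WARNING is just the int 30, and Logger.setLevel() accepts an int or a
# registered level name, so Union[int, str] matches what the stdlib allows.
assert logging.WARNING == 30

example = logging.getLogger("example")
example.setLevel(30)         # int level
example.setLevel("WARNING")  # registered level name
```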
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aws_lambda_powertools/logging/utils.py`
Content:
```
1 import logging
2 from typing import Callable, List, Optional, Set, Union
3
4 from .logger import Logger
5
6 PACKAGE_LOGGER = "aws_lambda_powertools"
7
8
9 def copy_config_to_registered_loggers(
10 source_logger: Logger,
11 log_level: Optional[str] = None,
12 exclude: Optional[Set[str]] = None,
13 include: Optional[Set[str]] = None,
14 ) -> None:
15
16 """Copies source Logger level and handler to all registered loggers for consistent formatting.
17
18 Parameters
19 ----------
20 source_logger : Logger
21 Powertools Logger to copy configuration from
22 log_level : str, optional
23 Logging level to set to registered loggers, by default uses source_logger logging level
24 include : Optional[Set[str]], optional
25 List of logger names to include, by default all registered loggers are included
26 exclude : Optional[Set[str]], optional
27 List of logger names to exclude, by default None
28 """
29 level = log_level or source_logger.level
30
31 # Assumptions: Only take parent loggers not children (dot notation rule)
32 # Steps:
33 # 1. Default operation: Include all registered loggers
34 # 2. Only include set? Only add Loggers in the list and ignore all else
35 # 3. Include and exclude set? Add Logger if it’s in include and not in exclude
36 # 4. Only exclude set? Ignore Logger in the excluding list
37
38 # Exclude source and powertools package logger by default
39 # If source logger is a child ensure we exclude parent logger to not break child logger
40 # from receiving/pushing updates to keys being added/removed
41 source_logger_name = source_logger.name.split(".")[0]
42
43 if exclude:
44 exclude.update([source_logger_name, PACKAGE_LOGGER])
45 else:
46 exclude = {source_logger_name, PACKAGE_LOGGER}
47
48 # Prepare loggers set
49 if include:
50 loggers = include.difference(exclude)
51 filter_func = _include_registered_loggers_filter
52 else:
53 loggers = exclude
54 filter_func = _exclude_registered_loggers_filter
55
56 registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)
57 for logger in registered_loggers:
58 _configure_logger(source_logger, logger, level)
59
60
61 def _include_registered_loggers_filter(loggers: Set[str]):
62 return [logging.getLogger(name) for name in logging.root.manager.loggerDict if "." not in name and name in loggers]
63
64
65 def _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:
66 return [
67 logging.getLogger(name) for name in logging.root.manager.loggerDict if "." not in name and name not in loggers
68 ]
69
70
71 def _find_registered_loggers(
72 source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]
73 ) -> List[logging.Logger]:
74 """Filter root loggers based on provided parameters."""
75 root_loggers = filter_func(loggers)
76 source_logger.debug(f"Filtered root loggers: {root_loggers}")
77 return root_loggers
78
79
80 def _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None:
81 logger.handlers = []
82 logger.setLevel(level)
83 logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073
84 source_logger.debug(f"Logger {logger} reconfigured to use logging level {level}")
85 for source_handler in source_logger.handlers:
86 logger.addHandler(source_handler)
87 source_logger.debug(f"Logger {logger} reconfigured to use {source_handler}")
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py
--- a/aws_lambda_powertools/logging/utils.py
+++ b/aws_lambda_powertools/logging/utils.py
@@ -8,7 +8,7 @@
def copy_config_to_registered_loggers(
source_logger: Logger,
- log_level: Optional[str] = None,
+ log_level: Optional[Union[int, str]] = None,
exclude: Optional[Set[str]] = None,
include: Optional[Set[str]] = None,
) -> None:
@@ -19,7 +19,7 @@
----------
source_logger : Logger
Powertools Logger to copy configuration from
- log_level : str, optional
+ log_level : Union[int, str], optional
Logging level to set to registered loggers, by default uses source_logger logging level
include : Optional[Set[str]], optional
List of logger names to include, by default all registered loggers are included
| {"golden_diff": "diff --git a/aws_lambda_powertools/logging/utils.py b/aws_lambda_powertools/logging/utils.py\n--- a/aws_lambda_powertools/logging/utils.py\n+++ b/aws_lambda_powertools/logging/utils.py\n@@ -8,7 +8,7 @@\n \n def copy_config_to_registered_loggers(\n source_logger: Logger,\n- log_level: Optional[str] = None,\n+ log_level: Optional[Union[int, str]] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n ) -> None:\n@@ -19,7 +19,7 @@\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n- log_level : str, optional\n+ log_level : Union[int, str], optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n", "issue": "Static typing: aws_lambda_powertools.logging.utils.copy_config_to_registered_loggers argument log_level should accept int\n### Static type checker used\r\n\r\nmypy (project's standard)\r\n\r\n### AWS Lambda function runtime\r\n\r\n3.9\r\n\r\n### AWS Lambda Powertools for Python version\r\n\r\nlatest\r\n\r\n### Static type checker info\r\n\r\n```\r\n$ mypy repro.py\r\nrepro.py:5: error: Argument \"log_level\" to \"copy_config_to_registered_loggers\" has incompatible type \"int\"; expected \"Optional[str]\"\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\n\r\n```\r\nmypy --version\r\nmypy 0.971 (compiled: yes)\r\n```\r\n\r\n### Code snippet\r\n\r\n```python\r\nfrom aws_lambda_powertools.logging import utils\r\nfrom aws_lambda_powertools import Logger\r\n\r\nlogger = Logger()\r\nutils.copy_config_to_registered_loggers(source_logger=logger, log_level=30)\r\n```\r\n\r\n\r\n### Possible Solution\r\n\r\nUpdate signature to accept `Union[str, int]`\n", "before_files": [{"content": "import logging\nfrom typing import Callable, List, Optional, Set, Union\n\nfrom .logger import Logger\n\nPACKAGE_LOGGER = \"aws_lambda_powertools\"\n\n\ndef copy_config_to_registered_loggers(\n source_logger: Logger,\n log_level: Optional[str] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n) -> None:\n\n \"\"\"Copies source Logger level and handler to all registered loggers for consistent formatting.\n\n Parameters\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n log_level : str, optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n exclude : Optional[Set[str]], optional\n List of logger names to exclude, by default None\n \"\"\"\n level = log_level or source_logger.level\n\n # Assumptions: Only take parent loggers not children (dot notation rule)\n # Steps:\n # 1. Default operation: Include all registered loggers\n # 2. Only include set? Only add Loggers in the list and ignore all else\n # 3. Include and exclude set? Add Logger if it\u2019s in include and not in exclude\n # 4. Only exclude set? 
Ignore Logger in the excluding list\n\n # Exclude source and powertools package logger by default\n # If source logger is a child ensure we exclude parent logger to not break child logger\n # from receiving/pushing updates to keys being added/removed\n source_logger_name = source_logger.name.split(\".\")[0]\n\n if exclude:\n exclude.update([source_logger_name, PACKAGE_LOGGER])\n else:\n exclude = {source_logger_name, PACKAGE_LOGGER}\n\n # Prepare loggers set\n if include:\n loggers = include.difference(exclude)\n filter_func = _include_registered_loggers_filter\n else:\n loggers = exclude\n filter_func = _exclude_registered_loggers_filter\n\n registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)\n for logger in registered_loggers:\n _configure_logger(source_logger, logger, level)\n\n\ndef _include_registered_loggers_filter(loggers: Set[str]):\n return [logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name in loggers]\n\n\ndef _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:\n return [\n logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name not in loggers\n ]\n\n\ndef _find_registered_loggers(\n source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]\n) -> List[logging.Logger]:\n \"\"\"Filter root loggers based on provided parameters.\"\"\"\n root_loggers = filter_func(loggers)\n source_logger.debug(f\"Filtered root loggers: {root_loggers}\")\n return root_loggers\n\n\ndef _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None:\n logger.handlers = []\n logger.setLevel(level)\n logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073\n source_logger.debug(f\"Logger {logger} reconfigured to use logging level {level}\")\n for source_handler in source_logger.handlers:\n logger.addHandler(source_handler)\n source_logger.debug(f\"Logger {logger} reconfigured to use {source_handler}\")\n", "path": "aws_lambda_powertools/logging/utils.py"}], "after_files": [{"content": "import logging\nfrom typing import Callable, List, Optional, Set, Union\n\nfrom .logger import Logger\n\nPACKAGE_LOGGER = \"aws_lambda_powertools\"\n\n\ndef copy_config_to_registered_loggers(\n source_logger: Logger,\n log_level: Optional[Union[int, str]] = None,\n exclude: Optional[Set[str]] = None,\n include: Optional[Set[str]] = None,\n) -> None:\n\n \"\"\"Copies source Logger level and handler to all registered loggers for consistent formatting.\n\n Parameters\n ----------\n source_logger : Logger\n Powertools Logger to copy configuration from\n log_level : Union[int, str], optional\n Logging level to set to registered loggers, by default uses source_logger logging level\n include : Optional[Set[str]], optional\n List of logger names to include, by default all registered loggers are included\n exclude : Optional[Set[str]], optional\n List of logger names to exclude, by default None\n \"\"\"\n level = log_level or source_logger.level\n\n # Assumptions: Only take parent loggers not children (dot notation rule)\n # Steps:\n # 1. Default operation: Include all registered loggers\n # 2. Only include set? Only add Loggers in the list and ignore all else\n # 3. Include and exclude set? Add Logger if it\u2019s in include and not in exclude\n # 4. Only exclude set? 
Ignore Logger in the excluding list\n\n # Exclude source and powertools package logger by default\n # If source logger is a child ensure we exclude parent logger to not break child logger\n # from receiving/pushing updates to keys being added/removed\n source_logger_name = source_logger.name.split(\".\")[0]\n\n if exclude:\n exclude.update([source_logger_name, PACKAGE_LOGGER])\n else:\n exclude = {source_logger_name, PACKAGE_LOGGER}\n\n # Prepare loggers set\n if include:\n loggers = include.difference(exclude)\n filter_func = _include_registered_loggers_filter\n else:\n loggers = exclude\n filter_func = _exclude_registered_loggers_filter\n\n registered_loggers = _find_registered_loggers(source_logger, loggers, filter_func)\n for logger in registered_loggers:\n _configure_logger(source_logger, logger, level)\n\n\ndef _include_registered_loggers_filter(loggers: Set[str]):\n return [logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name in loggers]\n\n\ndef _exclude_registered_loggers_filter(loggers: Set[str]) -> List[logging.Logger]:\n return [\n logging.getLogger(name) for name in logging.root.manager.loggerDict if \".\" not in name and name not in loggers\n ]\n\n\ndef _find_registered_loggers(\n source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]\n) -> List[logging.Logger]:\n \"\"\"Filter root loggers based on provided parameters.\"\"\"\n root_loggers = filter_func(loggers)\n source_logger.debug(f\"Filtered root loggers: {root_loggers}\")\n return root_loggers\n\n\ndef _configure_logger(source_logger: Logger, logger: logging.Logger, level: Union[int, str]) -> None:\n logger.handlers = []\n logger.setLevel(level)\n logger.propagate = False # ensure we don't propagate logs to existing loggers, #1073\n source_logger.debug(f\"Logger {logger} reconfigured to use logging level {level}\")\n for source_handler in source_logger.handlers:\n logger.addHandler(source_handler)\n source_logger.debug(f\"Logger {logger} reconfigured to use {source_handler}\")\n", "path": "aws_lambda_powertools/logging/utils.py"}]} | 1,412 | 212 |
gh_patches_debug_40727 | rasdani/github-patches | git_diff | kserve__kserve-1472 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[AWS] Support IAM Role for Service Account in KFServing
/kind feature
**Describe the solution you'd like**
[A clear and concise description of what you want to happen.]
Currently, it needs an S3 credential to download the model. We need a more fine-grained control mechanism. [IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) is a better option. This helps us deploy models without credentials.
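For illustration, a minimal sketch of an S3 download that relies on boto3's default credential chain, which IRSA supplies through the pod's web-identity token, instead of explicit access keys; the function name and the example URI are hypothetical:

```python
import os
from urllib.parse import urlparse

import boto3  # no explicit keys: the default chain resolves IRSA web-identity credentials


def download_s3(uri: str, out_dir: str) -> None:
    # e.g. uri = "s3://my-bucket/models/sklearn" (hypothetical)
    parsed = urlparse(uri)
    bucket = boto3.resource("s3").Bucket(parsed.netloc)
    prefix = parsed.path.lstrip("/")
    for obj in bucket.objects.filter(Prefix=prefix):
        if obj.key.endswith("/"):  # skip pseudo-directory markers
            continue
        rel = obj.key.replace(prefix, "", 1).lstrip("/") or os.path.basename(obj.key)
        target = os.path.join(out_dir, rel)
        os.makedirs(os.path.dirname(target), exist_ok=True)
        bucket.download_file(obj.key, target)
```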
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/kfserving/storage.py`
Content:
```
1 # Copyright 2020 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import glob
16 import logging
17 import tempfile
18 import mimetypes
19 import os
20 import re
21 import json
22 import shutil
23 import tarfile
24 import zipfile
25 import gzip
26 from urllib.parse import urlparse
27 import requests
28 from azure.storage.blob import BlockBlobService
29 from google.auth import exceptions
30 from google.cloud import storage
31 from minio import Minio
32 from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS
33
34 _GCS_PREFIX = "gs://"
35 _S3_PREFIX = "s3://"
36 _BLOB_RE = "https://(.+?).blob.core.windows.net/(.+)"
37 _LOCAL_PREFIX = "file://"
38 _URI_RE = "https?://(.+)/(.+)"
39 _HTTP_PREFIX = "http(s)://"
40 _HEADERS_SUFFIX = "-headers"
41
42
43 class Storage(object): # pylint: disable=too-few-public-methods
44 @staticmethod
45 def download(uri: str, out_dir: str = None) -> str:
46 logging.info("Copying contents of %s to local", uri)
47
48 is_local = False
49 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
50 is_local = True
51
52 if out_dir is None:
53 if is_local:
54 # noop if out_dir is not set and the path is local
55 return Storage._download_local(uri)
56 out_dir = tempfile.mkdtemp()
57 elif not os.path.exists(out_dir):
58 os.mkdir(out_dir)
59
60 if uri.startswith(_GCS_PREFIX):
61 Storage._download_gcs(uri, out_dir)
62 elif uri.startswith(_S3_PREFIX):
63 Storage._download_s3(uri, out_dir)
64 elif re.search(_BLOB_RE, uri):
65 Storage._download_blob(uri, out_dir)
66 elif is_local:
67 return Storage._download_local(uri, out_dir)
68 elif re.search(_URI_RE, uri):
69 return Storage._download_from_uri(uri, out_dir)
70 elif uri.startswith(MODEL_MOUNT_DIRS):
71 # Don't need to download models if this InferenceService is running in the multi-model
72 # serving mode. The model agent will download models.
73 return out_dir
74 else:
75 raise Exception("Cannot recognize storage type for " + uri +
76 "\n'%s', '%s', '%s', and '%s' are the current available storage type." %
77 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))
78
79 logging.info("Successfully copied %s to %s", uri, out_dir)
80 return out_dir
81
82 @staticmethod
83 def _download_s3(uri, temp_dir: str):
84 client = Storage._create_minio_client()
85 bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
86 bucket_name = bucket_args[0]
87 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
88 objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
89 count = 0
90 for obj in objects:
91 # Replace any prefix from the object key with temp_dir
92 subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
93 # fget_object handles directory creation if does not exist
94 if not obj.is_dir:
95 if subdir_object_key == "":
96 subdir_object_key = obj.object_name
97 client.fget_object(bucket_name, obj.object_name,
98 os.path.join(temp_dir, subdir_object_key))
99 count = count + 1
100 if count == 0:
101 raise RuntimeError("Failed to fetch model. \
102 The path or model %s does not exist." % (uri))
103
104 @staticmethod
105 def _download_gcs(uri, temp_dir: str):
106 try:
107 storage_client = storage.Client()
108 except exceptions.DefaultCredentialsError:
109 storage_client = storage.Client.create_anonymous_client()
110 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
111 bucket_name = bucket_args[0]
112 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
113 bucket = storage_client.bucket(bucket_name)
114 prefix = bucket_path
115 if not prefix.endswith("/"):
116 prefix = prefix + "/"
117 blobs = bucket.list_blobs(prefix=prefix)
118 count = 0
119 for blob in blobs:
120 # Replace any prefix from the object key with temp_dir
121 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
122
123 # Create necessary subdirectory to store the object locally
124 if "/" in subdir_object_key:
125 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
126 if not os.path.isdir(local_object_dir):
127 os.makedirs(local_object_dir, exist_ok=True)
128 if subdir_object_key.strip() != "":
129 dest_path = os.path.join(temp_dir, subdir_object_key)
130 logging.info("Downloading: %s", dest_path)
131 blob.download_to_filename(dest_path)
132 count = count + 1
133 if count == 0:
134 raise RuntimeError("Failed to fetch model. \
135 The path or model %s does not exist." % uri)
136
137 @staticmethod
138 def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals
139 match = re.search(_BLOB_RE, uri)
140 account_name = match.group(1)
141 storage_url = match.group(2)
142 container_name, prefix = storage_url.split("/", 1)
143
144 logging.info("Connecting to BLOB account: [%s], container: [%s], prefix: [%s]",
145 account_name,
146 container_name,
147 prefix)
148 try:
149 block_blob_service = BlockBlobService(account_name=account_name)
150 blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
151 except Exception: # pylint: disable=broad-except
152 token = Storage._get_azure_storage_token()
153 if token is None:
154 logging.warning("Azure credentials not found, retrying anonymous access")
155 block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)
156 blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
157 count = 0
158 for blob in blobs:
159 dest_path = os.path.join(out_dir, blob.name)
160 if "/" in blob.name:
161 head, tail = os.path.split(blob.name)
162 if prefix is not None:
163 head = head[len(prefix):]
164 if head.startswith('/'):
165 head = head[1:]
166 dir_path = os.path.join(out_dir, head)
167 dest_path = os.path.join(dir_path, tail)
168 if not os.path.isdir(dir_path):
169 os.makedirs(dir_path)
170
171 logging.info("Downloading: %s to %s", blob.name, dest_path)
172 block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)
173 count = count + 1
174 if count == 0:
175 raise RuntimeError("Failed to fetch model. \
176 The path or model %s does not exist." % (uri))
177
178 @staticmethod
179 def _get_azure_storage_token():
180 tenant_id = os.getenv("AZ_TENANT_ID", "")
181 client_id = os.getenv("AZ_CLIENT_ID", "")
182 client_secret = os.getenv("AZ_CLIENT_SECRET", "")
183 subscription_id = os.getenv("AZ_SUBSCRIPTION_ID", "")
184
185 if tenant_id == "" or client_id == "" or client_secret == "" or subscription_id == "":
186 return None
187
188 # note the SP must have "Storage Blob Data Owner" perms for this to work
189 import adal
190 from azure.storage.common import TokenCredential
191
192 authority_url = "https://login.microsoftonline.com/" + tenant_id
193
194 context = adal.AuthenticationContext(authority_url)
195
196 token = context.acquire_token_with_client_credentials(
197 "https://storage.azure.com/",
198 client_id,
199 client_secret)
200
201 token_credential = TokenCredential(token["accessToken"])
202
203 logging.info("Retrieved SP token credential for client_id: %s", client_id)
204
205 return token_credential
206
207 @staticmethod
208 def _download_local(uri, out_dir=None):
209 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
210 if not os.path.exists(local_path):
211 raise RuntimeError("Local path %s does not exist." % (uri))
212
213 if out_dir is None:
214 return local_path
215 elif not os.path.isdir(out_dir):
216 os.makedirs(out_dir)
217
218 if os.path.isdir(local_path):
219 local_path = os.path.join(local_path, "*")
220
221 for src in glob.glob(local_path):
222 _, tail = os.path.split(src)
223 dest_path = os.path.join(out_dir, tail)
224 logging.info("Linking: %s to %s", src, dest_path)
225 os.symlink(src, dest_path)
226 return out_dir
227
228 @staticmethod
229 def _download_from_uri(uri, out_dir=None):
230 url = urlparse(uri)
231 filename = os.path.basename(url.path)
232 mimetype, encoding = mimetypes.guess_type(url.path)
233 local_path = os.path.join(out_dir, filename)
234
235 if filename == '':
236 raise ValueError('No filename contained in URI: %s' % (uri))
237
238 # Get header information from host url
239 headers = {}
240 host_uri = url.hostname
241
242 headers_json = os.getenv(host_uri + _HEADERS_SUFFIX, "{}")
243 headers = json.loads(headers_json)
244
245 with requests.get(uri, stream=True, headers=headers) as response:
246 if response.status_code != 200:
247 raise RuntimeError("URI: %s returned a %s response code." % (uri, response.status_code))
248 if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\
249 .startswith('application/zip'):
250 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/zip\'" % uri)
251 if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\
252 .startswith('application/x-tar'):
253 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/x-tar\'" % uri)
254 if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \
255 not response.headers.get('Content-Type', '').startswith('application/octet-stream'):
256 raise RuntimeError("URI: %s did not respond with \'Content-Type\': \'application/octet-stream\'"
257 % uri)
258
259 if encoding == 'gzip':
260 stream = gzip.GzipFile(fileobj=response.raw)
261 local_path = os.path.join(out_dir, f'{filename}.tar')
262 else:
263 stream = response.raw
264 with open(local_path, 'wb') as out:
265 shutil.copyfileobj(stream, out)
266
267 if mimetype in ["application/x-tar", "application/zip"]:
268 if mimetype == "application/x-tar":
269 archive = tarfile.open(local_path, 'r', encoding='utf-8')
270 else:
271 archive = zipfile.ZipFile(local_path, 'r')
272 archive.extractall(out_dir)
273 archive.close()
274 os.remove(local_path)
275
276 return out_dir
277
278 @staticmethod
279 def _create_minio_client():
280 # Adding prefixing "http" in urlparse is necessary for it to be the netloc
281 url = urlparse(os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"))
282 use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv("S3_USE_HTTPS", "true"))
283 return Minio(url.netloc,
284 access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
285 secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
286 region=os.getenv("AWS_REGION", ""),
287 secure=use_ssl)
288
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -13,22 +13,24 @@
# limitations under the License.
import glob
+import gzip
import logging
-import tempfile
import mimetypes
import os
import re
import json
import shutil
import tarfile
+import tempfile
import zipfile
-import gzip
from urllib.parse import urlparse
+
+import boto3
import requests
from azure.storage.blob import BlockBlobService
from google.auth import exceptions
from google.cloud import storage
-from minio import Minio
+
from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS
_GCS_PREFIX = "gs://"
@@ -81,25 +83,27 @@
@staticmethod
def _download_s3(uri, temp_dir: str):
- client = Storage._create_minio_client()
- bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
- bucket_name = bucket_args[0]
- bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
- objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
- count = 0
- for obj in objects:
- # Replace any prefix from the object key with temp_dir
- subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
- # fget_object handles directory creation if does not exist
- if not obj.is_dir:
- if subdir_object_key == "":
- subdir_object_key = obj.object_name
- client.fget_object(bucket_name, obj.object_name,
- os.path.join(temp_dir, subdir_object_key))
- count = count + 1
- if count == 0:
- raise RuntimeError("Failed to fetch model. \
-The path or model %s does not exist." % (uri))
+ s3 = boto3.resource('s3', endpoint_url=os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"))
+ parsed = urlparse(uri, scheme='s3')
+ bucket_name = parsed.netloc
+ bucket_path = parsed.path.lstrip('/')
+
+ bucket = s3.Bucket(bucket_name)
+ for obj in bucket.objects.filter(Prefix=bucket_path):
+ # Skip where boto3 lists the directory as an object
+ if obj.key.endswith("/"):
+ continue
+ # In the case where bucket_path points to a single object, set the target key to bucket_path
+ # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir
+ target_key = (
+ obj.key
+ if bucket_path == obj.key
+ else obj.key.replace(bucket_path, "", 1).lstrip("/")
+ )
+ target = f"{temp_dir}/{target_key}"
+ if not os.path.exists(os.path.dirname(target)):
+ os.makedirs(os.path.dirname(target), exist_ok=True)
+ bucket.download_file(obj.key, target)
@staticmethod
def _download_gcs(uri, temp_dir: str):
@@ -274,14 +278,3 @@
os.remove(local_path)
return out_dir
-
- @staticmethod
- def _create_minio_client():
- # Adding prefixing "http" in urlparse is necessary for it to be the netloc
- url = urlparse(os.getenv("AWS_ENDPOINT_URL", "http://s3.amazonaws.com"))
- use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv("S3_USE_HTTPS", "true"))
- return Minio(url.netloc,
- access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
- secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
- region=os.getenv("AWS_REGION", ""),
- secure=use_ssl)
| {"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -13,22 +13,24 @@\n # limitations under the License.\n \n import glob\n+import gzip\n import logging\n-import tempfile\n import mimetypes\n import os\n import re\n import json\n import shutil\n import tarfile\n+import tempfile\n import zipfile\n-import gzip\n from urllib.parse import urlparse\n+\n+import boto3\n import requests\n from azure.storage.blob import BlockBlobService\n from google.auth import exceptions\n from google.cloud import storage\n-from minio import Minio\n+\n from kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n \n _GCS_PREFIX = \"gs://\"\n@@ -81,25 +83,27 @@\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- client = Storage._create_minio_client()\n- bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n- bucket_name = bucket_args[0]\n- bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n- objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n- count = 0\n- for obj in objects:\n- # Replace any prefix from the object key with temp_dir\n- subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n- # fget_object handles directory creation if does not exist\n- if not obj.is_dir:\n- if subdir_object_key == \"\":\n- subdir_object_key = obj.object_name\n- client.fget_object(bucket_name, obj.object_name,\n- os.path.join(temp_dir, subdir_object_key))\n- count = count + 1\n- if count == 0:\n- raise RuntimeError(\"Failed to fetch model. \\\n-The path or model %s does not exist.\" % (uri))\n+ s3 = boto3.resource('s3', endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n+ parsed = urlparse(uri, scheme='s3')\n+ bucket_name = parsed.netloc\n+ bucket_path = parsed.path.lstrip('/')\n+\n+ bucket = s3.Bucket(bucket_name)\n+ for obj in bucket.objects.filter(Prefix=bucket_path):\n+ # Skip where boto3 lists the directory as an object\n+ if obj.key.endswith(\"/\"):\n+ continue\n+ # In the case where bucket_path points to a single object, set the target key to bucket_path\n+ # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir\n+ target_key = (\n+ obj.key\n+ if bucket_path == obj.key\n+ else obj.key.replace(bucket_path, \"\", 1).lstrip(\"/\")\n+ )\n+ target = f\"{temp_dir}/{target_key}\"\n+ if not os.path.exists(os.path.dirname(target)):\n+ os.makedirs(os.path.dirname(target), exist_ok=True)\n+ bucket.download_file(obj.key, target)\n \n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n@@ -274,14 +278,3 @@\n os.remove(local_path)\n \n return out_dir\n-\n- @staticmethod\n- def _create_minio_client():\n- # Adding prefixing \"http\" in urlparse is necessary for it to be the netloc\n- url = urlparse(os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n- use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv(\"S3_USE_HTTPS\", \"true\"))\n- return Minio(url.netloc,\n- access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n- secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n- region=os.getenv(\"AWS_REGION\", \"\"),\n- secure=use_ssl)\n", "issue": "[AWS] Support IAM Role for Service Account in KFServing\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\n[A clear and concise description of what you want to happen.]\r\nCurrently, it needs a S3 credential to download model. We need more fine grain control ways. 
[IRSA](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) is a better option. This helps us deploy model without credentials\r\n \r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\n", "before_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport logging\nimport tempfile\nimport mimetypes\nimport os\nimport re\nimport json\nimport shutil\nimport tarfile\nimport zipfile\nimport gzip\nfrom urllib.parse import urlparse\nimport requests\nfrom azure.storage.blob import BlockBlobService\nfrom google.auth import exceptions\nfrom google.cloud import storage\nfrom minio import Minio\nfrom kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_BLOB_RE = \"https://(.+?).blob.core.windows.net/(.+)\"\n_LOCAL_PREFIX = \"file://\"\n_URI_RE = \"https?://(.+)/(.+)\"\n_HTTP_PREFIX = \"http(s)://\"\n_HEADERS_SUFFIX = \"-headers\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n\n is_local = False\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n is_local = True\n\n if out_dir is None:\n if is_local:\n # noop if out_dir is not set and the path is local\n return Storage._download_local(uri)\n out_dir = tempfile.mkdtemp()\n elif not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n elif re.search(_BLOB_RE, uri):\n Storage._download_blob(uri, out_dir)\n elif is_local:\n return Storage._download_local(uri, out_dir)\n elif re.search(_URI_RE, uri):\n return Storage._download_from_uri(uri, out_dir)\n elif uri.startswith(MODEL_MOUNT_DIRS):\n # Don't need to download models if this InferenceService is running in the multi-model\n # serving mode. 
The model agent will download models.\n return out_dir\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n count = 0\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n # fget_object handles directory creation if does not exist\n if not obj.is_dir:\n if subdir_object_key == \"\":\n subdir_object_key = obj.object_name\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n count = 0\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. 
\\\nThe path or model %s does not exist.\" % uri)\n\n @staticmethod\n def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n\n logging.info(\"Connecting to BLOB account: [%s], container: [%s], prefix: [%s]\",\n account_name,\n container_name,\n prefix)\n try:\n block_blob_service = BlockBlobService(account_name=account_name)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n except Exception: # pylint: disable=broad-except\n token = Storage._get_azure_storage_token()\n if token is None:\n logging.warning(\"Azure credentials not found, retrying anonymous access\")\n block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n count = 0\n for blob in blobs:\n dest_path = os.path.join(out_dir, blob.name)\n if \"/\" in blob.name:\n head, tail = os.path.split(blob.name)\n if prefix is not None:\n head = head[len(prefix):]\n if head.startswith('/'):\n head = head[1:]\n dir_path = os.path.join(out_dir, head)\n dest_path = os.path.join(dir_path, tail)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n logging.info(\"Downloading: %s to %s\", blob.name, dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _get_azure_storage_token():\n tenant_id = os.getenv(\"AZ_TENANT_ID\", \"\")\n client_id = os.getenv(\"AZ_CLIENT_ID\", \"\")\n client_secret = os.getenv(\"AZ_CLIENT_SECRET\", \"\")\n subscription_id = os.getenv(\"AZ_SUBSCRIPTION_ID\", \"\")\n\n if tenant_id == \"\" or client_id == \"\" or client_secret == \"\" or subscription_id == \"\":\n return None\n\n # note the SP must have \"Storage Blob Data Owner\" perms for this to work\n import adal\n from azure.storage.common import TokenCredential\n\n authority_url = \"https://login.microsoftonline.com/\" + tenant_id\n\n context = adal.AuthenticationContext(authority_url)\n\n token = context.acquire_token_with_client_credentials(\n \"https://storage.azure.com/\",\n client_id,\n client_secret)\n\n token_credential = TokenCredential(token[\"accessToken\"])\n\n logging.info(\"Retrieved SP token credential for client_id: %s\", client_id)\n\n return token_credential\n\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise RuntimeError(\"Local path %s does not exist.\" % (uri))\n\n if out_dir is None:\n return local_path\n elif not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if os.path.isdir(local_path):\n local_path = os.path.join(local_path, \"*\")\n\n for src in glob.glob(local_path):\n _, tail = os.path.split(src)\n dest_path = os.path.join(out_dir, tail)\n logging.info(\"Linking: %s to %s\", src, dest_path)\n os.symlink(src, dest_path)\n return out_dir\n\n @staticmethod\n def _download_from_uri(uri, out_dir=None):\n url = urlparse(uri)\n filename = os.path.basename(url.path)\n mimetype, encoding = mimetypes.guess_type(url.path)\n local_path = os.path.join(out_dir, filename)\n\n if filename == '':\n raise ValueError('No filename contained in URI: %s' % (uri))\n\n # Get header information from host url\n headers = {}\n host_uri = url.hostname\n\n headers_json = 
os.getenv(host_uri + _HEADERS_SUFFIX, \"{}\")\n headers = json.loads(headers_json)\n\n with requests.get(uri, stream=True, headers=headers) as response:\n if response.status_code != 200:\n raise RuntimeError(\"URI: %s returned a %s response code.\" % (uri, response.status_code))\n if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\\\n .startswith('application/zip'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/zip\\'\" % uri)\n if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\\\n .startswith('application/x-tar'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/x-tar\\'\" % uri)\n if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \\\n not response.headers.get('Content-Type', '').startswith('application/octet-stream'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/octet-stream\\'\"\n % uri)\n\n if encoding == 'gzip':\n stream = gzip.GzipFile(fileobj=response.raw)\n local_path = os.path.join(out_dir, f'{filename}.tar')\n else:\n stream = response.raw\n with open(local_path, 'wb') as out:\n shutil.copyfileobj(stream, out)\n\n if mimetype in [\"application/x-tar\", \"application/zip\"]:\n if mimetype == \"application/x-tar\":\n archive = tarfile.open(local_path, 'r', encoding='utf-8')\n else:\n archive = zipfile.ZipFile(local_path, 'r')\n archive.extractall(out_dir)\n archive.close()\n os.remove(local_path)\n\n return out_dir\n\n @staticmethod\n def _create_minio_client():\n # Adding prefixing \"http\" in urlparse is necessary for it to be the netloc\n url = urlparse(os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n use_ssl = url.scheme == 'https' if url.scheme else bool(os.getenv(\"S3_USE_HTTPS\", \"true\"))\n return Minio(url.netloc,\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n region=os.getenv(\"AWS_REGION\", \"\"),\n secure=use_ssl)\n", "path": "python/kfserving/kfserving/storage.py"}], "after_files": [{"content": "# Copyright 2020 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport gzip\nimport logging\nimport mimetypes\nimport os\nimport re\nimport json\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\nfrom urllib.parse import urlparse\n\nimport boto3\nimport requests\nfrom azure.storage.blob import BlockBlobService\nfrom google.auth import exceptions\nfrom google.cloud import storage\n\nfrom kfserving.kfmodel_repository import MODEL_MOUNT_DIRS\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_BLOB_RE = \"https://(.+?).blob.core.windows.net/(.+)\"\n_LOCAL_PREFIX = \"file://\"\n_URI_RE = \"https?://(.+)/(.+)\"\n_HTTP_PREFIX = \"http(s)://\"\n_HEADERS_SUFFIX = \"-headers\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to 
local\", uri)\n\n is_local = False\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n is_local = True\n\n if out_dir is None:\n if is_local:\n # noop if out_dir is not set and the path is local\n return Storage._download_local(uri)\n out_dir = tempfile.mkdtemp()\n elif not os.path.exists(out_dir):\n os.mkdir(out_dir)\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n elif re.search(_BLOB_RE, uri):\n Storage._download_blob(uri, out_dir)\n elif is_local:\n return Storage._download_local(uri, out_dir)\n elif re.search(_URI_RE, uri):\n return Storage._download_from_uri(uri, out_dir)\n elif uri.startswith(MODEL_MOUNT_DIRS):\n # Don't need to download models if this InferenceService is running in the multi-model\n # serving mode. The model agent will download models.\n return out_dir\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX, _HTTP_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n s3 = boto3.resource('s3', endpoint_url=os.getenv(\"AWS_ENDPOINT_URL\", \"http://s3.amazonaws.com\"))\n parsed = urlparse(uri, scheme='s3')\n bucket_name = parsed.netloc\n bucket_path = parsed.path.lstrip('/')\n\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=bucket_path):\n # Skip where boto3 lists the directory as an object\n if obj.key.endswith(\"/\"):\n continue\n # In the case where bucket_path points to a single object, set the target key to bucket_path\n # Otherwise, remove the bucket_path prefix, strip any extra slashes, then prepend the target_dir\n target_key = (\n obj.key\n if bucket_path == obj.key\n else obj.key.replace(bucket_path, \"\", 1).lstrip(\"/\")\n )\n target = f\"{temp_dir}/{target_key}\"\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target), exist_ok=True)\n bucket.download_file(obj.key, target)\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n count = 0\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. 
\\\nThe path or model %s does not exist.\" % uri)\n\n @staticmethod\n def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n\n logging.info(\"Connecting to BLOB account: [%s], container: [%s], prefix: [%s]\",\n account_name,\n container_name,\n prefix)\n try:\n block_blob_service = BlockBlobService(account_name=account_name)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n except Exception: # pylint: disable=broad-except\n token = Storage._get_azure_storage_token()\n if token is None:\n logging.warning(\"Azure credentials not found, retrying anonymous access\")\n block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n count = 0\n for blob in blobs:\n dest_path = os.path.join(out_dir, blob.name)\n if \"/\" in blob.name:\n head, tail = os.path.split(blob.name)\n if prefix is not None:\n head = head[len(prefix):]\n if head.startswith('/'):\n head = head[1:]\n dir_path = os.path.join(out_dir, head)\n dest_path = os.path.join(dir_path, tail)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n logging.info(\"Downloading: %s to %s\", blob.name, dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n count = count + 1\n if count == 0:\n raise RuntimeError(\"Failed to fetch model. \\\nThe path or model %s does not exist.\" % (uri))\n\n @staticmethod\n def _get_azure_storage_token():\n tenant_id = os.getenv(\"AZ_TENANT_ID\", \"\")\n client_id = os.getenv(\"AZ_CLIENT_ID\", \"\")\n client_secret = os.getenv(\"AZ_CLIENT_SECRET\", \"\")\n subscription_id = os.getenv(\"AZ_SUBSCRIPTION_ID\", \"\")\n\n if tenant_id == \"\" or client_id == \"\" or client_secret == \"\" or subscription_id == \"\":\n return None\n\n # note the SP must have \"Storage Blob Data Owner\" perms for this to work\n import adal\n from azure.storage.common import TokenCredential\n\n authority_url = \"https://login.microsoftonline.com/\" + tenant_id\n\n context = adal.AuthenticationContext(authority_url)\n\n token = context.acquire_token_with_client_credentials(\n \"https://storage.azure.com/\",\n client_id,\n client_secret)\n\n token_credential = TokenCredential(token[\"accessToken\"])\n\n logging.info(\"Retrieved SP token credential for client_id: %s\", client_id)\n\n return token_credential\n\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise RuntimeError(\"Local path %s does not exist.\" % (uri))\n\n if out_dir is None:\n return local_path\n elif not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if os.path.isdir(local_path):\n local_path = os.path.join(local_path, \"*\")\n\n for src in glob.glob(local_path):\n _, tail = os.path.split(src)\n dest_path = os.path.join(out_dir, tail)\n logging.info(\"Linking: %s to %s\", src, dest_path)\n os.symlink(src, dest_path)\n return out_dir\n\n @staticmethod\n def _download_from_uri(uri, out_dir=None):\n url = urlparse(uri)\n filename = os.path.basename(url.path)\n mimetype, encoding = mimetypes.guess_type(url.path)\n local_path = os.path.join(out_dir, filename)\n\n if filename == '':\n raise ValueError('No filename contained in URI: %s' % (uri))\n\n # Get header information from host url\n headers = {}\n host_uri = url.hostname\n\n headers_json = 
os.getenv(host_uri + _HEADERS_SUFFIX, \"{}\")\n headers = json.loads(headers_json)\n\n with requests.get(uri, stream=True, headers=headers) as response:\n if response.status_code != 200:\n raise RuntimeError(\"URI: %s returned a %s response code.\" % (uri, response.status_code))\n if mimetype == 'application/zip' and not response.headers.get('Content-Type', '')\\\n .startswith('application/zip'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/zip\\'\" % uri)\n if mimetype == 'application/x-tar' and not response.headers.get('Content-Type', '')\\\n .startswith('application/x-tar'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/x-tar\\'\" % uri)\n if (mimetype != 'application/zip' and mimetype != 'application/x-tar') and \\\n not response.headers.get('Content-Type', '').startswith('application/octet-stream'):\n raise RuntimeError(\"URI: %s did not respond with \\'Content-Type\\': \\'application/octet-stream\\'\"\n % uri)\n\n if encoding == 'gzip':\n stream = gzip.GzipFile(fileobj=response.raw)\n local_path = os.path.join(out_dir, f'{filename}.tar')\n else:\n stream = response.raw\n with open(local_path, 'wb') as out:\n shutil.copyfileobj(stream, out)\n\n if mimetype in [\"application/x-tar\", \"application/zip\"]:\n if mimetype == \"application/x-tar\":\n archive = tarfile.open(local_path, 'r', encoding='utf-8')\n else:\n archive = zipfile.ZipFile(local_path, 'r')\n archive.extractall(out_dir)\n archive.close()\n os.remove(local_path)\n\n return out_dir\n", "path": "python/kfserving/kfserving/storage.py"}]} | 3,777 | 868 |
gh_patches_debug_374 | rasdani/github-patches | git_diff | cupy__cupy-2615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cupy.where fails for complex arrays
The function cupy.where does not work for complex arrays (numpy.where does):
```
import cupy as cp
a = cp.arange(5).astype(cp.complex128)
b = cp.arange(5).astype(cp.complex128)
c = cp.where(a==b,a,b)
```
fails with the error message
> TypeError: Wrong type ((<class 'numpy.bool_'>, <class 'numpy.complex128'>, <class 'numpy.complex128'>)) of arguments for cupy_where
For `cp.float64`, everything works fine.
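For comparison, an illustrative NumPy equivalent of the same snippet runs without error:

```python
import numpy as np

a = np.arange(5).astype(np.complex128)
b = np.arange(5).astype(np.complex128)
c = np.where(a == b, a, b)  # works: complex128 array([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j])
```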
CuPy Version : 6.4.0
CUDA Root : /usr
CUDA Build Version : 8000
CUDA Driver Version : 10020
CUDA Runtime Version : 8000
cuDNN Build Version : 7102
cuDNN Version : 7102
NCCL Build Version : 2213
NCCL Runtime Version : (unknown)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/sorting/search.py`
Content:
```
1 from cupy import core
2 from cupy.core import fusion
3
4
5 def argmax(a, axis=None, dtype=None, out=None, keepdims=False):
6 """Returns the indices of the maximum along an axis.
7
8 Args:
9 a (cupy.ndarray): Array to take argmax.
10 axis (int): Along which axis to find the maximum. ``a`` is flattened by
11 default.
12 dtype: Data type specifier.
13 out (cupy.ndarray): Output array.
14 keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
15 of length one.
16
17 Returns:
18 cupy.ndarray: The indices of the maximum of ``a`` along an axis.
19
20 .. seealso:: :func:`numpy.argmax`
21
22 """
23 # TODO(okuta): check type
24 return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
25
26
27 # TODO(okuta): Implement nanargmax
28
29
30 def argmin(a, axis=None, dtype=None, out=None, keepdims=False):
31 """Returns the indices of the minimum along an axis.
32
33 Args:
34 a (cupy.ndarray): Array to take argmin.
35 axis (int): Along which axis to find the minimum. ``a`` is flattened by
36 default.
37 dtype: Data type specifier.
38 out (cupy.ndarray): Output array.
39 keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis
40 of length one.
41
42 Returns:
43 cupy.ndarray: The indices of the minimum of ``a`` along an axis.
44
45 .. seealso:: :func:`numpy.argmin`
46
47 """
48 # TODO(okuta): check type
49 return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
50
51
52 # TODO(okuta): Implement nanargmin
53
54
55 # TODO(okuta): Implement argwhere
56
57
58 def nonzero(a):
59 """Return the indices of the elements that are non-zero.
60
61 Returns a tuple of arrays, one for each dimension of a,
62 containing the indices of the non-zero elements in that dimension.
63
64 Args:
65 a (cupy.ndarray): array
66
67 Returns:
68 tuple of arrays: Indices of elements that are non-zero.
69
70 .. seealso:: :func:`numpy.nonzero`
71
72 """
73 assert isinstance(a, core.ndarray)
74 return a.nonzero()
75
76
77 def flatnonzero(a):
78 """Return indices that are non-zero in the flattened version of a.
79
80 This is equivalent to a.ravel().nonzero()[0].
81
82 Args:
83 a (cupy.ndarray): input array
84
85 Returns:
86 cupy.ndarray: Output array,
87 containing the indices of the elements of a.ravel() that are non-zero.
88
89 .. seealso:: :func:`numpy.flatnonzero`
90 """
91 assert isinstance(a, core.ndarray)
92 return a.ravel().nonzero()[0]
93
94
95 _where_ufunc = core.create_ufunc(
96 'cupy_where',
97 ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',
98 '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',
99 # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it
100 # works).
101 # See issue #551.
102 '?hd->d', '?Hd->d',
103 '?dd->d'),
104 'out0 = in0 ? in1 : in2')
105
106
107 def where(condition, x=None, y=None):
108 """Return elements, either from x or y, depending on condition.
109
110 If only condition is given, return ``condition.nonzero()``.
111
112 Args:
113 condition (cupy.ndarray): When True, take x, otherwise take y.
114 x (cupy.ndarray): Values from which to choose on ``True``.
115 y (cupy.ndarray): Values from which to choose on ``False``.
116
117 Returns:
118 cupy.ndarray: Each element of output contains elements of ``x`` when
119 ``condition`` is ``True``, otherwise elements of ``y``. If only
120 ``condition`` is given, return the tuple ``condition.nonzero()``,
121 the indices where ``condition`` is True.
122
123 .. seealso:: :func:`numpy.where`
124
125 """
126
127 missing = (x is None, y is None).count(True)
128
129 if missing == 1:
130 raise ValueError('Must provide both \'x\' and \'y\' or neither.')
131 if missing == 2:
132 return nonzero(condition)
133
134 if fusion._is_fusing():
135 return fusion._call_ufunc(_where_ufunc, condition, x, y)
136 return _where_ufunc(condition.astype('?'), x, y)
137
138
139 # TODO(okuta): Implement searchsorted
140
141
142 # TODO(okuta): Implement extract
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py
--- a/cupy/sorting/search.py
+++ b/cupy/sorting/search.py
@@ -100,7 +100,7 @@
# works).
# See issue #551.
'?hd->d', '?Hd->d',
- '?dd->d'),
+ '?dd->d', '?FF->F', '?DD->D'),
'out0 = in0 ? in1 : in2')
| {"golden_diff": "diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py\n--- a/cupy/sorting/search.py\n+++ b/cupy/sorting/search.py\n@@ -100,7 +100,7 @@\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n- '?dd->d'),\n+ '?dd->d', '?FF->F', '?DD->D'),\n 'out0 = in0 ? in1 : in2')\n", "issue": "cupy.where fails for complex arrays\nThe function cupy.where does not work for complex arrays (numpy.where does):\r\n\r\n```\r\nimport cupy as cp\r\na = cp.arange(5).astype(cp.complex128)\r\nb = cp.arange(5).astype(cp.complex128)\r\nc = cp.where(a==b,a,b)\r\n```\r\n\r\nfails with the error message\r\n\r\n> TypeError: Wrong type ((<class 'numpy.bool_'>, <class 'numpy.complex128'>, <class 'numpy.complex128'>)) of arguments for cupy_where\r\n\r\nFor `cp.float64`, everything works fine.\r\n\r\nCuPy Version : 6.4.0\r\nCUDA Root : /usr\r\nCUDA Build Version : 8000\r\nCUDA Driver Version : 10020\r\nCUDA Runtime Version : 8000\r\ncuDNN Build Version : 7102\r\ncuDNN Version : 7102\r\nNCCL Build Version : 2213\r\nNCCL Runtime Version : (unknown)\n", "before_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\ndef nonzero(a):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of a,\n containing the indices of the non-zero elements in that dimension.\n\n Args:\n a (cupy.ndarray): array\n\n Returns:\n tuple of arrays: Indices of elements that are non-zero.\n\n .. seealso:: :func:`numpy.nonzero`\n\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.nonzero()\n\n\ndef flatnonzero(a):\n \"\"\"Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to a.ravel().nonzero()[0].\n\n Args:\n a (cupy.ndarray): input array\n\n Returns:\n cupy.ndarray: Output array,\n containing the indices of the elements of a.ravel() that are non-zero.\n\n .. 
seealso:: :func:`numpy.flatnonzero`\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.ravel().nonzero()[0]\n\n\n_where_ufunc = core.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n '?dd->d'),\n 'out0 = in0 ? in1 : in2')\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n If only condition is given, return ``condition.nonzero()``.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``. If only\n ``condition`` is given, return the tuple ``condition.nonzero()``,\n the indices where ``condition`` is True.\n\n .. seealso:: :func:`numpy.where`\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError('Must provide both \\'x\\' and \\'y\\' or neither.')\n if missing == 2:\n return nonzero(condition)\n\n if fusion._is_fusing():\n return fusion._call_ufunc(_where_ufunc, condition, x, y)\n return _where_ufunc(condition.astype('?'), x, y)\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n", "path": "cupy/sorting/search.py"}], "after_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis\n of length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n # TODO(okuta): check type\n return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\ndef nonzero(a):\n \"\"\"Return the indices of the elements that are non-zero.\n\n Returns a tuple of arrays, one for each dimension of a,\n containing the indices of the non-zero elements in that dimension.\n\n Args:\n a (cupy.ndarray): array\n\n Returns:\n tuple of arrays: Indices of elements that are non-zero.\n\n .. 
seealso:: :func:`numpy.nonzero`\n\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.nonzero()\n\n\ndef flatnonzero(a):\n \"\"\"Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to a.ravel().nonzero()[0].\n\n Args:\n a (cupy.ndarray): input array\n\n Returns:\n cupy.ndarray: Output array,\n containing the indices of the elements of a.ravel() that are non-zero.\n\n .. seealso:: :func:`numpy.flatnonzero`\n \"\"\"\n assert isinstance(a, core.ndarray)\n return a.ravel().nonzero()[0]\n\n\n_where_ufunc = core.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n # works).\n # See issue #551.\n '?hd->d', '?Hd->d',\n '?dd->d', '?FF->F', '?DD->D'),\n 'out0 = in0 ? in1 : in2')\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n If only condition is given, return ``condition.nonzero()``.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``. If only\n ``condition`` is given, return the tuple ``condition.nonzero()``,\n the indices where ``condition`` is True.\n\n .. seealso:: :func:`numpy.where`\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError('Must provide both \\'x\\' and \\'y\\' or neither.')\n if missing == 2:\n return nonzero(condition)\n\n if fusion._is_fusing():\n return fusion._call_ufunc(_where_ufunc, condition, x, y)\n return _where_ufunc(condition.astype('?'), x, y)\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n", "path": "cupy/sorting/search.py"}]} | 1,899 | 113 |
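For reference, a minimal sketch of the reproduction from the issue above, assuming a CuPy build that already includes the extra complex signatures ('?FF->F', '?DD->D') added in the patch:

```python
import cupy as cp

# Reproduction from the issue: cupy.where over complex128 operands.
a = cp.arange(5).astype(cp.complex128)
b = cp.arange(5).astype(cp.complex128)

# With the '?DD->D' signature registered, this selects elementwise from a or b
# instead of raising TypeError "Wrong type ... of arguments for cupy_where".
c = cp.where(a == b, a, b)
print(c)  # [0.+0.j 1.+0.j 2.+0.j 3.+0.j 4.+0.j]
```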
gh_patches_debug_30952 | rasdani/github-patches | git_diff | apluslms__a-plus-576 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LTI: remove request.host from LTI parameters
Remove `request.get_host()` from all LTI parameter calculations and use `settings.BASE_URL` instead. `request.get_host` comes from the browser, so a user can use it to manipulate the data sent as part of the POST.
These related issues can be fixed in the same change or created as separate issues:
- [x] `tool_consumer_instance_guid`: this field doesn't seem to include relevant information; it should be derived from `settings.BASE_URL`.
- [x] `launch_presentation_return_url`: this should link to the page in A+ that renders the launch button (basically the page this link is rendered on).
- [ ] An interesting second part is to include or keep query parameters, so we can request reauthentication by the tool provider (e.g. koodisäilö), which would send the browser to `{launch_presentation_return_url}?tc_return_url=<url>` or similar. TODO: raphendyr needs to write down a better explanation (i.e. create an issue) - **Moved to issue #431**
--- END ISSUE ---
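For illustration only (not taken from the repository), a minimal sketch of the substitution the issue asks for, assuming `settings.BASE_URL` is an absolute URL such as `https://plus.example.org` (a made-up host) and reusing `instance.get_absolute_url()` as in the existing code:

```python
from urllib.parse import urlsplit, urljoin

from django.conf import settings


def lti_launch_urls(instance):
    """Build the host-dependent LTI values from the trusted settings.BASE_URL
    instead of the client-controlled Host header (request.get_host())."""
    base = urlsplit(settings.BASE_URL)   # e.g. 'https://plus.example.org'
    path = instance.get_absolute_url()   # e.g. '/course/instance/'
    return {
        "context_id": base.netloc + path,
        "launch_presentation_return_url": urljoin(settings.BASE_URL, path),
        "tool_consumer_instance_guid": base.netloc + "/aplus",
    }
```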
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `external_services/lti.py`
Content:
```
1 from hashlib import md5
2 from django.conf import settings
3 from django.core.exceptions import PermissionDenied
4 from django.utils.translation import get_language
5 from rest_framework.reverse import reverse
6 from rest_framework.settings import api_settings
7 from oauthlib.common import urldecode
8 from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_BODY, \
9 SIGNATURE_TYPE_QUERY
10 import json
11
12 from aplus.api import api_reverse
13 from lib.helpers import update_url_params
14 from course.models import Enrollment
15
16
17 class LTIRequest(object):
18
19 def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):
20 self.service = service
21 course = instance.course
22 # Context and resource parameters.
23 context_id = context_id or (request.get_host() + instance.get_absolute_url())
24 link_id = link_id or "aplus{:d}".format(service.pk)
25 title = title or link_id
26
27 # Gather user information
28 user_id, given_name, family_name, full_name, email = self.user_info(instance, user)
29
30 # Determine user role.
31 role = "Learner,Student"
32 # Student is not a standard role name, but it has been used here before
33 if course.is_teacher(user):
34 role = "Instructor"
35 elif instance.is_assistant(user):
36 role = "TA,TeachingAssistant" # "TA" is not a standard role
37
38 self.parameters = add or {}
39 self.parameters.update({
40
41 "lti_version": "LTI-1p0",
42 "lti_message_type": "basic-lti-launch-request",
43
44 "resource_link_id": link_id,
45 "resource_link_title": title,
46
47 # User.
48 "user_id": user_id,
49 "roles": role,
50 "lis_person_name_full": full_name,
51 "lis_person_name_given": given_name,
52 "lis_person_name_family": family_name,
53 "lis_person_contact_email_primary": email,
54
55 # Selected course.
56 "context_id": context_id,
57 "context_title": course.name,
58 "context_label": course.code,
59
60 "launch_presentation_locale": get_language(),
61 "launch_presentation_document_target":
62 "iframe" if exercise and exercise.open_in_iframe else "window",
63 "launch_presentation_return_url": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),
64
65 "tool_consumer_instance_guid": request.get_host() + "/aplus",
66 "tool_consumer_instance_name": "A+ LMS",
67 })
68
69 if service.api_access:
70 self.parameters.update({
71 'custom_context_api': settings.BASE_URL + api_reverse("course-detail", kwargs={'course_id': instance.id}),
72 'custom_context_api_id': str(instance.id),
73 'custom_user_api_token': user.userprofile.api_token,
74 })
75
76 if exercise:
77 # LTI 1.1 Tool Provider may return grades to A+ (Tool Consumer)
78 self.parameters.update({
79 # Outcome Service requests from the LTI Tool Provider include the
80 # sourcedid from the launch request. It is used to create new submissions
81 # for storing the points of the user.
82 "lis_result_sourcedid": "{}-{}".format(exercise.pk, user_id),
83 # The LTI Tool Provider posts Outcome Service requests to this URL (i.e., points for a submission)
84 "lis_outcome_service_url": reverse('lti-outcomes', request=request,
85 kwargs={'version': api_settings.DEFAULT_VERSION}),
86 })
87
88 def user_info(self, course_instance, user):
89 if self.service.is_anonymous:
90 # Anonymize user information
91 enrollment = Enrollment.objects.filter(course_instance=course_instance, user_profile=user.userprofile).first()
92 if not enrollment:
93 raise PermissionDenied()
94 # Creates anon name and id for pre-pseudonymisation Enrollments
95 if not (enrollment.anon_name or enrollment.anon_id):
96 # the model's post_save functions take care of the creation
97 enrollment.save()
98 user_id = "a" + enrollment.anon_id # a for anonymous
99 full_name = enrollment.anon_name
100 given_name, sep, family_name = full_name.rpartition(" ")
101 if not given_name:
102 given_name = "Anonymous"
103 email = "anonymous-{}@aplus.invalid".format(enrollment.anon_id)
104 else:
105 user_id = "i" + str(user.pk) # i for internal
106 full_name = "{} {}".format(user.first_name, user.last_name)
107 given_name = user.first_name
108 family_name = user.last_name
109 email = user.email
110 return user_id, given_name, family_name, full_name, email
111
112 def get_checksum_of_parameters(self, only_user_and_course_level_params=False):
113 if only_user_and_course_level_params:
114 # do not include parameters that change between different exercises for the same LTI service
115 included_keys = (
116 "lti_version",
117 "lti_message_type",
118 "user_id",
119 "lis_person_name_full",
120 "lis_person_contact_email_primary",
121 "context_id",
122 "context_label",
123 "tool_consumer_instance_guid",
124 )
125 params = [(key, value) for key, value in self.parameters.items() if key in included_keys]
126 else:
127 params = self.parameters.items()
128 sum = md5()
129 for key, value in sorted(params):
130 sum.update("{}={};".format(key, value).encode('utf-8'))
131 return sum.hexdigest()
132
133 def sign_post_parameters(self, url=None):
134 client = Client(self.service.consumer_key,
135 client_secret=self.service.consumer_secret,
136 signature_method=SIGNATURE_HMAC,
137 signature_type=SIGNATURE_TYPE_BODY)
138 uri, headers, body = client.sign(self._get_url(url),
139 http_method="POST",
140 body=self.parameters,
141 headers={"Content-Type": "application/x-www-form-urlencoded"})
142 return urldecode(body)
143
144 def sign_get_query(self, url=None):
145 client = Client(self.service.consumer_key,
146 client_secret=self.service.consumer_secret,
147 signature_method=SIGNATURE_HMAC,
148 signature_type=SIGNATURE_TYPE_QUERY)
149 uri = update_url_params(self._get_url(url), self.parameters)
150 try:
151 query, headers, body = client.sign(uri, http_method="GET")
152 except ValueError as e:
153 raise ValueError("Invalid url %r for %r: %s" % (uri, self.service, e))
154 return query
155
156 def _get_url(self, url=None):
157 if url and url.startswith('//') or '://' in url:
158 return url
159 return self.service.get_final_url(url)
160
161
162 class CustomStudentInfoLTIRequest(LTIRequest):
163
164 def __init__(self, service, user, profiles, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):
165 self.service = service
166 self.course_instance = instance
167 parameters = add or {}
168 if not service.is_anonymous:
169 parameters['custom_student_id'] = self.true_student_id(user.userprofile)
170 if len(profiles) > 1:
171 parameters['custom_group_members'] = self.group_json(profiles)
172 super().__init__(service, user, instance, request, title, context_id, link_id, parameters, exercise)
173
174 def true_student_id(self, profile):
175 return profile.student_id or "A{:d}".format(profile.id)
176
177 def group_json(self, profiles):
178 data = []
179 for profile in profiles:
180 user_id, given_name, family_name, full_name, email = self.user_info(self.course_instance, profile.user)
181 d = {
182 'user': user_id,
183 'given_name': given_name,
184 'family_name': family_name,
185 'full_name': full_name,
186 'email': email,
187 }
188 if not self.service.is_anonymous:
189 d['student_id'] = self.true_student_id(profile)
190 data.append(d)
191 return json.dumps(data)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/external_services/lti.py b/external_services/lti.py
--- a/external_services/lti.py
+++ b/external_services/lti.py
@@ -1,4 +1,6 @@
from hashlib import md5
+from urllib.parse import urlsplit, urljoin
+
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils.translation import get_language
@@ -19,8 +21,9 @@
def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):
self.service = service
course = instance.course
+ base_url_parts = urlsplit(settings.BASE_URL)
# Context and resource parameters.
- context_id = context_id or (request.get_host() + instance.get_absolute_url())
+ context_id = context_id or (base_url_parts.netloc + instance.get_absolute_url())
link_id = link_id or "aplus{:d}".format(service.pk)
title = title or link_id
@@ -60,9 +63,9 @@
"launch_presentation_locale": get_language(),
"launch_presentation_document_target":
"iframe" if exercise and exercise.open_in_iframe else "window",
- "launch_presentation_return_url": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),
+ "launch_presentation_return_url": urljoin(settings.BASE_URL, instance.get_absolute_url()),
- "tool_consumer_instance_guid": request.get_host() + "/aplus",
+ "tool_consumer_instance_guid": base_url_parts.netloc + "/aplus",
"tool_consumer_instance_name": "A+ LMS",
})
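As a quick sanity check of the standard-library helpers used in the patch, shown with made-up values standing in for `settings.BASE_URL` and `instance.get_absolute_url()`:

```python
from urllib.parse import urlsplit, urljoin

base = "https://plus.example.org"   # stand-in for settings.BASE_URL
path = "/course/def-2019/"          # stand-in for instance.get_absolute_url()

print(urlsplit(base).netloc)        # 'plus.example.org'
print(urljoin(base, path))          # 'https://plus.example.org/course/def-2019/'
```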
| {"golden_diff": "diff --git a/external_services/lti.py b/external_services/lti.py\n--- a/external_services/lti.py\n+++ b/external_services/lti.py\n@@ -1,4 +1,6 @@\n from hashlib import md5\n+from urllib.parse import urlsplit, urljoin\n+\n from django.conf import settings\n from django.core.exceptions import PermissionDenied\n from django.utils.translation import get_language\n@@ -19,8 +21,9 @@\n def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n course = instance.course\n+ base_url_parts = urlsplit(settings.BASE_URL)\n # Context and resource parameters.\n- context_id = context_id or (request.get_host() + instance.get_absolute_url())\n+ context_id = context_id or (base_url_parts.netloc + instance.get_absolute_url())\n link_id = link_id or \"aplus{:d}\".format(service.pk)\n title = title or link_id\n \n@@ -60,9 +63,9 @@\n \"launch_presentation_locale\": get_language(),\n \"launch_presentation_document_target\":\n \"iframe\" if exercise and exercise.open_in_iframe else \"window\",\n- \"launch_presentation_return_url\": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),\n+ \"launch_presentation_return_url\": urljoin(settings.BASE_URL, instance.get_absolute_url()),\n \n- \"tool_consumer_instance_guid\": request.get_host() + \"/aplus\",\n+ \"tool_consumer_instance_guid\": base_url_parts.netloc + \"/aplus\",\n \"tool_consumer_instance_name\": \"A+ LMS\",\n })\n", "issue": "LTI: remove request.host from LTI parameters\nRemove `request.get_host()` from all LTI parameter calculations. Use `settings.BASE_URL` instead. `request.get_host` comes from the browser, thus user can use it to manipulate the data send as part of the post.\r\n\r\nThese related issues can be fixed in connection or should be created as another issues:\r\n\r\n- [x] `tool_consumer_instance_guid` this field doesn't seem to include relevant information. it should be created from settings.BASE_URL\r\n- [x] `launch_presentation_return_url` this should include link to the page in A+, which will render the launch button (basically to the page this link is rendered at).\r\n- [ ] Interesting second part is to include or keep query parameters, so we can request reauthentication by the tool provider (e.g. koodis\u00e4il\u00f6), which would be sending browser to `{launch_presentation_return_url}?tc_return_url=<ulr>` or such. TODO: raphendyr needs to write down better explanation (i.e. 
create an issue) - **Moved to an issue #431**\n", "before_files": [{"content": "from hashlib import md5\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import get_language\nfrom rest_framework.reverse import reverse\nfrom rest_framework.settings import api_settings\nfrom oauthlib.common import urldecode\nfrom oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_BODY, \\\n SIGNATURE_TYPE_QUERY\nimport json\n\nfrom aplus.api import api_reverse\nfrom lib.helpers import update_url_params\nfrom course.models import Enrollment\n\n\nclass LTIRequest(object):\n\n def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n course = instance.course\n # Context and resource parameters.\n context_id = context_id or (request.get_host() + instance.get_absolute_url())\n link_id = link_id or \"aplus{:d}\".format(service.pk)\n title = title or link_id\n\n # Gather user information\n user_id, given_name, family_name, full_name, email = self.user_info(instance, user)\n\n # Determine user role.\n role = \"Learner,Student\"\n # Student is not a standard role name, but it has been used here before\n if course.is_teacher(user):\n role = \"Instructor\"\n elif instance.is_assistant(user):\n role = \"TA,TeachingAssistant\" # \"TA\" is not a standard role\n\n self.parameters = add or {}\n self.parameters.update({\n\n \"lti_version\": \"LTI-1p0\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n\n \"resource_link_id\": link_id,\n \"resource_link_title\": title,\n\n # User.\n \"user_id\": user_id,\n \"roles\": role,\n \"lis_person_name_full\": full_name,\n \"lis_person_name_given\": given_name,\n \"lis_person_name_family\": family_name,\n \"lis_person_contact_email_primary\": email,\n\n # Selected course.\n \"context_id\": context_id,\n \"context_title\": course.name,\n \"context_label\": course.code,\n\n \"launch_presentation_locale\": get_language(),\n \"launch_presentation_document_target\":\n \"iframe\" if exercise and exercise.open_in_iframe else \"window\",\n \"launch_presentation_return_url\": request.scheme + '://' + request.get_host() + instance.get_absolute_url(),\n\n \"tool_consumer_instance_guid\": request.get_host() + \"/aplus\",\n \"tool_consumer_instance_name\": \"A+ LMS\",\n })\n\n if service.api_access:\n self.parameters.update({\n 'custom_context_api': settings.BASE_URL + api_reverse(\"course-detail\", kwargs={'course_id': instance.id}),\n 'custom_context_api_id': str(instance.id),\n 'custom_user_api_token': user.userprofile.api_token,\n })\n\n if exercise:\n # LTI 1.1 Tool Provider may return grades to A+ (Tool Consumer)\n self.parameters.update({\n # Outcome Service requests from the LTI Tool Provider include the\n # sourcedid from the launch request. 
It is used to create new submissions\n # for storing the points of the user.\n \"lis_result_sourcedid\": \"{}-{}\".format(exercise.pk, user_id),\n # The LTI Tool Provider posts Outcome Service requests to this URL (i.e., points for a submission)\n \"lis_outcome_service_url\": reverse('lti-outcomes', request=request,\n kwargs={'version': api_settings.DEFAULT_VERSION}),\n })\n\n def user_info(self, course_instance, user):\n if self.service.is_anonymous:\n # Anonymize user information\n enrollment = Enrollment.objects.filter(course_instance=course_instance, user_profile=user.userprofile).first()\n if not enrollment:\n raise PermissionDenied()\n # Creates anon name and id for pre-pseudonymisation Enrollments\n if not (enrollment.anon_name or enrollment.anon_id):\n # the model's post_save functions take care of the creation\n enrollment.save()\n user_id = \"a\" + enrollment.anon_id # a for anonymous\n full_name = enrollment.anon_name\n given_name, sep, family_name = full_name.rpartition(\" \")\n if not given_name:\n given_name = \"Anonymous\"\n email = \"anonymous-{}@aplus.invalid\".format(enrollment.anon_id)\n else:\n user_id = \"i\" + str(user.pk) # i for internal\n full_name = \"{} {}\".format(user.first_name, user.last_name)\n given_name = user.first_name\n family_name = user.last_name\n email = user.email\n return user_id, given_name, family_name, full_name, email\n\n def get_checksum_of_parameters(self, only_user_and_course_level_params=False):\n if only_user_and_course_level_params:\n # do not include parameters that change between different exercises for the same LTI service\n included_keys = (\n \"lti_version\",\n \"lti_message_type\",\n \"user_id\",\n \"lis_person_name_full\",\n \"lis_person_contact_email_primary\",\n \"context_id\",\n \"context_label\",\n \"tool_consumer_instance_guid\",\n )\n params = [(key, value) for key, value in self.parameters.items() if key in included_keys]\n else:\n params = self.parameters.items()\n sum = md5()\n for key, value in sorted(params):\n sum.update(\"{}={};\".format(key, value).encode('utf-8'))\n return sum.hexdigest()\n\n def sign_post_parameters(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_BODY)\n uri, headers, body = client.sign(self._get_url(url),\n http_method=\"POST\",\n body=self.parameters,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"})\n return urldecode(body)\n\n def sign_get_query(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_QUERY)\n uri = update_url_params(self._get_url(url), self.parameters)\n try:\n query, headers, body = client.sign(uri, http_method=\"GET\")\n except ValueError as e:\n raise ValueError(\"Invalid url %r for %r: %s\" % (uri, self.service, e))\n return query\n\n def _get_url(self, url=None):\n if url and url.startswith('//') or '://' in url:\n return url\n return self.service.get_final_url(url)\n\n\nclass CustomStudentInfoLTIRequest(LTIRequest):\n\n def __init__(self, service, user, profiles, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n self.course_instance = instance\n parameters = add or {}\n if not service.is_anonymous:\n parameters['custom_student_id'] = self.true_student_id(user.userprofile)\n if len(profiles) > 1:\n parameters['custom_group_members'] = 
self.group_json(profiles)\n super().__init__(service, user, instance, request, title, context_id, link_id, parameters, exercise)\n\n def true_student_id(self, profile):\n return profile.student_id or \"A{:d}\".format(profile.id)\n\n def group_json(self, profiles):\n data = []\n for profile in profiles:\n user_id, given_name, family_name, full_name, email = self.user_info(self.course_instance, profile.user)\n d = {\n 'user': user_id,\n 'given_name': given_name,\n 'family_name': family_name,\n 'full_name': full_name,\n 'email': email,\n }\n if not self.service.is_anonymous:\n d['student_id'] = self.true_student_id(profile)\n data.append(d)\n return json.dumps(data)\n", "path": "external_services/lti.py"}], "after_files": [{"content": "from hashlib import md5\nfrom urllib.parse import urlsplit, urljoin\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import get_language\nfrom rest_framework.reverse import reverse\nfrom rest_framework.settings import api_settings\nfrom oauthlib.common import urldecode\nfrom oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_BODY, \\\n SIGNATURE_TYPE_QUERY\nimport json\n\nfrom aplus.api import api_reverse\nfrom lib.helpers import update_url_params\nfrom course.models import Enrollment\n\n\nclass LTIRequest(object):\n\n def __init__(self, service, user, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n course = instance.course\n base_url_parts = urlsplit(settings.BASE_URL)\n # Context and resource parameters.\n context_id = context_id or (base_url_parts.netloc + instance.get_absolute_url())\n link_id = link_id or \"aplus{:d}\".format(service.pk)\n title = title or link_id\n\n # Gather user information\n user_id, given_name, family_name, full_name, email = self.user_info(instance, user)\n\n # Determine user role.\n role = \"Learner,Student\"\n # Student is not a standard role name, but it has been used here before\n if course.is_teacher(user):\n role = \"Instructor\"\n elif instance.is_assistant(user):\n role = \"TA,TeachingAssistant\" # \"TA\" is not a standard role\n\n self.parameters = add or {}\n self.parameters.update({\n\n \"lti_version\": \"LTI-1p0\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n\n \"resource_link_id\": link_id,\n \"resource_link_title\": title,\n\n # User.\n \"user_id\": user_id,\n \"roles\": role,\n \"lis_person_name_full\": full_name,\n \"lis_person_name_given\": given_name,\n \"lis_person_name_family\": family_name,\n \"lis_person_contact_email_primary\": email,\n\n # Selected course.\n \"context_id\": context_id,\n \"context_title\": course.name,\n \"context_label\": course.code,\n\n \"launch_presentation_locale\": get_language(),\n \"launch_presentation_document_target\":\n \"iframe\" if exercise and exercise.open_in_iframe else \"window\",\n \"launch_presentation_return_url\": urljoin(settings.BASE_URL, instance.get_absolute_url()),\n\n \"tool_consumer_instance_guid\": base_url_parts.netloc + \"/aplus\",\n \"tool_consumer_instance_name\": \"A+ LMS\",\n })\n\n if service.api_access:\n self.parameters.update({\n 'custom_context_api': settings.BASE_URL + api_reverse(\"course-detail\", kwargs={'course_id': instance.id}),\n 'custom_context_api_id': str(instance.id),\n 'custom_user_api_token': user.userprofile.api_token,\n })\n\n if exercise:\n # LTI 1.1 Tool Provider may return grades to A+ (Tool Consumer)\n self.parameters.update({\n # Outcome Service requests from the LTI Tool Provider 
include the\n # sourcedid from the launch request. It is used to create new submissions\n # for storing the points of the user.\n \"lis_result_sourcedid\": \"{}-{}\".format(exercise.pk, user_id),\n # The LTI Tool Provider posts Outcome Service requests to this URL (i.e., points for a submission)\n \"lis_outcome_service_url\": reverse('lti-outcomes', request=request,\n kwargs={'version': api_settings.DEFAULT_VERSION}),\n })\n\n def user_info(self, course_instance, user):\n if self.service.is_anonymous:\n # Anonymize user information\n enrollment = Enrollment.objects.filter(course_instance=course_instance, user_profile=user.userprofile).first()\n if not enrollment:\n raise PermissionDenied()\n # Creates anon name and id for pre-pseudonymisation Enrollments\n if not (enrollment.anon_name or enrollment.anon_id):\n # the model's post_save functions take care of the creation\n enrollment.save()\n user_id = \"a\" + enrollment.anon_id # a for anonymous\n full_name = enrollment.anon_name\n given_name, sep, family_name = full_name.rpartition(\" \")\n if not given_name:\n given_name = \"Anonymous\"\n email = \"anonymous-{}@aplus.invalid\".format(enrollment.anon_id)\n else:\n user_id = \"i\" + str(user.pk) # i for internal\n full_name = \"{} {}\".format(user.first_name, user.last_name)\n given_name = user.first_name\n family_name = user.last_name\n email = user.email\n return user_id, given_name, family_name, full_name, email\n\n def get_checksum_of_parameters(self, only_user_and_course_level_params=False):\n if only_user_and_course_level_params:\n # do not include parameters that change between different exercises for the same LTI service\n included_keys = (\n \"lti_version\",\n \"lti_message_type\",\n \"user_id\",\n \"lis_person_name_full\",\n \"lis_person_contact_email_primary\",\n \"context_id\",\n \"context_label\",\n \"tool_consumer_instance_guid\",\n )\n params = [(key, value) for key, value in self.parameters.items() if key in included_keys]\n else:\n params = self.parameters.items()\n sum = md5()\n for key, value in sorted(params):\n sum.update(\"{}={};\".format(key, value).encode('utf-8'))\n return sum.hexdigest()\n\n def sign_post_parameters(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_BODY)\n uri, headers, body = client.sign(self._get_url(url),\n http_method=\"POST\",\n body=self.parameters,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"})\n return urldecode(body)\n\n def sign_get_query(self, url=None):\n client = Client(self.service.consumer_key,\n client_secret=self.service.consumer_secret,\n signature_method=SIGNATURE_HMAC,\n signature_type=SIGNATURE_TYPE_QUERY)\n uri = update_url_params(self._get_url(url), self.parameters)\n try:\n query, headers, body = client.sign(uri, http_method=\"GET\")\n except ValueError as e:\n raise ValueError(\"Invalid url %r for %r: %s\" % (uri, self.service, e))\n return query\n\n def _get_url(self, url=None):\n if url and url.startswith('//') or '://' in url:\n return url\n return self.service.get_final_url(url)\n\n\nclass CustomStudentInfoLTIRequest(LTIRequest):\n\n def __init__(self, service, user, profiles, instance, request, title, context_id=None, link_id=None, add=None, exercise=None):\n self.service = service\n self.course_instance = instance\n parameters = add or {}\n if not service.is_anonymous:\n parameters['custom_student_id'] = self.true_student_id(user.userprofile)\n if len(profiles) > 1:\n 
parameters['custom_group_members'] = self.group_json(profiles)\n super().__init__(service, user, instance, request, title, context_id, link_id, parameters, exercise)\n\n def true_student_id(self, profile):\n return profile.student_id or \"A{:d}\".format(profile.id)\n\n def group_json(self, profiles):\n data = []\n for profile in profiles:\n user_id, given_name, family_name, full_name, email = self.user_info(self.course_instance, profile.user)\n d = {\n 'user': user_id,\n 'given_name': given_name,\n 'family_name': family_name,\n 'full_name': full_name,\n 'email': email,\n }\n if not self.service.is_anonymous:\n d['student_id'] = self.true_student_id(profile)\n data.append(d)\n return json.dumps(data)\n", "path": "external_services/lti.py"}]} | 2,668 | 370 |
gh_patches_debug_162 | rasdani/github-patches | git_diff | CTFd__CTFd-796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not possible to download files as anonymous user
**Environment**:
- CTFd Version/Commit: 2.0.0 from master
- Operating System: Debian Stretch, Ubuntu 18.04, ...
- Web Browser and Version: Firefox 63
**What happened?**
* In admin, set visibility of challenges to public
* Logout
* Open challenge view (`/challenges`)
* Click on a challenge with a file
* Try to download the file (`/files/c378d661d2c9e103c4409cd4c92d801f/alice_bob.py`)
* => Error 403
**What did you expect to happen?**
* ...
* Click on a challenge with a file
* Try to download the file
* File downloads
**How to reproduce your issue**
_see above_
**Any associated stack traces or error logs**
_none_
--- END ISSUE ---
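For illustration only, and not CTFd's actual code: a generic Flask sketch of the access rule the reporter expects, with file downloads gated on the configured challenge visibility rather than on authentication alone (all names here are hypothetical):

```python
from flask import Flask, abort, send_from_directory, session

app = Flask(__name__)
app.secret_key = "dev"                                  # only so the session object is usable
app.config["CHALLENGE_VISIBILITY"] = "public"           # hypothetical stand-in for the admin setting


def files_visible():
    # Expected rule: anonymous downloads are allowed while challenges are public;
    # otherwise a logged-in session is required.
    return app.config["CHALLENGE_VISIBILITY"] == "public" or "user_id" in session


@app.route("/files/<path:filename>")
def download(filename):
    if not files_visible():
        abort(403)   # the response the reporter currently gets despite public visibility
    return send_from_directory("uploads", filename)
```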
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/__init__.py`
Content:
```
1 import sys
2 import os
3
4 from distutils.version import StrictVersion
5 from flask import Flask
6 from werkzeug.contrib.fixers import ProxyFix
7 from jinja2 import FileSystemLoader
8 from jinja2.sandbox import SandboxedEnvironment
9 from six.moves import input
10
11 from CTFd import utils
12 from CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database
13 from CTFd.utils.sessions import CachingSessionInterface
14 from CTFd.utils.updates import update_check
15 from CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals
16 from CTFd.utils.events import socketio
17 from CTFd.plugins import init_plugins
18
19 # Hack to support Unicode in Python 2 properly
20 if sys.version_info[0] < 3:
21 reload(sys)
22 sys.setdefaultencoding("utf-8")
23
24 __version__ = '2.0.0'
25
26
27 class CTFdFlask(Flask):
28 def __init__(self, *args, **kwargs):
29 """Overriden Jinja constructor setting a custom jinja_environment"""
30 self.jinja_environment = SandboxedBaseEnvironment
31 self.session_interface = CachingSessionInterface(key_prefix='session')
32 Flask.__init__(self, *args, **kwargs)
33
34 def create_jinja_environment(self):
35 """Overridden jinja environment constructor"""
36 return super(CTFdFlask, self).create_jinja_environment()
37
38
39 class SandboxedBaseEnvironment(SandboxedEnvironment):
40 """SandboxEnvironment that mimics the Flask BaseEnvironment"""
41 def __init__(self, app, **options):
42 if 'loader' not in options:
43 options['loader'] = app.create_global_jinja_loader()
44 # Disable cache entirely so that themes can be switched (#662)
45 # If the cache is enabled, switching themes will cause odd rendering errors
46 SandboxedEnvironment.__init__(self, cache_size=0, **options)
47 self.app = app
48
49
50 class ThemeLoader(FileSystemLoader):
51 """Custom FileSystemLoader that switches themes based on the configuration value"""
52 def __init__(self, searchpath, encoding='utf-8', followlinks=False):
53 super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)
54 self.overriden_templates = {}
55
56 def get_source(self, environment, template):
57 # Check if the template has been overriden
58 if template in self.overriden_templates:
59 return self.overriden_templates[template], template, True
60
61 # Check if the template requested is for the admin panel
62 if template.startswith('admin/'):
63 template = template[6:] # Strip out admin/
64 template = "/".join(['admin', 'templates', template])
65 return super(ThemeLoader, self).get_source(environment, template)
66
67 # Load regular theme data
68 theme = utils.get_config('ctf_theme')
69 template = "/".join([theme, 'templates', template])
70 return super(ThemeLoader, self).get_source(environment, template)
71
72
73 def confirm_upgrade():
74 if sys.stdin.isatty():
75 print("/*\\ CTFd has updated and must update the database! /*\\")
76 print("/*\\ Please backup your database before proceeding! /*\\")
77 print("/*\\ CTFd maintainers are not responsible for any data loss! /*\\")
78 if input('Run database migrations (Y/N)').lower().strip() == 'y':
79 return True
80 else:
81 print('/*\\ Ignored database migrations... /*\\')
82 return False
83 else:
84 return True
85
86
87 def run_upgrade():
88 upgrade()
89 utils.set_config('ctf_version', __version__)
90
91
92 def create_app(config='CTFd.config.Config'):
93 app = CTFdFlask(__name__)
94 with app.app_context():
95 app.config.from_object(config)
96
97 theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)
98 app.jinja_loader = theme_loader
99
100 from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking
101
102 url = create_database()
103
104 # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in
105 # This is mostly so we can force MySQL's charset
106 app.config['SQLALCHEMY_DATABASE_URI'] = str(url)
107
108 # Register database
109 db.init_app(app)
110
111 # Register Flask-Migrate
112 migrations.init_app(app, db)
113
114 # Alembic sqlite support is lacking so we should just create_all anyway
115 if url.drivername.startswith('sqlite'):
116 db.create_all()
117 stamp()
118 else:
119 # This creates tables instead of db.create_all()
120 # Allows migrations to happen properly
121 upgrade()
122
123 from CTFd.models import ma
124
125 ma.init_app(app)
126
127 app.db = db
128 app.VERSION = __version__
129
130 from CTFd.cache import cache
131
132 cache.init_app(app)
133 app.cache = cache
134
135 # If you have multiple workers you must have a shared cache
136 socketio.init_app(
137 app,
138 async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),
139 message_queue=app.config.get('CACHE_REDIS_URL')
140 )
141
142 if app.config.get('REVERSE_PROXY'):
143 app.wsgi_app = ProxyFix(app.wsgi_app)
144
145 version = utils.get_config('ctf_version')
146
147 # Upgrading from an older version of CTFd
148 if version and (StrictVersion(version) < StrictVersion(__version__)):
149 if confirm_upgrade():
150 run_upgrade()
151 else:
152 exit()
153
154 if not version:
155 utils.set_config('ctf_version', __version__)
156
157 if not utils.get_config('ctf_theme'):
158 utils.set_config('ctf_theme', 'core')
159
160 update_check(force=True)
161
162 init_request_processors(app)
163 init_template_filters(app)
164 init_template_globals(app)
165
166 # Importing here allows tests to use sensible names (e.g. api instead of api_bp)
167 from CTFd.views import views
168 from CTFd.teams import teams
169 from CTFd.users import users
170 from CTFd.challenges import challenges
171 from CTFd.scoreboard import scoreboard
172 from CTFd.auth import auth
173 from CTFd.admin import admin
174 from CTFd.api import api
175 from CTFd.events import events
176 from CTFd.errors import page_not_found, forbidden, general_error, gateway_error
177
178 app.register_blueprint(views)
179 app.register_blueprint(teams)
180 app.register_blueprint(users)
181 app.register_blueprint(challenges)
182 app.register_blueprint(scoreboard)
183 app.register_blueprint(auth)
184 app.register_blueprint(api)
185 app.register_blueprint(events)
186
187 app.register_blueprint(admin)
188
189 app.register_error_handler(404, page_not_found)
190 app.register_error_handler(403, forbidden)
191 app.register_error_handler(500, general_error)
192 app.register_error_handler(502, gateway_error)
193
194 init_plugins(app)
195
196 return app
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/__init__.py b/CTFd/__init__.py
--- a/CTFd/__init__.py
+++ b/CTFd/__init__.py
@@ -21,7 +21,7 @@
reload(sys)
sys.setdefaultencoding("utf-8")
-__version__ = '2.0.0'
+__version__ = '2.0.1'
class CTFdFlask(Flask):
| {"golden_diff": "diff --git a/CTFd/__init__.py b/CTFd/__init__.py\n--- a/CTFd/__init__.py\n+++ b/CTFd/__init__.py\n@@ -21,7 +21,7 @@\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n \n-__version__ = '2.0.0'\n+__version__ = '2.0.1'\n \n \n class CTFdFlask(Flask):\n", "issue": "Not possible to download files as anonymous user\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.0.0 from master\r\n - Operating System: Debian Stretch, Ubuntu 18.04, ...\r\n - Web Browser and Version: Firefox 63\r\n\r\n**What happened?**\r\n\r\n* In admin, set visibility of challenges to public\r\n* Logout\r\n* Open challenge view (`/challenges`)\r\n* Click on a challenge with a file\r\n* Try to download the file (`/files/c378d661d2c9e103c4409cd4c92d801f/alice_bob.py`\r\n* => Error 403\r\n\r\n**What did you expect to happen?**\r\n\r\n* ...\r\n* Click on a challenge with a file\r\n* Try to download the file\r\n* File downloads\r\n\r\n**How to reproduce your issue**\r\n\r\n_see above_\r\n\r\n**Any associated stack traces or error logs**\r\n\r\n_none_\n", "before_files": [{"content": "import sys\nimport os\n\nfrom distutils.version import StrictVersion\nfrom flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom jinja2 import FileSystemLoader\nfrom jinja2.sandbox import SandboxedEnvironment\nfrom six.moves import input\n\nfrom CTFd import utils\nfrom CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database\nfrom CTFd.utils.sessions import CachingSessionInterface\nfrom CTFd.utils.updates import update_check\nfrom CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals\nfrom CTFd.utils.events import socketio\nfrom CTFd.plugins import init_plugins\n\n# Hack to support Unicode in Python 2 properly\nif sys.version_info[0] < 3:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n__version__ = '2.0.0'\n\n\nclass CTFdFlask(Flask):\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden Jinja constructor setting a custom jinja_environment\"\"\"\n self.jinja_environment = SandboxedBaseEnvironment\n self.session_interface = CachingSessionInterface(key_prefix='session')\n Flask.__init__(self, *args, **kwargs)\n\n def create_jinja_environment(self):\n \"\"\"Overridden jinja environment constructor\"\"\"\n return super(CTFdFlask, self).create_jinja_environment()\n\n\nclass SandboxedBaseEnvironment(SandboxedEnvironment):\n \"\"\"SandboxEnvironment that mimics the Flask BaseEnvironment\"\"\"\n def __init__(self, app, **options):\n if 'loader' not in options:\n options['loader'] = app.create_global_jinja_loader()\n # Disable cache entirely so that themes can be switched (#662)\n # If the cache is enabled, switching themes will cause odd rendering errors\n SandboxedEnvironment.__init__(self, cache_size=0, **options)\n self.app = app\n\n\nclass ThemeLoader(FileSystemLoader):\n \"\"\"Custom FileSystemLoader that switches themes based on the configuration value\"\"\"\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)\n self.overriden_templates = {}\n\n def get_source(self, environment, template):\n # Check if the template has been overriden\n if template in self.overriden_templates:\n return self.overriden_templates[template], template, True\n\n # Check if the template requested is for the admin panel\n if template.startswith('admin/'):\n template = template[6:] # Strip out admin/\n template = \"/\".join(['admin', 'templates', template])\n return 
super(ThemeLoader, self).get_source(environment, template)\n\n # Load regular theme data\n theme = utils.get_config('ctf_theme')\n template = \"/\".join([theme, 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n\ndef confirm_upgrade():\n if sys.stdin.isatty():\n print(\"/*\\\\ CTFd has updated and must update the database! /*\\\\\")\n print(\"/*\\\\ Please backup your database before proceeding! /*\\\\\")\n print(\"/*\\\\ CTFd maintainers are not responsible for any data loss! /*\\\\\")\n if input('Run database migrations (Y/N)').lower().strip() == 'y':\n return True\n else:\n print('/*\\\\ Ignored database migrations... /*\\\\')\n return False\n else:\n return True\n\n\ndef run_upgrade():\n upgrade()\n utils.set_config('ctf_version', __version__)\n\n\ndef create_app(config='CTFd.config.Config'):\n app = CTFdFlask(__name__)\n with app.app_context():\n app.config.from_object(config)\n\n theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)\n app.jinja_loader = theme_loader\n\n from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking\n\n url = create_database()\n\n # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in\n # This is mostly so we can force MySQL's charset\n app.config['SQLALCHEMY_DATABASE_URI'] = str(url)\n\n # Register database\n db.init_app(app)\n\n # Register Flask-Migrate\n migrations.init_app(app, db)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n if url.drivername.startswith('sqlite'):\n db.create_all()\n stamp()\n else:\n # This creates tables instead of db.create_all()\n # Allows migrations to happen properly\n upgrade()\n\n from CTFd.models import ma\n\n ma.init_app(app)\n\n app.db = db\n app.VERSION = __version__\n\n from CTFd.cache import cache\n\n cache.init_app(app)\n app.cache = cache\n\n # If you have multiple workers you must have a shared cache\n socketio.init_app(\n app,\n async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),\n message_queue=app.config.get('CACHE_REDIS_URL')\n )\n\n if app.config.get('REVERSE_PROXY'):\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n version = utils.get_config('ctf_version')\n\n # Upgrading from an older version of CTFd\n if version and (StrictVersion(version) < StrictVersion(__version__)):\n if confirm_upgrade():\n run_upgrade()\n else:\n exit()\n\n if not version:\n utils.set_config('ctf_version', __version__)\n\n if not utils.get_config('ctf_theme'):\n utils.set_config('ctf_theme', 'core')\n\n update_check(force=True)\n\n init_request_processors(app)\n init_template_filters(app)\n init_template_globals(app)\n\n # Importing here allows tests to use sensible names (e.g. 
api instead of api_bp)\n from CTFd.views import views\n from CTFd.teams import teams\n from CTFd.users import users\n from CTFd.challenges import challenges\n from CTFd.scoreboard import scoreboard\n from CTFd.auth import auth\n from CTFd.admin import admin\n from CTFd.api import api\n from CTFd.events import events\n from CTFd.errors import page_not_found, forbidden, general_error, gateway_error\n\n app.register_blueprint(views)\n app.register_blueprint(teams)\n app.register_blueprint(users)\n app.register_blueprint(challenges)\n app.register_blueprint(scoreboard)\n app.register_blueprint(auth)\n app.register_blueprint(api)\n app.register_blueprint(events)\n\n app.register_blueprint(admin)\n\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(403, forbidden)\n app.register_error_handler(500, general_error)\n app.register_error_handler(502, gateway_error)\n\n init_plugins(app)\n\n return app\n", "path": "CTFd/__init__.py"}], "after_files": [{"content": "import sys\nimport os\n\nfrom distutils.version import StrictVersion\nfrom flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom jinja2 import FileSystemLoader\nfrom jinja2.sandbox import SandboxedEnvironment\nfrom six.moves import input\n\nfrom CTFd import utils\nfrom CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database\nfrom CTFd.utils.sessions import CachingSessionInterface\nfrom CTFd.utils.updates import update_check\nfrom CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals\nfrom CTFd.utils.events import socketio\nfrom CTFd.plugins import init_plugins\n\n# Hack to support Unicode in Python 2 properly\nif sys.version_info[0] < 3:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n__version__ = '2.0.1'\n\n\nclass CTFdFlask(Flask):\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden Jinja constructor setting a custom jinja_environment\"\"\"\n self.jinja_environment = SandboxedBaseEnvironment\n self.session_interface = CachingSessionInterface(key_prefix='session')\n Flask.__init__(self, *args, **kwargs)\n\n def create_jinja_environment(self):\n \"\"\"Overridden jinja environment constructor\"\"\"\n return super(CTFdFlask, self).create_jinja_environment()\n\n\nclass SandboxedBaseEnvironment(SandboxedEnvironment):\n \"\"\"SandboxEnvironment that mimics the Flask BaseEnvironment\"\"\"\n def __init__(self, app, **options):\n if 'loader' not in options:\n options['loader'] = app.create_global_jinja_loader()\n # Disable cache entirely so that themes can be switched (#662)\n # If the cache is enabled, switching themes will cause odd rendering errors\n SandboxedEnvironment.__init__(self, cache_size=0, **options)\n self.app = app\n\n\nclass ThemeLoader(FileSystemLoader):\n \"\"\"Custom FileSystemLoader that switches themes based on the configuration value\"\"\"\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)\n self.overriden_templates = {}\n\n def get_source(self, environment, template):\n # Check if the template has been overriden\n if template in self.overriden_templates:\n return self.overriden_templates[template], template, True\n\n # Check if the template requested is for the admin panel\n if template.startswith('admin/'):\n template = template[6:] # Strip out admin/\n template = \"/\".join(['admin', 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n # Load regular theme data\n 
theme = utils.get_config('ctf_theme')\n template = \"/\".join([theme, 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n\ndef confirm_upgrade():\n if sys.stdin.isatty():\n print(\"/*\\\\ CTFd has updated and must update the database! /*\\\\\")\n print(\"/*\\\\ Please backup your database before proceeding! /*\\\\\")\n print(\"/*\\\\ CTFd maintainers are not responsible for any data loss! /*\\\\\")\n if input('Run database migrations (Y/N)').lower().strip() == 'y':\n return True\n else:\n print('/*\\\\ Ignored database migrations... /*\\\\')\n return False\n else:\n return True\n\n\ndef run_upgrade():\n upgrade()\n utils.set_config('ctf_version', __version__)\n\n\ndef create_app(config='CTFd.config.Config'):\n app = CTFdFlask(__name__)\n with app.app_context():\n app.config.from_object(config)\n\n theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)\n app.jinja_loader = theme_loader\n\n from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking\n\n url = create_database()\n\n # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in\n # This is mostly so we can force MySQL's charset\n app.config['SQLALCHEMY_DATABASE_URI'] = str(url)\n\n # Register database\n db.init_app(app)\n\n # Register Flask-Migrate\n migrations.init_app(app, db)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n if url.drivername.startswith('sqlite'):\n db.create_all()\n stamp()\n else:\n # This creates tables instead of db.create_all()\n # Allows migrations to happen properly\n upgrade()\n\n from CTFd.models import ma\n\n ma.init_app(app)\n\n app.db = db\n app.VERSION = __version__\n\n from CTFd.cache import cache\n\n cache.init_app(app)\n app.cache = cache\n\n # If you have multiple workers you must have a shared cache\n socketio.init_app(\n app,\n async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),\n message_queue=app.config.get('CACHE_REDIS_URL')\n )\n\n if app.config.get('REVERSE_PROXY'):\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n version = utils.get_config('ctf_version')\n\n # Upgrading from an older version of CTFd\n if version and (StrictVersion(version) < StrictVersion(__version__)):\n if confirm_upgrade():\n run_upgrade()\n else:\n exit()\n\n if not version:\n utils.set_config('ctf_version', __version__)\n\n if not utils.get_config('ctf_theme'):\n utils.set_config('ctf_theme', 'core')\n\n update_check(force=True)\n\n init_request_processors(app)\n init_template_filters(app)\n init_template_globals(app)\n\n # Importing here allows tests to use sensible names (e.g. 
api instead of api_bp)\n from CTFd.views import views\n from CTFd.teams import teams\n from CTFd.users import users\n from CTFd.challenges import challenges\n from CTFd.scoreboard import scoreboard\n from CTFd.auth import auth\n from CTFd.admin import admin\n from CTFd.api import api\n from CTFd.events import events\n from CTFd.errors import page_not_found, forbidden, general_error, gateway_error\n\n app.register_blueprint(views)\n app.register_blueprint(teams)\n app.register_blueprint(users)\n app.register_blueprint(challenges)\n app.register_blueprint(scoreboard)\n app.register_blueprint(auth)\n app.register_blueprint(api)\n app.register_blueprint(events)\n\n app.register_blueprint(admin)\n\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(403, forbidden)\n app.register_error_handler(500, general_error)\n app.register_error_handler(502, gateway_error)\n\n init_plugins(app)\n\n return app\n", "path": "CTFd/__init__.py"}]} | 2,475 | 100 |
gh_patches_debug_26189 | rasdani/github-patches | git_diff | pytorch__vision-870 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid hash error on ImageNet dataset
In my environment, the md5 value of the `meta.bin` file used by the ImageNet dataset is different from the value defined in `imagenet.py`.
`meta.bin` is generated by `torch.save` in the code, and I found that Python 2 and Python 3 generate different files.
The md5sum hashes are as follows.
- (defined) `7e0d3cf156177e4fc47011cdd30ce706`
- (Python 2.7.16, Ubuntu) `a36fd93cf3900286d99e24ad0a73ce04`
- (Python 3.7.3, Ubuntu) `ca981e8aac175178e80e7949d90ee85c`
https://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L23-L26
https://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L117-L118
--- END ISSUE ---
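A minimal sketch of how the mismatch can be checked, assuming only `hashlib` and `torch` (the sample object passed to `torch.save` is illustrative, not the real devkit metadata): the bytes written by `torch.save` depend on the interpreter and pickle details, so a hard-coded md5 of a locally generated `meta.bin` cannot match on every system.

```python
import hashlib

import torch


def file_md5(path, chunk_size=1 << 20):
    """md5 hex digest of a file, the same quantity check_integrity() compares."""
    md5 = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            md5.update(chunk)
    return md5.hexdigest()


# meta.bin is written locally by _save_meta_file() via torch.save, so its md5
# varies between Python 2 and Python 3 and will not equal META_DICT['md5'].
torch.save(({"n01440764": ("tench",)}, ["n01440764"]), "meta.bin")
print(file_md5("meta.bin"))
```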
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/imagenet.py`
Content:
```
1 from __future__ import print_function
2 import os
3 import shutil
4 import torch
5 from .folder import ImageFolder
6 from .utils import check_integrity, download_url
7
8 ARCHIVE_DICT = {
9 'train': {
10 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',
11 'md5': '1d675b47d978889d74fa0da5fadfb00e',
12 },
13 'val': {
14 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',
15 'md5': '29b22e2961454d5413ddabcf34fc5622',
16 },
17 'devkit': {
18 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',
19 'md5': 'fa75699e90414af021442c21a62c3abf',
20 }
21 }
22
23 META_DICT = {
24 'filename': 'meta.bin',
25 'md5': '7e0d3cf156177e4fc47011cdd30ce706',
26 }
27
28
29 class ImageNet(ImageFolder):
30 """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
31
32 Args:
33 root (string): Root directory of the ImageNet Dataset.
34 split (string, optional): The dataset split, supports ``train``, or ``val``.
35 download (bool, optional): If true, downloads the dataset from the internet and
36 puts it in root directory. If dataset is already downloaded, it is not
37 downloaded again.
38 transform (callable, optional): A function/transform that takes in an PIL image
39 and returns a transformed version. E.g, ``transforms.RandomCrop``
40 target_transform (callable, optional): A function/transform that takes in the
41 target and transforms it.
42 loader (callable, optional): A function to load an image given its path.
43
44 Attributes:
45 classes (list): List of the class names.
46 class_to_idx (dict): Dict with items (class_name, class_index).
47 wnids (list): List of the WordNet IDs.
48 wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
49 imgs (list): List of (image path, class_index) tuples
50 targets (list): The class_index value for each image in the dataset
51 """
52
53 def __init__(self, root, split='train', download=False, **kwargs):
54 root = self.root = os.path.expanduser(root)
55 self.split = self._verify_split(split)
56
57 if download:
58 self.download()
59 wnid_to_classes = self._load_meta_file()[0]
60
61 super(ImageNet, self).__init__(self.split_folder, **kwargs)
62 self.root = root
63
64 idcs = [idx for _, idx in self.imgs]
65 self.wnids = self.classes
66 self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}
67 self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
68 self.class_to_idx = {cls: idx
69 for clss, idx in zip(self.classes, idcs)
70 for cls in clss}
71
72 def download(self):
73 if not self._check_meta_file_integrity():
74 tmpdir = os.path.join(self.root, 'tmp')
75
76 archive_dict = ARCHIVE_DICT['devkit']
77 download_and_extract_tar(archive_dict['url'], self.root,
78 extract_root=tmpdir,
79 md5=archive_dict['md5'])
80 devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]
81 meta = parse_devkit(os.path.join(tmpdir, devkit_folder))
82 self._save_meta_file(*meta)
83
84 shutil.rmtree(tmpdir)
85
86 if not os.path.isdir(self.split_folder):
87 archive_dict = ARCHIVE_DICT[self.split]
88 download_and_extract_tar(archive_dict['url'], self.root,
89 extract_root=self.split_folder,
90 md5=archive_dict['md5'])
91
92 if self.split == 'train':
93 prepare_train_folder(self.split_folder)
94 elif self.split == 'val':
95 val_wnids = self._load_meta_file()[1]
96 prepare_val_folder(self.split_folder, val_wnids)
97 else:
98 msg = ("You set download=True, but a folder '{}' already exist in "
99 "the root directory. If you want to re-download or re-extract the "
100 "archive, delete the folder.")
101 print(msg.format(self.split))
102
103 @property
104 def meta_file(self):
105 return os.path.join(self.root, META_DICT['filename'])
106
107 def _check_meta_file_integrity(self):
108 return check_integrity(self.meta_file, META_DICT['md5'])
109
110 def _load_meta_file(self):
111 if self._check_meta_file_integrity():
112 return torch.load(self.meta_file)
113 else:
114 raise RuntimeError("Meta file not found or corrupted.",
115 "You can use download=True to create it.")
116
117 def _save_meta_file(self, wnid_to_class, val_wnids):
118 torch.save((wnid_to_class, val_wnids), self.meta_file)
119
120 def _verify_split(self, split):
121 if split not in self.valid_splits:
122 msg = "Unknown split {} .".format(split)
123 msg += "Valid splits are {{}}.".format(", ".join(self.valid_splits))
124 raise ValueError(msg)
125 return split
126
127 @property
128 def valid_splits(self):
129 return 'train', 'val'
130
131 @property
132 def split_folder(self):
133 return os.path.join(self.root, self.split)
134
135 def extra_repr(self):
136 return "Split: {split}".format(**self.__dict__)
137
138
139 def extract_tar(src, dest=None, gzip=None, delete=False):
140 import tarfile
141
142 if dest is None:
143 dest = os.path.dirname(src)
144 if gzip is None:
145 gzip = src.lower().endswith('.gz')
146
147 mode = 'r:gz' if gzip else 'r'
148 with tarfile.open(src, mode) as tarfh:
149 tarfh.extractall(path=dest)
150
151 if delete:
152 os.remove(src)
153
154
155 def download_and_extract_tar(url, download_root, extract_root=None, filename=None,
156 md5=None, **kwargs):
157 download_root = os.path.expanduser(download_root)
158 if extract_root is None:
159 extract_root = download_root
160 if filename is None:
161 filename = os.path.basename(url)
162
163 if not check_integrity(os.path.join(download_root, filename), md5):
164 download_url(url, download_root, filename=filename, md5=md5)
165
166 extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)
167
168
169 def parse_devkit(root):
170 idx_to_wnid, wnid_to_classes = parse_meta(root)
171 val_idcs = parse_val_groundtruth(root)
172 val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
173 return wnid_to_classes, val_wnids
174
175
176 def parse_meta(devkit_root, path='data', filename='meta.mat'):
177 import scipy.io as sio
178
179 metafile = os.path.join(devkit_root, path, filename)
180 meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
181 nums_children = list(zip(*meta))[4]
182 meta = [meta[idx] for idx, num_children in enumerate(nums_children)
183 if num_children == 0]
184 idcs, wnids, classes = list(zip(*meta))[:3]
185 classes = [tuple(clss.split(', ')) for clss in classes]
186 idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
187 wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
188 return idx_to_wnid, wnid_to_classes
189
190
191 def parse_val_groundtruth(devkit_root, path='data',
192 filename='ILSVRC2012_validation_ground_truth.txt'):
193 with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:
194 val_idcs = txtfh.readlines()
195 return [int(val_idx) for val_idx in val_idcs]
196
197
198 def prepare_train_folder(folder):
199 for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:
200 extract_tar(archive, os.path.splitext(archive)[0], delete=True)
201
202
203 def prepare_val_folder(folder, wnids):
204 img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])
205
206 for wnid in set(wnids):
207 os.mkdir(os.path.join(folder, wnid))
208
209 for wnid, img_file in zip(wnids, img_files):
210 shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))
211
212
213 def _splitexts(root):
214 exts = []
215 ext = '.'
216 while ext:
217 root, ext = os.path.splitext(root)
218 exts.append(ext)
219 return root, ''.join(reversed(exts))
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py
--- a/torchvision/datasets/imagenet.py
+++ b/torchvision/datasets/imagenet.py
@@ -20,11 +20,6 @@
}
}
-META_DICT = {
- 'filename': 'meta.bin',
- 'md5': '7e0d3cf156177e4fc47011cdd30ce706',
-}
-
class ImageNet(ImageFolder):
"""`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
@@ -70,7 +65,7 @@
for cls in clss}
def download(self):
- if not self._check_meta_file_integrity():
+ if not check_integrity(self.meta_file):
tmpdir = os.path.join(self.root, 'tmp')
archive_dict = ARCHIVE_DICT['devkit']
@@ -102,13 +97,10 @@
@property
def meta_file(self):
- return os.path.join(self.root, META_DICT['filename'])
-
- def _check_meta_file_integrity(self):
- return check_integrity(self.meta_file, META_DICT['md5'])
+ return os.path.join(self.root, 'meta.bin')
def _load_meta_file(self):
- if self._check_meta_file_integrity():
+ if check_integrity(self.meta_file):
return torch.load(self.meta_file)
else:
raise RuntimeError("Meta file not found or corrupted.",
| {"golden_diff": "diff --git a/torchvision/datasets/imagenet.py b/torchvision/datasets/imagenet.py\n--- a/torchvision/datasets/imagenet.py\n+++ b/torchvision/datasets/imagenet.py\n@@ -20,11 +20,6 @@\n }\n }\n \n-META_DICT = {\n- 'filename': 'meta.bin',\n- 'md5': '7e0d3cf156177e4fc47011cdd30ce706',\n-}\n-\n \n class ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n@@ -70,7 +65,7 @@\n for cls in clss}\n \n def download(self):\n- if not self._check_meta_file_integrity():\n+ if not check_integrity(self.meta_file):\n tmpdir = os.path.join(self.root, 'tmp')\n \n archive_dict = ARCHIVE_DICT['devkit']\n@@ -102,13 +97,10 @@\n \n @property\n def meta_file(self):\n- return os.path.join(self.root, META_DICT['filename'])\n-\n- def _check_meta_file_integrity(self):\n- return check_integrity(self.meta_file, META_DICT['md5'])\n+ return os.path.join(self.root, 'meta.bin')\n \n def _load_meta_file(self):\n- if self._check_meta_file_integrity():\n+ if check_integrity(self.meta_file):\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n", "issue": "Invalid hash error on ImageNet dataset\nIn my environment, md5 value of `meta.bin` used in ImageNet dataset is different from the value defined in `imagenet.py`.\r\n\r\n`meta.bin` is generated by `torch.save` in the code. I found python2 and3 generate different files.\r\n\r\nmd5sum hashes are as follows.\r\n\r\n- (defined) `7e0d3cf156177e4fc47011cdd30ce706`\r\n- (Python 2.7.16, Ubuntu) `a36fd93cf3900286d99e24ad0a73ce04`\r\n- (Python 3.7.3, Ubuntu) `ca981e8aac175178e80e7949d90ee85c`\r\n\r\nhttps://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L23-L26\r\n\r\nhttps://github.com/pytorch/vision/blob/9a481d0bec2700763a799ff148fe2e083b575441/torchvision/datasets/imagenet.py#L117-L118\n", "before_files": [{"content": "from __future__ import print_function\nimport os\nimport shutil\nimport torch\nfrom .folder import ImageFolder\nfrom .utils import check_integrity, download_url\n\nARCHIVE_DICT = {\n 'train': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n 'md5': '1d675b47d978889d74fa0da5fadfb00e',\n },\n 'val': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n 'md5': '29b22e2961454d5413ddabcf34fc5622',\n },\n 'devkit': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n 'md5': 'fa75699e90414af021442c21a62c3abf',\n }\n}\n\nMETA_DICT = {\n 'filename': 'meta.bin',\n 'md5': '7e0d3cf156177e4fc47011cdd30ce706',\n}\n\n\nclass ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n Args:\n root (string): Root directory of the ImageNet Dataset.\n split (string, optional): The dataset split, supports ``train``, or ``val``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n wnid_to_idx (dict): Dict with items (wordnet_id, class_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, split='train', download=False, **kwargs):\n root = self.root = os.path.expanduser(root)\n self.split = self._verify_split(split)\n\n if download:\n self.download()\n wnid_to_classes = self._load_meta_file()[0]\n\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n\n idcs = [idx for _, idx in self.imgs]\n self.wnids = self.classes\n self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n for clss, idx in zip(self.classes, idcs)\n for cls in clss}\n\n def download(self):\n if not self._check_meta_file_integrity():\n tmpdir = os.path.join(self.root, 'tmp')\n\n archive_dict = ARCHIVE_DICT['devkit']\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=tmpdir,\n md5=archive_dict['md5'])\n devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n meta = parse_devkit(os.path.join(tmpdir, devkit_folder))\n self._save_meta_file(*meta)\n\n shutil.rmtree(tmpdir)\n\n if not os.path.isdir(self.split_folder):\n archive_dict = ARCHIVE_DICT[self.split]\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=self.split_folder,\n md5=archive_dict['md5'])\n\n if self.split == 'train':\n prepare_train_folder(self.split_folder)\n elif self.split == 'val':\n val_wnids = self._load_meta_file()[1]\n prepare_val_folder(self.split_folder, val_wnids)\n else:\n msg = (\"You set download=True, but a folder '{}' already exist in \"\n \"the root directory. 
If you want to re-download or re-extract the \"\n \"archive, delete the folder.\")\n print(msg.format(self.split))\n\n @property\n def meta_file(self):\n return os.path.join(self.root, META_DICT['filename'])\n\n def _check_meta_file_integrity(self):\n return check_integrity(self.meta_file, META_DICT['md5'])\n\n def _load_meta_file(self):\n if self._check_meta_file_integrity():\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n \"You can use download=True to create it.\")\n\n def _save_meta_file(self, wnid_to_class, val_wnids):\n torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n def _verify_split(self, split):\n if split not in self.valid_splits:\n msg = \"Unknown split {} .\".format(split)\n msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n raise ValueError(msg)\n return split\n\n @property\n def valid_splits(self):\n return 'train', 'val'\n\n @property\n def split_folder(self):\n return os.path.join(self.root, self.split)\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n\ndef extract_tar(src, dest=None, gzip=None, delete=False):\n import tarfile\n\n if dest is None:\n dest = os.path.dirname(src)\n if gzip is None:\n gzip = src.lower().endswith('.gz')\n\n mode = 'r:gz' if gzip else 'r'\n with tarfile.open(src, mode) as tarfh:\n tarfh.extractall(path=dest)\n\n if delete:\n os.remove(src)\n\n\ndef download_and_extract_tar(url, download_root, extract_root=None, filename=None,\n md5=None, **kwargs):\n download_root = os.path.expanduser(download_root)\n if extract_root is None:\n extract_root = download_root\n if filename is None:\n filename = os.path.basename(url)\n\n if not check_integrity(os.path.join(download_root, filename), md5):\n download_url(url, download_root, filename=filename, md5=md5)\n\n extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)\n\n\ndef parse_devkit(root):\n idx_to_wnid, wnid_to_classes = parse_meta(root)\n val_idcs = parse_val_groundtruth(root)\n val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n import scipy.io as sio\n\n metafile = os.path.join(devkit_root, path, filename)\n meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n nums_children = list(zip(*meta))[4]\n meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n if num_children == 0]\n idcs, wnids, classes = list(zip(*meta))[:3]\n classes = [tuple(clss.split(', ')) for clss in classes]\n idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n filename='ILSVRC2012_validation_ground_truth.txt'):\n with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n val_idcs = txtfh.readlines()\n return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n extract_tar(archive, os.path.splitext(archive)[0], delete=True)\n\n\ndef prepare_val_folder(folder, wnids):\n img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n for wnid in set(wnids):\n os.mkdir(os.path.join(folder, wnid))\n\n for wnid, img_file in zip(wnids, img_files):\n shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n exts = 
[]\n ext = '.'\n while ext:\n root, ext = os.path.splitext(root)\n exts.append(ext)\n return root, ''.join(reversed(exts))\n", "path": "torchvision/datasets/imagenet.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport shutil\nimport torch\nfrom .folder import ImageFolder\nfrom .utils import check_integrity, download_url\n\nARCHIVE_DICT = {\n 'train': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',\n 'md5': '1d675b47d978889d74fa0da5fadfb00e',\n },\n 'val': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',\n 'md5': '29b22e2961454d5413ddabcf34fc5622',\n },\n 'devkit': {\n 'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',\n 'md5': 'fa75699e90414af021442c21a62c3abf',\n }\n}\n\n\nclass ImageNet(ImageFolder):\n \"\"\"`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.\n\n Args:\n root (string): Root directory of the ImageNet Dataset.\n split (string, optional): The dataset split, supports ``train``, or ``val``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n\n Attributes:\n classes (list): List of the class names.\n class_to_idx (dict): Dict with items (class_name, class_index).\n wnids (list): List of the WordNet IDs.\n class_to_idx (dict): Dict with items (wordnet_id, wordnet_id_index).\n imgs (list): List of (image path, class_index) tuples\n targets (list): The class_index value for each image in the dataset\n \"\"\"\n\n def __init__(self, root, split='train', download=False, **kwargs):\n root = self.root = os.path.expanduser(root)\n self.split = self._verify_split(split)\n\n if download:\n self.download()\n wnid_to_classes = self._load_meta_file()[0]\n\n super(ImageNet, self).__init__(self.split_folder, **kwargs)\n self.root = root\n\n idcs = [idx for _, idx in self.imgs]\n self.wnids = self.classes\n self.wnid_to_idx = {wnid: idx for idx, wnid in zip(idcs, self.wnids)}\n self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]\n self.class_to_idx = {cls: idx\n for clss, idx in zip(self.classes, idcs)\n for cls in clss}\n\n def download(self):\n if not check_integrity(self.meta_file):\n tmpdir = os.path.join(self.root, 'tmp')\n\n archive_dict = ARCHIVE_DICT['devkit']\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=tmpdir,\n md5=archive_dict['md5'])\n devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]\n meta = parse_devkit(os.path.join(tmpdir, devkit_folder))\n self._save_meta_file(*meta)\n\n shutil.rmtree(tmpdir)\n\n if not os.path.isdir(self.split_folder):\n archive_dict = ARCHIVE_DICT[self.split]\n download_and_extract_tar(archive_dict['url'], self.root,\n extract_root=self.split_folder,\n md5=archive_dict['md5'])\n\n if self.split == 'train':\n prepare_train_folder(self.split_folder)\n elif self.split == 'val':\n val_wnids = self._load_meta_file()[1]\n prepare_val_folder(self.split_folder, val_wnids)\n else:\n msg = (\"You set download=True, but a folder '{}' already exist in \"\n \"the root directory. 
If you want to re-download or re-extract the \"\n \"archive, delete the folder.\")\n print(msg.format(self.split))\n\n @property\n def meta_file(self):\n return os.path.join(self.root, 'meta.bin')\n\n def _load_meta_file(self):\n if check_integrity(self.meta_file):\n return torch.load(self.meta_file)\n else:\n raise RuntimeError(\"Meta file not found or corrupted.\",\n \"You can use download=True to create it.\")\n\n def _save_meta_file(self, wnid_to_class, val_wnids):\n torch.save((wnid_to_class, val_wnids), self.meta_file)\n\n def _verify_split(self, split):\n if split not in self.valid_splits:\n msg = \"Unknown split {} .\".format(split)\n msg += \"Valid splits are {{}}.\".format(\", \".join(self.valid_splits))\n raise ValueError(msg)\n return split\n\n @property\n def valid_splits(self):\n return 'train', 'val'\n\n @property\n def split_folder(self):\n return os.path.join(self.root, self.split)\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n\ndef extract_tar(src, dest=None, gzip=None, delete=False):\n import tarfile\n\n if dest is None:\n dest = os.path.dirname(src)\n if gzip is None:\n gzip = src.lower().endswith('.gz')\n\n mode = 'r:gz' if gzip else 'r'\n with tarfile.open(src, mode) as tarfh:\n tarfh.extractall(path=dest)\n\n if delete:\n os.remove(src)\n\n\ndef download_and_extract_tar(url, download_root, extract_root=None, filename=None,\n md5=None, **kwargs):\n download_root = os.path.expanduser(download_root)\n if extract_root is None:\n extract_root = extract_root\n if filename is None:\n filename = os.path.basename(url)\n\n if not check_integrity(os.path.join(download_root, filename), md5):\n download_url(url, download_root, filename=filename, md5=md5)\n\n extract_tar(os.path.join(download_root, filename), extract_root, **kwargs)\n\n\ndef parse_devkit(root):\n idx_to_wnid, wnid_to_classes = parse_meta(root)\n val_idcs = parse_val_groundtruth(root)\n val_wnids = [idx_to_wnid[idx] for idx in val_idcs]\n return wnid_to_classes, val_wnids\n\n\ndef parse_meta(devkit_root, path='data', filename='meta.mat'):\n import scipy.io as sio\n\n metafile = os.path.join(devkit_root, path, filename)\n meta = sio.loadmat(metafile, squeeze_me=True)['synsets']\n nums_children = list(zip(*meta))[4]\n meta = [meta[idx] for idx, num_children in enumerate(nums_children)\n if num_children == 0]\n idcs, wnids, classes = list(zip(*meta))[:3]\n classes = [tuple(clss.split(', ')) for clss in classes]\n idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}\n wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}\n return idx_to_wnid, wnid_to_classes\n\n\ndef parse_val_groundtruth(devkit_root, path='data',\n filename='ILSVRC2012_validation_ground_truth.txt'):\n with open(os.path.join(devkit_root, path, filename), 'r') as txtfh:\n val_idcs = txtfh.readlines()\n return [int(val_idx) for val_idx in val_idcs]\n\n\ndef prepare_train_folder(folder):\n for archive in [os.path.join(folder, archive) for archive in os.listdir(folder)]:\n extract_tar(archive, os.path.splitext(archive)[0], delete=True)\n\n\ndef prepare_val_folder(folder, wnids):\n img_files = sorted([os.path.join(folder, file) for file in os.listdir(folder)])\n\n for wnid in set(wnids):\n os.mkdir(os.path.join(folder, wnid))\n\n for wnid, img_file in zip(wnids, img_files):\n shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))\n\n\ndef _splitexts(root):\n exts = []\n ext = '.'\n while ext:\n root, ext = os.path.splitext(root)\n exts.append(ext)\n return root, 
''.join(reversed(exts))\n", "path": "torchvision/datasets/imagenet.py"}]} | 3,222 | 353 |
gh_patches_debug_29960 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-222 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rewrite the Telegram integration
We currently use a rather [murky proxy service](https://github.com/f213/education-backend/blob/master/src/app/integrations/tg.py#L4) to send messages to Telegram. It ended up this way because the Telegram API was blocked by Roskomnadzor when we launched.
We need to stop using this service. While we are at it, we should also make it possible to configure separate notification channels for new orders for different courses.
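For illustration, a minimal sketch of what a direct Bot API call could look like (the helper name and settings names here are assumptions for the sketch, not the final implementation):

```python
import requests
from django.conf import settings


def send_telegram_message(chat_id: str, text: str) -> None:
    # Post straight to the Telegram Bot API instead of the old proxy service.
    url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage'  # BOT_TOKEN is an assumed setting
    response = requests.post(url, data={
        'chat_id': chat_id,  # pass a per-course channel id here to split notifications
        'text': text,
        'parse_mode': 'markdown',
    })
    assert response.status_code == 200, 'Telegram should return 200'
```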
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/app/integrations/tg.py`
Content:
```
1 import requests
2
3
4 def send_happiness_message(text):
5 response = requests.post('https://timepad.f213.in/msg/', json={
6 'text': text,
7 })
8
9 assert response.status_code == 200, 'TG proxy should return 200'
10 assert response.json()['ok'] is True, 'TG proxy should say msg is ok'
11
```
Path: `src/app/settings.py`
Content:
```
1 import environ
2 import os
3 from celery.schedules import crontab
4
5 root = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)
6 env = environ.Env(DEBUG=(bool, False)) # set default values and casting
7 environ.Env.read_env() # reading .env file
8 SITE_ROOT = root()
9
10 USE_L10N = True
11 USE_i18N = True
12
13 LANGUAGE_CODE = 'ru'
14 LOCALE_PATHS = ['locale']
15
16 INTERNAL_IPS = [
17 '127.0.0.1',
18 ]
19 FRONTEND_URL = 'https://education.borshev.com'
20
21 USE_TZ = False
22 TIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')
23
24 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
25 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
26 TEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'
27
28
29 # Quick-start development settings - unsuitable for production
30 # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
31
32 # SECURITY WARNING: keep the secret key used in production secret!
33 SECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')
34
35 # SECURITY WARNING: don't run with debug turned on in production!
36 DEBUG = env('DEBUG', cast=bool, default=False)
37 CI = env('CI', cast=bool, default=False)
38 ANONYMIZE_ENABLED = DEBUG
39
40 ABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')
41 ALLOWED_HOSTS = [
42 'edu-app.borshev.com',
43 'localhost',
44 'localhost:8000',
45 'education.borshev.com',
46 ABSOLUTE_HOST.replace('https://', ''),
47 ]
48
49 CORS_ORIGIN_WHITELIST = [
50 'https://pmdaily.ru',
51 'https://education.borshev.com',
52 ]
53
54 CSRF_TRUSTED_ORIGINS = [
55 'pmdaily.ru',
56 'education.borshev.com',
57 'borshev.com',
58 ]
59
60
61 # Application definition
62
63 INSTALLED_APPS = [
64 'app',
65 'users',
66 'orders',
67 'products',
68 'shipping',
69 'tinkoff',
70 'triggers',
71 'magnets',
72 'banking',
73
74 'corsheaders',
75 'hattori',
76 'anymail',
77 'rest_framework',
78 'rest_framework.authtoken',
79 'drf_recaptcha',
80 'django_filters',
81
82 'axes',
83 'django.contrib.admin',
84 'django.contrib.auth',
85 'django.contrib.contenttypes',
86 'django.contrib.sessions',
87 'django.contrib.messages',
88 'django.contrib.staticfiles',
89
90 'debug_toolbar',
91 ]
92
93 MIDDLEWARE = [
94 'django.middleware.security.SecurityMiddleware',
95
96 'django.contrib.sessions.middleware.SessionMiddleware',
97 'corsheaders.middleware.CorsMiddleware',
98 'django.middleware.common.CommonMiddleware',
99 # 'django.middleware.csrf.CsrfViewMiddleware',
100 'django.contrib.auth.middleware.AuthenticationMiddleware',
101 'django.contrib.auth.middleware.RemoteUserMiddleware',
102 'django.contrib.messages.middleware.MessageMiddleware',
103 'django.middleware.clickjacking.XFrameOptionsMiddleware',
104 'app.middleware.real_ip.real_ip_middleware',
105 'axes.middleware.AxesMiddleware',
106 'debug_toolbar.middleware.DebugToolbarMiddleware',
107 ]
108
109 if not DEBUG and not CI:
110 MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')
111
112
113 REST_FRAMEWORK = {
114 'DEFAULT_PERMISSION_CLASSES': (
115 'rest_framework.permissions.IsAuthenticated',
116 ),
117 'DEFAULT_AUTHENTICATION_CLASSES': (
118 'rest_framework.authentication.TokenAuthentication',
119 ),
120 'DEFAULT_RENDERER_CLASSES': [
121 'app.renderers.AppJSONRenderer',
122 ],
123 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
124 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',
125 'PAGE_SIZE': 20,
126 }
127
128 ROOT_URLCONF = 'app.urls'
129
130 TEMPLATES = [
131 {
132 'BACKEND': 'django.template.backends.django.DjangoTemplates',
133 'DIRS': [],
134 'APP_DIRS': True,
135 'OPTIONS': {
136 'context_processors': [
137 'django.template.context_processors.debug',
138 'django.template.context_processors.request',
139 'django.contrib.auth.context_processors.auth',
140 'django.contrib.messages.context_processors.messages',
141 ],
142 },
143 },
144 ]
145
146 WSGI_APPLICATION = 'app.wsgi.application'
147
148
149 # Database
150 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
151 DATABASES = {
152 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
153 }
154 AUTH_USER_MODEL = 'users.User'
155 AUTHENTICATION_BACKENDS = [
156 'axes.backends.AxesBackend',
157 'django.contrib.auth.backends.ModelBackend',
158 'django.contrib.auth.backends.RemoteUserBackend',
159 ]
160
161 HEALTH_CHECKS_ERROR_CODE = 503
162 HEALTH_CHECKS = {
163 'db': 'django_healthchecks.contrib.check_database',
164 }
165
166 MEDIA_URL = env('MEDIA_URL', default='/media/')
167
168 STATIC_URL = env('STATIC_URL', default='/static/')
169 STATIC_ROOT = env('STATIC_ROOT')
170
171 SENTRY_DSN = env('SENTRY_DSN', cast=str, default='')
172
173 if not DEBUG and SENTRY_DSN:
174 import sentry_sdk
175 from sentry_sdk.integrations.celery import CeleryIntegration
176 from sentry_sdk.integrations.django import DjangoIntegration
177 from sentry_sdk.integrations.redis import RedisIntegration
178
179 sentry_sdk.init(
180 dsn=SENTRY_DSN,
181 integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],
182 )
183
184 BROKER_URL = env('CELERY_BACKEND')
185 CELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.
186 CELERY_TIMEZONE = TIME_ZONE
187 CELERY_ENABLE_UTC = False
188 CELERYBEAT_SCHEDULE = {
189 'run_started_purchase_trigger': {
190 'task': 'triggers.tasks.check_for_started_purchase_triggers',
191 'schedule': crontab(hour='*', minute=15),
192 },
193 'run_record_feedback_trigger': {
194 'task': 'triggers.tasks.check_for_record_feedback_triggers',
195 'schedule': crontab(hour='*', minute=15),
196 },
197 'ship_unshipped_orders': {
198 'task': 'orders.tasks.ship_unshipped_orders',
199 'schedule': crontab(hour='*', minute='*/2'),
200 },
201 }
202
203
204 AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)
205 AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)
206 AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)
207 AWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)
208 AWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)
209
210 EMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)
211
212 EMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
213
214 MAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')
215 MAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None)
216
217 DEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')
218 ANYMAIL = {
219 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),
220 'DEBUG_API_REQUESTS': env('DEBUG'),
221 }
222
223 CLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)
224
225 ZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)
226 ZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)
227
228 TINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)
229 TINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)
230 TINKOFF_CREDIT_SHOP_ID = env('TINKOFF_CREDIT_SHOP_ID', default=None)
231 TINKOFF_CREDIT_SHOWCASE_ID = env('TINKOFF_CREDIT_SHOWCASE_ID', default=None)
232
233 TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)
234
235 SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)
236
237 DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')
238 DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)
239
```
Path: `src/orders/services/order_shipper.py`
Content:
```
1 from django.conf import settings
2 from django.utils import timezone
3
4 from app.tasks import send_happiness_message, send_mail
5 from orders.models import Order
6
7
8 class Pigwidgeon:
9 """Ship the order (actualy calls item ship() method)"""
10 def __init__(self, order: Order, silent: bool = False):
11 self.order = order
12 self.silent = silent
13
14 def __call__(self):
15 if self.ship():
16 self.mark_order_as_shipped()
17
18 if not self.order.notification_to_giver_is_sent:
19 self.send_notification_to_giver()
20
21 if not self.silent:
22 self.send_happiness_message()
23
24 def ship(self) -> bool:
25 """Ship the order. Returns true if order is shipped"""
26 desired_date = self.order.desired_shipment_date
27 if desired_date is None or desired_date <= timezone.now():
28 self.order.item.ship(to=self.order.user, order=self.order)
29
30 return True
31
32 return False
33
34 def mark_order_as_shipped(self):
35 self.order.shipped = timezone.now()
36 self.order.save()
37
38 def send_happiness_message(self):
39 if not settings.SEND_HAPPINESS_MESSAGES:
40 return
41
42 send_happiness_message.delay(text='💰+{sum} ₽, {user}, {reason}'.format(
43 sum=str(self.order.price).replace('.00', ''),
44 user=str(self.order.user),
45 reason=str(self.order.item) if self.order.giver is None else f'{self.order.item} (подарок)',
46 ))
47
48 def send_notification_to_giver(self):
49 if self.order.giver is None:
50 return
51
52 if self.order.desired_shipment_date is None:
53 return
54
55 send_mail.delay(
56 to=self.order.giver.email,
57 template_id='gift-notification-for-giver', # postmark
58 disable_antispam=True,
59 ctx={
60 'item_name': self.order.item.full_name,
61 'receiver_name': str(self.order.user),
62 'receiver_email': self.order.user.email,
63 'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),
64 },
65 )
66
67 self.order.notification_to_giver_is_sent = True
68 self.order.save()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/app/integrations/tg.py b/src/app/integrations/tg.py
--- a/src/app/integrations/tg.py
+++ b/src/app/integrations/tg.py
@@ -1,10 +1,18 @@
import requests
+from django.conf import settings
-def send_happiness_message(text):
- response = requests.post('https://timepad.f213.in/msg/', json={
+def send_message(channel, text):
+ url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage'
+ response = requests.post(url, data={
+ 'chat_id': channel,
'text': text,
+ 'parse_mode': 'markdown',
+ 'disable_web_page_preview': True,
})
- assert response.status_code == 200, 'TG proxy should return 200'
- assert response.json()['ok'] is True, 'TG proxy should say msg is ok'
+ assert response.status_code == 200, 'TG should return 200'
+
+
+def send_happiness_message(text):
+ send_message(settings.HAPPINESS_MESSAGES_CHAT_ID, text)
diff --git a/src/app/settings.py b/src/app/settings.py
--- a/src/app/settings.py
+++ b/src/app/settings.py
@@ -232,7 +232,8 @@
TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)
-SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)
+BOT_TOKEN = env('BOT_TOKEN', cast=str, default=None)
+HAPPINESS_MESSAGES_CHAT_ID = env('HAPPINESS_MESSAGES_CHAT_ID', cast=str, default=None)
DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')
DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)
diff --git a/src/orders/services/order_shipper.py b/src/orders/services/order_shipper.py
--- a/src/orders/services/order_shipper.py
+++ b/src/orders/services/order_shipper.py
@@ -36,7 +36,7 @@
self.order.save()
def send_happiness_message(self):
- if not settings.SEND_HAPPINESS_MESSAGES:
+ if not settings.HAPPINESS_MESSAGES_CHAT_ID:
return
send_happiness_message.delay(text='💰+{sum} ₽, {user}, {reason}'.format(
| {"golden_diff": "diff --git a/src/app/integrations/tg.py b/src/app/integrations/tg.py\n--- a/src/app/integrations/tg.py\n+++ b/src/app/integrations/tg.py\n@@ -1,10 +1,18 @@\n import requests\n+from django.conf import settings\n \n \n-def send_happiness_message(text):\n- response = requests.post('https://timepad.f213.in/msg/', json={\n+def send_message(channel, text):\n+ url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage'\n+ response = requests.post(url, data={\n+ 'chat_id': channel,\n 'text': text,\n+ 'parse_mode': 'markdown',\n+ 'disable_web_page_preview': True,\n })\n \n- assert response.status_code == 200, 'TG proxy should return 200'\n- assert response.json()['ok'] is True, 'TG proxy should say msg is ok'\n+ assert response.status_code == 200, 'TG should return 200'\n+\n+\n+def send_happiness_message(text):\n+ send_message(settings.HAPPINESS_MESSAGES_CHAT_ID, text)\ndiff --git a/src/app/settings.py b/src/app/settings.py\n--- a/src/app/settings.py\n+++ b/src/app/settings.py\n@@ -232,7 +232,8 @@\n \n TINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)\n \n-SEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n+BOT_TOKEN = env('BOT_TOKEN', cast=str, default=None)\n+HAPPINESS_MESSAGES_CHAT_ID = env('HAPPINESS_MESSAGES_CHAT_ID', cast=str, default=None)\n \n DRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')\n DRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)\ndiff --git a/src/orders/services/order_shipper.py b/src/orders/services/order_shipper.py\n--- a/src/orders/services/order_shipper.py\n+++ b/src/orders/services/order_shipper.py\n@@ -36,7 +36,7 @@\n self.order.save()\n \n def send_happiness_message(self):\n- if not settings.SEND_HAPPINESS_MESSAGES:\n+ if not settings.HAPPINESS_MESSAGES_CHAT_ID:\n return\n \n send_happiness_message.delay(text='\ud83d\udcb0+{sum} \u20bd, {user}, {reason}'.format(\n", "issue": "\u041f\u0435\u0440\u0435\u043f\u0438\u0441\u0430\u0442\u044c \u0438\u043d\u0442\u0435\u0433\u0440\u0430\u0446\u0438\u044e \u0441 \u0442\u0435\u043b\u0435\u0433\u0440\u0430\u043c\u043e\u043c\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u0443\u0435\u043c \u043a\u0430\u043a\u043e\u0439-\u0442\u043e [\u043c\u0443\u0442\u043d\u044b\u0439 \u0441\u0435\u0440\u0432\u0438\u0441](https://github.com/f213/education-backend/blob/master/src/app/integrations/tg.py#L4) \u0434\u043b\u044f \u043e\u0442\u043f\u0440\u0430\u0432\u043a\u0438 \u0441\u043e\u043e\u0431\u0449\u0435\u043d\u0438\u044f \u0432 \u0442\u0435\u043b\u0435\u0433\u0443. \u0422\u0430\u043a \u0432\u044b\u0448\u043b\u043e \u043f\u043e\u0442\u043e\u043c\u0443, \u0447\u0442\u043e \u043a\u043e\u0433\u0434\u0430 \u043c\u044b \u0437\u0430\u043f\u0443\u0441\u043a\u0430\u043b\u0438\u0441\u044c, API \u0442\u0435\u043b\u0435\u0433\u0440\u0430\u043c\u0430 \u0431\u044b\u043b\u043e \u0440\u0430\u0441\u043a\u043e\u043c\u043d\u0430\u0434\u0437\u043e\u0440\u0435\u043d\u043e.\r\n\r\n\u041d\u0430\u0434\u043e \u043e\u0442\u043a\u0430\u0437\u0430\u0442\u044c\u0441\u044f \u043e\u0442 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u043d\u0438\u044f \u044d\u0442\u043e\u0433\u043e \u0441\u0435\u0440\u0432\u0438\u0441\u0430. 
\u0417\u0430\u043e\u0434\u043d\u043e, \u0441\u0434\u0435\u043b\u0430\u0442\u044c \u0432\u043e\u0437\u043c\u043e\u0436\u043d\u043e\u0441\u0442\u044c \u0443\u043a\u0430\u0437\u044b\u0432\u0430\u0442\u044c \u043e\u0442\u0434\u0435\u043b\u044c\u043d\u044b\u0435 \u043a\u0430\u043d\u0430\u043b\u044b \u0443\u0432\u0435\u0434\u043e\u043c\u043b\u0435\u043d\u0438\u0439 \u043e \u043d\u043e\u0432\u044b\u0445 \u0437\u0430\u043a\u0430\u0437\u0430\u0445 \u0434\u043b\u044f \u0440\u0430\u0437\u043d\u044b\u0445 \u043a\u0443\u0440\u0441\u043e\u0432.\n", "before_files": [{"content": "import requests\n\n\ndef send_happiness_message(text):\n response = requests.post('https://timepad.f213.in/msg/', json={\n 'text': text,\n })\n\n assert response.status_code == 200, 'TG proxy should return 200'\n assert response.json()['ok'] is True, 'TG proxy should say msg is ok'\n", "path": "src/app/integrations/tg.py"}, {"content": "import environ\nimport os\nfrom celery.schedules import crontab\n\nroot = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False)) # set default values and casting\nenviron.Env.read_env() # reading .env file\nSITE_ROOT = root()\n\nUSE_L10N = True\nUSE_i18N = True\n\nLANGUAGE_CODE = 'ru'\nLOCALE_PATHS = ['locale']\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\nFRONTEND_URL = 'https://education.borshev.com'\n\nUSE_TZ = False\nTIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env('DEBUG', cast=bool, default=False)\nCI = env('CI', cast=bool, default=False)\nANONYMIZE_ENABLED = DEBUG\n\nABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')\nALLOWED_HOSTS = [\n 'edu-app.borshev.com',\n 'localhost',\n 'localhost:8000',\n 'education.borshev.com',\n ABSOLUTE_HOST.replace('https://', ''),\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://pmdaily.ru',\n 'https://education.borshev.com',\n]\n\nCSRF_TRUSTED_ORIGINS = [\n 'pmdaily.ru',\n 'education.borshev.com',\n 'borshev.com',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'app',\n 'users',\n 'orders',\n 'products',\n 'shipping',\n 'tinkoff',\n 'triggers',\n 'magnets',\n 'banking',\n\n 'corsheaders',\n 'hattori',\n 'anymail',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'drf_recaptcha',\n 'django_filters',\n\n 'axes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'debug_toolbar',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'app.middleware.real_ip.real_ip_middleware',\n 'axes.middleware.AxesMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nif not DEBUG and not CI:\n MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'app.renderers.AppJSONRenderer',\n ],\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',\n 'PAGE_SIZE': 20,\n}\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\nDATABASES = {\n 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n}\nAUTH_USER_MODEL = 'users.User'\nAUTHENTICATION_BACKENDS = [\n 'axes.backends.AxesBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n]\n\nHEALTH_CHECKS_ERROR_CODE = 503\nHEALTH_CHECKS = {\n 'db': 'django_healthchecks.contrib.check_database',\n}\n\nMEDIA_URL = env('MEDIA_URL', default='/media/')\n\nSTATIC_URL = env('STATIC_URL', default='/static/')\nSTATIC_ROOT = env('STATIC_ROOT')\n\nSENTRY_DSN = env('SENTRY_DSN', cast=str, default='')\n\nif not DEBUG and SENTRY_DSN:\n import sentry_sdk\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.redis import RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],\n )\n\nBROKER_URL = env('CELERY_BACKEND')\nCELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.\nCELERY_TIMEZONE = TIME_ZONE\nCELERY_ENABLE_UTC = False\nCELERYBEAT_SCHEDULE = {\n 'run_started_purchase_trigger': {\n 'task': 'triggers.tasks.check_for_started_purchase_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'run_record_feedback_trigger': {\n 'task': 'triggers.tasks.check_for_record_feedback_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'ship_unshipped_orders': {\n 'task': 'orders.tasks.ship_unshipped_orders',\n 'schedule': crontab(hour='*', minute='*/2'),\n },\n}\n\n\nAWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)\nAWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)\nAWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)\nAWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)\nAWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)\n\nEMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)\n\nEMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')\n\nMAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')\nMAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', 
cast=str, default=None)\n\nDEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')\nANYMAIL = {\n 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),\n 'DEBUG_API_REQUESTS': env('DEBUG'),\n}\n\nCLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)\n\nZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)\nZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)\n\nTINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)\nTINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)\nTINKOFF_CREDIT_SHOP_ID = env('TINKOFF_CREDIT_SHOP_ID', default=None)\nTINKOFF_CREDIT_SHOWCASE_ID = env('TINKOFF_CREDIT_SHOWCASE_ID', default=None)\n\nTINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)\n\nSEND_HAPPINESS_MESSAGES = env('SEND_HAPPINESS_MESSAGES', cast=bool, default=False)\n\nDRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')\nDRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)\n", "path": "src/app/settings.py"}, {"content": "from django.conf import settings\nfrom django.utils import timezone\n\nfrom app.tasks import send_happiness_message, send_mail\nfrom orders.models import Order\n\n\nclass Pigwidgeon:\n \"\"\"Ship the order (actualy calls item ship() method)\"\"\"\n def __init__(self, order: Order, silent: bool = False):\n self.order = order\n self.silent = silent\n\n def __call__(self):\n if self.ship():\n self.mark_order_as_shipped()\n\n if not self.order.notification_to_giver_is_sent:\n self.send_notification_to_giver()\n\n if not self.silent:\n self.send_happiness_message()\n\n def ship(self) -> bool:\n \"\"\"Ship the order. Returns true if order is shipped\"\"\"\n desired_date = self.order.desired_shipment_date\n if desired_date is None or desired_date <= timezone.now():\n self.order.item.ship(to=self.order.user, order=self.order)\n\n return True\n\n return False\n\n def mark_order_as_shipped(self):\n self.order.shipped = timezone.now()\n self.order.save()\n\n def send_happiness_message(self):\n if not settings.SEND_HAPPINESS_MESSAGES:\n return\n\n send_happiness_message.delay(text='\ud83d\udcb0+{sum} \u20bd, {user}, {reason}'.format(\n sum=str(self.order.price).replace('.00', ''),\n user=str(self.order.user),\n reason=str(self.order.item) if self.order.giver is None else f'{self.order.item} (\u043f\u043e\u0434\u0430\u0440\u043e\u043a)',\n ))\n\n def send_notification_to_giver(self):\n if self.order.giver is None:\n return\n\n if self.order.desired_shipment_date is None:\n return\n\n send_mail.delay(\n to=self.order.giver.email,\n template_id='gift-notification-for-giver', # postmark\n disable_antispam=True,\n ctx={\n 'item_name': self.order.item.full_name,\n 'receiver_name': str(self.order.user),\n 'receiver_email': self.order.user.email,\n 'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),\n },\n )\n\n self.order.notification_to_giver_is_sent = True\n self.order.save()\n", "path": "src/orders/services/order_shipper.py"}], "after_files": [{"content": "import requests\nfrom django.conf import settings\n\n\ndef send_message(channel, text):\n url = f'https://api.telegram.org/bot{settings.BOT_TOKEN}/sendMessage'\n response = requests.post(url, data={\n 'chat_id': channel,\n 'text': text,\n 'parse_mode': 'markdown',\n 'disable_web_page_preview': True,\n })\n\n assert response.status_code == 200, 'TG should return 200'\n\n\ndef send_happiness_message(text):\n 
send_message(settings.HAPPINESS_MESSAGES_CHAT_ID, text)\n", "path": "src/app/integrations/tg.py"}, {"content": "import environ\nimport os\nfrom celery.schedules import crontab\n\nroot = environ.Path(__file__) - 2 # three folder back (/a/b/c/ - 3 = /)\nenv = environ.Env(DEBUG=(bool, False)) # set default values and casting\nenviron.Env.read_env() # reading .env file\nSITE_ROOT = root()\n\nUSE_L10N = True\nUSE_i18N = True\n\nLANGUAGE_CODE = 'ru'\nLOCALE_PATHS = ['locale']\n\nINTERNAL_IPS = [\n '127.0.0.1',\n]\nFRONTEND_URL = 'https://education.borshev.com'\n\nUSE_TZ = False\nTIME_ZONE = env('TIME_ZONE', cast=str, default='Europe/Moscow')\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nTEST_RUNNER = 'app.test.disable_test_command_runner.DisableTestCommandRunner'\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env('SECRET_KEY', cast=str, default='s3cr3t')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env('DEBUG', cast=bool, default=False)\nCI = env('CI', cast=bool, default=False)\nANONYMIZE_ENABLED = DEBUG\n\nABSOLUTE_HOST = env('ABSOLUTE_HOST', cast=str, default='https://edu-app.borshev.com')\nALLOWED_HOSTS = [\n 'edu-app.borshev.com',\n 'localhost',\n 'localhost:8000',\n 'education.borshev.com',\n ABSOLUTE_HOST.replace('https://', ''),\n]\n\nCORS_ORIGIN_WHITELIST = [\n 'https://pmdaily.ru',\n 'https://education.borshev.com',\n]\n\nCSRF_TRUSTED_ORIGINS = [\n 'pmdaily.ru',\n 'education.borshev.com',\n 'borshev.com',\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'app',\n 'users',\n 'orders',\n 'products',\n 'shipping',\n 'tinkoff',\n 'triggers',\n 'magnets',\n 'banking',\n\n 'corsheaders',\n 'hattori',\n 'anymail',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'drf_recaptcha',\n 'django_filters',\n\n 'axes',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'debug_toolbar',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.RemoteUserMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'app.middleware.real_ip.real_ip_middleware',\n 'axes.middleware.AxesMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nif not DEBUG and not CI:\n MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware')\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticated',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': [\n 'app.renderers.AppJSONRenderer',\n ],\n 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',\n 'DEFAULT_PAGINATION_CLASS': 'app.pagination.AppPagination',\n 'PAGE_SIZE': 20,\n}\n\nROOT_URLCONF = 'app.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 
'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'app.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\nDATABASES = {\n 'default': env.db(), # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n}\nAUTH_USER_MODEL = 'users.User'\nAUTHENTICATION_BACKENDS = [\n 'axes.backends.AxesBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'django.contrib.auth.backends.RemoteUserBackend',\n]\n\nHEALTH_CHECKS_ERROR_CODE = 503\nHEALTH_CHECKS = {\n 'db': 'django_healthchecks.contrib.check_database',\n}\n\nMEDIA_URL = env('MEDIA_URL', default='/media/')\n\nSTATIC_URL = env('STATIC_URL', default='/static/')\nSTATIC_ROOT = env('STATIC_ROOT')\n\nSENTRY_DSN = env('SENTRY_DSN', cast=str, default='')\n\nif not DEBUG and SENTRY_DSN:\n import sentry_sdk\n from sentry_sdk.integrations.celery import CeleryIntegration\n from sentry_sdk.integrations.django import DjangoIntegration\n from sentry_sdk.integrations.redis import RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), CeleryIntegration(), RedisIntegration()],\n )\n\nBROKER_URL = env('CELERY_BACKEND')\nCELERY_ALWAYS_EAGER = env('CELERY_ALWAYS_EAGER', cast=bool, default=DEBUG) # by default in debug mode we run all celery tasks in foregroud.\nCELERY_TIMEZONE = TIME_ZONE\nCELERY_ENABLE_UTC = False\nCELERYBEAT_SCHEDULE = {\n 'run_started_purchase_trigger': {\n 'task': 'triggers.tasks.check_for_started_purchase_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'run_record_feedback_trigger': {\n 'task': 'triggers.tasks.check_for_record_feedback_triggers',\n 'schedule': crontab(hour='*', minute=15),\n },\n 'ship_unshipped_orders': {\n 'task': 'orders.tasks.ship_unshipped_orders',\n 'schedule': crontab(hour='*', minute='*/2'),\n },\n}\n\n\nAWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', default=None)\nAWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', default=None)\nAWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', default=None)\nAWS_S3_REGION_NAME = env('AWS_S3_REGION_NAME', default=None)\nAWS_S3_ENDPOINT_URL = env('AWS_S3_ENDPOINT_URL', default=None)\n\nEMAIL_ENABLED = env('EMAIL_ENABLED', cast=bool, default=False)\n\nEMAIL_BACKEND = env('EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')\n\nMAILCHIMP_API_KEY = env('MAILCHIMP_API_KEY', default='')\nMAILCHIMP_CONTACT_LIST_ID = env('MAILCHIMP_CONTACT_LIST_ID', cast=str, default=None)\n\nDEFAULT_FROM_EMAIL = env('EMAIL_FROM', cast=str, default='')\nANYMAIL = {\n 'POSTMARK_SERVER_TOKEN': env('POSTMARK_SERVER_TOKEN', cast=str, default=''),\n 'DEBUG_API_REQUESTS': env('DEBUG'),\n}\n\nCLICKMEETING_API_KEY = env('CLICKMEETING_API_KEY', default=None, cast=str)\n\nZOOMUS_API_KEY = env('ZOOMUS_API_KEY', default=None, cast=str)\nZOOMUS_API_SECRET = env('ZOOMUS_API_SECRET', default=None, cast=str)\n\nTINKOFF_TERMINAL_KEY = env('TINKOFF_TERMINAL_KEY', default=None)\nTINKOFF_TERMINAL_PASSWORD = env('TINKOFF_TERMINAL_PASSWORD', default=None)\nTINKOFF_CREDIT_SHOP_ID = env('TINKOFF_CREDIT_SHOP_ID', default=None)\nTINKOFF_CREDIT_SHOWCASE_ID = env('TINKOFF_CREDIT_SHOWCASE_ID', default=None)\n\nTINKOFF_CREDIT_DEMO_MODE = env('TINKOFF_CREDIT_DEMO_MODE', default=DEBUG)\n\nBOT_TOKEN = env('BOT_TOKEN', cast=str, 
default=None)\nHAPPINESS_MESSAGES_CHAT_ID = env('HAPPINESS_MESSAGES_CHAT_ID', cast=str, default=None)\n\nDRF_RECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', cast=str, default='')\nDRF_RECAPTCHA_TESTING = DRF_RECAPTCHA_TESTING_PASS = not env('RECAPTCHA_ENABLED', cast=bool, default=True)\n", "path": "src/app/settings.py"}, {"content": "from django.conf import settings\nfrom django.utils import timezone\n\nfrom app.tasks import send_happiness_message, send_mail\nfrom orders.models import Order\n\n\nclass Pigwidgeon:\n \"\"\"Ship the order (actualy calls item ship() method)\"\"\"\n def __init__(self, order: Order, silent: bool = False):\n self.order = order\n self.silent = silent\n\n def __call__(self):\n if self.ship():\n self.mark_order_as_shipped()\n\n if not self.order.notification_to_giver_is_sent:\n self.send_notification_to_giver()\n\n if not self.silent:\n self.send_happiness_message()\n\n def ship(self) -> bool:\n \"\"\"Ship the order. Returns true if order is shipped\"\"\"\n desired_date = self.order.desired_shipment_date\n if desired_date is None or desired_date <= timezone.now():\n self.order.item.ship(to=self.order.user, order=self.order)\n\n return True\n\n return False\n\n def mark_order_as_shipped(self):\n self.order.shipped = timezone.now()\n self.order.save()\n\n def send_happiness_message(self):\n if not settings.HAPPINESS_MESSAGES_CHAT_ID:\n return\n\n send_happiness_message.delay(text='\ud83d\udcb0+{sum} \u20bd, {user}, {reason}'.format(\n sum=str(self.order.price).replace('.00', ''),\n user=str(self.order.user),\n reason=str(self.order.item) if self.order.giver is None else f'{self.order.item} (\u043f\u043e\u0434\u0430\u0440\u043e\u043a)',\n ))\n\n def send_notification_to_giver(self):\n if self.order.giver is None:\n return\n\n if self.order.desired_shipment_date is None:\n return\n\n send_mail.delay(\n to=self.order.giver.email,\n template_id='gift-notification-for-giver', # postmark\n disable_antispam=True,\n ctx={\n 'item_name': self.order.item.full_name,\n 'receiver_name': str(self.order.user),\n 'receiver_email': self.order.user.email,\n 'desired_shipment_date': self.order.desired_shipment_date.strftime('%d.%m.%Y'),\n },\n )\n\n self.order.notification_to_giver_is_sent = True\n self.order.save()\n", "path": "src/orders/services/order_shipper.py"}]} | 3,618 | 553 |
gh_patches_debug_6287 | rasdani/github-patches | git_diff | ansible__ansible-38194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid yml causes include_vars to hang or freeze without output
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release and devel branch are affected too.
Always add information AFTER these HTML comments.
-->
I made some attempt to sift through the 3,511 issues; apologies for adding yet another to this hopelessly large list. I did *not* set up a development environment to try to reproduce this.
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Bug Report
##### COMPONENT NAME
<!---
Insert below this comment the name of the module, plugin, task or feature.
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path
-->
include_vars
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.4.3.0
config file = /Users/dclo/Documents/ansible/ansible.cfg
configured module search path = ['/Users/dclo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.6/site-packages/ansible
executable location = /usr/local/bin/ansible
python version = 3.6.4 (default, Mar 22 2018, 13:54:22) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
```
##### CONFIGURATION
<!---
If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
```
DEFAULT_HOST_LIST(/Users/dclo/Documents/ansible/ansible.cfg) = ['/Users/dclo/Documents/ansible/staging']
DEFAULT_VAULT_PASSWORD_FILE(/Users/dclo/Documents/ansible/ansible.cfg) = /Users/dclo/Documents/ansible/.vault_pass
```
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
macOS High Sierra 10.13.3
##### SUMMARY
<!--- Explain the problem briefly -->
`ansible-playbook test.yml -vvv` outputs the beginning of the `include_vars` task and then hangs.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
- Create a vars file with ansible-vault (create/edit) that has malformed yaml.
- Include it in the playbook.
<!--- Paste example playbooks or commands between quotes below -->
```yaml
---
- hosts: all
gather_facts: no
tasks:
- include_vars: "vars/vault.yml"
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
Ansible outputs a syntax error for the YAML file.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
Ansible hangs
<!--- Paste verbatim command output between quotes below -->
```
TASK [include_vars] ********************************************************************************
task path: /Users/dclo/Documents/ansible/test.yml:5
^CProcess WorkerProcess-2:
[ERROR]: User interrupted execution
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/plugins/action/include_vars.py`
Content:
```
1 # (c) 2016, Allen Sanabria <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import (absolute_import, division, print_function)
19 __metaclass__ = type
20
21 from os import path, walk
22 import re
23
24 from ansible.errors import AnsibleError
25 from ansible.module_utils.six import string_types
26 from ansible.module_utils._text import to_native, to_text
27 from ansible.plugins.action import ActionBase
28
29
30 class ActionModule(ActionBase):
31
32 TRANSFERS_FILES = False
33
34 VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
35 VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
36 VALID_FILE_ARGUMENTS = ['file', '_raw_params']
37 VALID_ALL = ['name']
38
39 def _set_dir_defaults(self):
40 if not self.depth:
41 self.depth = 0
42
43 if self.files_matching:
44 self.matcher = re.compile(r'{0}'.format(self.files_matching))
45 else:
46 self.matcher = None
47
48 if not self.ignore_files:
49 self.ignore_files = list()
50
51 if isinstance(self.ignore_files, str):
52 self.ignore_files = self.ignore_files.split()
53
54 elif isinstance(self.ignore_files, dict):
55 return {
56 'failed': True,
57 'message': '{0} must be a list'.format(self.ignore_files)
58 }
59
60 def _set_args(self):
61 """ Set instance variables based on the arguments that were passed """
62
63 self.return_results_as_name = self._task.args.get('name', None)
64 self.source_dir = self._task.args.get('dir', None)
65 self.source_file = self._task.args.get('file', None)
66 if not self.source_dir and not self.source_file:
67 self.source_file = self._task.args.get('_raw_params')
68
69 self.depth = self._task.args.get('depth', None)
70 self.files_matching = self._task.args.get('files_matching', None)
71 self.ignore_files = self._task.args.get('ignore_files', None)
72 self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
73
74 # convert/validate extensions list
75 if isinstance(self.valid_extensions, string_types):
76 self.valid_extensions = list(self.valid_extensions)
77 if not isinstance(self.valid_extensions, list):
78 raise AnsibleError('Invalid type for "extensions" option, it must be a list')
79
80 def run(self, tmp=None, task_vars=None):
81 """ Load yml files recursively from a directory.
82 """
83 del tmp # tmp no longer has any effect
84
85 if task_vars is None:
86 task_vars = dict()
87
88 self.show_content = True
89 self.included_files = []
90
91 # Validate arguments
92 dirs = 0
93 files = 0
94 for arg in self._task.args:
95 if arg in self.VALID_DIR_ARGUMENTS:
96 dirs += 1
97 elif arg in self.VALID_FILE_ARGUMENTS:
98 files += 1
99 elif arg in self.VALID_ALL:
100 pass
101 else:
102 raise AnsibleError('{0} is not a valid option in debug'.format(arg))
103
104 if dirs and files:
105 raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")
106
107 # set internal vars from args
108 self._set_args()
109
110 results = dict()
111 if self.source_dir:
112 self._set_dir_defaults()
113 self._set_root_dir()
114 if not path.exists(self.source_dir):
115 failed = True
116 err_msg = ('{0} directory does not exist'.format(self.source_dir))
117 elif not path.isdir(self.source_dir):
118 failed = True
119 err_msg = ('{0} is not a directory'.format(self.source_dir))
120 else:
121 for root_dir, filenames in self._traverse_dir_depth():
122 failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
123 if failed:
124 break
125 results.update(updated_results)
126 else:
127 try:
128 self.source_file = self._find_needle('vars', self.source_file)
129 failed, err_msg, updated_results = (
130 self._load_files(self.source_file)
131 )
132 if not failed:
133 results.update(updated_results)
134
135 except AnsibleError as e:
136 failed = True
137 err_msg = to_native(e)
138
139 if self.return_results_as_name:
140 scope = dict()
141 scope[self.return_results_as_name] = results
142 results = scope
143
144 result = super(ActionModule, self).run(task_vars=task_vars)
145
146 if failed:
147 result['failed'] = failed
148 result['message'] = err_msg
149
150 result['ansible_included_var_files'] = self.included_files
151 result['ansible_facts'] = results
152 result['_ansible_no_log'] = not self.show_content
153
154 return result
155
156 def _set_root_dir(self):
157 if self._task._role:
158 if self.source_dir.split('/')[0] == 'vars':
159 path_to_use = (
160 path.join(self._task._role._role_path, self.source_dir)
161 )
162 if path.exists(path_to_use):
163 self.source_dir = path_to_use
164 else:
165 path_to_use = (
166 path.join(
167 self._task._role._role_path, 'vars', self.source_dir
168 )
169 )
170 self.source_dir = path_to_use
171 else:
172 current_dir = (
173 "/".join(self._task._ds._data_source.split('/')[:-1])
174 )
175 self.source_dir = path.join(current_dir, self.source_dir)
176
177 def _traverse_dir_depth(self):
178 """ Recursively iterate over a directory and sort the files in
179 alphabetical order. Do not iterate pass the set depth.
180 The default depth is unlimited.
181 """
182 current_depth = 0
183 sorted_walk = list(walk(self.source_dir))
184 sorted_walk.sort(key=lambda x: x[0])
185 for current_root, current_dir, current_files in sorted_walk:
186 current_depth += 1
187 if current_depth <= self.depth or self.depth == 0:
188 current_files.sort()
189 yield (current_root, current_files)
190 else:
191 break
192
193 def _ignore_file(self, filename):
194 """ Return True if a file matches the list of ignore_files.
195 Args:
196 filename (str): The filename that is being matched against.
197
198 Returns:
199 Boolean
200 """
201 for file_type in self.ignore_files:
202 try:
203 if re.search(r'{0}$'.format(file_type), filename):
204 return True
205 except Exception:
206 err_msg = 'Invalid regular expression: {0}'.format(file_type)
207 raise AnsibleError(err_msg)
208 return False
209
210 def _is_valid_file_ext(self, source_file):
211 """ Verify if source file has a valid extension
212 Args:
213 source_file (str): The full path of source file or source file.
214 Returns:
215 Bool
216 """
217 file_ext = path.splitext(source_file)
218 return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
219
220 def _load_files(self, filename, validate_extensions=False):
221 """ Loads a file and converts the output into a valid Python dict.
222 Args:
223 filename (str): The source file.
224
225 Returns:
226 Tuple (bool, str, dict)
227 """
228 results = dict()
229 failed = False
230 err_msg = ''
231 if validate_extensions and not self._is_valid_file_ext(filename):
232 failed = True
233 err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))
234 else:
235 b_data, show_content = self._loader._get_file_contents(filename)
236 data = to_text(b_data, errors='surrogate_or_strict')
237
238 self.show_content = show_content
239 data = self._loader.load(data, show_content)
240 if not data:
241 data = dict()
242 if not isinstance(data, dict):
243 failed = True
244 err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))
245 else:
246 self.included_files.append(filename)
247 results.update(data)
248
249 return failed, err_msg, results
250
251 def _load_files_in_dir(self, root_dir, var_files):
252 """ Load the found yml files and update/overwrite the dictionary.
253 Args:
254 root_dir (str): The base directory of the list of files that is being passed.
255 var_files: (list): List of files to iterate over and load into a dictionary.
256
257 Returns:
258 Tuple (bool, str, dict)
259 """
260 results = dict()
261 failed = False
262 err_msg = ''
263 for filename in var_files:
264 stop_iter = False
265 # Never include main.yml from a role, as that is the default included by the role
266 if self._task._role:
267 if filename == 'main.yml':
268 stop_iter = True
269 continue
270
271 filepath = path.join(root_dir, filename)
272 if self.files_matching:
273 if not self.matcher.search(filename):
274 stop_iter = True
275
276 if not stop_iter and not failed:
277 if path.exists(filepath) and not self._ignore_file(filename):
278 failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
279 if not failed:
280 results.update(loaded_data)
281
282 return failed, err_msg, results
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
--- a/lib/ansible/plugins/action/include_vars.py
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -236,7 +236,7 @@
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content = show_content
- data = self._loader.load(data, show_content)
+ data = self._loader.load(data, file_name=filename, show_content=show_content)
if not data:
data = dict()
if not isinstance(data, dict):
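The one-line fix above works because the second positional parameter of `DataLoader.load()` is the file name rather than `show_content`, so the original call routed the `show_content` flag into the `file_name` slot; naming both arguments (and passing the real vault filename) is presumably what lets the YAML syntax error surface instead of hanging the worker. A minimal sketch of the corrected call, with the keyword names taken from the patch (requires Ansible installed; the sample data and path are illustrative):

```python
from ansible.parsing.dataloader import DataLoader

loader = DataLoader()

# Naming both arguments, as the patch does, keeps show_content out of the
# file_name slot and lets parse errors point at the real vault file.
parsed = loader.load(
    "api_key: secret",           # decrypted vault text (illustrative)
    file_name="vars/vault.yml",  # real origin of the data
    show_content=False,          # vault content should not be echoed in errors
)
print(parsed)  # {'api_key': 'secret'}
```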
| {"golden_diff": "diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py\n--- a/lib/ansible/plugins/action/include_vars.py\n+++ b/lib/ansible/plugins/action/include_vars.py\n@@ -236,7 +236,7 @@\n data = to_text(b_data, errors='surrogate_or_strict')\n \n self.show_content = show_content\n- data = self._loader.load(data, show_content)\n+ data = self._loader.load(data, file_name=filename, show_content=show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n", "issue": "Invalid yml causes include_vars to hang or freeze without output\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments.\r\n-->\r\n\r\nI made some attempt to sift through the 3,511 issues. Apology for adding yet another to this hopelessly large list. I did *not* set-up for development and try to reproduce.\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest -->\r\n - Bug Report\r\n \r\n##### COMPONENT NAME\r\n<!---\r\nInsert below this comment the name of the module, plugin, task or feature.\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path\r\n-->\r\ninclude_vars\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.4.3.0\r\n config file = /Users/dclo/Documents/ansible/ansible.cfg\r\n configured module search path = ['/Users/dclo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.6/site-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.6.4 (default, Mar 22 2018, 13:54:22) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n```\r\nDEFAULT_HOST_LIST(/Users/dclo/Documents/ansible/ansible.cfg) = ['/Users/dclo/Documents/ansible/staging']\r\nDEFAULT_VAULT_PASSWORD_FILE(/Users/dclo/Documents/ansible/ansible.cfg) = /Users/dclo/Documents/ansible/.vault_pass\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.\r\n-->\r\nmacOS High Sierra 10.13.3\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n`ansible-playbook test.yml -vvv` outputs the beginning of the `include_vars` task and then hangs.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n- Create a vars file with ansible-vault (create/edit) that has malformed yaml.\r\n- Include it in the playbook.\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n---\r\n- hosts: all\r\n gather_facts: no\r\n tasks:\r\n - include_vars: \"vars/vault.yml\"\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nAnsible outputs syntax error on the yaml file.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\nAnsible hangs\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nTASK [include_vars] ********************************************************************************\r\ntask path: /Users/dclo/Documents/ansible/test.yml:5\r\n^CProcess WorkerProcess-2:\r\n [ERROR]: User interrupted execution\r\n```\r\n\n", "before_files": [{"content": "# (c) 2016, Allen Sanabria <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom os import path, walk\nimport re\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']\n VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']\n VALID_FILE_ARGUMENTS = ['file', '_raw_params']\n VALID_ALL = ['name']\n\n def _set_dir_defaults(self):\n if not self.depth:\n self.depth = 0\n\n if self.files_matching:\n self.matcher = re.compile(r'{0}'.format(self.files_matching))\n else:\n self.matcher = None\n\n if not self.ignore_files:\n self.ignore_files = list()\n\n if isinstance(self.ignore_files, str):\n self.ignore_files = self.ignore_files.split()\n\n elif isinstance(self.ignore_files, dict):\n return {\n 'failed': True,\n 'message': '{0} must be a list'.format(self.ignore_files)\n }\n\n def _set_args(self):\n \"\"\" Set instance variables based on the arguments that were passed \"\"\"\n\n self.return_results_as_name = self._task.args.get('name', None)\n self.source_dir = self._task.args.get('dir', None)\n self.source_file = self._task.args.get('file', None)\n if not self.source_dir and not self.source_file:\n self.source_file = self._task.args.get('_raw_params')\n\n self.depth = self._task.args.get('depth', None)\n self.files_matching = self._task.args.get('files_matching', None)\n self.ignore_files = self._task.args.get('ignore_files', None)\n self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)\n\n # convert/validate extensions list\n if isinstance(self.valid_extensions, string_types):\n self.valid_extensions = list(self.valid_extensions)\n if not isinstance(self.valid_extensions, list):\n raise AnsibleError('Invalid type for \"extensions\" option, it must be a list')\n\n def run(self, tmp=None, task_vars=None):\n \"\"\" Load yml files recursively from a directory.\n \"\"\"\n del tmp # tmp no longer has any effect\n\n if task_vars is None:\n task_vars = dict()\n\n self.show_content = True\n self.included_files = []\n\n # Validate arguments\n dirs = 0\n files = 0\n for arg in self._task.args:\n if arg in self.VALID_DIR_ARGUMENTS:\n dirs += 1\n elif arg in self.VALID_FILE_ARGUMENTS:\n files += 1\n elif arg in self.VALID_ALL:\n pass\n else:\n raise AnsibleError('{0} is not a valid option in debug'.format(arg))\n\n if dirs and files:\n raise AnsibleError(\"Your are mixing file only and dir only arguments, these are incompatible\")\n\n # set internal vars from args\n self._set_args()\n\n results = dict()\n if self.source_dir:\n self._set_dir_defaults()\n self._set_root_dir()\n if not path.exists(self.source_dir):\n failed = True\n err_msg = ('{0} directory does not exist'.format(self.source_dir))\n elif not path.isdir(self.source_dir):\n failed = True\n err_msg = ('{0} is not a directory'.format(self.source_dir))\n else:\n for root_dir, filenames in self._traverse_dir_depth():\n failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))\n if failed:\n break\n results.update(updated_results)\n else:\n try:\n self.source_file = self._find_needle('vars', self.source_file)\n failed, err_msg, updated_results = (\n self._load_files(self.source_file)\n )\n if not failed:\n results.update(updated_results)\n\n 
except AnsibleError as e:\n failed = True\n err_msg = to_native(e)\n\n if self.return_results_as_name:\n scope = dict()\n scope[self.return_results_as_name] = results\n results = scope\n\n result = super(ActionModule, self).run(task_vars=task_vars)\n\n if failed:\n result['failed'] = failed\n result['message'] = err_msg\n\n result['ansible_included_var_files'] = self.included_files\n result['ansible_facts'] = results\n result['_ansible_no_log'] = not self.show_content\n\n return result\n\n def _set_root_dir(self):\n if self._task._role:\n if self.source_dir.split('/')[0] == 'vars':\n path_to_use = (\n path.join(self._task._role._role_path, self.source_dir)\n )\n if path.exists(path_to_use):\n self.source_dir = path_to_use\n else:\n path_to_use = (\n path.join(\n self._task._role._role_path, 'vars', self.source_dir\n )\n )\n self.source_dir = path_to_use\n else:\n current_dir = (\n \"/\".join(self._task._ds._data_source.split('/')[:-1])\n )\n self.source_dir = path.join(current_dir, self.source_dir)\n\n def _traverse_dir_depth(self):\n \"\"\" Recursively iterate over a directory and sort the files in\n alphabetical order. Do not iterate pass the set depth.\n The default depth is unlimited.\n \"\"\"\n current_depth = 0\n sorted_walk = list(walk(self.source_dir))\n sorted_walk.sort(key=lambda x: x[0])\n for current_root, current_dir, current_files in sorted_walk:\n current_depth += 1\n if current_depth <= self.depth or self.depth == 0:\n current_files.sort()\n yield (current_root, current_files)\n else:\n break\n\n def _ignore_file(self, filename):\n \"\"\" Return True if a file matches the list of ignore_files.\n Args:\n filename (str): The filename that is being matched against.\n\n Returns:\n Boolean\n \"\"\"\n for file_type in self.ignore_files:\n try:\n if re.search(r'{0}$'.format(file_type), filename):\n return True\n except Exception:\n err_msg = 'Invalid regular expression: {0}'.format(file_type)\n raise AnsibleError(err_msg)\n return False\n\n def _is_valid_file_ext(self, source_file):\n \"\"\" Verify if source file has a valid extension\n Args:\n source_file (str): The full path of source file or source file.\n Returns:\n Bool\n \"\"\"\n file_ext = path.splitext(source_file)\n return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)\n\n def _load_files(self, filename, validate_extensions=False):\n \"\"\" Loads a file and converts the output into a valid Python dict.\n Args:\n filename (str): The source file.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n if validate_extensions and not self._is_valid_file_ext(filename):\n failed = True\n err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))\n else:\n b_data, show_content = self._loader._get_file_contents(filename)\n data = to_text(b_data, errors='surrogate_or_strict')\n\n self.show_content = show_content\n data = self._loader.load(data, show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n failed = True\n err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))\n else:\n self.included_files.append(filename)\n results.update(data)\n\n return failed, err_msg, results\n\n def _load_files_in_dir(self, root_dir, var_files):\n \"\"\" Load the found yml files and update/overwrite the dictionary.\n Args:\n root_dir (str): The base directory of the list of files that is being passed.\n var_files: (list): List of files to iterate over and load into a dictionary.\n\n Returns:\n 
Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n for filename in var_files:\n stop_iter = False\n # Never include main.yml from a role, as that is the default included by the role\n if self._task._role:\n if filename == 'main.yml':\n stop_iter = True\n continue\n\n filepath = path.join(root_dir, filename)\n if self.files_matching:\n if not self.matcher.search(filename):\n stop_iter = True\n\n if not stop_iter and not failed:\n if path.exists(filepath) and not self._ignore_file(filename):\n failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)\n if not failed:\n results.update(loaded_data)\n\n return failed, err_msg, results\n", "path": "lib/ansible/plugins/action/include_vars.py"}], "after_files": [{"content": "# (c) 2016, Allen Sanabria <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom os import path, walk\nimport re\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']\n VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']\n VALID_FILE_ARGUMENTS = ['file', '_raw_params']\n VALID_ALL = ['name']\n\n def _set_dir_defaults(self):\n if not self.depth:\n self.depth = 0\n\n if self.files_matching:\n self.matcher = re.compile(r'{0}'.format(self.files_matching))\n else:\n self.matcher = None\n\n if not self.ignore_files:\n self.ignore_files = list()\n\n if isinstance(self.ignore_files, str):\n self.ignore_files = self.ignore_files.split()\n\n elif isinstance(self.ignore_files, dict):\n return {\n 'failed': True,\n 'message': '{0} must be a list'.format(self.ignore_files)\n }\n\n def _set_args(self):\n \"\"\" Set instance variables based on the arguments that were passed \"\"\"\n\n self.return_results_as_name = self._task.args.get('name', None)\n self.source_dir = self._task.args.get('dir', None)\n self.source_file = self._task.args.get('file', None)\n if not self.source_dir and not self.source_file:\n self.source_file = self._task.args.get('_raw_params')\n\n self.depth = self._task.args.get('depth', None)\n self.files_matching = self._task.args.get('files_matching', None)\n self.ignore_files = self._task.args.get('ignore_files', None)\n self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)\n\n # convert/validate extensions list\n if isinstance(self.valid_extensions, string_types):\n self.valid_extensions = list(self.valid_extensions)\n if not isinstance(self.valid_extensions, list):\n raise AnsibleError('Invalid type for \"extensions\" option, it must be a list')\n\n def run(self, 
tmp=None, task_vars=None):\n \"\"\" Load yml files recursively from a directory.\n \"\"\"\n del tmp # tmp no longer has any effect\n\n if task_vars is None:\n task_vars = dict()\n\n self.show_content = True\n self.included_files = []\n\n # Validate arguments\n dirs = 0\n files = 0\n for arg in self._task.args:\n if arg in self.VALID_DIR_ARGUMENTS:\n dirs += 1\n elif arg in self.VALID_FILE_ARGUMENTS:\n files += 1\n elif arg in self.VALID_ALL:\n pass\n else:\n raise AnsibleError('{0} is not a valid option in debug'.format(arg))\n\n if dirs and files:\n raise AnsibleError(\"Your are mixing file only and dir only arguments, these are incompatible\")\n\n # set internal vars from args\n self._set_args()\n\n results = dict()\n if self.source_dir:\n self._set_dir_defaults()\n self._set_root_dir()\n if not path.exists(self.source_dir):\n failed = True\n err_msg = ('{0} directory does not exist'.format(self.source_dir))\n elif not path.isdir(self.source_dir):\n failed = True\n err_msg = ('{0} is not a directory'.format(self.source_dir))\n else:\n for root_dir, filenames in self._traverse_dir_depth():\n failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))\n if failed:\n break\n results.update(updated_results)\n else:\n try:\n self.source_file = self._find_needle('vars', self.source_file)\n failed, err_msg, updated_results = (\n self._load_files(self.source_file)\n )\n if not failed:\n results.update(updated_results)\n\n except AnsibleError as e:\n failed = True\n err_msg = to_native(e)\n\n if self.return_results_as_name:\n scope = dict()\n scope[self.return_results_as_name] = results\n results = scope\n\n result = super(ActionModule, self).run(task_vars=task_vars)\n\n if failed:\n result['failed'] = failed\n result['message'] = err_msg\n\n result['ansible_included_var_files'] = self.included_files\n result['ansible_facts'] = results\n result['_ansible_no_log'] = not self.show_content\n\n return result\n\n def _set_root_dir(self):\n if self._task._role:\n if self.source_dir.split('/')[0] == 'vars':\n path_to_use = (\n path.join(self._task._role._role_path, self.source_dir)\n )\n if path.exists(path_to_use):\n self.source_dir = path_to_use\n else:\n path_to_use = (\n path.join(\n self._task._role._role_path, 'vars', self.source_dir\n )\n )\n self.source_dir = path_to_use\n else:\n current_dir = (\n \"/\".join(self._task._ds._data_source.split('/')[:-1])\n )\n self.source_dir = path.join(current_dir, self.source_dir)\n\n def _traverse_dir_depth(self):\n \"\"\" Recursively iterate over a directory and sort the files in\n alphabetical order. 
Do not iterate pass the set depth.\n The default depth is unlimited.\n \"\"\"\n current_depth = 0\n sorted_walk = list(walk(self.source_dir))\n sorted_walk.sort(key=lambda x: x[0])\n for current_root, current_dir, current_files in sorted_walk:\n current_depth += 1\n if current_depth <= self.depth or self.depth == 0:\n current_files.sort()\n yield (current_root, current_files)\n else:\n break\n\n def _ignore_file(self, filename):\n \"\"\" Return True if a file matches the list of ignore_files.\n Args:\n filename (str): The filename that is being matched against.\n\n Returns:\n Boolean\n \"\"\"\n for file_type in self.ignore_files:\n try:\n if re.search(r'{0}$'.format(file_type), filename):\n return True\n except Exception:\n err_msg = 'Invalid regular expression: {0}'.format(file_type)\n raise AnsibleError(err_msg)\n return False\n\n def _is_valid_file_ext(self, source_file):\n \"\"\" Verify if source file has a valid extension\n Args:\n source_file (str): The full path of source file or source file.\n Returns:\n Bool\n \"\"\"\n file_ext = path.splitext(source_file)\n return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)\n\n def _load_files(self, filename, validate_extensions=False):\n \"\"\" Loads a file and converts the output into a valid Python dict.\n Args:\n filename (str): The source file.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n if validate_extensions and not self._is_valid_file_ext(filename):\n failed = True\n err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))\n else:\n b_data, show_content = self._loader._get_file_contents(filename)\n data = to_text(b_data, errors='surrogate_or_strict')\n\n self.show_content = show_content\n data = self._loader.load(data, file_name=filename, show_content=show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n failed = True\n err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))\n else:\n self.included_files.append(filename)\n results.update(data)\n\n return failed, err_msg, results\n\n def _load_files_in_dir(self, root_dir, var_files):\n \"\"\" Load the found yml files and update/overwrite the dictionary.\n Args:\n root_dir (str): The base directory of the list of files that is being passed.\n var_files: (list): List of files to iterate over and load into a dictionary.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n for filename in var_files:\n stop_iter = False\n # Never include main.yml from a role, as that is the default included by the role\n if self._task._role:\n if filename == 'main.yml':\n stop_iter = True\n continue\n\n filepath = path.join(root_dir, filename)\n if self.files_matching:\n if not self.matcher.search(filename):\n stop_iter = True\n\n if not stop_iter and not failed:\n if path.exists(filepath) and not self._ignore_file(filename):\n failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)\n if not failed:\n results.update(loaded_data)\n\n return failed, err_msg, results\n", "path": "lib/ansible/plugins/action/include_vars.py"}]} | 3,995 | 135 |
gh_patches_debug_42439 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Barcode separator processing fails with password-protected PDFs
### Description
With PAPERLESS_CONSUMER_ENABLE_BARCODES=true, uploading a password-protected PDF causes the file task to fail. It appears that the barcode separation stage fails with an error rather than skipping the document. The same document imports correctly with the above settings commented out.
Traceback from the failed task:
```python
/tmp/paperless/paperless-upload-0bj7vn9g: invalid password : Traceback (most recent call last):
File "/usr/src/paperless/src/src/django-q/django_q/cluster.py", line 454, in worker
res = f(*task["args"], **task["kwargs"])
File "/usr/src/paperless/src/documents/tasks.py", line 99, in consume_file
pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)
File "/usr/src/paperless/src/documents/barcodes.py", line 121, in scan_file_for_separating_barcodes
pdf = Pdf.open(pdf_filepath)
File "/usr/local/lib/python3.9/site-packages/pikepdf/_methods.py", line 791, in open
pdf = Pdf._open(
pikepdf._qpdf.PasswordError: /tmp/paperless/paperless-upload-0bj7vn9g: invalid password
```
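For reference, the exception above is exactly what pikepdf raises whenever an encrypted PDF is opened without its password, so it can be reproduced outside Paperless in a few lines (sketch only; `encrypted.pdf` stands in for any password-protected file):

```python
import pikepdf

try:
    # Opening without supplying a password fails for an encrypted document.
    with pikepdf.Pdf.open("encrypted.pdf") as pdf:
        print(f"{len(pdf.pages)} pages")
except pikepdf.PasswordError as err:
    # Same error the barcode-separation step hits; catching it and skipping
    # the split would avoid failing the whole consume task.
    print(f"Password protected, skipping barcode separation: {err}")
```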
### Steps to reproduce
1. Launch paperless with PAPERLESS_CONSUMER_ENABLE_BARCODES=true
2. Upload password protected PDF
3. Check the file tasks menu for failed jobs
### Webserver logs
_No response_
### Paperless-ngx version
1.9.2
### Host OS
Fedora Server 36
### Installation method
Other (please describe above)
### Browser
_No response_
### Configuration changes
_No response_
### Other
Official docker image running under root podman
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/barcodes.py`
Content:
```
1 import logging
2 import os
3 import shutil
4 import tempfile
5 from functools import lru_cache
6 from typing import List
7 from typing import Optional
8 from typing import Tuple
9
10 import magic
11 from django.conf import settings
12 from pdf2image import convert_from_path
13 from pikepdf import Page
14 from pikepdf import Pdf
15 from pikepdf import PdfImage
16 from PIL import Image
17 from PIL import ImageSequence
18 from pyzbar import pyzbar
19
20 logger = logging.getLogger("paperless.barcodes")
21
22
23 class BarcodeImageFormatError(Exception):
24 pass
25
26
27 @lru_cache(maxsize=8)
28 def supported_file_type(mime_type) -> bool:
29 """
30 Determines if the file is valid for barcode
31 processing, based on MIME type and settings
32
33 :return: True if the file is supported, False otherwise
34 """
35 supported_mime = ["application/pdf"]
36 if settings.CONSUMER_BARCODE_TIFF_SUPPORT:
37 supported_mime += ["image/tiff"]
38
39 return mime_type in supported_mime
40
41
42 def barcode_reader(image: Image) -> List[str]:
43 """
44 Read any barcodes contained in image
45 Returns a list containing all found barcodes
46 """
47 barcodes = []
48 # Decode the barcode image
49 detected_barcodes = pyzbar.decode(image)
50
51 if detected_barcodes:
52 # Traverse through all the detected barcodes in image
53 for barcode in detected_barcodes:
54 if barcode.data:
55 decoded_barcode = barcode.data.decode("utf-8")
56 barcodes.append(decoded_barcode)
57 logger.debug(
58 f"Barcode of type {str(barcode.type)} found: {decoded_barcode}",
59 )
60 return barcodes
61
62
63 def get_file_mime_type(path: str) -> str:
64 """
65 Determines the file type, based on MIME type.
66
67 Returns the MIME type.
68 """
69 mime_type = magic.from_file(path, mime=True)
70 logger.debug(f"Detected mime type: {mime_type}")
71 return mime_type
72
73
74 def convert_from_tiff_to_pdf(filepath: str) -> str:
75 """
76 converts a given TIFF image file to pdf into a temporary directory.
77
78 Returns the new pdf file.
79 """
80 file_name = os.path.splitext(os.path.basename(filepath))[0]
81 mime_type = get_file_mime_type(filepath)
82 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
83 # use old file name with pdf extension
84 if mime_type == "image/tiff":
85 newpath = os.path.join(tempdir, file_name + ".pdf")
86 else:
87 logger.warning(
88 f"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.",
89 )
90 return None
91 with Image.open(filepath) as image:
92 images = []
93 for i, page in enumerate(ImageSequence.Iterator(image)):
94 page = page.convert("RGB")
95 images.append(page)
96 try:
97 if len(images) == 1:
98 images[0].save(newpath)
99 else:
100 images[0].save(newpath, save_all=True, append_images=images[1:])
101 except OSError as e:
102 logger.warning(
103 f"Could not save the file as pdf. Error: {str(e)}",
104 )
105 return None
106 return newpath
107
108
109 def scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:
110 """
111 Scan the provided pdf file for page separating barcodes
112 Returns a PDF filepath and a list of pagenumbers,
113 which separate the file into new files
114 """
115
116 def _pikepdf_barcode_scan(pdf_filepath: str):
117 with Pdf.open(pdf_filepath) as pdf:
118 for page_num, page in enumerate(pdf.pages):
119 for image_key in page.images:
120 pdfimage = PdfImage(page.images[image_key])
121
122 if "/CCITTFaxDecode" in pdfimage.filters:
123 raise BarcodeImageFormatError()
124
125 # Not all images can be transcoded to a PIL image, which
126 # is what pyzbar expects to receive
127 pillow_img = pdfimage.as_pil_image()
128
129 detected_barcodes = barcode_reader(pillow_img)
130
131 if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
132 separator_page_numbers.append(page_num)
133
134 def _pdf2image_barcode_scan(pdf_filepath: str):
135 # use a temporary directory in case the file os too big to handle in memory
136 with tempfile.TemporaryDirectory() as path:
137 pages_from_path = convert_from_path(pdf_filepath, output_folder=path)
138 for current_page_number, page in enumerate(pages_from_path):
139 current_barcodes = barcode_reader(page)
140 if settings.CONSUMER_BARCODE_STRING in current_barcodes:
141 separator_page_numbers.append(current_page_number)
142
143 separator_page_numbers = []
144 pdf_filepath = None
145
146 mime_type = get_file_mime_type(filepath)
147
148 if supported_file_type(mime_type):
149 pdf_filepath = filepath
150 if mime_type == "image/tiff":
151 pdf_filepath = convert_from_tiff_to_pdf(filepath)
152
153 if settings.CONSUMER_USE_LEGACY_DETECTION:
154 _pdf2image_barcode_scan(pdf_filepath)
155 else:
156 try:
157 _pikepdf_barcode_scan(pdf_filepath)
158 except Exception as e:
159
160 logger.warning(
161 f"Exception using pikepdf for barcodes,"
162 f" falling back to pdf2image: {e}",
163 )
164 # Reset this incase pikepdf got part way through
165 separator_page_numbers = []
166 _pdf2image_barcode_scan(pdf_filepath)
167
168 else:
169 logger.warning(
170 f"Unsupported file format for barcode reader: {str(mime_type)}",
171 )
172 return pdf_filepath, separator_page_numbers
173
174
175 def separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:
176 """
177 Separate the provided pdf file on the pages_to_split_on.
178 The pages which are defined by page_numbers will be removed.
179 Returns a list of (temporary) filepaths to consume.
180 These will need to be deleted later.
181 """
182
183 document_paths = []
184
185 if not pages_to_split_on:
186 logger.warning("No pages to split on!")
187 return document_paths
188
189 os.makedirs(settings.SCRATCH_DIR, exist_ok=True)
190 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
191 fname = os.path.splitext(os.path.basename(filepath))[0]
192 pdf = Pdf.open(filepath)
193
194 # A list of documents, ie a list of lists of pages
195 documents: List[List[Page]] = []
196 # A single document, ie a list of pages
197 document: List[Page] = []
198
199 for idx, page in enumerate(pdf.pages):
200 # Keep building the new PDF as long as it is not a
201 # separator index
202 if idx not in pages_to_split_on:
203 document.append(page)
204 # Make sure to append the very last document to the documents
205 if idx == (len(pdf.pages) - 1):
206 documents.append(document)
207 document = []
208 else:
209 # This is a split index, save the current PDF pages, and restart
210 # a new destination page listing
211 logger.debug(f"Starting new document at idx {idx}")
212 documents.append(document)
213 document = []
214
215 documents = [x for x in documents if len(x)]
216
217 logger.debug(f"Split into {len(documents)} new documents")
218
219 # Write the new documents out
220 for doc_idx, document in enumerate(documents):
221 dst = Pdf.new()
222 dst.pages.extend(document)
223
224 output_filename = f"{fname}_document_{doc_idx}.pdf"
225
226 logger.debug(f"pdf no:{doc_idx} has {len(dst.pages)} pages")
227 savepath = os.path.join(tempdir, output_filename)
228 with open(savepath, "wb") as out:
229 dst.save(out)
230 document_paths.append(savepath)
231
232 return document_paths
233
234
235 def save_to_dir(
236 filepath: str,
237 newname: str = None,
238 target_dir: str = settings.CONSUMPTION_DIR,
239 ):
240 """
241 Copies filepath to target_dir.
242 Optionally rename the file.
243 """
244 if os.path.isfile(filepath) and os.path.isdir(target_dir):
245 dst = shutil.copy(filepath, target_dir)
246 logging.debug(f"saved {str(filepath)} to {str(dst)}")
247 if newname:
248 dst_new = os.path.join(target_dir, newname)
249 logger.debug(f"moving {str(dst)} to {str(dst_new)}")
250 os.rename(dst, dst_new)
251 else:
252 logger.warning(f"{str(filepath)} or {str(target_dir)} don't exist.")
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py
--- a/src/documents/barcodes.py
+++ b/src/documents/barcodes.py
@@ -10,9 +10,12 @@
import magic
from django.conf import settings
from pdf2image import convert_from_path
+from pdf2image.exceptions import PDFPageCountError
from pikepdf import Page
+from pikepdf import PasswordError
from pikepdf import Pdf
from pikepdf import PdfImage
+from pikepdf.models.image import HifiPrintImageNotTranscodableError
from PIL import Image
from PIL import ImageSequence
from pyzbar import pyzbar
@@ -120,7 +123,9 @@
pdfimage = PdfImage(page.images[image_key])
if "/CCITTFaxDecode" in pdfimage.filters:
- raise BarcodeImageFormatError()
+ raise BarcodeImageFormatError(
+ "Unable to decode CCITTFaxDecode images",
+ )
# Not all images can be transcoded to a PIL image, which
# is what pyzbar expects to receive
@@ -132,7 +137,7 @@
separator_page_numbers.append(page_num)
def _pdf2image_barcode_scan(pdf_filepath: str):
- # use a temporary directory in case the file os too big to handle in memory
+ # use a temporary directory in case the file is too big to handle in memory
with tempfile.TemporaryDirectory() as path:
pages_from_path = convert_from_path(pdf_filepath, output_folder=path)
for current_page_number, page in enumerate(pages_from_path):
@@ -150,20 +155,42 @@
if mime_type == "image/tiff":
pdf_filepath = convert_from_tiff_to_pdf(filepath)
+ # Chose the scanner
if settings.CONSUMER_USE_LEGACY_DETECTION:
- _pdf2image_barcode_scan(pdf_filepath)
+ logger.debug("Using pdf2image for barcodes")
+ scanner_function = _pdf2image_barcode_scan
else:
- try:
- _pikepdf_barcode_scan(pdf_filepath)
- except Exception as e:
+ logger.debug("Using pikepdf for barcodes")
+ scanner_function = _pikepdf_barcode_scan
- logger.warning(
- f"Exception using pikepdf for barcodes,"
- f" falling back to pdf2image: {e}",
- )
- # Reset this incase pikepdf got part way through
+ # Run the scanner
+ try:
+ scanner_function(pdf_filepath)
+ # Neither method can handle password protected PDFs without it being
+ # provided. Log it and continue
+ except (PasswordError, PDFPageCountError) as e:
+ logger.warning(
+ f"File is likely password protected, not splitting: {e}",
+ )
+ # Handle pikepdf related image decoding issues with a fallback
+ except (BarcodeImageFormatError, HifiPrintImageNotTranscodableError) as e:
+ logger.warning(
+ f"Falling back to pdf2image because: {e}",
+ )
+ try:
separator_page_numbers = []
_pdf2image_barcode_scan(pdf_filepath)
+ # This file is really borked, allow the consumption to continue
+ # but it may fail further on
+ except Exception as e: # pragma: no cover
+ logger.warning(
+ f"Exception during barcode scanning: {e}",
+ )
+ # We're not sure what happened, but allow the consumption to continue
+ except Exception as e: # pragma: no cover
+ logger.warning(
+ f"Exception during barcode scanning: {e}",
+ )
else:
logger.warning(
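With this patch, a password-protected PDF no longer raises out of the barcode step: the scan logs a warning and returns an empty separator list, so the caller (`documents.tasks.consume_file` in the traceback above) consumes the file unsplit. A rough caller-side sketch of the new behaviour (illustrative only; the real task code is more involved and the path is a placeholder):

```python
from documents import barcodes

# After the patch, an encrypted upload yields no separator pages instead of
# raising PasswordError, so consumption continues with the original file.
pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(
    "/tmp/paperless/paperless-upload-example.pdf",  # placeholder path
)
if separators:
    document_paths = barcodes.separate_pages(pdf_filepath, separators)
else:
    document_paths = [pdf_filepath]  # nothing to split on; consume as-is
```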
| {"golden_diff": "diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py\n--- a/src/documents/barcodes.py\n+++ b/src/documents/barcodes.py\n@@ -10,9 +10,12 @@\n import magic\n from django.conf import settings\n from pdf2image import convert_from_path\n+from pdf2image.exceptions import PDFPageCountError\n from pikepdf import Page\n+from pikepdf import PasswordError\n from pikepdf import Pdf\n from pikepdf import PdfImage\n+from pikepdf.models.image import HifiPrintImageNotTranscodableError\n from PIL import Image\n from PIL import ImageSequence\n from pyzbar import pyzbar\n@@ -120,7 +123,9 @@\n pdfimage = PdfImage(page.images[image_key])\n \n if \"/CCITTFaxDecode\" in pdfimage.filters:\n- raise BarcodeImageFormatError()\n+ raise BarcodeImageFormatError(\n+ \"Unable to decode CCITTFaxDecode images\",\n+ )\n \n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n@@ -132,7 +137,7 @@\n separator_page_numbers.append(page_num)\n \n def _pdf2image_barcode_scan(pdf_filepath: str):\n- # use a temporary directory in case the file os too big to handle in memory\n+ # use a temporary directory in case the file is too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n@@ -150,20 +155,42 @@\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n \n+ # Chose the scanner\n if settings.CONSUMER_USE_LEGACY_DETECTION:\n- _pdf2image_barcode_scan(pdf_filepath)\n+ logger.debug(\"Using pdf2image for barcodes\")\n+ scanner_function = _pdf2image_barcode_scan\n else:\n- try:\n- _pikepdf_barcode_scan(pdf_filepath)\n- except Exception as e:\n+ logger.debug(\"Using pikepdf for barcodes\")\n+ scanner_function = _pikepdf_barcode_scan\n \n- logger.warning(\n- f\"Exception using pikepdf for barcodes,\"\n- f\" falling back to pdf2image: {e}\",\n- )\n- # Reset this incase pikepdf got part way through\n+ # Run the scanner\n+ try:\n+ scanner_function(pdf_filepath)\n+ # Neither method can handle password protected PDFs without it being\n+ # provided. Log it and continue\n+ except (PasswordError, PDFPageCountError) as e:\n+ logger.warning(\n+ f\"File is likely password protected, not splitting: {e}\",\n+ )\n+ # Handle pikepdf related image decoding issues with a fallback\n+ except (BarcodeImageFormatError, HifiPrintImageNotTranscodableError) as e:\n+ logger.warning(\n+ f\"Falling back to pdf2image because: {e}\",\n+ )\n+ try:\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n+ # This file is really borked, allow the consumption to continue\n+ # but it may fail further on\n+ except Exception as e: # pragma: no cover\n+ logger.warning(\n+ f\"Exception during barcode scanning: {e}\",\n+ )\n+ # We're not sure what happened, but allow the consumption to continue\n+ except Exception as e: # pragma: no cover\n+ logger.warning(\n+ f\"Exception during barcode scanning: {e}\",\n+ )\n \n else:\n logger.warning(\n", "issue": "[BUG] Barcode seperator processing fails with password protected PDFs\n### Description\n\nWith PAPERLESS_CONSUMER_ENABLE_BARCODES=true uploading a password protected PDF causes the file task to fail. It appears that the barcode separation stage fails with an error rather than skipping the document. 
The same document imports correctly with the above settings commented out.\r\n\r\nTraceback from the failed task:\r\n```python\r\n/tmp/paperless/paperless-upload-0bj7vn9g: invalid password : Traceback (most recent call last):\r\n File \"/usr/src/paperless/src/src/django-q/django_q/cluster.py\", line 454, in worker\r\n res = f(*task[\"args\"], **task[\"kwargs\"])\r\n File \"/usr/src/paperless/src/documents/tasks.py\", line 99, in consume_file\r\n pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)\r\n File \"/usr/src/paperless/src/documents/barcodes.py\", line 121, in scan_file_for_separating_barcodes\r\n pdf = Pdf.open(pdf_filepath)\r\n File \"/usr/local/lib/python3.9/site-packages/pikepdf/_methods.py\", line 791, in open\r\n pdf = Pdf._open(\r\npikepdf._qpdf.PasswordError: /tmp/paperless/paperless-upload-0bj7vn9g: invalid password\r\n```\n\n### Steps to reproduce\n\n1. Launch paperless with PAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\n2. Upload password protected PDF\r\n3. Check the file tasks menu for failed jobs\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.9.2\n\n### Host OS\n\nFedora Server 36\n\n### Installation method\n\nOther (please describe above)\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\nOfficial docker image running under root podman\n", "before_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pdf2image import convert_from_path\nfrom pikepdf import Page\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\nclass BarcodeImageFormatError(Exception):\n pass\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with 
pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n def _pikepdf_barcode_scan(pdf_filepath: str):\n with Pdf.open(pdf_filepath) as pdf:\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n\n if \"/CCITTFaxDecode\" in pdfimage.filters:\n raise BarcodeImageFormatError()\n\n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n\n def _pdf2image_barcode_scan(pdf_filepath: str):\n # use a temporary directory in case the file os too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n current_barcodes = barcode_reader(page)\n if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n separator_page_numbers.append(current_page_number)\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n if settings.CONSUMER_USE_LEGACY_DETECTION:\n _pdf2image_barcode_scan(pdf_filepath)\n else:\n try:\n _pikepdf_barcode_scan(pdf_filepath)\n except Exception as e:\n\n logger.warning(\n f\"Exception using pikepdf for barcodes,\"\n f\" falling back to pdf2image: {e}\",\n )\n # Reset this incase pikepdf got part way through\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n 
document: List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}], "after_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pdf2image import convert_from_path\nfrom pdf2image.exceptions import PDFPageCountError\nfrom pikepdf import Page\nfrom pikepdf import PasswordError\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom pikepdf.models.image import HifiPrintImageNotTranscodableError\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\nclass BarcodeImageFormatError(Exception):\n pass\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n 
mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n def _pikepdf_barcode_scan(pdf_filepath: str):\n with Pdf.open(pdf_filepath) as pdf:\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n\n if \"/CCITTFaxDecode\" in pdfimage.filters:\n raise BarcodeImageFormatError(\n \"Unable to decode CCITTFaxDecode images\",\n )\n\n # Not all images can be transcoded to a PIL image, which\n # is what pyzbar expects to receive\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n\n def _pdf2image_barcode_scan(pdf_filepath: str):\n # use a temporary directory in case the file is too big to handle in memory\n with tempfile.TemporaryDirectory() as path:\n pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n for current_page_number, page in enumerate(pages_from_path):\n current_barcodes = barcode_reader(page)\n if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n separator_page_numbers.append(current_page_number)\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n # Chose the scanner\n if settings.CONSUMER_USE_LEGACY_DETECTION:\n logger.debug(\"Using pdf2image for barcodes\")\n scanner_function = _pdf2image_barcode_scan\n else:\n logger.debug(\"Using pikepdf for barcodes\")\n scanner_function = _pikepdf_barcode_scan\n\n # Run the scanner\n try:\n scanner_function(pdf_filepath)\n # Neither method can handle password protected PDFs without it being\n # provided. 
Log it and continue\n except (PasswordError, PDFPageCountError) as e:\n logger.warning(\n f\"File is likely password protected, not splitting: {e}\",\n )\n # Handle pikepdf related image decoding issues with a fallback\n except (BarcodeImageFormatError, HifiPrintImageNotTranscodableError) as e:\n logger.warning(\n f\"Falling back to pdf2image because: {e}\",\n )\n try:\n separator_page_numbers = []\n _pdf2image_barcode_scan(pdf_filepath)\n # This file is really borked, allow the consumption to continue\n # but it may fail further on\n except Exception as e: # pragma: no cover\n logger.warning(\n f\"Exception during barcode scanning: {e}\",\n )\n # We're not sure what happened, but allow the consumption to continue\n except Exception as e: # pragma: no cover\n logger.warning(\n f\"Exception during barcode scanning: {e}\",\n )\n\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n document: List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}]} | 3,173 | 837 |
gh_patches_debug_24756 | rasdani/github-patches | git_diff | netbox-community__netbox-2290 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deficiency in new webhook implementation
<!--
Before opening a new issue, please search through the existing issues to
see if your topic has already been addressed. Note that you may need to
remove the "is:open" filter from the search bar to include closed issues.
Check the appropriate type for your issue below by placing an x between the
brackets. For assistance with installation issues, or for any other issues
other than those listed below, please raise your topic for discussion on
our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please note that issues which do not fall under any of the below categories
will be closed. Due to an excessive backlog of feature requests, we are
not currently accepting any proposals which extend NetBox's feature scope.
Do not prepend any sort of tag to your issue's title. An administrator will
review your issue and assign labels as appropriate.
--->
### Issue type
[ ] Feature request <!-- An enhancement of existing functionality -->
[X] Bug report <!-- Unexpected or erroneous behavior -->
[ ] Documentation <!-- A modification to the documentation -->
<!--
Please describe the environment in which you are running NetBox. (Be sure
to verify that you are running the latest stable release of NetBox before
submitting a bug report.) If you are submitting a bug report and have made
any changes to the code base, please first validate that your bug can be
recreated while running an official release.
-->
### Environment
* Python version: python 2.7.5
* NetBox version: develop-2.4
<!--
BUG REPORTS must include:
* A list of the steps needed for someone else to reproduce the bug
* A description of the expected and observed behavior
* Any relevant error messages (screenshots may also help)
FEATURE REQUESTS must include:
* A detailed description of the proposed functionality
* A use case for the new feature
* A rough description of any necessary changes to the database schema
* Any relevant third-party libraries which would be needed
-->
### Description
While testing out the webhook implementation, I discovered the following bug:
when a model contains a custom field of type `date`, the worker is unable to serialize the data for transmission:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/rq/worker.py", line 793, in perform_job
rv = job.perform()
File "/usr/lib/python2.7/site-packages/rq/job.py", line 599, in perform
self._result = self._execute()
File "/usr/lib/python2.7/site-packages/rq/job.py", line 605, in _execute
return self.func(*self.args, **self.kwargs)
File "/opt/netbox/netbox/extras/webhooks_worker.py", line 44, in process_webhook
prepared_request = requests.Request(**params).prepare()
File "/usr/lib/python2.7/site-packages/requests/models.py", line 259, in prepare
hooks=self.hooks,
File "/usr/lib/python2.7/site-packages/requests/models.py", line 307, in prepare
self.prepare_body(data, files, json)
File "/usr/lib/python2.7/site-packages/requests/models.py", line 427, in prepare_body
body = json_dumps(json)
File "/usr/lib64/python2.7/site-packages/simplejson/__init__.py", line 382, in dumps
return _default_encoder.encode(obj)
File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 291, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 373, in iterencode
return _iterencode(o, 0)
File "/usr/lib64/python2.7/site-packages/simplejson/encoder.py", line 268, in default
o.__class__.__name__)
TypeError: Object of type date is not JSON serializable
```
--- END ISSUE ---
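The traceback above comes down to the default JSON encoder not knowing how to handle `datetime.date` values coming from custom fields. A minimal sketch of the direction such a fix can take, assuming Django REST Framework is available (it ships a `JSONEncoder` that knows about dates); the payload values below are made up for illustration:

```python
import datetime
import json

from rest_framework.utils.encoders import JSONEncoder

# A payload shaped like the one the webhook worker builds, with a date-typed
# custom field that plain json/simplejson cannot serialize.
payload = {
    'event': 'updated',
    'timestamp': '2018-07-01T12:00:00Z',
    'model': 'device',
    'data': {'custom_fields': {'warranty_end': datetime.date(2019, 1, 1)}},
}

# json.dumps(payload) would raise "Object of type date is not JSON serializable";
# serializing with DRF's encoder first converts dates (and Decimal, UUID, ...)
# to strings, so the result can be handed to requests as a plain body.
body = json.dumps(payload, cls=JSONEncoder)
```

Pre-serializing the JSON payload this way and passing the resulting string to `requests` as `data` sidesteps the error for the JSON content type.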
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/extras/webhooks_worker.py`
Content:
```
1 import hashlib
2 import hmac
3
4 import requests
5 from django_rq import job
6
7 from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES
8
9
10 @job('default')
11 def process_webhook(webhook, data, model_class, event, timestamp):
12 """
13 Make a POST request to the defined Webhook
14 """
15 payload = {
16 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],
17 'timestamp': timestamp,
18 'model': model_class.__name__,
19 'data': data
20 }
21 headers = {
22 'Content-Type': webhook.get_http_content_type_display(),
23 }
24 params = {
25 'method': 'POST',
26 'url': webhook.payload_url,
27 'headers': headers
28 }
29
30 if webhook.http_content_type == WEBHOOK_CT_JSON:
31 params.update({'json': payload})
32 elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:
33 params.update({'data': payload})
34
35 prepared_request = requests.Request(**params).prepare()
36
37 if webhook.secret != '':
38 # sign the request with the secret
39 hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, digestmod=hashlib.sha512)
40 prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest()
41
42 with requests.Session() as session:
43 session.verify = webhook.ssl_verification
44 response = session.send(prepared_request)
45
46 if response.status_code >= 200 and response.status_code <= 299:
47 return 'Status {} returned, webhook successfully processed.'.format(response.status_code)
48 else:
49 raise requests.exceptions.RequestException(
50 "Status {} returned, webhook FAILED to process.".format(response.status_code)
51 )
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py
--- a/netbox/extras/webhooks_worker.py
+++ b/netbox/extras/webhooks_worker.py
@@ -1,8 +1,10 @@
import hashlib
import hmac
-
import requests
+import json
+
from django_rq import job
+from rest_framework.utils.encoders import JSONEncoder
from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES
@@ -13,9 +15,9 @@
Make a POST request to the defined Webhook
"""
payload = {
- 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],
+ 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(),
'timestamp': timestamp,
- 'model': model_class.__name__,
+ 'model': model_class._meta.model_name,
'data': data
}
headers = {
@@ -28,7 +30,7 @@
}
if webhook.http_content_type == WEBHOOK_CT_JSON:
- params.update({'json': payload})
+ params.update({'data': json.dumps(payload, cls=JSONEncoder)})
elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:
params.update({'data': payload})
| {"golden_diff": "diff --git a/netbox/extras/webhooks_worker.py b/netbox/extras/webhooks_worker.py\n--- a/netbox/extras/webhooks_worker.py\n+++ b/netbox/extras/webhooks_worker.py\n@@ -1,8 +1,10 @@\n import hashlib\n import hmac\n-\n import requests\n+import json\n+\n from django_rq import job\n+from rest_framework.utils.encoders import JSONEncoder\n \n from extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n \n@@ -13,9 +15,9 @@\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n- 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n+ 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(),\n 'timestamp': timestamp,\n- 'model': model_class.__name__,\n+ 'model': model_class._meta.model_name,\n 'data': data\n }\n headers = {\n@@ -28,7 +30,7 @@\n }\n \n if webhook.http_content_type == WEBHOOK_CT_JSON:\n- params.update({'json': payload})\n+ params.update({'data': json.dumps(payload, cls=JSONEncoder)})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n", "issue": "deficiency in new webhook implementation\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[ ] Feature request <!-- An enhancement of existing functionality -->\r\n[X] Bug report <!-- Unexpected or erroneous behavior -->\r\n[ ] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: python 2.7.5\r\n* NetBox version: develop-2.4\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\nTesting out the webhook implementation and discovered the following bug:\r\n when a model contains a custom field of type date the worker is unable to serialize the data for transmission\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/site-packages/rq/worker.py\", line 793, in perform_job\r\n rv = job.perform()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 599, in perform\r\n self._result = self._execute()\r\n File \"/usr/lib/python2.7/site-packages/rq/job.py\", line 605, in _execute\r\n return self.func(*self.args, **self.kwargs)\r\n File \"/opt/netbox/netbox/extras/webhooks_worker.py\", line 44, in process_webhook\r\n prepared_request = requests.Request(**params).prepare()\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 259, in prepare\r\n hooks=self.hooks,\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 307, in prepare\r\n self.prepare_body(data, files, json)\r\n File \"/usr/lib/python2.7/site-packages/requests/models.py\", line 427, in prepare_body\r\n body = json_dumps(json)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/__init__.py\", line 382, in dumps\r\n return _default_encoder.encode(obj)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 291, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 373, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/usr/lib64/python2.7/site-packages/simplejson/encoder.py\", line 268, in default\r\n o.__class__.__name__)\r\nTypeError: Object of type date is not JSON serializable\r\n```\n", "before_files": [{"content": "import hashlib\nimport hmac\n\nimport requests\nfrom django_rq import job\n\nfrom extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n\n\n@job('default')\ndef process_webhook(webhook, data, model_class, event, timestamp):\n \"\"\"\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event],\n 'timestamp': timestamp,\n 'model': model_class.__name__,\n 'data': data\n }\n headers = {\n 'Content-Type': webhook.get_http_content_type_display(),\n }\n params = {\n 'method': 'POST',\n 'url': webhook.payload_url,\n 'headers': headers\n }\n\n if webhook.http_content_type == WEBHOOK_CT_JSON:\n params.update({'json': payload})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n\n prepared_request = requests.Request(**params).prepare()\n\n if webhook.secret != '':\n # sign the request with the secret\n hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, 
digestmod=hashlib.sha512)\n prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest()\n\n with requests.Session() as session:\n session.verify = webhook.ssl_verification\n response = session.send(prepared_request)\n\n if response.status_code >= 200 and response.status_code <= 299:\n return 'Status {} returned, webhook successfully processed.'.format(response.status_code)\n else:\n raise requests.exceptions.RequestException(\n \"Status {} returned, webhook FAILED to process.\".format(response.status_code)\n )\n", "path": "netbox/extras/webhooks_worker.py"}], "after_files": [{"content": "import hashlib\nimport hmac\nimport requests\nimport json\n\nfrom django_rq import job\nfrom rest_framework.utils.encoders import JSONEncoder\n\nfrom extras.constants import WEBHOOK_CT_JSON, WEBHOOK_CT_X_WWW_FORM_ENCODED, OBJECTCHANGE_ACTION_CHOICES\n\n\n@job('default')\ndef process_webhook(webhook, data, model_class, event, timestamp):\n \"\"\"\n Make a POST request to the defined Webhook\n \"\"\"\n payload = {\n 'event': dict(OBJECTCHANGE_ACTION_CHOICES)[event].lower(),\n 'timestamp': timestamp,\n 'model': model_class._meta.model_name,\n 'data': data\n }\n headers = {\n 'Content-Type': webhook.get_http_content_type_display(),\n }\n params = {\n 'method': 'POST',\n 'url': webhook.payload_url,\n 'headers': headers\n }\n\n if webhook.http_content_type == WEBHOOK_CT_JSON:\n params.update({'data': json.dumps(payload, cls=JSONEncoder)})\n elif webhook.http_content_type == WEBHOOK_CT_X_WWW_FORM_ENCODED:\n params.update({'data': payload})\n\n prepared_request = requests.Request(**params).prepare()\n\n if webhook.secret != '':\n # sign the request with the secret\n hmac_prep = hmac.new(bytearray(webhook.secret, 'utf8'), prepared_request.body, digestmod=hashlib.sha512)\n prepared_request.headers['X-Hook-Signature'] = hmac_prep.hexdigest()\n\n with requests.Session() as session:\n session.verify = webhook.ssl_verification\n response = session.send(prepared_request)\n\n if response.status_code >= 200 and response.status_code <= 299:\n return 'Status {} returned, webhook successfully processed.'.format(response.status_code)\n else:\n raise requests.exceptions.RequestException(\n \"Status {} returned, webhook FAILED to process.\".format(response.status_code)\n )\n", "path": "netbox/extras/webhooks_worker.py"}]} | 1,637 | 287 |
gh_patches_debug_17598 | rasdani/github-patches | git_diff | archlinux__archinstall-469 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cmd_output is undeclared in luks.py but is used when raising an exception
```
if cmd_handle.exit_code != 0:
raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
```
This will need to be rewritten to get the output from the `cmd_handle` instance of the `SysCommand` class; otherwise, when this exception is raised, the output won't be displayed and we'll instead get an undeclared-variable error.
--- END ISSUE ---
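For context, the same module already reads a `SysCommand`'s output by joining the bytes it yields (for example `b"".join(handle)` in `luks2.format()`), so the failing error message can be built the same way. A hedged sketch of the corrected excerpt; `cmd_handle`, `partition` and `DiskError` come from the surrounding module, and the decode step is only an assumption for readability:

```python
# Sketch of the corrected error path in luks2.encrypt(): read the output from
# the existing cmd_handle instead of the undefined cmd_output variable.
if cmd_handle.exit_code != 0:
    cmd_output = b"".join(cmd_handle).decode('UTF-8', errors='replace')
    raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
```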
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/luks.py`
Content:
```
1 import pathlib
2
3 from .disk import Partition
4 from .general import *
5 from .output import log
6
7
8 class luks2:
9 def __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):
10 self.password = password
11 self.partition = partition
12 self.mountpoint = mountpoint
13 self.args = args
14 self.kwargs = kwargs
15 self.key_file = key_file
16 self.auto_unmount = auto_unmount
17 self.filesystem = 'crypto_LUKS'
18 self.mapdev = None
19
20 def __enter__(self):
21 # if self.partition.allow_formatting:
22 # self.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)
23 # else:
24 if not self.key_file:
25 self.key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
26
27 if type(self.password) != bytes:
28 self.password = bytes(self.password, 'UTF-8')
29
30 with open(self.key_file, 'wb') as fh:
31 fh.write(self.password)
32
33 return self.unlock(self.partition, self.mountpoint, self.key_file)
34
35 def __exit__(self, *args, **kwargs):
36 # TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager
37 if self.auto_unmount:
38 self.close()
39
40 if len(args) >= 2 and args[1]:
41 raise args[1]
42 return True
43
44 def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):
45 if not self.partition.allow_formatting:
46 raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')
47
48 log(f'Encrypting {partition} (This might take a while)', level=logging.INFO)
49
50 if not key_file:
51 if self.key_file:
52 key_file = self.key_file
53 else:
54 key_file = f"/tmp/{os.path.basename(self.partition.path)}.disk_pw" # TODO: Make disk-pw-file randomly unique?
55
56 if not password:
57 password = self.password
58
59 if type(password) != bytes:
60 password = bytes(password, 'UTF-8')
61
62 with open(key_file, 'wb') as fh:
63 fh.write(password)
64
65 cryptsetup_args = shlex.join([
66 '/usr/bin/cryptsetup',
67 '--batch-mode',
68 '--verbose',
69 '--type', 'luks2',
70 '--pbkdf', 'argon2id',
71 '--hash', hash_type,
72 '--key-size', str(key_size),
73 '--iter-time', str(iter_time),
74 '--key-file', os.path.abspath(key_file),
75 '--use-urandom',
76 'luksFormat', partition.path,
77 ])
78
79 try:
80 # Try to setup the crypt-device
81 cmd_handle = SysCommand(cryptsetup_args)
82 except SysCallError as err:
83 if err.exit_code == 256:
84 log(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=logging.DEBUG)
85 # Partition was in use, unmount it and try again
86 partition.unmount()
87
88 # Get crypt-information about the device by doing a reverse lookup starting with the partition path
89 # For instance: /dev/sda
90 devinfo = json.loads(b''.join(SysCommand(f"lsblk --fs -J {partition.path}")).decode('UTF-8'))['blockdevices'][0]
91
92 # For each child (sub-partition/sub-device)
93 if len(children := devinfo.get('children', [])):
94 for child in children:
95 # Unmount the child location
96 if child_mountpoint := child.get('mountpoint', None):
97 log(f'Unmounting {child_mountpoint}', level=logging.DEBUG)
98 SysCommand(f"umount -R {child_mountpoint}")
99
100 # And close it if possible.
101 log(f"Closing crypt device {child['name']}", level=logging.DEBUG)
102 SysCommand(f"cryptsetup close {child['name']}")
103
104 # Then try again to set up the crypt-device
105 cmd_handle = SysCommand(cryptsetup_args)
106 else:
107 raise err
108
109 if cmd_handle.exit_code != 0:
110 raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
111
112 return key_file
113
114 def unlock(self, partition, mountpoint, key_file):
115 """
116 Mounts a luks2 compatible partition to a certain mountpoint.
117 Keyfile must be specified as there's no way to interact with the pw-prompt atm.
118
119 :param mountpoint: The name without absolute path, for instance "luksdev" will point to /dev/mapper/luksdev
120 :type mountpoint: str
121 """
122 from .disk import get_filesystem_type
123
124 if '/' in mountpoint:
125 os.path.basename(mountpoint) # TODO: Raise exception instead?
126
127 wait_timer = time.time()
128 while pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:
129 time.sleep(0.025)
130
131 SysCommand(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')
132 if os.path.islink(f'/dev/mapper/{mountpoint}'):
133 self.mapdev = f'/dev/mapper/{mountpoint}'
134 unlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)
135 unlocked_partition.allow_formatting = self.partition.allow_formatting
136 return unlocked_partition
137
138 def close(self, mountpoint=None):
139 if not mountpoint:
140 mountpoint = self.mapdev
141
142 SysCommand(f'/usr/bin/cryptsetup close {self.mapdev}')
143 return os.path.islink(self.mapdev) is False
144
145 def format(self, path):
146 if (handle := SysCommand(f"/usr/bin/cryptsetup -q -v luksErase {path}")).exit_code != 0:
147 raise DiskError(f'Could not format {path} with {self.filesystem} because: {b"".join(handle)}')
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py
--- a/archinstall/lib/luks.py
+++ b/archinstall/lib/luks.py
@@ -43,7 +43,7 @@
def encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):
if not self.partition.allow_formatting:
- raise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')
+ raise DiskError(f'Could not encrypt volume {partition} due to it having a formatting lock.')
log(f'Encrypting {partition} (This might take a while)', level=logging.INFO)
@@ -107,7 +107,7 @@
raise err
if cmd_handle.exit_code != 0:
- raise DiskError(f'Could not encrypt volume "{partition.path}": {cmd_output}')
+ raise DiskError(f'Could not encrypt volume "{partition.path}": {b"".join(cmd_handle)}')
return key_file
| {"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -43,7 +43,7 @@\n \n \tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n \t\tif not self.partition.allow_formatting:\n-\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n+\t\t\traise DiskError(f'Could not encrypt volume {partition} due to it having a formatting lock.')\n \n \t\tlog(f'Encrypting {partition} (This might take a while)', level=logging.INFO)\n \n@@ -107,7 +107,7 @@\n \t\t\t\traise err\n \n \t\tif cmd_handle.exit_code != 0:\n-\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n+\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {b\"\".join(cmd_handle)}')\n \n \t\treturn key_file\n", "issue": "cmd_output is undeclared in luks.py but is used when raising an exception\n```\r\n\t\tif cmd_handle.exit_code != 0:\r\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\r\n```\r\n\r\nThis will need to be written get the output from the cmd_handle instance of the SysCommand class, or else if this is thrown the output won't be displayed, and we'll instead get an undeclared variable error.\n", "before_files": [{"content": "import pathlib\n\nfrom .disk import Partition\nfrom .general import *\nfrom .output import log\n\n\nclass luks2:\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t# if self.partition.allow_formatting:\n\t\t# \tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t# else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {self.partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=logging.INFO)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = 
shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=logging.DEBUG)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(SysCommand(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=logging.DEBUG)\n\t\t\t\t\t\t\tSysCommand(f\"umount -R {child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=logging.DEBUG)\n\t\t\t\t\t\tSysCommand(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {cmd_output}')\n\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a luks2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tSysCommand(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := SysCommand(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}], "after_files": 
[{"content": "import pathlib\n\nfrom .disk import Partition\nfrom .general import *\nfrom .output import log\n\n\nclass luks2:\n\tdef __init__(self, partition, mountpoint, password, key_file=None, auto_unmount=False, *args, **kwargs):\n\t\tself.password = password\n\t\tself.partition = partition\n\t\tself.mountpoint = mountpoint\n\t\tself.args = args\n\t\tself.kwargs = kwargs\n\t\tself.key_file = key_file\n\t\tself.auto_unmount = auto_unmount\n\t\tself.filesystem = 'crypto_LUKS'\n\t\tself.mapdev = None\n\n\tdef __enter__(self):\n\t\t# if self.partition.allow_formatting:\n\t\t# \tself.key_file = self.encrypt(self.partition, *self.args, **self.kwargs)\n\t\t# else:\n\t\tif not self.key_file:\n\t\t\tself.key_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif type(self.password) != bytes:\n\t\t\tself.password = bytes(self.password, 'UTF-8')\n\n\t\twith open(self.key_file, 'wb') as fh:\n\t\t\tfh.write(self.password)\n\n\t\treturn self.unlock(self.partition, self.mountpoint, self.key_file)\n\n\tdef __exit__(self, *args, **kwargs):\n\t\t# TODO: https://stackoverflow.com/questions/28157929/how-to-safely-handle-an-exception-inside-a-context-manager\n\t\tif self.auto_unmount:\n\t\t\tself.close()\n\n\t\tif len(args) >= 2 and args[1]:\n\t\t\traise args[1]\n\t\treturn True\n\n\tdef encrypt(self, partition, password=None, key_size=512, hash_type='sha512', iter_time=10000, key_file=None):\n\t\tif not self.partition.allow_formatting:\n\t\t\traise DiskError(f'Could not encrypt volume {partition} due to it having a formatting lock.')\n\n\t\tlog(f'Encrypting {partition} (This might take a while)', level=logging.INFO)\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = f\"/tmp/{os.path.basename(self.partition.path)}.disk_pw\" # TODO: Make disk-pw-file randomly unique?\n\n\t\tif not password:\n\t\t\tpassword = self.password\n\n\t\tif type(password) != bytes:\n\t\t\tpassword = bytes(password, 'UTF-8')\n\n\t\twith open(key_file, 'wb') as fh:\n\t\t\tfh.write(password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', os.path.abspath(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', partition.path,\n\t\t])\n\n\t\ttry:\n\t\t\t# Try to setup the crypt-device\n\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\texcept SysCallError as err:\n\t\t\tif err.exit_code == 256:\n\t\t\t\tlog(f'{partition} is being used, trying to unmount and crypt-close the device and running one more attempt at encrypting the device.', level=logging.DEBUG)\n\t\t\t\t# Partition was in use, unmount it and try again\n\t\t\t\tpartition.unmount()\n\n\t\t\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t\t\t# For instance: /dev/sda\n\t\t\t\tdevinfo = json.loads(b''.join(SysCommand(f\"lsblk --fs -J {partition.path}\")).decode('UTF-8'))['blockdevices'][0]\n\n\t\t\t\t# For each child (sub-partition/sub-device)\n\t\t\t\tif len(children := devinfo.get('children', [])):\n\t\t\t\t\tfor child in children:\n\t\t\t\t\t\t# Unmount the child location\n\t\t\t\t\t\tif child_mountpoint := child.get('mountpoint', None):\n\t\t\t\t\t\t\tlog(f'Unmounting {child_mountpoint}', level=logging.DEBUG)\n\t\t\t\t\t\t\tSysCommand(f\"umount -R 
{child_mountpoint}\")\n\n\t\t\t\t\t\t# And close it if possible.\n\t\t\t\t\t\tlog(f\"Closing crypt device {child['name']}\", level=logging.DEBUG)\n\t\t\t\t\t\tSysCommand(f\"cryptsetup close {child['name']}\")\n\n\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\tcmd_handle = SysCommand(cryptsetup_args)\n\t\t\telse:\n\t\t\t\traise err\n\n\t\tif cmd_handle.exit_code != 0:\n\t\t\traise DiskError(f'Could not encrypt volume \"{partition.path}\": {b\"\".join(cmd_handle)}')\n\n\t\treturn key_file\n\n\tdef unlock(self, partition, mountpoint, key_file):\n\t\t\"\"\"\n\t\tMounts a luks2 compatible partition to a certain mountpoint.\n\t\tKeyfile must be specified as there's no way to interact with the pw-prompt atm.\n\n\t\t:param mountpoint: The name without absolute path, for instance \"luksdev\" will point to /dev/mapper/luksdev\n\t\t:type mountpoint: str\n\t\t\"\"\"\n\t\tfrom .disk import get_filesystem_type\n\n\t\tif '/' in mountpoint:\n\t\t\tos.path.basename(mountpoint) # TODO: Raise exception instead?\n\n\t\twait_timer = time.time()\n\t\twhile pathlib.Path(partition.path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {partition.path} {mountpoint} --key-file {os.path.abspath(key_file)} --type luks2')\n\t\tif os.path.islink(f'/dev/mapper/{mountpoint}'):\n\t\t\tself.mapdev = f'/dev/mapper/{mountpoint}'\n\t\t\tunlocked_partition = Partition(self.mapdev, None, encrypted=True, filesystem=get_filesystem_type(self.mapdev), autodetect_filesystem=False)\n\t\t\tunlocked_partition.allow_formatting = self.partition.allow_formatting\n\t\t\treturn unlocked_partition\n\n\tdef close(self, mountpoint=None):\n\t\tif not mountpoint:\n\t\t\tmountpoint = self.mapdev\n\n\t\tSysCommand(f'/usr/bin/cryptsetup close {self.mapdev}')\n\t\treturn os.path.islink(self.mapdev) is False\n\n\tdef format(self, path):\n\t\tif (handle := SysCommand(f\"/usr/bin/cryptsetup -q -v luksErase {path}\")).exit_code != 0:\n\t\t\traise DiskError(f'Could not format {path} with {self.filesystem} because: {b\"\".join(handle)}')\n", "path": "archinstall/lib/luks.py"}]} | 2,137 | 245 |
gh_patches_debug_31702 | rasdani/github-patches | git_diff | napari__napari-6821 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test vectors
# Description
This PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality.
One question is - should the input parameters be
`edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni?
## Type of change
<!-- Please delete options that are not relevant. -->
- [x] Bug-fix (non-breaking change which fixes an issue)
# How has this been tested?
<!-- Please describe the tests that you ran to verify your changes. -->
- [x] adds `napari/layers/vectors/tests/test_vectors.py`
## Final checklist:
- [x] My PR is the minimum possible work for the desired functionality
- [x] I have commented my code, particularly in hard-to-understand areas
- [x] I have made corresponding changes to the documentation
- [x] I have added tests that prove my fix is effective or that my feature works
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_app_model/actions/_view_actions.py`
Content:
```
1 """Actions related to the 'View' menu that do not require Qt.
2
3 View actions that do require Qt should go in
4 `napari/_qt/_qapp_model/qactions/_view.py`.
5 """
6
7 from app_model.types import Action, ToggleRule
8
9 from napari._app_model.actions._toggle_action import ViewerToggleAction
10 from napari._app_model.constants import CommandId, MenuGroup, MenuId
11 from napari.settings import get_settings
12
13 VIEW_ACTIONS: list[Action] = []
14 MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}
15
16 for cmd, viewer_attr, sub_attr in (
17 (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),
18 (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),
19 (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),
20 (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),
21 (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),
22 (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),
23 (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),
24 (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),
25 ):
26 VIEW_ACTIONS.append(
27 ViewerToggleAction(
28 id=cmd,
29 title=cmd.command_title,
30 viewer_attribute=viewer_attr,
31 sub_attribute=sub_attr,
32 menus=[{'id': MENUID_DICT[viewer_attr]}],
33 )
34 )
35
36
37 def _tooltip_visibility_toggle() -> None:
38 settings = get_settings().appearance
39 settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility
40
41
42 def _get_current_tooltip_visibility() -> bool:
43 return get_settings().appearance.layer_tooltip_visibility
44
45
46 VIEW_ACTIONS.extend(
47 [
48 # TODO: this could be made into a toggle setting Action subclass
49 # using a similar pattern to the above ViewerToggleAction classes
50 Action(
51 id=CommandId.TOGGLE_LAYER_TOOLTIPS,
52 title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,
53 menus=[
54 {
55 'id': MenuId.MENUBAR_VIEW,
56 'group': MenuGroup.RENDER,
57 'order': 10,
58 }
59 ],
60 callback=_tooltip_visibility_toggle,
61 toggled=ToggleRule(get_current=_get_current_tooltip_visibility),
62 ),
63 ]
64 )
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py
deleted file mode 100644
--- a/napari/_app_model/actions/_view_actions.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Actions related to the 'View' menu that do not require Qt.
-
-View actions that do require Qt should go in
-`napari/_qt/_qapp_model/qactions/_view.py`.
-"""
-
-from app_model.types import Action, ToggleRule
-
-from napari._app_model.actions._toggle_action import ViewerToggleAction
-from napari._app_model.constants import CommandId, MenuGroup, MenuId
-from napari.settings import get_settings
-
-VIEW_ACTIONS: list[Action] = []
-MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}
-
-for cmd, viewer_attr, sub_attr in (
- (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),
- (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),
- (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),
- (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),
- (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),
- (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),
- (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),
- (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),
-):
- VIEW_ACTIONS.append(
- ViewerToggleAction(
- id=cmd,
- title=cmd.command_title,
- viewer_attribute=viewer_attr,
- sub_attribute=sub_attr,
- menus=[{'id': MENUID_DICT[viewer_attr]}],
- )
- )
-
-
-def _tooltip_visibility_toggle() -> None:
- settings = get_settings().appearance
- settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility
-
-
-def _get_current_tooltip_visibility() -> bool:
- return get_settings().appearance.layer_tooltip_visibility
-
-
-VIEW_ACTIONS.extend(
- [
- # TODO: this could be made into a toggle setting Action subclass
- # using a similar pattern to the above ViewerToggleAction classes
- Action(
- id=CommandId.TOGGLE_LAYER_TOOLTIPS,
- title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,
- menus=[
- {
- 'id': MenuId.MENUBAR_VIEW,
- 'group': MenuGroup.RENDER,
- 'order': 10,
- }
- ],
- callback=_tooltip_visibility_toggle,
- toggled=ToggleRule(get_current=_get_current_tooltip_visibility),
- ),
- ]
-)
| {"golden_diff": "diff --git a/napari/_app_model/actions/_view_actions.py b/napari/_app_model/actions/_view_actions.py\ndeleted file mode 100644\n--- a/napari/_app_model/actions/_view_actions.py\n+++ /dev/null\n@@ -1,64 +0,0 @@\n-\"\"\"Actions related to the 'View' menu that do not require Qt.\n-\n-View actions that do require Qt should go in\n-`napari/_qt/_qapp_model/qactions/_view.py`.\n-\"\"\"\n-\n-from app_model.types import Action, ToggleRule\n-\n-from napari._app_model.actions._toggle_action import ViewerToggleAction\n-from napari._app_model.constants import CommandId, MenuGroup, MenuId\n-from napari.settings import get_settings\n-\n-VIEW_ACTIONS: list[Action] = []\n-MENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n-\n-for cmd, viewer_attr, sub_attr in (\n- (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n- (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n- (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n- (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n- (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n- (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n-):\n- VIEW_ACTIONS.append(\n- ViewerToggleAction(\n- id=cmd,\n- title=cmd.command_title,\n- viewer_attribute=viewer_attr,\n- sub_attribute=sub_attr,\n- menus=[{'id': MENUID_DICT[viewer_attr]}],\n- )\n- )\n-\n-\n-def _tooltip_visibility_toggle() -> None:\n- settings = get_settings().appearance\n- settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n-\n-\n-def _get_current_tooltip_visibility() -> bool:\n- return get_settings().appearance.layer_tooltip_visibility\n-\n-\n-VIEW_ACTIONS.extend(\n- [\n- # TODO: this could be made into a toggle setting Action subclass\n- # using a similar pattern to the above ViewerToggleAction classes\n- Action(\n- id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n- title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n- menus=[\n- {\n- 'id': MenuId.MENUBAR_VIEW,\n- 'group': MenuGroup.RENDER,\n- 'order': 10,\n- }\n- ],\n- callback=_tooltip_visibility_toggle,\n- toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n- ),\n- ]\n-)\n", "issue": "Test vectors\n# Description\r\nThis PR adds tests for the vectors layer, improves the doc strings and cleans up the code a tiny bit, but doesn't change any functionality.\r\n\r\nOne question is - should the input parameters be \r\n`edge_width`, `edge_color`, and `length` for the width, color, and the multiplicative length factor for the vectors or should they be something else. They used to just be `width`, `color`, and `length` but I added `edge_` to make the parameters the same as for the `points` and `shapes` layer, though you could argue that for the `points` layer the parameters do different things and that in the vectors layer we don't have a `face` and an `edge` so it is just confusing. I'm open to suggestions - personally I like the consistency - but we can change it. Thoughts @bryantChhun @kevinyamauchi @jni?\r\n \r\n## Type of change\r\n<!-- Please delete options that are not relevant. -->\r\n- [x] Bug-fix (non-breaking change which fixes an issue)\r\n\r\n# How has this been tested?\r\n<!-- Please describe the tests that you ran to verify your changes. 
-->\r\n- [x] adds `napari/layers/vectors/tests/test_vectors.py`\r\n\r\n## Final checklist:\r\n- [x] My PR is the minimum possible work for the desired functionality\r\n- [x] I have commented my code, particularly in hard-to-understand areas\r\n- [x] I have made corresponding changes to the documentation\r\n- [x] I have added tests that prove my fix is effective or that my feature works\r\n\n", "before_files": [{"content": "\"\"\"Actions related to the 'View' menu that do not require Qt.\n\nView actions that do require Qt should go in\n`napari/_qt/_qapp_model/qactions/_view.py`.\n\"\"\"\n\nfrom app_model.types import Action, ToggleRule\n\nfrom napari._app_model.actions._toggle_action import ViewerToggleAction\nfrom napari._app_model.constants import CommandId, MenuGroup, MenuId\nfrom napari.settings import get_settings\n\nVIEW_ACTIONS: list[Action] = []\nMENUID_DICT = {'axes': MenuId.VIEW_AXES, 'scale_bar': MenuId.VIEW_SCALEBAR}\n\nfor cmd, viewer_attr, sub_attr in (\n (CommandId.TOGGLE_VIEWER_AXES, 'axes', 'visible'),\n (CommandId.TOGGLE_VIEWER_AXES_COLORED, 'axes', 'colored'),\n (CommandId.TOGGLE_VIEWER_AXES_LABELS, 'axes', 'labels'),\n (CommandId.TOGGLE_VIEWER_AXES_DASHED, 'axes', 'dashed'),\n (CommandId.TOGGLE_VIEWER_AXES_ARROWS, 'axes', 'arrows'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR, 'scale_bar', 'visible'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_COLORED, 'scale_bar', 'colored'),\n (CommandId.TOGGLE_VIEWER_SCALE_BAR_TICKS, 'scale_bar', 'ticks'),\n):\n VIEW_ACTIONS.append(\n ViewerToggleAction(\n id=cmd,\n title=cmd.command_title,\n viewer_attribute=viewer_attr,\n sub_attribute=sub_attr,\n menus=[{'id': MENUID_DICT[viewer_attr]}],\n )\n )\n\n\ndef _tooltip_visibility_toggle() -> None:\n settings = get_settings().appearance\n settings.layer_tooltip_visibility = not settings.layer_tooltip_visibility\n\n\ndef _get_current_tooltip_visibility() -> bool:\n return get_settings().appearance.layer_tooltip_visibility\n\n\nVIEW_ACTIONS.extend(\n [\n # TODO: this could be made into a toggle setting Action subclass\n # using a similar pattern to the above ViewerToggleAction classes\n Action(\n id=CommandId.TOGGLE_LAYER_TOOLTIPS,\n title=CommandId.TOGGLE_LAYER_TOOLTIPS.command_title,\n menus=[\n {\n 'id': MenuId.MENUBAR_VIEW,\n 'group': MenuGroup.RENDER,\n 'order': 10,\n }\n ],\n callback=_tooltip_visibility_toggle,\n toggled=ToggleRule(get_current=_get_current_tooltip_visibility),\n ),\n ]\n)\n", "path": "napari/_app_model/actions/_view_actions.py"}], "after_files": [{"content": null, "path": "napari/_app_model/actions/_view_actions.py"}]} | 1,267 | 660 |
gh_patches_debug_10596 | rasdani/github-patches | git_diff | xonsh__xonsh-1630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Autocomplete: don't display full path
Using `prompt_toolkit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot).
The proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/ptk/completer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Completer implementation to use with prompt_toolkit."""
3 import os
4 import builtins
5
6 from prompt_toolkit.layout.dimension import LayoutDimension
7 from prompt_toolkit.completion import Completer, Completion
8
9
10 class PromptToolkitCompleter(Completer):
11 """Simple prompt_toolkit Completer object.
12
13 It just redirects requests to normal Xonsh completer.
14 """
15
16 def __init__(self, completer, ctx):
17 """Takes instance of xonsh.completer.Completer and dict with context."""
18 self.completer = completer
19 self.ctx = ctx
20
21 def get_completions(self, document, complete_event):
22 """Returns a generator for list of completions."""
23
24 # Only generate completions when the user hits tab.
25 if complete_event.completion_requested:
26 if self.completer is None:
27 yield from []
28 else:
29 line = document.current_line.lstrip()
30 endidx = document.cursor_position_col
31 begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0
32 prefix = line[begidx:endidx]
33 line = builtins.aliases.expand_alias(line)
34 completions, l = self.completer.complete(prefix,
35 line,
36 begidx,
37 endidx,
38 self.ctx)
39 if len(completions) <= 1:
40 pass
41 elif len(os.path.commonprefix(completions)) <= len(prefix):
42 self.reserve_space()
43 for comp in completions:
44 yield Completion(comp, -l)
45
46 def reserve_space(self):
47 cli = builtins.__xonsh_shell__.shell.prompter.cli
48 window = cli.application.layout.children[0].content.children[1]
49
50 if window and window.render_info:
51 h = window.render_info.content_height
52 r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')
53 size = h + r
54
55 def comp_height(cli):
56 # If there is an autocompletion menu to be shown, make sure that o
57 # layout has at least a minimal height in order to display it.
58 if not cli.is_done:
59 return LayoutDimension(min=size)
60 else:
61 return LayoutDimension()
62 window._height = comp_height
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py
--- a/xonsh/ptk/completer.py
+++ b/xonsh/ptk/completer.py
@@ -40,7 +40,11 @@
pass
elif len(os.path.commonprefix(completions)) <= len(prefix):
self.reserve_space()
+ prefix, _, compprefix = prefix.rpartition('.')
for comp in completions:
+ if comp.rsplit('.', 1)[0] in prefix:
+ comp = comp.rsplit('.', 1)[-1]
+ l = len(compprefix) if compprefix in comp else 0
yield Completion(comp, -l)
def reserve_space(self):
| {"golden_diff": "diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py\n--- a/xonsh/ptk/completer.py\n+++ b/xonsh/ptk/completer.py\n@@ -40,7 +40,11 @@\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n+ prefix, _, compprefix = prefix.rpartition('.')\n for comp in completions:\n+ if comp.rsplit('.', 1)[0] in prefix:\n+ comp = comp.rsplit('.', 1)[-1]\n+ l = len(compprefix) if compprefix in comp else 0\n yield Completion(comp, -l)\n \n def reserve_space(self):\n", "issue": "Autocomplete: don't display full path\nUsing `prompt_kit`, when completing a path such as `/var/log/<TAB>`, the autocompletion menu shows fully prefixed entries. The problem is that when the path is very deep, the autocomplete panel starts to give unreadable results (see attached screenshot).\n\nThe proposed solution is to only display the `os.path.basename` of the autocompleted path, suffixed by `/` if it is a directory.\n\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.layout.dimension import LayoutDimension\nfrom prompt_toolkit.completion import Completer, Completion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx):\n \"\"\"Takes instance of xonsh.completer.Completer and dict with context.\"\"\"\n self.completer = completer\n self.ctx = ctx\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n\n # Only generate completions when the user hits tab.\n if complete_event.completion_requested:\n if self.completer is None:\n yield from []\n else:\n line = document.current_line.lstrip()\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0\n prefix = line[begidx:endidx]\n line = builtins.aliases.expand_alias(line)\n completions, l = self.completer.complete(prefix,\n line,\n begidx,\n endidx,\n self.ctx)\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n for comp in completions:\n yield Completion(comp, -l)\n\n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n window = cli.application.layout.children[0].content.children[1]\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n size = h + r\n\n def comp_height(cli):\n # If there is an autocompletion menu to be shown, make sure that o\n # layout has at least a minimal height in order to display it.\n if not cli.is_done:\n return LayoutDimension(min=size)\n else:\n return LayoutDimension()\n window._height = comp_height\n", "path": "xonsh/ptk/completer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.layout.dimension import LayoutDimension\nfrom prompt_toolkit.completion import Completer, Completion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx):\n \"\"\"Takes instance of xonsh.completer.Completer and dict with context.\"\"\"\n self.completer = completer\n self.ctx = 
ctx\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n\n # Only generate completions when the user hits tab.\n if complete_event.completion_requested:\n if self.completer is None:\n yield from []\n else:\n line = document.current_line.lstrip()\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(' ') + 1 if line[:endidx].rfind(' ') >= 0 else 0\n prefix = line[begidx:endidx]\n line = builtins.aliases.expand_alias(line)\n completions, l = self.completer.complete(prefix,\n line,\n begidx,\n endidx,\n self.ctx)\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n prefix, _, compprefix = prefix.rpartition('.')\n for comp in completions:\n if comp.rsplit('.', 1)[0] in prefix:\n comp = comp.rsplit('.', 1)[-1]\n l = len(compprefix) if compprefix in comp else 0\n yield Completion(comp, -l)\n\n def reserve_space(self):\n cli = builtins.__xonsh_shell__.shell.prompter.cli\n window = cli.application.layout.children[0].content.children[1]\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh_env__.get('COMPLETIONS_MENU_ROWS')\n size = h + r\n\n def comp_height(cli):\n # If there is an autocompletion menu to be shown, make sure that o\n # layout has at least a minimal height in order to display it.\n if not cli.is_done:\n return LayoutDimension(min=size)\n else:\n return LayoutDimension()\n window._height = comp_height\n", "path": "xonsh/ptk/completer.py"}]} | 1,055 | 170 |
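The accepted fix for the xonsh record trims dotted completions so that only the final component is displayed and only the already-typed fragment is replaced. A standalone sketch of that trimming logic, outside prompt_toolkit, is shown below; the helper name and sample data are illustrative only.

```python
def trim_completions(prefix, completions):
    """Mirror the patch: show only the last dotted component of each completion."""
    _head, _, comp_prefix = prefix.rpartition('.')
    trimmed = []
    for comp in completions:
        if comp.rsplit('.', 1)[0] in prefix:
            comp = comp.rsplit('.', 1)[-1]        # keep only the tail, e.g. 'path'
        overlap = len(comp_prefix) if comp_prefix in comp else 0
        trimmed.append((comp, -overlap))          # (text, start offset), like Completion(comp, -l)
    return trimmed


print(trim_completions('os.pa', ['os.path', 'os.pardir']))
# [('path', -2), ('pardir', -2)]
```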
gh_patches_debug_33734 | rasdani/github-patches | git_diff | 3cn-ecn__nantralPlatform-484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with the links to the authors of suggestions
When someone submits a suggestion from the site, the link to see who made it does not work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/apps/home/forms.py`
Content:
```
1 from django import forms
2
3 class SuggestionForm(forms.Form):
4 title = forms.CharField(max_length=50, required=True)
5 description = forms.CharField(widget=forms.Textarea)
6
```
Path: `server/apps/utils/github.py`
Content:
```
1 import requests
2 from django.conf import settings
3
4
5 def create_issue(title: str, body: str):
6 issue = {
7 'title': title,
8 'body': body
9 }
10 resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',
11 json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
12 if resp.status_code != 201:
13 raise Exception(f'Error while posting issue to Github: {resp.reason}')
14 return resp.json()['number']
15
16
17 def close_issue(number: int):
18 """Function to close an issue in the repo."""
19 update = {'state': 'closed'}
20 requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',
21 json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
22
```
Path: `server/apps/home/views.py`
Content:
```
1 from datetime import *
2 from typing import List
3 from django.contrib.sites.shortcuts import get_current_site
4 from django.db.models.query import QuerySet
5 from django.shortcuts import render, redirect
6 from django.views.generic import TemplateView, FormView
7 from django.contrib import messages
8 from django.contrib.auth.mixins import LoginRequiredMixin
9
10 from apps.event.models import BaseEvent
11 from apps.post.models import Post
12 from apps.utils.github import create_issue
13
14 from .forms import SuggestionForm
15
16
17 class HomeView(LoginRequiredMixin, TemplateView):
18 template_name = 'home/home.html'
19
20 def get_context_data(self, **kwargs):
21 # Call the base implementation first to get a context
22 context = super().get_context_data(**kwargs)
23 posts: List[Post] = Post.objects.filter(
24 publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')
25 context['posts'] = [
26 post for post in posts if post.can_view(self.request.user)]
27 return context
28
29
30 class SuggestionView(LoginRequiredMixin, FormView):
31 template_name = 'home/suggestions.html'
32 form_class = SuggestionForm
33
34 def form_valid(self, form):
35 create_issue(
36 title=form.cleaned_data['title'],
37 body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>"
38 )
39 messages.success(
40 self.request, 'Votre suggestion a été enregistrée merci')
41 return redirect('home:home')
42
43
44 def handler404(request, *args, **argv):
45 response = render(request, '404.html', context={}, status=404)
46 return response
47
48
49 def handler500(request, *args, **argv):
50 response = render(request, '500.html', context={},
51 status=500)
52 return response
53
54
55 def event_sort(events, request):
56 tri = {}
57 jours = ["Lundi", "Mardi", "Mercredi",
58 "Jeudi", "Vendredi", "Samedi", "Dimanche"]
59 mois = ["Janvier", "Février", "Mars", "Avril", "Mai", "Juin",
60 "Juillet", "Août", "Septembre", "Octobre", "Novembre", "Décembre"]
61 for event in events:
62 if event.date.date() == date.today():
63 if "Aujourd'hui" in tri:
64 tri["Aujourd'hui"].append(
65 (event, event.is_participating(request.user)))
66 else:
67 tri["Aujourd'hui"] = list()
68 tri["Aujourd'hui"].append(
69 (event, event.is_participating(request.user)))
70 elif event.date.date() == (date.today()+timedelta(days=1)):
71 if "Demain" in tri:
72 tri["Demain"].append(
73 (event, event.is_participating(request.user)))
74 else:
75 tri["Demain"] = list()
76 tri["Demain"].append(
77 (event, event.is_participating(request.user)))
78 else:
79 written_date = jours[event.date.weekday(
80 )] + " " + str(event.date.day) + " " + mois[event.date.month-1]
81 if written_date in tri:
82 tri[written_date].append(
83 (event, event.is_participating(request.user)))
84 else:
85 tri[written_date] = list()
86 tri[written_date].append(
87 (event, event.is_participating(request.user)))
88 return tri
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py
--- a/server/apps/home/forms.py
+++ b/server/apps/home/forms.py
@@ -1,5 +1,14 @@
from django import forms
+TYPE_CHOICES = (
+ (1, ("Bug")),
+ (2, ("Suggestion"))
+)
+
+
class SuggestionForm(forms.Form):
title = forms.CharField(max_length=50, required=True)
description = forms.CharField(widget=forms.Textarea)
+ suggestionOrBug = forms.ChoiceField(label="Type",
+ choices=TYPE_CHOICES,
+ required=True)
diff --git a/server/apps/home/views.py b/server/apps/home/views.py
--- a/server/apps/home/views.py
+++ b/server/apps/home/views.py
@@ -34,7 +34,8 @@
def form_valid(self, form):
create_issue(
title=form.cleaned_data['title'],
- body=f"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour découvrir qui propose ça.</a>"
+ body=f"{form.cleaned_data['description']} <br/> [Clique pour découvrir qui propose ça.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})",
+ label=form.cleaned_data['suggestionOrBug']
)
messages.success(
self.request, 'Votre suggestion a été enregistrée merci')
diff --git a/server/apps/utils/github.py b/server/apps/utils/github.py
--- a/server/apps/utils/github.py
+++ b/server/apps/utils/github.py
@@ -2,15 +2,18 @@
from django.conf import settings
-def create_issue(title: str, body: str):
+def create_issue(title: str, body: str, label):
+ label = "bug" if int(label) == 1 else "suggestion"
issue = {
'title': title,
- 'body': body
+ 'body': body,
+ 'labels': [label]
}
resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',
json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))
if resp.status_code != 201:
- raise Exception(f'Error while posting issue to Github: {resp.reason}')
+ raise Exception(
+ f'Error while posting issue to Github: {resp.reason}')
return resp.json()['number']
| {"golden_diff": "diff --git a/server/apps/home/forms.py b/server/apps/home/forms.py\n--- a/server/apps/home/forms.py\n+++ b/server/apps/home/forms.py\n@@ -1,5 +1,14 @@\n from django import forms\n \n+TYPE_CHOICES = (\n+ (1, (\"Bug\")),\n+ (2, (\"Suggestion\"))\n+)\n+\n+\n class SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n+ suggestionOrBug = forms.ChoiceField(label=\"Type\",\n+ choices=TYPE_CHOICES,\n+ required=True)\ndiff --git a/server/apps/home/views.py b/server/apps/home/views.py\n--- a/server/apps/home/views.py\n+++ b/server/apps/home/views.py\n@@ -34,7 +34,8 @@\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n- body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n+ body=f\"{form.cleaned_data['description']} <br/> [Clique pour d\u00e9couvrir qui propose \u00e7a.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})\",\n+ label=form.cleaned_data['suggestionOrBug']\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\ndiff --git a/server/apps/utils/github.py b/server/apps/utils/github.py\n--- a/server/apps/utils/github.py\n+++ b/server/apps/utils/github.py\n@@ -2,15 +2,18 @@\n from django.conf import settings\n \n \n-def create_issue(title: str, body: str):\n+def create_issue(title: str, body: str, label):\n+ label = \"bug\" if int(label) == 1 else \"suggestion\"\n issue = {\n 'title': title,\n- 'body': body\n+ 'body': body,\n+ 'labels': [label]\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n- raise Exception(f'Error while posting issue to Github: {resp.reason}')\n+ raise Exception(\n+ f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n", "issue": "Probl\u00e8me avec les liens vers les auteur.ic.es des suggestions\nQuand quelqu'un fait une suggestion depuis le site, le lien pour avoir le nom de la personne ne fonctionne pas.\n", "before_files": [{"content": "from django import forms\n\nclass SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n", "path": "server/apps/home/forms.py"}, {"content": "import requests\nfrom django.conf import settings\n\n\ndef create_issue(title: str, body: str):\n issue = {\n 'title': title,\n 'body': body\n }\n resp = requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n raise Exception(f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n\n\ndef close_issue(number: int):\n \"\"\"Function to close an issue in the repo.\"\"\"\n update = {'state': 'closed'}\n requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',\n json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n", "path": "server/apps/utils/github.py"}, {"content": "from datetime import *\nfrom typing import List\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import 
TemplateView, FormView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom apps.event.models import BaseEvent\nfrom apps.post.models import Post\nfrom apps.utils.github import create_issue\n\nfrom .forms import SuggestionForm\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home/home.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super().get_context_data(**kwargs)\n posts: List[Post] = Post.objects.filter(\n publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')\n context['posts'] = [\n post for post in posts if post.can_view(self.request.user)]\n return context\n\n\nclass SuggestionView(LoginRequiredMixin, FormView):\n template_name = 'home/suggestions.html'\n form_class = SuggestionForm\n\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n body=f\"{form.cleaned_data['description']} <br/> <a href='http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url}'>Clique pour d\u00e9couvrir qui propose \u00e7a.</a>\"\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\n return redirect('home:home')\n\n\ndef handler404(request, *args, **argv):\n response = render(request, '404.html', context={}, status=404)\n return response\n\n\ndef handler500(request, *args, **argv):\n response = render(request, '500.html', context={},\n status=500)\n return response\n\n\ndef event_sort(events, request):\n tri = {}\n jours = [\"Lundi\", \"Mardi\", \"Mercredi\",\n \"Jeudi\", \"Vendredi\", \"Samedi\", \"Dimanche\"]\n mois = [\"Janvier\", \"F\u00e9vrier\", \"Mars\", \"Avril\", \"Mai\", \"Juin\",\n \"Juillet\", \"Ao\u00fbt\", \"Septembre\", \"Octobre\", \"Novembre\", \"D\u00e9cembre\"]\n for event in events:\n if event.date.date() == date.today():\n if \"Aujourd'hui\" in tri:\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Aujourd'hui\"] = list()\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n elif event.date.date() == (date.today()+timedelta(days=1)):\n if \"Demain\" in tri:\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Demain\"] = list()\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n written_date = jours[event.date.weekday(\n )] + \" \" + str(event.date.day) + \" \" + mois[event.date.month-1]\n if written_date in tri:\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n else:\n tri[written_date] = list()\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n return tri\n", "path": "server/apps/home/views.py"}], "after_files": [{"content": "from django import forms\n\nTYPE_CHOICES = (\n (1, (\"Bug\")),\n (2, (\"Suggestion\"))\n)\n\n\nclass SuggestionForm(forms.Form):\n title = forms.CharField(max_length=50, required=True)\n description = forms.CharField(widget=forms.Textarea)\n suggestionOrBug = forms.ChoiceField(label=\"Type\",\n choices=TYPE_CHOICES,\n required=True)\n", "path": "server/apps/home/forms.py"}, {"content": "import requests\nfrom django.conf import settings\n\n\ndef create_issue(title: str, body: str, label):\n label = \"bug\" if int(label) == 1 else \"suggestion\"\n issue = {\n 'title': title,\n 'body': body,\n 'labels': [label]\n }\n resp = 
requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues',\n json=issue, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n if resp.status_code != 201:\n raise Exception(\n f'Error while posting issue to Github: {resp.reason}')\n return resp.json()['number']\n\n\ndef close_issue(number: int):\n \"\"\"Function to close an issue in the repo.\"\"\"\n update = {'state': 'closed'}\n requests.post(f'https://api.github.com/repos/{settings.GITHUB_REPO}/issues/{number}',\n json=update, auth=(settings.GITHUB_USER, settings.GITHUB_TOKEN))\n", "path": "server/apps/utils/github.py"}, {"content": "from datetime import *\nfrom typing import List\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db.models.query import QuerySet\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import TemplateView, FormView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom apps.event.models import BaseEvent\nfrom apps.post.models import Post\nfrom apps.utils.github import create_issue\n\nfrom .forms import SuggestionForm\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home/home.html'\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super().get_context_data(**kwargs)\n posts: List[Post] = Post.objects.filter(\n publication_date__gte=date.today()-timedelta(days=10)).order_by('-publication_date')\n context['posts'] = [\n post for post in posts if post.can_view(self.request.user)]\n return context\n\n\nclass SuggestionView(LoginRequiredMixin, FormView):\n template_name = 'home/suggestions.html'\n form_class = SuggestionForm\n\n def form_valid(self, form):\n create_issue(\n title=form.cleaned_data['title'],\n body=f\"{form.cleaned_data['description']} <br/> [Clique pour d\u00e9couvrir qui propose \u00e7a.](http://{get_current_site(self.request)}{self.request.user.student.get_absolute_url()})\",\n label=form.cleaned_data['suggestionOrBug']\n )\n messages.success(\n self.request, 'Votre suggestion a \u00e9t\u00e9 enregistr\u00e9e merci')\n return redirect('home:home')\n\n\ndef handler404(request, *args, **argv):\n response = render(request, '404.html', context={}, status=404)\n return response\n\n\ndef handler500(request, *args, **argv):\n response = render(request, '500.html', context={},\n status=500)\n return response\n\n\ndef event_sort(events, request):\n tri = {}\n jours = [\"Lundi\", \"Mardi\", \"Mercredi\",\n \"Jeudi\", \"Vendredi\", \"Samedi\", \"Dimanche\"]\n mois = [\"Janvier\", \"F\u00e9vrier\", \"Mars\", \"Avril\", \"Mai\", \"Juin\",\n \"Juillet\", \"Ao\u00fbt\", \"Septembre\", \"Octobre\", \"Novembre\", \"D\u00e9cembre\"]\n for event in events:\n if event.date.date() == date.today():\n if \"Aujourd'hui\" in tri:\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Aujourd'hui\"] = list()\n tri[\"Aujourd'hui\"].append(\n (event, event.is_participating(request.user)))\n elif event.date.date() == (date.today()+timedelta(days=1)):\n if \"Demain\" in tri:\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n tri[\"Demain\"] = list()\n tri[\"Demain\"].append(\n (event, event.is_participating(request.user)))\n else:\n written_date = jours[event.date.weekday(\n )] + \" \" + str(event.date.day) + \" \" + mois[event.date.month-1]\n if written_date in tri:\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n else:\n 
tri[written_date] = list()\n tri[written_date].append(\n (event, event.is_participating(request.user)))\n return tri\n", "path": "server/apps/home/views.py"}]} | 1,534 | 538 |
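Besides repairing the author link (a Markdown link around `get_absolute_url()`), the patch above starts labelling each GitHub issue as a bug or a suggestion. A self-contained sketch of that labelled issue creation is shown below; the repository, user, and token values are placeholders, and only the `requests` call already present in the record is used.

```python
import requests

GITHUB_REPO = "owner/repo"    # placeholder
GITHUB_USER = "bot-user"      # placeholder
GITHUB_TOKEN = "secret"       # placeholder


def create_issue(title, body, label):
    """Open a GitHub issue labelled 'bug' (1) or 'suggestion' (anything else)."""
    label = "bug" if int(label) == 1 else "suggestion"
    payload = {"title": title, "body": body, "labels": [label]}
    resp = requests.post(
        f"https://api.github.com/repos/{GITHUB_REPO}/issues",
        json=payload,
        auth=(GITHUB_USER, GITHUB_TOKEN),
    )
    if resp.status_code != 201:   # the GitHub API answers 201 Created on success
        raise RuntimeError(f"Error while posting issue to GitHub: {resp.reason}")
    return resp.json()["number"]
```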
gh_patches_debug_20575 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-2720 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'ascii' codec can't decode byte 0xc3
```
2016-08-07 10:51:36,268 [RecycleItems] [INFO] [item_discarded] Discarded 1x Razz Berry (maximum 20).
2016-08-07 10:51:36,875 [TransferPokemon] [INFO] [future_pokemon_release] Releasing Charmander (CP 172/IV 0.18) based on rule: CP < 9 OR IV < 0.97
2016-08-07 10:51:37,437 [TransferPokemon] [INFO] [pokemon_release] Exchanged Charmander [CP 172] [IV 0.18] for candy.
2016-08-07 10:51:37,953 [MoveToFort] [INFO] [moving_to_lured_fort] Moving towards pokestop Estátua Moore - 0.05km (attraction of lure 0.05km)
2016-08-07 10:51:37,953 [MoveToFort] [INFO] [arrived_at_fort] Arrived at fort.
2016-08-07 10:51:39,679 [PokemonCatchWorker] [INFO] [pokemon_appeared] A wild Magnemite appeared! [CP 422] [Potential 0.71] [S/A/D 10/11/11]
2016-08-07 10:51:42,526 [PokemonCatchWorker] [INFO] [threw_pokeball] Used Pokeball, with chance 35.29 (127 left)
2016-08-07 10:51:43,728 [PokemonCatchWorker] [INFO] [pokemon_caught] Captured Magnemite! [CP 422] [Potential 0.71] [10/11/11] [+210 exp]
2016-08-07 10:51:44,706 [PokemonCatchWorker] [INFO] [pokemon_evolve_fail] Failed to evolve Magnemite!
2016-08-07 10:51:50,245 [ cli] [INFO]
2016-08-07 10:51:50,245 [ cli] [INFO] Ran for 0:01:58
2016-08-07 10:51:50,245 [ cli] [INFO] Total XP Earned: 1450 Average: 44164.12/h
2016-08-07 10:51:50,245 [ cli] [INFO] Travelled 0.00km
2016-08-07 10:51:50,246 [ cli] [INFO] Visited 8 stops
2016-08-07 10:51:50,246 [ cli] [INFO] Encountered 5 pokemon, 5 caught, 0 released, 0 evolved, 0 never seen before
2016-08-07 10:51:50,246 [ cli] [INFO] Threw 5 poke balls
2016-08-07 10:51:50,246 [ cli] [INFO] Earned 500 Stardust
2016-08-07 10:51:50,246 [ cli] [INFO]
2016-08-07 10:51:50,246 [ cli] [INFO] Highest CP Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78
2016-08-07 10:51:50,246 [ cli] [INFO] Most Perfect Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78
Traceback (most recent call last):
File "pokecli.py", line 499, in <module>
main()
File "pokecli.py", line 130, in main
raise e
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 3: ordinal not in range(128)
```
The error appears only with the MoveToFort task; with a path config no error appears.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pokemongo_bot/cell_workers/catch_lured_pokemon.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from pokemongo_bot.cell_workers.utils import fort_details
5 from pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker
6 from pokemongo_bot.base_task import BaseTask
7
8
9 class CatchLuredPokemon(BaseTask):
10 def work(self):
11 lured_pokemon = self.get_lured_pokemon()
12 if lured_pokemon:
13 self.catch_pokemon(lured_pokemon)
14
15 def get_lured_pokemon(self):
16 forts = self.bot.get_forts(order_by_distance=True)
17
18 if len(forts) == 0:
19 return False
20
21 fort = forts[0]
22 details = fort_details(self.bot, fort_id=fort['id'],
23 latitude=fort['latitude'],
24 longitude=fort['longitude'])
25 fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')
26
27 encounter_id = fort.get('lure_info', {}).get('encounter_id', None)
28
29 if encounter_id:
30 result = {
31 'encounter_id': encounter_id,
32 'fort_id': fort['id'],
33 'fort_name': fort_name,
34 'latitude': fort['latitude'],
35 'longitude': fort['longitude']
36 }
37
38 self.emit_event(
39 'lured_pokemon_found',
40 formatted='Lured pokemon at fort {fort_name} ({fort_id})',
41 data=result
42 )
43 return result
44
45 return False
46
47 def catch_pokemon(self, pokemon):
48 worker = PokemonCatchWorker(pokemon, self.bot)
49 return_value = worker.work()
50
51 return return_value
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pokemongo_bot/cell_workers/catch_lured_pokemon.py b/pokemongo_bot/cell_workers/catch_lured_pokemon.py
--- a/pokemongo_bot/cell_workers/catch_lured_pokemon.py
+++ b/pokemongo_bot/cell_workers/catch_lured_pokemon.py
@@ -22,7 +22,7 @@
details = fort_details(self.bot, fort_id=fort['id'],
latitude=fort['latitude'],
longitude=fort['longitude'])
- fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')
+ fort_name = details.get('name', 'Unknown')
encounter_id = fort.get('lure_info', {}).get('encounter_id', None)
@@ -30,7 +30,7 @@
result = {
'encounter_id': encounter_id,
'fort_id': fort['id'],
- 'fort_name': fort_name,
+ 'fort_name': u"{}".format(fort_name),
'latitude': fort['latitude'],
'longitude': fort['longitude']
}
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/catch_lured_pokemon.py b/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n--- a/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n+++ b/pokemongo_bot/cell_workers/catch_lured_pokemon.py\n@@ -22,7 +22,7 @@\n details = fort_details(self.bot, fort_id=fort['id'],\n latitude=fort['latitude'],\n longitude=fort['longitude'])\n- fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')\n+ fort_name = details.get('name', 'Unknown')\n \n encounter_id = fort.get('lure_info', {}).get('encounter_id', None)\n \n@@ -30,7 +30,7 @@\n result = {\n 'encounter_id': encounter_id,\n 'fort_id': fort['id'],\n- 'fort_name': fort_name,\n+ 'fort_name': u\"{}\".format(fort_name),\n 'latitude': fort['latitude'],\n 'longitude': fort['longitude']\n }\n", "issue": "'ascii' codec can't decode byte 0xc3\n```\n2016-08-07 10:51:36,268 [RecycleItems] [INFO] [item_discarded] Discarded 1x Razz Berry (maximum 20).\n2016-08-07 10:51:36,875 [TransferPokemon] [INFO] [future_pokemon_release] Releasing Charmander (CP 172/IV 0.18) based on rule: CP < 9 OR IV < 0.97\n2016-08-07 10:51:37,437 [TransferPokemon] [INFO] [pokemon_release] Exchanged Charmander [CP 172] [IV 0.18] for candy.\n2016-08-07 10:51:37,953 [MoveToFort] [INFO] [moving_to_lured_fort] Moving towards pokestop Est\u00e1tua Moore - 0.05km (attraction of lure 0.05km)\n2016-08-07 10:51:37,953 [MoveToFort] [INFO] [arrived_at_fort] Arrived at fort.\n2016-08-07 10:51:39,679 [PokemonCatchWorker] [INFO] [pokemon_appeared] A wild Magnemite appeared! [CP 422] [Potential 0.71] [S/A/D 10/11/11]\n2016-08-07 10:51:42,526 [PokemonCatchWorker] [INFO] [threw_pokeball] Used Pokeball, with chance 35.29 (127 left)\n2016-08-07 10:51:43,728 [PokemonCatchWorker] [INFO] [pokemon_caught] Captured Magnemite! 
[CP 422] [Potential 0.71] [10/11/11] [+210 exp]\n2016-08-07 10:51:44,706 [PokemonCatchWorker] [INFO] [pokemon_evolve_fail] Failed to evolve Magnemite!\n2016-08-07 10:51:50,245 [ cli] [INFO] \n2016-08-07 10:51:50,245 [ cli] [INFO] Ran for 0:01:58\n2016-08-07 10:51:50,245 [ cli] [INFO] Total XP Earned: 1450 Average: 44164.12/h\n2016-08-07 10:51:50,245 [ cli] [INFO] Travelled 0.00km\n2016-08-07 10:51:50,246 [ cli] [INFO] Visited 8 stops\n2016-08-07 10:51:50,246 [ cli] [INFO] Encountered 5 pokemon, 5 caught, 0 released, 0 evolved, 0 never seen before\n2016-08-07 10:51:50,246 [ cli] [INFO] Threw 5 poke balls\n2016-08-07 10:51:50,246 [ cli] [INFO] Earned 500 Stardust\n2016-08-07 10:51:50,246 [ cli] [INFO] \n2016-08-07 10:51:50,246 [ cli] [INFO] Highest CP Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 \n2016-08-07 10:51:50,246 [ cli] [INFO] Most Perfect Pokemon: Machop [CP: 645] [IV: 9/15/11] Potential: 0.78 \nTraceback (most recent call last):\n File \"pokecli.py\", line 499, in <module>\n main()\n File \"pokecli.py\", line 130, in main\n raise e\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 3: ordinal not in range(128)\n```\n\nerror appears only with movetofort\nwith path config no error appears \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom pokemongo_bot.cell_workers.utils import fort_details\nfrom pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass CatchLuredPokemon(BaseTask):\n def work(self):\n lured_pokemon = self.get_lured_pokemon()\n if lured_pokemon:\n self.catch_pokemon(lured_pokemon)\n\n def get_lured_pokemon(self):\n forts = self.bot.get_forts(order_by_distance=True)\n\n if len(forts) == 0:\n return False\n\n fort = forts[0]\n details = fort_details(self.bot, fort_id=fort['id'],\n latitude=fort['latitude'],\n longitude=fort['longitude'])\n fort_name = details.get('name', 'Unknown').encode('utf8', 'replace')\n\n encounter_id = fort.get('lure_info', {}).get('encounter_id', None)\n\n if encounter_id:\n result = {\n 'encounter_id': encounter_id,\n 'fort_id': fort['id'],\n 'fort_name': fort_name,\n 'latitude': fort['latitude'],\n 'longitude': fort['longitude']\n }\n\n self.emit_event(\n 'lured_pokemon_found',\n formatted='Lured pokemon at fort {fort_name} ({fort_id})',\n data=result\n )\n return result\n\n return False\n\n def catch_pokemon(self, pokemon):\n worker = PokemonCatchWorker(pokemon, self.bot)\n return_value = worker.work()\n\n return return_value\n", "path": "pokemongo_bot/cell_workers/catch_lured_pokemon.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom pokemongo_bot.cell_workers.utils import fort_details\nfrom pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass CatchLuredPokemon(BaseTask):\n def work(self):\n lured_pokemon = self.get_lured_pokemon()\n if lured_pokemon:\n self.catch_pokemon(lured_pokemon)\n\n def get_lured_pokemon(self):\n forts = self.bot.get_forts(order_by_distance=True)\n\n if len(forts) == 0:\n return False\n\n fort = forts[0]\n details = fort_details(self.bot, fort_id=fort['id'],\n latitude=fort['latitude'],\n longitude=fort['longitude'])\n fort_name = details.get('name', 'Unknown')\n\n encounter_id = fort.get('lure_info', {}).get('encounter_id', None)\n\n if encounter_id:\n result = {\n 'encounter_id': encounter_id,\n 'fort_id': fort['id'],\n 
'fort_name': u\"{}\".format(fort_name),\n 'latitude': fort['latitude'],\n 'longitude': fort['longitude']\n }\n\n self.emit_event(\n 'lured_pokemon_found',\n formatted='Lured pokemon at fort {fort_name} ({fort_id})',\n data=result\n )\n return result\n\n return False\n\n def catch_pokemon(self, pokemon):\n worker = PokemonCatchWorker(pokemon, self.bot)\n return_value = worker.work()\n\n return return_value\n", "path": "pokemongo_bot/cell_workers/catch_lured_pokemon.py"}]} | 1,853 | 244 |
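The crash in this record comes from mixing byte strings and Unicode: `.encode('utf8')` turns 'Estátua' into bytes whose first accented byte is `0xc3`, and interpolating those bytes into a Unicode message makes Python 2 attempt an implicit ASCII decode. The short demonstration below contrasts the buggy and patched values; it runs on Python 3 (where the bytes path merely prints an unhelpful `b'...'` repr) and the Python 2 behaviour is described in the comments.

```python
# -*- coding: utf-8 -*-
fort_name_bytes = u"Estátua Moore".encode("utf8")  # what the buggy code produced
fort_name_text = u"Estátua Moore"                  # what the patched code keeps

# Under Python 2, formatting the bytes into a Unicode string triggers an implicit
# ASCII decode and raises UnicodeDecodeError on byte 0xc3 (the first byte of 'á').
# Under Python 3 it silently yields "b'Est\xc3\xa1tua Moore'" instead of the name.
print(u"Moving towards pokestop {}".format(fort_name_text))   # correct
print(u"Moving towards pokestop {}".format(fort_name_bytes))  # wrong on Py3, crash on Py2
```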
gh_patches_debug_17126 | rasdani/github-patches | git_diff | rucio__rucio-5505 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation message in Paramiko
Motivation
----------
Paramiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm.
https://github.com/paramiko/paramiko/pull/2039
Modification
------------
The paramiko team is aware of the problem. They opened a PR to fix it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/common/extra.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2021 CERN
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Benedikt Ziemons <[email protected]>, 2021
18
19 import importlib
20
21
22 def import_extras(module_list):
23 out = dict()
24 for mod in module_list:
25 out[mod] = None
26 try:
27 out[mod] = importlib.import_module(mod)
28 except ImportError:
29 pass
30 return out
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py
--- a/lib/rucio/common/extra.py
+++ b/lib/rucio/common/extra.py
@@ -17,6 +17,7 @@
# - Benedikt Ziemons <[email protected]>, 2021
import importlib
+import warnings
def import_extras(module_list):
@@ -24,7 +25,12 @@
for mod in module_list:
out[mod] = None
try:
- out[mod] = importlib.import_module(mod)
+ with warnings.catch_warnings():
+ # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed
+ warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko')
+ # TODO: deprecated python 2 and 3.6 too ...
+ warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko')
+ out[mod] = importlib.import_module(mod)
except ImportError:
pass
return out
| {"golden_diff": "diff --git a/lib/rucio/common/extra.py b/lib/rucio/common/extra.py\n--- a/lib/rucio/common/extra.py\n+++ b/lib/rucio/common/extra.py\n@@ -17,6 +17,7 @@\n # - Benedikt Ziemons <[email protected]>, 2021\n \n import importlib\n+import warnings\n \n \n def import_extras(module_list):\n@@ -24,7 +25,12 @@\n for mod in module_list:\n out[mod] = None\n try:\n- out[mod] = importlib.import_module(mod)\n+ with warnings.catch_warnings():\n+ # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed\n+ warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko')\n+ # TODO: deprecated python 2 and 3.6 too ...\n+ warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko')\n+ out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "issue": "Deprecation message in Paramiko\nMotivation\r\n----------\r\nParamiko outputs a deprecation message due to an outdated security algorithm. We do not depend on that algorithm.\r\nhttps://github.com/paramiko/paramiko/pull/2039\r\n\r\nModification\r\n------------\r\nThe paramiko team is aware of the problem. They opened a PR to fix it.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport importlib\n\n\ndef import_extras(module_list):\n out = dict()\n for mod in module_list:\n out[mod] = None\n try:\n out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "path": "lib/rucio/common/extra.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Benedikt Ziemons <[email protected]>, 2021\n\nimport importlib\nimport warnings\n\n\ndef import_extras(module_list):\n out = dict()\n for mod in module_list:\n out[mod] = None\n try:\n with warnings.catch_warnings():\n # TODO: remove when https://github.com/paramiko/paramiko/issues/2038 is fixed\n warnings.filterwarnings('ignore', 'Blowfish has been deprecated', module='paramiko')\n # TODO: deprecated python 2 and 3.6 too ...\n warnings.filterwarnings('ignore', 'Python .* is no longer supported', module='paramiko')\n out[mod] = importlib.import_module(mod)\n except ImportError:\n pass\n return out\n", "path": "lib/rucio/common/extra.py"}]} | 617 | 255 |
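The rucio patch suppresses the warning at import time instead of pinning or patching paramiko. A self-contained sketch of the same pattern — importing optional modules inside a `warnings.catch_warnings()` block that filters the known-noisy messages — is shown below; the message patterns are the ones named in the diff and the module list is arbitrary.

```python
import importlib
import warnings


def import_extras(module_list):
    """Import optional modules while muting paramiko's known deprecation noise."""
    out = {}
    for mod in module_list:
        out[mod] = None
        try:
            with warnings.catch_warnings():
                # Same filters as the rucio patch; harmless if paramiko is absent.
                warnings.filterwarnings("ignore", "Blowfish has been deprecated", module="paramiko")
                warnings.filterwarnings("ignore", "Python .* is no longer supported", module="paramiko")
                out[mod] = importlib.import_module(mod)
        except ImportError:
            pass
    return out


extras = import_extras(["paramiko", "definitely_not_installed"])
print({name: module is not None for name, module in extras.items()})
```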
gh_patches_debug_38999 | rasdani/github-patches | git_diff | SeldonIO__MLServer-445 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fastapi 0.70 failing tests
When using the latest version of `fastapi`, some tests fail (e.g. `test_infer`); if we use an older version (e.g. `0.68.2`), the tests pass.
We need to take a deeper look just in case it uncovers a more subtle issue with asyncio (judging by the error we get).
Here is the error for reference:
```
_____________________________________ test_infer[sum-model-None] _____________________________________
rest_client = <starlette.testclient.TestClient object at 0x7fa456254bd0>
inference_request = InferenceRequest(id=None, parameters=None, inputs=[RequestInput(name='input-0', shape=[1, 3], datatype='INT32', parameters=None, data=TensorData(__root__=[1, 2, 3]))], outputs=None)
model_name = 'sum-model', model_version = None
@pytest.mark.parametrize(
"model_name,model_version", [("sum-model", "v1.2.3"), ("sum-model", None)]
)
def test_infer(rest_client, inference_request, model_name, model_version):
endpoint = f"/v2/models/{model_name}/infer"
if model_version is not None:
endpoint = f"/v2/models/{model_name}/versions/{model_version}/infer"
> response = rest_client.post(endpoint, json=inference_request.dict())
tests/rest/test_endpoints.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:590: in post
return self.request('POST', url, data=data, json=json, **kwargs)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:484: in request
json=json,
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:542: in request
resp = self.send(prep, **send_kwargs)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:655: in send
r = adapter.send(request, **kwargs)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:266: in send
raise exc
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:263: in send
portal.call(self.app, scope, receive, send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/anyio/from_thread.py:230: in call
return cast(T_Retval, self.start_task_soon(func, *args).result())
../../miniconda3/envs/test_nest_io/lib/python3.7/concurrent/futures/_base.py:435: in result
return self.__get_result()
../../miniconda3/envs/test_nest_io/lib/python3.7/concurrent/futures/_base.py:384: in __get_result
raise self._exception
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/anyio/from_thread.py:177: in _call_func
retval = await retval
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/applications.py:208: in __call__
await super().__call__(scope, receive, send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/applications.py:112: in __call__
await self.middleware_stack(scope, receive, send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/middleware/errors.py:181: in __call__
raise exc
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/middleware/errors.py:159: in __call__
await self.app(scope, receive, _send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/exceptions.py:82: in __call__
raise exc
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/exceptions.py:71: in __call__
await self.app(scope, receive, sender)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:656: in __call__
await route.handle(scope, receive, send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:259: in handle
await self.app(scope, receive, send)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:61: in app
response = await func(request)
mlserver/rest/app.py:24: in custom_route_handler
return await original_route_handler(request)
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/routing.py:227: in app
dependant=dependant, values=values, is_coroutine=is_coroutine
../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/routing.py:159: in run_endpoint_function
return await dependant.call(**values)
mlserver/rest/endpoints.py:52: in infer
return await self._data_plane.infer(payload, model_name, model_version)
mlserver/handlers/dataplane.py:60: in infer
prediction = await model.predict(payload)
mlserver/batching/hooks.py:43: in _inner
return await batcher.predict(payload)
mlserver/batching/adaptive.py:36: in predict
return await self._wait_response(internal_id)
mlserver/batching/adaptive.py:55: in _wait_response
response = await async_response
mlserver/batching/adaptive.py:85: in _batcher
async for batched in self._batch_requests():
mlserver/batching/adaptive.py:112: in _batch_requests
read_op, timeout=timeout
../../miniconda3/envs/test_nest_io/lib/python3.7/asyncio/tasks.py:442: in wait_for
return fut.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <Queue at 0x7fa456575490 maxsize=10 tasks=1>
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
"""
while self.empty():
getter = self._loop.create_future()
self._getters.append(getter)
try:
> await getter
E RuntimeError: Task <Task pending coro=<Queue.get() running at /home/sa/miniconda3/envs/test_nest_io/lib/python3.7/asyncio/queues.py:159> cb=[_release_waiter(<Future pendi...4566d8550>()]>)() at /home/sa/miniconda3/envs/test_nest_io/lib/python3.7/asyncio/tasks.py:392]> got Future <Future pending> attached to a different loop
../../miniconda3/envs/test_nest_io/lib/python3.7/asyncio/queues.py:159: RuntimeError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/batching/adaptive.py`
Content:
```
1 import time
2 import asyncio
3
4 from asyncio import Future, Queue, wait_for, Task
5 from functools import partial
6 from typing import AsyncIterator, Awaitable, Dict, Tuple
7
8 from ..model import MLModel
9 from ..types import (
10 InferenceRequest,
11 InferenceResponse,
12 )
13 from ..utils import generate_uuid
14
15 from .requests import BatchedRequests
16
17
18 class AdaptiveBatcher:
19 def __init__(self, model: MLModel):
20 self._model = model
21
22 self._max_batch_size = model.settings.max_batch_size
23 self._max_batch_time = model.settings.max_batch_time
24
25 # Save predict function before it gets decorated
26 self._predict_fn = model.predict
27 self._requests: Queue[Tuple[str, InferenceRequest]] = Queue(
28 maxsize=self._max_batch_size
29 )
30 self._async_responses: Dict[str, Future[InferenceResponse]] = {}
31 self._batching_task = None
32
33 async def predict(self, req: InferenceRequest) -> InferenceResponse:
34 internal_id, _ = await self._queue_request(req)
35 self._start_batcher_if_needed()
36 return await self._wait_response(internal_id)
37
38 async def _queue_request(
39 self,
40 req: InferenceRequest,
41 ) -> Tuple[str, Awaitable[InferenceResponse]]:
42 internal_id = generate_uuid()
43 await self._requests.put((internal_id, req))
44
45 loop = asyncio.get_running_loop()
46 async_response = loop.create_future()
47 self._async_responses[internal_id] = async_response
48
49 return internal_id, async_response
50
51 async def _wait_response(self, internal_id: str) -> InferenceResponse:
52 async_response = self._async_responses[internal_id]
53
54 try:
55 response = await async_response
56 return response
57 finally:
58 del self._async_responses[internal_id]
59
60 def _start_batcher_if_needed(self):
61 if self._batching_task is not None:
62 if not self._batching_task.done():
63 # If task hasn't finished yet, let it keep running
64 return
65
66 self._batching_task = asyncio.create_task(self._batcher())
67 self._batching_task.add_done_callback(self._batching_task_callback)
68
69 def _batching_task_callback(self, batching_task: Task):
70 err = batching_task.exception()
71 if err:
72 # Clear queue
73 self._clear_queue(err)
74
75 def _clear_queue(self, err: BaseException):
76 # Cancel all pending async responses
77 for async_response in self._async_responses.values():
78 async_response.set_exception(err)
79
80 # Empty queue
81 for _ in range(self._requests.qsize()):
82 self._requests.get_nowait()
83
84 async def _batcher(self):
85 async for batched in self._batch_requests():
86 # We run prediction as a Task to ensure it gets scheduled
87 # immediately.
88 # That way, we can process multiple batches concurrently.
89 predict_task = asyncio.create_task(self._predict_fn(batched.merged_request))
90 predict_task.add_done_callback(partial(self._predict_callback, batched))
91
92 def _predict_callback(self, batched: BatchedRequests, predict_task: Task):
93 try:
94 batched_response = predict_task.result()
95 responses = batched.split_response(batched_response)
96 for internal_id, response in responses.items():
97 self._async_responses[internal_id].set_result(response)
98 except Exception as err:
99 for internal_id in batched.inference_requests.keys():
100 self._async_responses[internal_id].set_exception(err)
101
102 async def _batch_requests(self) -> AsyncIterator[BatchedRequests]:
103 while not self._requests.empty():
104 to_batch: Dict[str, InferenceRequest] = {}
105 start = time.time()
106 timeout = self._max_batch_time
107
108 try:
109 while len(to_batch) < self._max_batch_size:
110 read_op = self._requests.get()
111 internal_id, inference_request = await wait_for(
112 read_op, timeout=timeout
113 )
114 to_batch[internal_id] = inference_request
115
116 # Update remaining timeout
117 current = time.time()
118 timeout = timeout - (current - start)
119 except asyncio.TimeoutError:
120 # NOTE: Hit timeout, continue
121 pass
122
123 yield BatchedRequests(to_batch)
124
```
Path: `setup.py`
Content:
```
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 setup(
29 name=PKG_NAME,
30 version=_load_version(),
31 url="https://github.com/SeldonIO/MLServer.git",
32 author="Seldon Technologies Ltd.",
33 author_email="[email protected]",
34 description="ML server",
35 packages=find_packages(exclude=["tests", "tests.*"]),
36 install_requires=[
37 "click",
38 # We pin version of fastapi
39 # check https://github.com/SeldonIO/MLServer/issues/340
40 "fastapi==0.68.2",
41 "grpcio",
42 "importlib-metadata;python_version<'3.8'",
43 "numpy",
44 "pandas",
45 "protobuf",
46 "uvicorn",
47 "starlette_exporter",
48 "py-grpc-prometheus",
49 ],
50 extras_require={"all": ["orjson"]},
51 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
52 long_description=_load_description(),
53 long_description_content_type="text/markdown",
54 license="Apache 2.0",
55 )
56
```
Path: `mlserver/rest/errors.py`
Content:
```
1 from typing import Optional
2 from fastapi import Request, status
3 from pydantic import BaseModel
4
5 from .responses import Response
6 from ..errors import MLServerError
7
8
9 class APIErrorResponse(BaseModel):
10 error: Optional[str] = None
11
12
13 def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:
14 err_res = APIErrorResponse(error=str(exc))
15 return Response(status_code=status.HTTP_400_BAD_REQUEST, content=err_res.dict())
16
17
18 _EXCEPTION_HANDLERS = {MLServerError: handle_mlserver_error}
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/batching/adaptive.py b/mlserver/batching/adaptive.py
--- a/mlserver/batching/adaptive.py
+++ b/mlserver/batching/adaptive.py
@@ -1,9 +1,13 @@
+# Required to deal with annotations including `Queue`
+# https://mypy.readthedocs.io/en/latest/common_issues.html#issues-with-code-at-runtime
+from __future__ import annotations
+
import time
import asyncio
from asyncio import Future, Queue, wait_for, Task
from functools import partial
-from typing import AsyncIterator, Awaitable, Dict, Tuple
+from typing import AsyncIterator, Awaitable, Dict, Optional, Tuple
from ..model import MLModel
from ..types import (
@@ -24,9 +28,7 @@
# Save predict function before it gets decorated
self._predict_fn = model.predict
- self._requests: Queue[Tuple[str, InferenceRequest]] = Queue(
- maxsize=self._max_batch_size
- )
+ self.__requests: Optional[Queue[Tuple[str, InferenceRequest]]] = None
self._async_responses: Dict[str, Future[InferenceResponse]] = {}
self._batching_task = None
@@ -35,11 +37,21 @@
self._start_batcher_if_needed()
return await self._wait_response(internal_id)
+ @property
+ def _requests(self) -> Queue[Tuple[str, InferenceRequest]]:
+ # NOTE: We need to create Queue within the async request path (and not
+ # during __init__!!) to ensure that it shares the same AsyncIO loop.
+ if self.__requests is None:
+ self.__requests = Queue(maxsize=self._max_batch_size)
+
+ return self.__requests
+
async def _queue_request(
self,
req: InferenceRequest,
) -> Tuple[str, Awaitable[InferenceResponse]]:
internal_id = generate_uuid()
+
await self._requests.put((internal_id, req))
loop = asyncio.get_running_loop()
diff --git a/mlserver/rest/errors.py b/mlserver/rest/errors.py
--- a/mlserver/rest/errors.py
+++ b/mlserver/rest/errors.py
@@ -10,7 +10,7 @@
error: Optional[str] = None
-def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:
+async def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:
err_res = APIErrorResponse(error=str(exc))
return Response(status_code=status.HTTP_400_BAD_REQUEST, content=err_res.dict())
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,9 +35,7 @@
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
"click",
- # We pin version of fastapi
- # check https://github.com/SeldonIO/MLServer/issues/340
- "fastapi==0.68.2",
+ "fastapi",
"grpcio",
"importlib-metadata;python_version<'3.8'",
"numpy",
| {"golden_diff": "diff --git a/mlserver/batching/adaptive.py b/mlserver/batching/adaptive.py\n--- a/mlserver/batching/adaptive.py\n+++ b/mlserver/batching/adaptive.py\n@@ -1,9 +1,13 @@\n+# Required to deal with annotations including `Queue`\n+# https://mypy.readthedocs.io/en/latest/common_issues.html#issues-with-code-at-runtime\n+from __future__ import annotations\n+\n import time\n import asyncio\n \n from asyncio import Future, Queue, wait_for, Task\n from functools import partial\n-from typing import AsyncIterator, Awaitable, Dict, Tuple\n+from typing import AsyncIterator, Awaitable, Dict, Optional, Tuple\n \n from ..model import MLModel\n from ..types import (\n@@ -24,9 +28,7 @@\n \n # Save predict function before it gets decorated\n self._predict_fn = model.predict\n- self._requests: Queue[Tuple[str, InferenceRequest]] = Queue(\n- maxsize=self._max_batch_size\n- )\n+ self.__requests: Optional[Queue[Tuple[str, InferenceRequest]]] = None\n self._async_responses: Dict[str, Future[InferenceResponse]] = {}\n self._batching_task = None\n \n@@ -35,11 +37,21 @@\n self._start_batcher_if_needed()\n return await self._wait_response(internal_id)\n \n+ @property\n+ def _requests(self) -> Queue[Tuple[str, InferenceRequest]]:\n+ # NOTE: We need to create Queue within the async request path (and not\n+ # during __init__!!) to ensure that it shares the same AsyncIO loop.\n+ if self.__requests is None:\n+ self.__requests = Queue(maxsize=self._max_batch_size)\n+\n+ return self.__requests\n+\n async def _queue_request(\n self,\n req: InferenceRequest,\n ) -> Tuple[str, Awaitable[InferenceResponse]]:\n internal_id = generate_uuid()\n+\n await self._requests.put((internal_id, req))\n \n loop = asyncio.get_running_loop()\ndiff --git a/mlserver/rest/errors.py b/mlserver/rest/errors.py\n--- a/mlserver/rest/errors.py\n+++ b/mlserver/rest/errors.py\n@@ -10,7 +10,7 @@\n error: Optional[str] = None\n \n \n-def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:\n+async def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:\n err_res = APIErrorResponse(error=str(exc))\n return Response(status_code=status.HTTP_400_BAD_REQUEST, content=err_res.dict())\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,9 +35,7 @@\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n- # We pin version of fastapi\n- # check https://github.com/SeldonIO/MLServer/issues/340\n- \"fastapi==0.68.2\",\n+ \"fastapi\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n", "issue": "fastapi 0.70 failing tests\nWhen using `fastapi` latest version, some tests fail e.g. `test_infer`, if we use an older version of it (e.g. 
`0.68.2`) tests pass.\r\n\r\nWe need to take a deeper look just in case it uncovers a more subtle issue with asyncio (judging by the error we get).\r\n\r\nhere is the error for reference\r\n\r\n```\r\nest_infer[sum-model-None] _____________________________________\r\n\r\nrest_client = <starlette.testclient.TestClient object at 0x7fa456254bd0>\r\ninference_request = InferenceRequest(id=None, parameters=None, inputs=[RequestInput(name='input-0', shape=[1, 3], datatype='INT32', parameters=None, data=TensorData(__root__=[1, 2, 3]))], outputs=None)\r\nmodel_name = 'sum-model', model_version = None\r\n\r\n @pytest.mark.parametrize(\r\n \"model_name,model_version\", [(\"sum-model\", \"v1.2.3\"), (\"sum-model\", None)]\r\n )\r\n def test_infer(rest_client, inference_request, model_name, model_version):\r\n endpoint = f\"/v2/models/{model_name}/infer\"\r\n if model_version is not None:\r\n endpoint = f\"/v2/models/{model_name}/versions/{model_version}/infer\"\r\n> response = rest_client.post(endpoint, json=inference_request.dict())\r\n\r\ntests/rest/test_endpoints.py:57: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:590: in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:484: in request\r\n json=json,\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:542: in request\r\n resp = self.send(prep, **send_kwargs)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/requests/sessions.py:655: in send\r\n r = adapter.send(request, **kwargs)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:266: in send\r\n raise exc\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/testclient.py:263: in send\r\n portal.call(self.app, scope, receive, send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/anyio/from_thread.py:230: in call\r\n return cast(T_Retval, self.start_task_soon(func, *args).result())\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/concurrent/futures/_base.py:435: in result\r\n return self.__get_result()\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/concurrent/futures/_base.py:384: in __get_result\r\n raise self._exception\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/anyio/from_thread.py:177: in _call_func\r\n retval = await retval\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/applications.py:208: in __call__\r\n await super().__call__(scope, receive, send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/applications.py:112: in __call__\r\n await self.middleware_stack(scope, receive, send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/middleware/errors.py:181: in __call__\r\n raise exc\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/middleware/errors.py:159: in __call__\r\n await self.app(scope, receive, _send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/exceptions.py:82: in __call__\r\n raise exc\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/exceptions.py:71: in __call__\r\n await self.app(scope, receive, sender)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:656: in 
__call__\r\n await route.handle(scope, receive, send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:259: in handle\r\n await self.app(scope, receive, send)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/starlette/routing.py:61: in app\r\n response = await func(request)\r\nmlserver/rest/app.py:24: in custom_route_handler\r\n return await original_route_handler(request)\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/routing.py:227: in app\r\n dependant=dependant, values=values, is_coroutine=is_coroutine\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/site-packages/fastapi/routing.py:159: in run_endpoint_function\r\n return await dependant.call(**values)\r\nmlserver/rest/endpoints.py:52: in infer\r\n return await self._data_plane.infer(payload, model_name, model_version)\r\nmlserver/handlers/dataplane.py:60: in infer\r\n prediction = await model.predict(payload)\r\nmlserver/batching/hooks.py:43: in _inner\r\n return await batcher.predict(payload)\r\nmlserver/batching/adaptive.py:36: in predict\r\n return await self._wait_response(internal_id)\r\nmlserver/batching/adaptive.py:55: in _wait_response\r\n response = await async_response\r\nmlserver/batching/adaptive.py:85: in _batcher\r\n async for batched in self._batch_requests():\r\nmlserver/batching/adaptive.py:112: in _batch_requests\r\n read_op, timeout=timeout\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/asyncio/tasks.py:442: in wait_for\r\n return fut.result()\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\nself = <Queue at 0x7fa456575490 maxsize=10 tasks=1>\r\n\r\n async def get(self):\r\n \"\"\"Remove and return an item from the queue.\r\n \r\n If queue is empty, wait until an item is available.\r\n \"\"\"\r\n while self.empty():\r\n getter = self._loop.create_future()\r\n self._getters.append(getter)\r\n try:\r\n> await getter\r\nE RuntimeError: Task <Task pending coro=<Queue.get() running at /home/sa/miniconda3/envs/test_nest_io/lib/python3.7/asyncio/queues.py:159> cb=[_release_waiter(<Future pendi...4566d8550>()]>)() at /home/sa/miniconda3/envs/test_nest_io/lib/python3.7/asyncio/tasks.py:392]> got Future <Future pending> attached to a different loop\r\n\r\n../../miniconda3/envs/test_nest_io/lib/python3.7/asyncio/queues.py:159: RuntimeError\r\n```\r\n\n", "before_files": [{"content": "import time\nimport asyncio\n\nfrom asyncio import Future, Queue, wait_for, Task\nfrom functools import partial\nfrom typing import AsyncIterator, Awaitable, Dict, Tuple\n\nfrom ..model import MLModel\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n)\nfrom ..utils import generate_uuid\n\nfrom .requests import BatchedRequests\n\n\nclass AdaptiveBatcher:\n def __init__(self, model: MLModel):\n self._model = model\n\n self._max_batch_size = model.settings.max_batch_size\n self._max_batch_time = model.settings.max_batch_time\n\n # Save predict function before it gets decorated\n self._predict_fn = model.predict\n self._requests: Queue[Tuple[str, InferenceRequest]] = Queue(\n maxsize=self._max_batch_size\n )\n self._async_responses: Dict[str, Future[InferenceResponse]] = {}\n self._batching_task = None\n\n async def predict(self, req: InferenceRequest) -> InferenceResponse:\n internal_id, _ = await self._queue_request(req)\n self._start_batcher_if_needed()\n return await self._wait_response(internal_id)\n\n async def _queue_request(\n self,\n req: InferenceRequest,\n ) -> Tuple[str, 
Awaitable[InferenceResponse]]:\n internal_id = generate_uuid()\n await self._requests.put((internal_id, req))\n\n loop = asyncio.get_running_loop()\n async_response = loop.create_future()\n self._async_responses[internal_id] = async_response\n\n return internal_id, async_response\n\n async def _wait_response(self, internal_id: str) -> InferenceResponse:\n async_response = self._async_responses[internal_id]\n\n try:\n response = await async_response\n return response\n finally:\n del self._async_responses[internal_id]\n\n def _start_batcher_if_needed(self):\n if self._batching_task is not None:\n if not self._batching_task.done():\n # If task hasn't finished yet, let it keep running\n return\n\n self._batching_task = asyncio.create_task(self._batcher())\n self._batching_task.add_done_callback(self._batching_task_callback)\n\n def _batching_task_callback(self, batching_task: Task):\n err = batching_task.exception()\n if err:\n # Clear queue\n self._clear_queue(err)\n\n def _clear_queue(self, err: BaseException):\n # Cancel all pending async responses\n for async_response in self._async_responses.values():\n async_response.set_exception(err)\n\n # Empty queue\n for _ in range(self._requests.qsize()):\n self._requests.get_nowait()\n\n async def _batcher(self):\n async for batched in self._batch_requests():\n # We run prediction as a Task to ensure it gets scheduled\n # immediately.\n # That way, we can process multiple batches concurrently.\n predict_task = asyncio.create_task(self._predict_fn(batched.merged_request))\n predict_task.add_done_callback(partial(self._predict_callback, batched))\n\n def _predict_callback(self, batched: BatchedRequests, predict_task: Task):\n try:\n batched_response = predict_task.result()\n responses = batched.split_response(batched_response)\n for internal_id, response in responses.items():\n self._async_responses[internal_id].set_result(response)\n except Exception as err:\n for internal_id in batched.inference_requests.keys():\n self._async_responses[internal_id].set_exception(err)\n\n async def _batch_requests(self) -> AsyncIterator[BatchedRequests]:\n while not self._requests.empty():\n to_batch: Dict[str, InferenceRequest] = {}\n start = time.time()\n timeout = self._max_batch_time\n\n try:\n while len(to_batch) < self._max_batch_size:\n read_op = self._requests.get()\n internal_id, inference_request = await wait_for(\n read_op, timeout=timeout\n )\n to_batch[internal_id] = inference_request\n\n # Update remaining timeout\n current = time.time()\n timeout = timeout - (current - start)\n except asyncio.TimeoutError:\n # NOTE: Hit timeout, continue\n pass\n\n yield BatchedRequests(to_batch)\n", "path": "mlserver/batching/adaptive.py"}, {"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n 
packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}, {"content": "from typing import Optional\nfrom fastapi import Request, status\nfrom pydantic import BaseModel\n\nfrom .responses import Response\nfrom ..errors import MLServerError\n\n\nclass APIErrorResponse(BaseModel):\n error: Optional[str] = None\n\n\ndef handle_mlserver_error(request: Request, exc: MLServerError) -> Response:\n err_res = APIErrorResponse(error=str(exc))\n return Response(status_code=status.HTTP_400_BAD_REQUEST, content=err_res.dict())\n\n\n_EXCEPTION_HANDLERS = {MLServerError: handle_mlserver_error}\n", "path": "mlserver/rest/errors.py"}], "after_files": [{"content": "# Required to deal with annotations including `Queue`\n# https://mypy.readthedocs.io/en/latest/common_issues.html#issues-with-code-at-runtime\nfrom __future__ import annotations\n\nimport time\nimport asyncio\n\nfrom asyncio import Future, Queue, wait_for, Task\nfrom functools import partial\nfrom typing import AsyncIterator, Awaitable, Dict, Optional, Tuple\n\nfrom ..model import MLModel\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n)\nfrom ..utils import generate_uuid\n\nfrom .requests import BatchedRequests\n\n\nclass AdaptiveBatcher:\n def __init__(self, model: MLModel):\n self._model = model\n\n self._max_batch_size = model.settings.max_batch_size\n self._max_batch_time = model.settings.max_batch_time\n\n # Save predict function before it gets decorated\n self._predict_fn = model.predict\n self.__requests: Optional[Queue[Tuple[str, InferenceRequest]]] = None\n self._async_responses: Dict[str, Future[InferenceResponse]] = {}\n self._batching_task = None\n\n async def predict(self, req: InferenceRequest) -> InferenceResponse:\n internal_id, _ = await self._queue_request(req)\n self._start_batcher_if_needed()\n return await self._wait_response(internal_id)\n\n @property\n def _requests(self) -> Queue[Tuple[str, InferenceRequest]]:\n # NOTE: We need to create Queue within the async request path (and not\n # during __init__!!) 
to ensure that it shares the same AsyncIO loop.\n if self.__requests is None:\n self.__requests = Queue(maxsize=self._max_batch_size)\n\n return self.__requests\n\n async def _queue_request(\n self,\n req: InferenceRequest,\n ) -> Tuple[str, Awaitable[InferenceResponse]]:\n internal_id = generate_uuid()\n\n await self._requests.put((internal_id, req))\n\n loop = asyncio.get_running_loop()\n async_response = loop.create_future()\n self._async_responses[internal_id] = async_response\n\n return internal_id, async_response\n\n async def _wait_response(self, internal_id: str) -> InferenceResponse:\n async_response = self._async_responses[internal_id]\n\n try:\n response = await async_response\n return response\n finally:\n del self._async_responses[internal_id]\n\n def _start_batcher_if_needed(self):\n if self._batching_task is not None:\n if not self._batching_task.done():\n # If task hasn't finished yet, let it keep running\n return\n\n self._batching_task = asyncio.create_task(self._batcher())\n self._batching_task.add_done_callback(self._batching_task_callback)\n\n def _batching_task_callback(self, batching_task: Task):\n err = batching_task.exception()\n if err:\n # Clear queue\n self._clear_queue(err)\n\n def _clear_queue(self, err: BaseException):\n # Cancel all pending async responses\n for async_response in self._async_responses.values():\n async_response.set_exception(err)\n\n # Empty queue\n for _ in range(self._requests.qsize()):\n self._requests.get_nowait()\n\n async def _batcher(self):\n async for batched in self._batch_requests():\n # We run prediction as a Task to ensure it gets scheduled\n # immediately.\n # That way, we can process multiple batches concurrently.\n predict_task = asyncio.create_task(self._predict_fn(batched.merged_request))\n predict_task.add_done_callback(partial(self._predict_callback, batched))\n\n def _predict_callback(self, batched: BatchedRequests, predict_task: Task):\n try:\n batched_response = predict_task.result()\n responses = batched.split_response(batched_response)\n for internal_id, response in responses.items():\n self._async_responses[internal_id].set_result(response)\n except Exception as err:\n for internal_id in batched.inference_requests.keys():\n self._async_responses[internal_id].set_exception(err)\n\n async def _batch_requests(self) -> AsyncIterator[BatchedRequests]:\n while not self._requests.empty():\n to_batch: Dict[str, InferenceRequest] = {}\n start = time.time()\n timeout = self._max_batch_time\n\n try:\n while len(to_batch) < self._max_batch_size:\n read_op = self._requests.get()\n internal_id, inference_request = await wait_for(\n read_op, timeout=timeout\n )\n to_batch[internal_id] = inference_request\n\n # Update remaining timeout\n current = time.time()\n timeout = timeout - (current - start)\n except asyncio.TimeoutError:\n # NOTE: Hit timeout, continue\n pass\n\n yield BatchedRequests(to_batch)\n", "path": "mlserver/batching/adaptive.py"}, {"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with 
open(readme_path) as fp:\n return fp.read()\n\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n \"fastapi\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}, {"content": "from typing import Optional\nfrom fastapi import Request, status\nfrom pydantic import BaseModel\n\nfrom .responses import Response\nfrom ..errors import MLServerError\n\n\nclass APIErrorResponse(BaseModel):\n error: Optional[str] = None\n\n\nasync def handle_mlserver_error(request: Request, exc: MLServerError) -> Response:\n err_res = APIErrorResponse(error=str(exc))\n return Response(status_code=status.HTTP_400_BAD_REQUEST, content=err_res.dict())\n\n\n_EXCEPTION_HANDLERS = {MLServerError: handle_mlserver_error}\n", "path": "mlserver/rest/errors.py"}]} | 3,872 | 697 |
gh_patches_debug_16935 | rasdani/github-patches | git_diff | napari__napari-6546 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keybinding to set a new label doesn't work consistently
### 🐛 Bug Report
After creating a labels layer, I observed that the `m` keybinding does not behave consistently.
### 💡 Steps to Reproduce
Create a labels layer.
- If the layer is selected, the `m` keybinding will either do nothing or select another layer wiht a name that starts with the letter `m`;
- If the paintbrush, fill bucket or polygon tools are selected, the `m` keybinding doesn't do anything;
- If the paintbrush has been used in the canvas, the `m` keybinding correctly adds 1 to the label control and a new label is selected and can be used to paint.
### 💡 Expected Behavior
The `m` keybinding should increase the label number in all those situations.
### 🌎 Environment
napari: 0.5.0a2.dev486+g4d60a7ce
Platform: Linux-6.1.64-1-MANJARO-x86_64-with-glibc2.38
System: Manjaro Linux
Python: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0]
Qt: 5.15.2
PyQt5: 5.15.10
NumPy: 1.26.2
SciPy: 1.11.4
Dask: 2023.11.0
VisPy: 0.14.1
magicgui: 0.8.0
superqt: 0.6.1
in-n-out: 0.1.9
app-model: 0.2.2
npe2: 0.7.3
OpenGL:
- GL version: 4.6 (Compatibility Profile) Mesa 23.1.9-manjaro1.1
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1920x1080, scale 1.0
- screen 2: resolution 1920x1080, scale 1.0
Settings path:
- /home/melissa/.config/napari/napari-dev_f5bfbd9c5d96bcb503f816d91f8db95d3b6d554f/settings.yaml
Plugins:
- napari: 0.5.0a2.dev486+g4d60a7ce (77 contributions)
- napari-console: 0.0.9 (0 contributions)
- napari-svg: 0.1.10 (2 contributions)
### 💡 Additional Context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/layers/labels/_labels_key_bindings.py`
Content:
```
1 import numpy as np
2 from app_model.types import KeyCode, KeyMod
3
4 from napari.layers.labels._labels_constants import Mode
5 from napari.layers.labels.labels import Labels
6 from napari.layers.utils.layer_utils import (
7 register_layer_action,
8 register_layer_attr_action,
9 )
10 from napari.utils.notifications import show_info
11 from napari.utils.translations import trans
12
13 MIN_BRUSH_SIZE = 1
14
15
16 def register_label_action(description: str, repeatable: bool = False):
17 return register_layer_action(Labels, description, repeatable)
18
19
20 def register_label_mode_action(description):
21 return register_layer_attr_action(Labels, description, 'mode')
22
23
24 @register_label_mode_action(trans._('Transform'))
25 def activate_labels_transform_mode(layer: Labels):
26 layer.mode = Mode.TRANSFORM
27
28
29 @register_label_mode_action(trans._('Pan/zoom'))
30 def activate_labels_pan_zoom_mode(layer: Labels):
31 layer.mode = Mode.PAN_ZOOM
32
33
34 @register_label_mode_action(trans._("Activate the paint brush"))
35 def activate_labels_paint_mode(layer: Labels):
36 layer.mode = Mode.PAINT
37
38
39 @register_label_mode_action(trans._("Activate the polygon tool"))
40 def activate_labels_polygon_mode(layer: Labels):
41 layer.mode = Mode.POLYGON
42
43
44 @register_label_mode_action(trans._("Activate the fill bucket"))
45 def activate_labels_fill_mode(layer: Labels):
46 layer.mode = Mode.FILL
47
48
49 @register_label_mode_action(trans._('Pick mode'))
50 def activate_labels_picker_mode(layer: Labels):
51 """Activate the label picker."""
52 layer.mode = Mode.PICK
53
54
55 @register_label_mode_action(trans._("Activate the label eraser"))
56 def activate_labels_erase_mode(layer: Labels):
57 layer.mode = Mode.ERASE
58
59
60 labels_fun_to_mode = [
61 (activate_labels_pan_zoom_mode, Mode.PAN_ZOOM),
62 (activate_labels_transform_mode, Mode.TRANSFORM),
63 (activate_labels_erase_mode, Mode.ERASE),
64 (activate_labels_paint_mode, Mode.PAINT),
65 (activate_labels_polygon_mode, Mode.POLYGON),
66 (activate_labels_fill_mode, Mode.FILL),
67 (activate_labels_picker_mode, Mode.PICK),
68 ]
69
70
71 @register_label_action(
72 trans._(
73 "Set the currently selected label to the largest used label plus one."
74 ),
75 )
76 def new_label(layer: Labels):
77 """Set the currently selected label to the largest used label plus one."""
78 if isinstance(layer.data, np.ndarray):
79 layer.selected_label = np.max(layer.data) + 1
80 else:
81 show_info(
82 "Calculating empty label on non-numpy array is not supported"
83 )
84
85
86 @register_label_action(
87 trans._("Swap between the selected label and the background label."),
88 )
89 def swap_selected_and_background_labels(layer: Labels):
90 """Swap between the selected label and the background label."""
91 layer.swap_selected_and_background_labels()
92
93
94 @register_label_action(
95 trans._("Decrease the currently selected label by one."),
96 )
97 def decrease_label_id(layer: Labels):
98 layer.selected_label -= 1
99
100
101 @register_label_action(
102 trans._("Increase the currently selected label by one."),
103 )
104 def increase_label_id(layer: Labels):
105 layer.selected_label += 1
106
107
108 @register_label_action(
109 trans._("Decrease the paint brush size by one."),
110 repeatable=True,
111 )
112 def decrease_brush_size(layer: Labels):
113 """Decrease the brush size"""
114 if (
115 layer.brush_size > MIN_BRUSH_SIZE
116 ): # here we should probably add a non-hard-coded
117 # reference to the limit values of brush size?
118 layer.brush_size -= 1
119
120
121 @register_label_action(
122 trans._("Increase the paint brush size by one."),
123 repeatable=True,
124 )
125 def increase_brush_size(layer: Labels):
126 """Increase the brush size"""
127 layer.brush_size += 1
128
129
130 @register_layer_attr_action(
131 Labels, trans._("Toggle preserve labels"), "preserve_labels"
132 )
133 def toggle_preserve_labels(layer: Labels):
134 layer.preserve_labels = not layer.preserve_labels
135
136
137 @Labels.bind_key(KeyMod.CtrlCmd | KeyCode.KeyZ, overwrite=True)
138 def undo(layer: Labels):
139 """Undo the last paint or fill action since the view slice has changed."""
140 layer.undo()
141
142
143 @Labels.bind_key(KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KeyZ, overwrite=True)
144 def redo(layer: Labels):
145 """Redo any previously undone actions."""
146 layer.redo()
147
148
149 @register_label_action(
150 trans._("Reset the current polygon"),
151 )
152 def reset_polygon(layer: Labels):
153 """Reset the drawing of the current polygon."""
154 layer._overlays["polygon"].points = []
155
156
157 @register_label_action(
158 trans._("Complete the current polygon"),
159 )
160 def complete_polygon(layer: Labels):
161 """Complete the drawing of the current polygon."""
162 # Because layer._overlays has type Overlay, mypy doesn't know that
163 # ._overlays["polygon"] has type LabelsPolygonOverlay, so type ignore for now
164 # TODO: Improve typing of layer._overlays to fix this
165 layer._overlays["polygon"].add_polygon_to_labels(layer)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/layers/labels/_labels_key_bindings.py b/napari/layers/labels/_labels_key_bindings.py
--- a/napari/layers/labels/_labels_key_bindings.py
+++ b/napari/layers/labels/_labels_key_bindings.py
@@ -76,10 +76,21 @@
def new_label(layer: Labels):
"""Set the currently selected label to the largest used label plus one."""
if isinstance(layer.data, np.ndarray):
- layer.selected_label = np.max(layer.data) + 1
+ new_selected_label = np.max(layer.data) + 1
+ if layer.selected_label == new_selected_label:
+ show_info(
+ trans._(
+ "Current selected label is not being used. You will need to use it first "
+ "to be able to set the current select label to the next one available",
+ )
+ )
+ else:
+ layer.selected_label = new_selected_label
else:
show_info(
- "Calculating empty label on non-numpy array is not supported"
+ trans._(
+ "Calculating empty label on non-numpy array is not supported"
+ )
)
| {"golden_diff": "diff --git a/napari/layers/labels/_labels_key_bindings.py b/napari/layers/labels/_labels_key_bindings.py\n--- a/napari/layers/labels/_labels_key_bindings.py\n+++ b/napari/layers/labels/_labels_key_bindings.py\n@@ -76,10 +76,21 @@\n def new_label(layer: Labels):\n \"\"\"Set the currently selected label to the largest used label plus one.\"\"\"\n if isinstance(layer.data, np.ndarray):\n- layer.selected_label = np.max(layer.data) + 1\n+ new_selected_label = np.max(layer.data) + 1\n+ if layer.selected_label == new_selected_label:\n+ show_info(\n+ trans._(\n+ \"Current selected label is not being used. You will need to use it first \"\n+ \"to be able to set the current select label to the next one available\",\n+ )\n+ )\n+ else:\n+ layer.selected_label = new_selected_label\n else:\n show_info(\n- \"Calculating empty label on non-numpy array is not supported\"\n+ trans._(\n+ \"Calculating empty label on non-numpy array is not supported\"\n+ )\n )\n", "issue": "Keybinding to set a new label doesn't work consistently\n### \ud83d\udc1b Bug Report\n\nAfter creating a labels layer, I observed that the `m` keybinding does not behave consistently.\r\n\r\n\n\n### \ud83d\udca1 Steps to Reproduce\n\nCreate a labels layer.\r\n\r\n- If the layer is selected, the `m` keybinding will either do nothing or select another layer wiht a name that starts with the letter `m`;\r\n- If the paintbrush, fill bucket or polygon tools are selected, the `m` keybinding doesn't do anything;\r\n- If the paintbrush has been used in the canvas, the `m` keybinding correctly adds 1 to the label control and a new label is selected and can be used to paint.\n\n### \ud83d\udca1 Expected Behavior\n\nThe `m` keybinding should increase the label number in all those situations.\n\n### \ud83c\udf0e Environment\n\nnapari: 0.5.0a2.dev486+g4d60a7ce\r\nPlatform: Linux-6.1.64-1-MANJARO-x86_64-with-glibc2.38\r\nSystem: Manjaro Linux\r\nPython: 3.11.6 | packaged by conda-forge | (main, Oct 3 2023, 10:40:35) [GCC 12.3.0]\r\nQt: 5.15.2\r\nPyQt5: 5.15.10\r\nNumPy: 1.26.2\r\nSciPy: 1.11.4\r\nDask: 2023.11.0\r\nVisPy: 0.14.1\r\nmagicgui: 0.8.0\r\nsuperqt: 0.6.1\r\nin-n-out: 0.1.9\r\napp-model: 0.2.2\r\nnpe2: 0.7.3\r\n\r\nOpenGL:\r\n - GL version: 4.6 (Compatibility Profile) Mesa 23.1.9-manjaro1.1\r\n - MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n - screen 1: resolution 1920x1080, scale 1.0\r\n - screen 2: resolution 1920x1080, scale 1.0\r\n\r\nSettings path:\r\n - /home/melissa/.config/napari/napari-dev_f5bfbd9c5d96bcb503f816d91f8db95d3b6d554f/settings.yaml\r\nPlugins:\r\n - napari: 0.5.0a2.dev486+g4d60a7ce (77 contributions)\r\n - napari-console: 0.0.9 (0 contributions)\r\n - napari-svg: 0.1.10 (2 contributions)\n\n### \ud83d\udca1 Additional Context\n\n_No response_\n", "before_files": [{"content": "import numpy as np\nfrom app_model.types import KeyCode, KeyMod\n\nfrom napari.layers.labels._labels_constants import Mode\nfrom napari.layers.labels.labels import Labels\nfrom napari.layers.utils.layer_utils import (\n register_layer_action,\n register_layer_attr_action,\n)\nfrom napari.utils.notifications import show_info\nfrom napari.utils.translations import trans\n\nMIN_BRUSH_SIZE = 1\n\n\ndef register_label_action(description: str, repeatable: bool = False):\n return register_layer_action(Labels, description, repeatable)\n\n\ndef register_label_mode_action(description):\n return register_layer_attr_action(Labels, description, 'mode')\n\n\n@register_label_mode_action(trans._('Transform'))\ndef activate_labels_transform_mode(layer: 
Labels):\n layer.mode = Mode.TRANSFORM\n\n\n@register_label_mode_action(trans._('Pan/zoom'))\ndef activate_labels_pan_zoom_mode(layer: Labels):\n layer.mode = Mode.PAN_ZOOM\n\n\n@register_label_mode_action(trans._(\"Activate the paint brush\"))\ndef activate_labels_paint_mode(layer: Labels):\n layer.mode = Mode.PAINT\n\n\n@register_label_mode_action(trans._(\"Activate the polygon tool\"))\ndef activate_labels_polygon_mode(layer: Labels):\n layer.mode = Mode.POLYGON\n\n\n@register_label_mode_action(trans._(\"Activate the fill bucket\"))\ndef activate_labels_fill_mode(layer: Labels):\n layer.mode = Mode.FILL\n\n\n@register_label_mode_action(trans._('Pick mode'))\ndef activate_labels_picker_mode(layer: Labels):\n \"\"\"Activate the label picker.\"\"\"\n layer.mode = Mode.PICK\n\n\n@register_label_mode_action(trans._(\"Activate the label eraser\"))\ndef activate_labels_erase_mode(layer: Labels):\n layer.mode = Mode.ERASE\n\n\nlabels_fun_to_mode = [\n (activate_labels_pan_zoom_mode, Mode.PAN_ZOOM),\n (activate_labels_transform_mode, Mode.TRANSFORM),\n (activate_labels_erase_mode, Mode.ERASE),\n (activate_labels_paint_mode, Mode.PAINT),\n (activate_labels_polygon_mode, Mode.POLYGON),\n (activate_labels_fill_mode, Mode.FILL),\n (activate_labels_picker_mode, Mode.PICK),\n]\n\n\n@register_label_action(\n trans._(\n \"Set the currently selected label to the largest used label plus one.\"\n ),\n)\ndef new_label(layer: Labels):\n \"\"\"Set the currently selected label to the largest used label plus one.\"\"\"\n if isinstance(layer.data, np.ndarray):\n layer.selected_label = np.max(layer.data) + 1\n else:\n show_info(\n \"Calculating empty label on non-numpy array is not supported\"\n )\n\n\n@register_label_action(\n trans._(\"Swap between the selected label and the background label.\"),\n)\ndef swap_selected_and_background_labels(layer: Labels):\n \"\"\"Swap between the selected label and the background label.\"\"\"\n layer.swap_selected_and_background_labels()\n\n\n@register_label_action(\n trans._(\"Decrease the currently selected label by one.\"),\n)\ndef decrease_label_id(layer: Labels):\n layer.selected_label -= 1\n\n\n@register_label_action(\n trans._(\"Increase the currently selected label by one.\"),\n)\ndef increase_label_id(layer: Labels):\n layer.selected_label += 1\n\n\n@register_label_action(\n trans._(\"Decrease the paint brush size by one.\"),\n repeatable=True,\n)\ndef decrease_brush_size(layer: Labels):\n \"\"\"Decrease the brush size\"\"\"\n if (\n layer.brush_size > MIN_BRUSH_SIZE\n ): # here we should probably add a non-hard-coded\n # reference to the limit values of brush size?\n layer.brush_size -= 1\n\n\n@register_label_action(\n trans._(\"Increase the paint brush size by one.\"),\n repeatable=True,\n)\ndef increase_brush_size(layer: Labels):\n \"\"\"Increase the brush size\"\"\"\n layer.brush_size += 1\n\n\n@register_layer_attr_action(\n Labels, trans._(\"Toggle preserve labels\"), \"preserve_labels\"\n)\ndef toggle_preserve_labels(layer: Labels):\n layer.preserve_labels = not layer.preserve_labels\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyCode.KeyZ, overwrite=True)\ndef undo(layer: Labels):\n \"\"\"Undo the last paint or fill action since the view slice has changed.\"\"\"\n layer.undo()\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KeyZ, overwrite=True)\ndef redo(layer: Labels):\n \"\"\"Redo any previously undone actions.\"\"\"\n layer.redo()\n\n\n@register_label_action(\n trans._(\"Reset the current polygon\"),\n)\ndef reset_polygon(layer: Labels):\n 
\"\"\"Reset the drawing of the current polygon.\"\"\"\n layer._overlays[\"polygon\"].points = []\n\n\n@register_label_action(\n trans._(\"Complete the current polygon\"),\n)\ndef complete_polygon(layer: Labels):\n \"\"\"Complete the drawing of the current polygon.\"\"\"\n # Because layer._overlays has type Overlay, mypy doesn't know that\n # ._overlays[\"polygon\"] has type LabelsPolygonOverlay, so type ignore for now\n # TODO: Improve typing of layer._overlays to fix this\n layer._overlays[\"polygon\"].add_polygon_to_labels(layer)\n", "path": "napari/layers/labels/_labels_key_bindings.py"}], "after_files": [{"content": "import numpy as np\nfrom app_model.types import KeyCode, KeyMod\n\nfrom napari.layers.labels._labels_constants import Mode\nfrom napari.layers.labels.labels import Labels\nfrom napari.layers.utils.layer_utils import (\n register_layer_action,\n register_layer_attr_action,\n)\nfrom napari.utils.notifications import show_info\nfrom napari.utils.translations import trans\n\nMIN_BRUSH_SIZE = 1\n\n\ndef register_label_action(description: str, repeatable: bool = False):\n return register_layer_action(Labels, description, repeatable)\n\n\ndef register_label_mode_action(description):\n return register_layer_attr_action(Labels, description, 'mode')\n\n\n@register_label_mode_action(trans._('Transform'))\ndef activate_labels_transform_mode(layer: Labels):\n layer.mode = Mode.TRANSFORM\n\n\n@register_label_mode_action(trans._('Pan/zoom'))\ndef activate_labels_pan_zoom_mode(layer: Labels):\n layer.mode = Mode.PAN_ZOOM\n\n\n@register_label_mode_action(trans._(\"Activate the paint brush\"))\ndef activate_labels_paint_mode(layer: Labels):\n layer.mode = Mode.PAINT\n\n\n@register_label_mode_action(trans._(\"Activate the polygon tool\"))\ndef activate_labels_polygon_mode(layer: Labels):\n layer.mode = Mode.POLYGON\n\n\n@register_label_mode_action(trans._(\"Activate the fill bucket\"))\ndef activate_labels_fill_mode(layer: Labels):\n layer.mode = Mode.FILL\n\n\n@register_label_mode_action(trans._('Pick mode'))\ndef activate_labels_picker_mode(layer: Labels):\n \"\"\"Activate the label picker.\"\"\"\n layer.mode = Mode.PICK\n\n\n@register_label_mode_action(trans._(\"Activate the label eraser\"))\ndef activate_labels_erase_mode(layer: Labels):\n layer.mode = Mode.ERASE\n\n\nlabels_fun_to_mode = [\n (activate_labels_pan_zoom_mode, Mode.PAN_ZOOM),\n (activate_labels_transform_mode, Mode.TRANSFORM),\n (activate_labels_erase_mode, Mode.ERASE),\n (activate_labels_paint_mode, Mode.PAINT),\n (activate_labels_polygon_mode, Mode.POLYGON),\n (activate_labels_fill_mode, Mode.FILL),\n (activate_labels_picker_mode, Mode.PICK),\n]\n\n\n@register_label_action(\n trans._(\n \"Set the currently selected label to the largest used label plus one.\"\n ),\n)\ndef new_label(layer: Labels):\n \"\"\"Set the currently selected label to the largest used label plus one.\"\"\"\n if isinstance(layer.data, np.ndarray):\n new_selected_label = np.max(layer.data) + 1\n if layer.selected_label == new_selected_label:\n show_info(\n trans._(\n \"Current selected label is not being used. 
You will need to use it first \"\n \"to be able to set the current select label to the next one available\",\n )\n )\n else:\n layer.selected_label = new_selected_label\n else:\n show_info(\n trans._(\n \"Calculating empty label on non-numpy array is not supported\"\n )\n )\n\n\n@register_label_action(\n trans._(\"Swap between the selected label and the background label.\"),\n)\ndef swap_selected_and_background_labels(layer: Labels):\n \"\"\"Swap between the selected label and the background label.\"\"\"\n layer.swap_selected_and_background_labels()\n\n\n@register_label_action(\n trans._(\"Decrease the currently selected label by one.\"),\n)\ndef decrease_label_id(layer: Labels):\n layer.selected_label -= 1\n\n\n@register_label_action(\n trans._(\"Increase the currently selected label by one.\"),\n)\ndef increase_label_id(layer: Labels):\n layer.selected_label += 1\n\n\n@register_label_action(\n trans._(\"Decrease the paint brush size by one.\"),\n repeatable=True,\n)\ndef decrease_brush_size(layer: Labels):\n \"\"\"Decrease the brush size\"\"\"\n if (\n layer.brush_size > MIN_BRUSH_SIZE\n ): # here we should probably add a non-hard-coded\n # reference to the limit values of brush size?\n layer.brush_size -= 1\n\n\n@register_label_action(\n trans._(\"Increase the paint brush size by one.\"),\n repeatable=True,\n)\ndef increase_brush_size(layer: Labels):\n \"\"\"Increase the brush size\"\"\"\n layer.brush_size += 1\n\n\n@register_layer_attr_action(\n Labels, trans._(\"Toggle preserve labels\"), \"preserve_labels\"\n)\ndef toggle_preserve_labels(layer: Labels):\n layer.preserve_labels = not layer.preserve_labels\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyCode.KeyZ, overwrite=True)\ndef undo(layer: Labels):\n \"\"\"Undo the last paint or fill action since the view slice has changed.\"\"\"\n layer.undo()\n\n\[email protected]_key(KeyMod.CtrlCmd | KeyMod.Shift | KeyCode.KeyZ, overwrite=True)\ndef redo(layer: Labels):\n \"\"\"Redo any previously undone actions.\"\"\"\n layer.redo()\n\n\n@register_label_action(\n trans._(\"Reset the current polygon\"),\n)\ndef reset_polygon(layer: Labels):\n \"\"\"Reset the drawing of the current polygon.\"\"\"\n layer._overlays[\"polygon\"].points = []\n\n\n@register_label_action(\n trans._(\"Complete the current polygon\"),\n)\ndef complete_polygon(layer: Labels):\n \"\"\"Complete the drawing of the current polygon.\"\"\"\n # Because layer._overlays has type Overlay, mypy doesn't know that\n # ._overlays[\"polygon\"] has type LabelsPolygonOverlay, so type ignore for now\n # TODO: Improve typing of layer._overlays to fix this\n layer._overlays[\"polygon\"].add_polygon_to_labels(layer)\n", "path": "napari/layers/labels/_labels_key_bindings.py"}]} | 2,401 | 262 |
gh_patches_debug_3406 | rasdani/github-patches | git_diff | vllm-project__vllm-153 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Got OOM when using TP
I got this when running OPT-13B on two A100s, with FP16. The error didn't occur when I decreased `gpu_memory_utilization` to 0.9. It seems our memory profiling is somehow inaccurate when using TP.
Find the command and error msg below:
```
$ python benchmarks/benchmark_latency.py --model facebook/opt-13b -tp 2
Namespace(model='facebook/opt-13b', tensor_parallel_size=2, input_len=32, output_len=128, batch_size=8, n=1, use_beam_search=False, num_iters=3, profile=False)
2023-06-09 09:17:47,945 INFO worker.py:1625 -- Started a local Ray instance.
INFO 06-09 09:17:48 llm_server.py:60] Initializing an LLM server with config: model='facebook/opt-13b', dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=2, seed=0)
INFO 06-09 09:18:11 llm_server.py:129] # GPU blocks: 4150, # CPU blocks: 655
Traceback (most recent call last):
File "/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py", line 80, in <module>
main(args)
File "/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py", line 17, in main
llm = LLM(
File "/home/gcpuser/workspace/cacheflow/cacheflow/entrypoints/llm.py", line 55, in __init__
self.llm_server = LLMServer.from_server_args(server_args)
File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 146, in from_server_args
server = cls(*server_configs, distributed_init_method, devices,
File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 103, in __init__
self._init_cache()
File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 135, in _init_cache
self._run_workers("init_cache_engine", cache_config=self.cache_config)
File "/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py", line 312, in _run_workers
all_outputs = ray.get(all_outputs)
File "/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/client_mode_hook.py", line 105, in wrapper
return func(*args, **kwargs)
File "/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/worker.py", line 2521, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(OutOfMemoryError): ray::Worker.init_cache_engine() (pid=11306, ip=10.128.0.79, repr=<cacheflow.worker.worker.Worker object at 0x7f4aa97596d0>)
File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/worker.py", line 127, in init_cache_engine
self.cache_engine = CacheEngine(
File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py", line 40, in __init__
self.gpu_cache = self.allocate_gpu_cache()
File "/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py", line 76, in allocate_gpu_cache
value_blocks = torch.empty(
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 326.00 MiB (GPU 0; 39.41 GiB total capacity; 37.08 GiB already allocated; 188.56 MiB free; 37.25 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
```
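As a rough sketch of the workaround described above (not a fix), the lower utilization can also be passed programmatically, assuming the `vllm.LLM` entrypoint forwards the same engine arguments:

```python
from vllm import LLM

# Workaround sketch: claim a smaller fraction of GPU memory so the
# KV-cache allocation leaves headroom when tensor parallelism is used.
llm = LLM(
    model="facebook/opt-13b",
    tensor_parallel_size=2,
    gpu_memory_utilization=0.9,
)
```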
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/engine/arg_utils.py`
Content:
```
1 import argparse
2 import dataclasses
3 from dataclasses import dataclass
4 from typing import Optional, Tuple
5
6 from vllm.config import (CacheConfig, ModelConfig, ParallelConfig,
7 SchedulerConfig)
8
9
10 @dataclass
11 class EngineArgs:
12 """Arguments for vLLM engine."""
13 model: str
14 download_dir: Optional[str] = None
15 use_np_weights: bool = False
16 use_dummy_weights: bool = False
17 dtype: str = "auto"
18 seed: int = 0
19 worker_use_ray: bool = False
20 pipeline_parallel_size: int = 1
21 tensor_parallel_size: int = 1
22 block_size: int = 16
23 swap_space: int = 4 # GiB
24 gpu_memory_utilization: float = 0.95
25 max_num_batched_tokens: int = 2560
26 max_num_seqs: int = 256
27 disable_log_stats: bool = False
28
29 def __post_init__(self):
30 self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)
31
32 @staticmethod
33 def add_cli_args(
34 parser: argparse.ArgumentParser,
35 ) -> argparse.ArgumentParser:
36 """Shared CLI arguments for vLLM engine."""
37 # Model arguments
38 parser.add_argument('--model', type=str, default='facebook/opt-125m',
39 help='name or path of the huggingface model to use')
40 parser.add_argument('--download-dir', type=str,
41 default=EngineArgs.download_dir,
42 help='directory to download and load the weights, '
43 'default to the default cache dir of '
44 'huggingface')
45 parser.add_argument('--use-np-weights', action='store_true',
46 help='save a numpy copy of model weights for '
47 'faster loading. This can increase the disk '
48 'usage by up to 2x.')
49 parser.add_argument('--use-dummy-weights', action='store_true',
50 help='use dummy values for model weights')
51 # TODO(woosuk): Support FP32.
52 parser.add_argument('--dtype', type=str, default=EngineArgs.dtype,
53 choices=['auto', 'half', 'bfloat16', 'float'],
54 help='data type for model weights and activations. '
55 'The "auto" option will use FP16 precision '
56 'for FP32 and FP16 models, and BF16 precision '
57 'for BF16 models.')
58 # Parallel arguments
59 parser.add_argument('--worker-use-ray', action='store_true',
60 help='use Ray for distributed serving, will be '
61 'automatically set when using more than 1 GPU')
62 parser.add_argument('--pipeline-parallel-size', '-pp', type=int,
63 default=EngineArgs.pipeline_parallel_size,
64 help='number of pipeline stages')
65 parser.add_argument('--tensor-parallel-size', '-tp', type=int,
66 default=EngineArgs.tensor_parallel_size,
67 help='number of tensor parallel replicas')
68 # KV cache arguments
69 parser.add_argument('--block-size', type=int,
70 default=EngineArgs.block_size,
71 choices=[8, 16, 32],
72 help='token block size')
73 # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
74 parser.add_argument('--seed', type=int, default=EngineArgs.seed,
75 help='random seed')
76 parser.add_argument('--swap-space', type=int,
77 default=EngineArgs.swap_space,
78 help='CPU swap space size (GiB) per GPU')
79 parser.add_argument('--gpu-memory-utilization', type=float,
80 default=EngineArgs.gpu_memory_utilization,
81 help='the percentage of GPU memory to be used for'
82 'the model executor')
83 parser.add_argument('--max-num-batched-tokens', type=int,
84 default=EngineArgs.max_num_batched_tokens,
85 help='maximum number of batched tokens per '
86 'iteration')
87 parser.add_argument('--max-num-seqs', type=int,
88 default=EngineArgs.max_num_seqs,
89 help='maximum number of sequences per iteration')
90 parser.add_argument('--disable-log-stats', action='store_true',
91 help='disable logging statistics')
92 return parser
93
94 @classmethod
95 def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
96 # Get the list of attributes of this dataclass.
97 attrs = [attr.name for attr in dataclasses.fields(cls)]
98 # Set the attributes from the parsed arguments.
99 engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
100 return engine_args
101
102 def create_engine_configs(
103 self,
104 ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:
105 # Initialize the configs.
106 model_config = ModelConfig(
107 self.model, self.download_dir, self.use_np_weights,
108 self.use_dummy_weights, self.dtype, self.seed)
109 cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,
110 self.swap_space)
111 parallel_config = ParallelConfig(self.pipeline_parallel_size,
112 self.tensor_parallel_size,
113 self.worker_use_ray)
114 scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
115 self.max_num_seqs)
116 return model_config, cache_config, parallel_config, scheduler_config
117
118
119 @dataclass
120 class AsyncEngineArgs(EngineArgs):
121 """Arguments for asynchronous vLLM engine."""
122 engine_use_ray: bool = False
123 disable_log_requests: bool = False
124
125 @staticmethod
126 def add_cli_args(
127 parser: argparse.ArgumentParser,
128 ) -> argparse.ArgumentParser:
129 parser = EngineArgs.add_cli_args(parser)
130 parser.add_argument('--engine-use-ray', action='store_true',
131 help='use Ray to start the LLM engine in a '
132 'separate process as the server process.')
133 parser.add_argument('--disable-log-requests', action='store_true',
134 help='disable logging requests')
135 return parser
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -21,7 +21,7 @@
tensor_parallel_size: int = 1
block_size: int = 16
swap_space: int = 4 # GiB
- gpu_memory_utilization: float = 0.95
+ gpu_memory_utilization: float = 0.90
max_num_batched_tokens: int = 2560
max_num_seqs: int = 256
disable_log_stats: bool = False
| {"golden_diff": "diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py\n--- a/vllm/engine/arg_utils.py\n+++ b/vllm/engine/arg_utils.py\n@@ -21,7 +21,7 @@\n tensor_parallel_size: int = 1\n block_size: int = 16\n swap_space: int = 4 # GiB\n- gpu_memory_utilization: float = 0.95\n+ gpu_memory_utilization: float = 0.90\n max_num_batched_tokens: int = 2560\n max_num_seqs: int = 256\n disable_log_stats: bool = False\n", "issue": "Got OOM when using TP\nI got this when running OPT-13B on two A100s, with FP16. The error didn't occur when I decreased `gpu_memory_utilization` to 0.9. It seems our memory profiling is somehow inaccurate when using TP.\r\n\r\nFind the command and error msg below:\r\n```\r\n$ python benchmarks/benchmark_latency.py --model facebook/opt-13b -tp 2\r\nNamespace(model='facebook/opt-13b', tensor_parallel_size=2, input_len=32, output_len=128, batch_size=8, n=1, use_beam_search=False, num_iters=3, profile=False)\r\n2023-06-09 09:17:47,945 INFO worker.py:1625 -- Started a local Ray instance.\r\nINFO 06-09 09:17:48 llm_server.py:60] Initializing an LLM server with config: model='facebook/opt-13b', dtype=torch.float16, use_dummy_weights=False, download_dir=None, use_np_weights=False, tensor_parallel_size=2, seed=0)\r\nINFO 06-09 09:18:11 llm_server.py:129] # GPU blocks: 4150, # CPU blocks: 655\r\nTraceback (most recent call last):\r\n File \"/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py\", line 80, in <module>\r\n main(args)\r\n File \"/home/gcpuser/workspace/cacheflow/benchmarks/benchmark_latency.py\", line 17, in main\r\n llm = LLM(\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/entrypoints/llm.py\", line 55, in __init__\r\n self.llm_server = LLMServer.from_server_args(server_args)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 146, in from_server_args\r\n server = cls(*server_configs, distributed_init_method, devices,\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 103, in __init__\r\n self._init_cache()\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 135, in _init_cache\r\n self._run_workers(\"init_cache_engine\", cache_config=self.cache_config)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/server/llm_server.py\", line 312, in _run_workers\r\n all_outputs = ray.get(all_outputs)\r\n File \"/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/client_mode_hook.py\", line 105, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/conda/envs/dev/lib/python3.9/site-packages/ray/_private/worker.py\", line 2521, in get\r\n raise value.as_instanceof_cause()\r\nray.exceptions.RayTaskError(OutOfMemoryError): ray::Worker.init_cache_engine() (pid=11306, ip=10.128.0.79, repr=<cacheflow.worker.worker.Worker object at 0x7f4aa97596d0>)\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/worker.py\", line 127, in init_cache_engine\r\n self.cache_engine = CacheEngine(\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py\", line 40, in __init__\r\n self.gpu_cache = self.allocate_gpu_cache()\r\n File \"/home/gcpuser/workspace/cacheflow/cacheflow/worker/cache_engine.py\", line 76, in allocate_gpu_cache\r\n value_blocks = torch.empty(\r\ntorch.cuda.OutOfMemoryError: CUDA out of memory. 
Tried to allocate 326.00 MiB (GPU 0; 39.41 GiB total capacity; 37.08 GiB already allocated; 188.56 MiB free; 37.25 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF\r\n```\r\n\r\n\n", "before_files": [{"content": "import argparse\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\n\n\n@dataclass\nclass EngineArgs:\n \"\"\"Arguments for vLLM engine.\"\"\"\n model: str\n download_dir: Optional[str] = None\n use_np_weights: bool = False\n use_dummy_weights: bool = False\n dtype: str = \"auto\"\n seed: int = 0\n worker_use_ray: bool = False\n pipeline_parallel_size: int = 1\n tensor_parallel_size: int = 1\n block_size: int = 16\n swap_space: int = 4 # GiB\n gpu_memory_utilization: float = 0.95\n max_num_batched_tokens: int = 2560\n max_num_seqs: int = 256\n disable_log_stats: bool = False\n\n def __post_init__(self):\n self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n \"\"\"Shared CLI arguments for vLLM engine.\"\"\"\n # Model arguments\n parser.add_argument('--model', type=str, default='facebook/opt-125m',\n help='name or path of the huggingface model to use')\n parser.add_argument('--download-dir', type=str,\n default=EngineArgs.download_dir,\n help='directory to download and load the weights, '\n 'default to the default cache dir of '\n 'huggingface')\n parser.add_argument('--use-np-weights', action='store_true',\n help='save a numpy copy of model weights for '\n 'faster loading. This can increase the disk '\n 'usage by up to 2x.')\n parser.add_argument('--use-dummy-weights', action='store_true',\n help='use dummy values for model weights')\n # TODO(woosuk): Support FP32.\n parser.add_argument('--dtype', type=str, default=EngineArgs.dtype,\n choices=['auto', 'half', 'bfloat16', 'float'],\n help='data type for model weights and activations. 
'\n 'The \"auto\" option will use FP16 precision '\n 'for FP32 and FP16 models, and BF16 precision '\n 'for BF16 models.')\n # Parallel arguments\n parser.add_argument('--worker-use-ray', action='store_true',\n help='use Ray for distributed serving, will be '\n 'automatically set when using more than 1 GPU')\n parser.add_argument('--pipeline-parallel-size', '-pp', type=int,\n default=EngineArgs.pipeline_parallel_size,\n help='number of pipeline stages')\n parser.add_argument('--tensor-parallel-size', '-tp', type=int,\n default=EngineArgs.tensor_parallel_size,\n help='number of tensor parallel replicas')\n # KV cache arguments\n parser.add_argument('--block-size', type=int,\n default=EngineArgs.block_size,\n choices=[8, 16, 32],\n help='token block size')\n # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).\n parser.add_argument('--seed', type=int, default=EngineArgs.seed,\n help='random seed')\n parser.add_argument('--swap-space', type=int,\n default=EngineArgs.swap_space,\n help='CPU swap space size (GiB) per GPU')\n parser.add_argument('--gpu-memory-utilization', type=float,\n default=EngineArgs.gpu_memory_utilization,\n help='the percentage of GPU memory to be used for'\n 'the model executor')\n parser.add_argument('--max-num-batched-tokens', type=int,\n default=EngineArgs.max_num_batched_tokens,\n help='maximum number of batched tokens per '\n 'iteration')\n parser.add_argument('--max-num-seqs', type=int,\n default=EngineArgs.max_num_seqs,\n help='maximum number of sequences per iteration')\n parser.add_argument('--disable-log-stats', action='store_true',\n help='disable logging statistics')\n return parser\n\n @classmethod\n def from_cli_args(cls, args: argparse.Namespace) -> \"EngineArgs\":\n # Get the list of attributes of this dataclass.\n attrs = [attr.name for attr in dataclasses.fields(cls)]\n # Set the attributes from the parsed arguments.\n engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})\n return engine_args\n\n def create_engine_configs(\n self,\n ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:\n # Initialize the configs.\n model_config = ModelConfig(\n self.model, self.download_dir, self.use_np_weights,\n self.use_dummy_weights, self.dtype, self.seed)\n cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,\n self.swap_space)\n parallel_config = ParallelConfig(self.pipeline_parallel_size,\n self.tensor_parallel_size,\n self.worker_use_ray)\n scheduler_config = SchedulerConfig(self.max_num_batched_tokens,\n self.max_num_seqs)\n return model_config, cache_config, parallel_config, scheduler_config\n\n\n@dataclass\nclass AsyncEngineArgs(EngineArgs):\n \"\"\"Arguments for asynchronous vLLM engine.\"\"\"\n engine_use_ray: bool = False\n disable_log_requests: bool = False\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n parser = EngineArgs.add_cli_args(parser)\n parser.add_argument('--engine-use-ray', action='store_true',\n help='use Ray to start the LLM engine in a '\n 'separate process as the server process.')\n parser.add_argument('--disable-log-requests', action='store_true',\n help='disable logging requests')\n return parser\n", "path": "vllm/engine/arg_utils.py"}], "after_files": [{"content": "import argparse\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nfrom vllm.config import (CacheConfig, ModelConfig, ParallelConfig,\n SchedulerConfig)\n\n\n@dataclass\nclass EngineArgs:\n \"\"\"Arguments for 
vLLM engine.\"\"\"\n model: str\n download_dir: Optional[str] = None\n use_np_weights: bool = False\n use_dummy_weights: bool = False\n dtype: str = \"auto\"\n seed: int = 0\n worker_use_ray: bool = False\n pipeline_parallel_size: int = 1\n tensor_parallel_size: int = 1\n block_size: int = 16\n swap_space: int = 4 # GiB\n gpu_memory_utilization: float = 0.90\n max_num_batched_tokens: int = 2560\n max_num_seqs: int = 256\n disable_log_stats: bool = False\n\n def __post_init__(self):\n self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n \"\"\"Shared CLI arguments for vLLM engine.\"\"\"\n # Model arguments\n parser.add_argument('--model', type=str, default='facebook/opt-125m',\n help='name or path of the huggingface model to use')\n parser.add_argument('--download-dir', type=str,\n default=EngineArgs.download_dir,\n help='directory to download and load the weights, '\n 'default to the default cache dir of '\n 'huggingface')\n parser.add_argument('--use-np-weights', action='store_true',\n help='save a numpy copy of model weights for '\n 'faster loading. This can increase the disk '\n 'usage by up to 2x.')\n parser.add_argument('--use-dummy-weights', action='store_true',\n help='use dummy values for model weights')\n # TODO(woosuk): Support FP32.\n parser.add_argument('--dtype', type=str, default=EngineArgs.dtype,\n choices=['auto', 'half', 'bfloat16', 'float'],\n help='data type for model weights and activations. '\n 'The \"auto\" option will use FP16 precision '\n 'for FP32 and FP16 models, and BF16 precision '\n 'for BF16 models.')\n # Parallel arguments\n parser.add_argument('--worker-use-ray', action='store_true',\n help='use Ray for distributed serving, will be '\n 'automatically set when using more than 1 GPU')\n parser.add_argument('--pipeline-parallel-size', '-pp', type=int,\n default=EngineArgs.pipeline_parallel_size,\n help='number of pipeline stages')\n parser.add_argument('--tensor-parallel-size', '-tp', type=int,\n default=EngineArgs.tensor_parallel_size,\n help='number of tensor parallel replicas')\n # KV cache arguments\n parser.add_argument('--block-size', type=int,\n default=EngineArgs.block_size,\n choices=[8, 16, 32],\n help='token block size')\n # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).\n parser.add_argument('--seed', type=int, default=EngineArgs.seed,\n help='random seed')\n parser.add_argument('--swap-space', type=int,\n default=EngineArgs.swap_space,\n help='CPU swap space size (GiB) per GPU')\n parser.add_argument('--gpu-memory-utilization', type=float,\n default=EngineArgs.gpu_memory_utilization,\n help='the percentage of GPU memory to be used for'\n 'the model executor')\n parser.add_argument('--max-num-batched-tokens', type=int,\n default=EngineArgs.max_num_batched_tokens,\n help='maximum number of batched tokens per '\n 'iteration')\n parser.add_argument('--max-num-seqs', type=int,\n default=EngineArgs.max_num_seqs,\n help='maximum number of sequences per iteration')\n parser.add_argument('--disable-log-stats', action='store_true',\n help='disable logging statistics')\n return parser\n\n @classmethod\n def from_cli_args(cls, args: argparse.Namespace) -> \"EngineArgs\":\n # Get the list of attributes of this dataclass.\n attrs = [attr.name for attr in dataclasses.fields(cls)]\n # Set the attributes from the parsed arguments.\n engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})\n return 
engine_args\n\n def create_engine_configs(\n self,\n ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:\n # Initialize the configs.\n model_config = ModelConfig(\n self.model, self.download_dir, self.use_np_weights,\n self.use_dummy_weights, self.dtype, self.seed)\n cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,\n self.swap_space)\n parallel_config = ParallelConfig(self.pipeline_parallel_size,\n self.tensor_parallel_size,\n self.worker_use_ray)\n scheduler_config = SchedulerConfig(self.max_num_batched_tokens,\n self.max_num_seqs)\n return model_config, cache_config, parallel_config, scheduler_config\n\n\n@dataclass\nclass AsyncEngineArgs(EngineArgs):\n \"\"\"Arguments for asynchronous vLLM engine.\"\"\"\n engine_use_ray: bool = False\n disable_log_requests: bool = False\n\n @staticmethod\n def add_cli_args(\n parser: argparse.ArgumentParser,\n ) -> argparse.ArgumentParser:\n parser = EngineArgs.add_cli_args(parser)\n parser.add_argument('--engine-use-ray', action='store_true',\n help='use Ray to start the LLM engine in a '\n 'separate process as the server process.')\n parser.add_argument('--disable-log-requests', action='store_true',\n help='disable logging requests')\n return parser\n", "path": "vllm/engine/arg_utils.py"}]} | 2,821 | 155 |
gh_patches_debug_16106 | rasdani/github-patches | git_diff | python-pillow__Pillow-3279 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PySide2
The PySide2 (Qt for Python)[1] project developed by Qt is now available on PyPI[2]. I have always changed ImageQt.py manually, but I think it's time to add such a change to the official source
[1] https://wiki.qt.io/Qt_for_Python
[2] https://pypi.org/project/PySide2
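
For concreteness, a minimal sketch of the kind of change being asked for, mirroring the PyQt5/PySide import branches that already exist in `ImageQt.py` (the module name `PySide2` is the only new piece; the exact wiring into the `qt_versions` loop is shown in the file below):

```python
# Sketch only: how a PySide2 branch could join the Qt bindings probed by
# ImageQt.py; it mirrors the PyQt5/PySide imports already in the module.
try:
    from PySide2.QtGui import QImage, qRgba, QPixmap
    from PySide2.QtCore import QBuffer, QIODevice
    qt_is_installed = True
except (ImportError, RuntimeError):
    # Fall back to the next binding in the list, as the module already does.
    qt_is_installed = False
```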
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/ImageQt.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # a simple Qt image interface.
6 #
7 # history:
8 # 2006-06-03 fl: created
9 # 2006-06-04 fl: inherit from QImage instead of wrapping it
10 # 2006-06-05 fl: removed toimage helper; move string support to ImageQt
11 # 2013-11-13 fl: add support for Qt5 ([email protected])
12 #
13 # Copyright (c) 2006 by Secret Labs AB
14 # Copyright (c) 2006 by Fredrik Lundh
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 from . import Image
20 from ._util import isPath, py3
21 from io import BytesIO
22 import sys
23
24 qt_versions = [
25 ['5', 'PyQt5'],
26 ['4', 'PyQt4'],
27 ['side', 'PySide']
28 ]
29 # If a version has already been imported, attempt it first
30 qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)
31 for qt_version, qt_module in qt_versions:
32 try:
33 if qt_module == 'PyQt5':
34 from PyQt5.QtGui import QImage, qRgba, QPixmap
35 from PyQt5.QtCore import QBuffer, QIODevice
36 elif qt_module == 'PyQt4':
37 from PyQt4.QtGui import QImage, qRgba, QPixmap
38 from PyQt4.QtCore import QBuffer, QIODevice
39 elif qt_module == 'PySide':
40 from PySide.QtGui import QImage, qRgba, QPixmap
41 from PySide.QtCore import QBuffer, QIODevice
42 except (ImportError, RuntimeError):
43 continue
44 qt_is_installed = True
45 break
46 else:
47 qt_is_installed = False
48 qt_version = None
49
50
51 def rgb(r, g, b, a=255):
52 """(Internal) Turns an RGB color into a Qt compatible color integer."""
53 # use qRgb to pack the colors, and then turn the resulting long
54 # into a negative integer with the same bitpattern.
55 return (qRgba(r, g, b, a) & 0xffffffff)
56
57
58 def fromqimage(im):
59 """
60 :param im: A PIL Image object, or a file name
61 (given either as Python string or a PyQt string object)
62 """
63 buffer = QBuffer()
64 buffer.open(QIODevice.ReadWrite)
65 # preserve alha channel with png
66 # otherwise ppm is more friendly with Image.open
67 if im.hasAlphaChannel():
68 im.save(buffer, 'png')
69 else:
70 im.save(buffer, 'ppm')
71
72 b = BytesIO()
73 try:
74 b.write(buffer.data())
75 except TypeError:
76 # workaround for Python 2
77 b.write(str(buffer.data()))
78 buffer.close()
79 b.seek(0)
80
81 return Image.open(b)
82
83
84 def fromqpixmap(im):
85 return fromqimage(im)
86 # buffer = QBuffer()
87 # buffer.open(QIODevice.ReadWrite)
88 # # im.save(buffer)
89 # # What if png doesn't support some image features like animation?
90 # im.save(buffer, 'ppm')
91 # bytes_io = BytesIO()
92 # bytes_io.write(buffer.data())
93 # buffer.close()
94 # bytes_io.seek(0)
95 # return Image.open(bytes_io)
96
97
98 def align8to32(bytes, width, mode):
99 """
100 converts each scanline of data from 8 bit to 32 bit aligned
101 """
102
103 bits_per_pixel = {
104 '1': 1,
105 'L': 8,
106 'P': 8,
107 }[mode]
108
109 # calculate bytes per line and the extra padding if needed
110 bits_per_line = bits_per_pixel * width
111 full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)
112 bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)
113
114 extra_padding = -bytes_per_line % 4
115
116 # already 32 bit aligned by luck
117 if not extra_padding:
118 return bytes
119
120 new_data = []
121 for i in range(len(bytes) // bytes_per_line):
122 new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line]
123 + b'\x00' * extra_padding)
124
125 return b''.join(new_data)
126
127
128 def _toqclass_helper(im):
129 data = None
130 colortable = None
131
132 # handle filename, if given instead of image name
133 if hasattr(im, "toUtf8"):
134 # FIXME - is this really the best way to do this?
135 if py3:
136 im = str(im.toUtf8(), "utf-8")
137 else:
138 im = unicode(im.toUtf8(), "utf-8")
139 if isPath(im):
140 im = Image.open(im)
141
142 if im.mode == "1":
143 format = QImage.Format_Mono
144 elif im.mode == "L":
145 format = QImage.Format_Indexed8
146 colortable = []
147 for i in range(256):
148 colortable.append(rgb(i, i, i))
149 elif im.mode == "P":
150 format = QImage.Format_Indexed8
151 colortable = []
152 palette = im.getpalette()
153 for i in range(0, len(palette), 3):
154 colortable.append(rgb(*palette[i:i+3]))
155 elif im.mode == "RGB":
156 data = im.tobytes("raw", "BGRX")
157 format = QImage.Format_RGB32
158 elif im.mode == "RGBA":
159 try:
160 data = im.tobytes("raw", "BGRA")
161 except SystemError:
162 # workaround for earlier versions
163 r, g, b, a = im.split()
164 im = Image.merge("RGBA", (b, g, r, a))
165 format = QImage.Format_ARGB32
166 else:
167 raise ValueError("unsupported image mode %r" % im.mode)
168
169 __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
170 return {
171 'data': __data, 'im': im, 'format': format, 'colortable': colortable
172 }
173
174
175 if qt_is_installed:
176 class ImageQt(QImage):
177
178 def __init__(self, im):
179 """
180 An PIL image wrapper for Qt. This is a subclass of PyQt's QImage
181 class.
182
183 :param im: A PIL Image object, or a file name (given either as Python
184 string or a PyQt string object).
185 """
186 im_data = _toqclass_helper(im)
187 # must keep a reference, or Qt will crash!
188 # All QImage constructors that take data operate on an existing
189 # buffer, so this buffer has to hang on for the life of the image.
190 # Fixes https://github.com/python-pillow/Pillow/issues/1370
191 self.__data = im_data['data']
192 QImage.__init__(self,
193 self.__data, im_data['im'].size[0],
194 im_data['im'].size[1], im_data['format'])
195 if im_data['colortable']:
196 self.setColorTable(im_data['colortable'])
197
198
199 def toqimage(im):
200 return ImageQt(im)
201
202
203 def toqpixmap(im):
204 # # This doesn't work. For now using a dumb approach.
205 # im_data = _toqclass_helper(im)
206 # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])
207 # result.loadFromData(im_data['data'])
208 # Fix some strange bug that causes
209 if im.mode == 'RGB':
210 im = im.convert('RGBA')
211
212 qimage = toqimage(im)
213 return QPixmap.fromImage(qimage)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py
--- a/src/PIL/ImageQt.py
+++ b/src/PIL/ImageQt.py
@@ -23,6 +23,7 @@
qt_versions = [
['5', 'PyQt5'],
+ ['side2', 'PySide2'],
['4', 'PyQt4'],
['side', 'PySide']
]
@@ -33,6 +34,9 @@
if qt_module == 'PyQt5':
from PyQt5.QtGui import QImage, qRgba, QPixmap
from PyQt5.QtCore import QBuffer, QIODevice
+ elif qt_module == 'PySide2':
+ from PySide2.QtGui import QImage, qRgba, QPixmap
+ from PySide2.QtCore import QBuffer, QIODevice
elif qt_module == 'PyQt4':
from PyQt4.QtGui import QImage, qRgba, QPixmap
from PyQt4.QtCore import QBuffer, QIODevice
| {"golden_diff": "diff --git a/src/PIL/ImageQt.py b/src/PIL/ImageQt.py\n--- a/src/PIL/ImageQt.py\n+++ b/src/PIL/ImageQt.py\n@@ -23,6 +23,7 @@\n \n qt_versions = [\n ['5', 'PyQt5'],\n+ ['side2', 'PySide2'],\n ['4', 'PyQt4'],\n ['side', 'PySide']\n ]\n@@ -33,6 +34,9 @@\n if qt_module == 'PyQt5':\n from PyQt5.QtGui import QImage, qRgba, QPixmap\n from PyQt5.QtCore import QBuffer, QIODevice\n+ elif qt_module == 'PySide2':\n+ from PySide2.QtGui import QImage, qRgba, QPixmap\n+ from PySide2.QtCore import QBuffer, QIODevice\n elif qt_module == 'PyQt4':\n from PyQt4.QtGui import QImage, qRgba, QPixmap\n from PyQt4.QtCore import QBuffer, QIODevice\n", "issue": "PySide2\nThe PySide2 (Qt for Python)[1] project developed by qt is now available even in pypi[2], I have always changed ImageQt.py manually, but I think it's time to add such changing to the official source\r\n\r\n[1] https://wiki.qt.io/Qt_for_Python\r\n[2] https://pypi.org/project/PySide2\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image\nfrom ._util import isPath, py3\nfrom io import BytesIO\nimport sys\n\nqt_versions = [\n ['5', 'PyQt5'],\n ['4', 'PyQt4'],\n ['side', 'PySide']\n]\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == 'PyQt5':\n from PyQt5.QtGui import QImage, qRgba, QPixmap\n from PyQt5.QtCore import QBuffer, QIODevice\n elif qt_module == 'PyQt4':\n from PyQt4.QtGui import QImage, qRgba, QPixmap\n from PyQt4.QtCore import QBuffer, QIODevice\n elif qt_module == 'PySide':\n from PySide.QtGui import QImage, qRgba, QPixmap\n from PySide.QtCore import QBuffer, QIODevice\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return (qRgba(r, g, b, a) & 0xffffffff)\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n # preserve alha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, 'png')\n else:\n im.save(buffer, 'ppm')\n\n b = BytesIO()\n try:\n b.write(buffer.data())\n except TypeError:\n # workaround for Python 2\n b.write(str(buffer.data()))\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, 
mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\n '1': 1,\n 'L': 8,\n 'P': 8,\n }[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line]\n + b'\\x00' * extra_padding)\n\n return b''.join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n if py3:\n im = str(im.toUtf8(), \"utf-8\")\n else:\n im = unicode(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i:i+3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n try:\n data = im.tobytes(\"raw\", \"BGRA\")\n except SystemError:\n # workaround for earlier versions\n r, g, b, a = im.split()\n im = Image.merge(\"RGBA\", (b, g, r, a))\n format = QImage.Format_ARGB32\n else:\n raise ValueError(\"unsupported image mode %r\" % im.mode)\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\n 'data': __data, 'im': im, 'format': format, 'colortable': colortable\n }\n\n\nif qt_is_installed:\n class ImageQt(QImage):\n\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as Python\n string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data['data']\n QImage.__init__(self,\n self.__data, im_data['im'].size[0],\n im_data['im'].size[1], im_data['format'])\n if im_data['colortable']:\n self.setColorTable(im_data['colortable'])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. 
For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == 'RGB':\n im = im.convert('RGBA')\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a simple Qt image interface.\n#\n# history:\n# 2006-06-03 fl: created\n# 2006-06-04 fl: inherit from QImage instead of wrapping it\n# 2006-06-05 fl: removed toimage helper; move string support to ImageQt\n# 2013-11-13 fl: add support for Qt5 ([email protected])\n#\n# Copyright (c) 2006 by Secret Labs AB\n# Copyright (c) 2006 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image\nfrom ._util import isPath, py3\nfrom io import BytesIO\nimport sys\n\nqt_versions = [\n ['5', 'PyQt5'],\n ['side2', 'PySide2'],\n ['4', 'PyQt4'],\n ['side', 'PySide']\n]\n# If a version has already been imported, attempt it first\nqt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True)\nfor qt_version, qt_module in qt_versions:\n try:\n if qt_module == 'PyQt5':\n from PyQt5.QtGui import QImage, qRgba, QPixmap\n from PyQt5.QtCore import QBuffer, QIODevice\n elif qt_module == 'PySide2':\n from PySide2.QtGui import QImage, qRgba, QPixmap\n from PySide2.QtCore import QBuffer, QIODevice\n elif qt_module == 'PyQt4':\n from PyQt4.QtGui import QImage, qRgba, QPixmap\n from PyQt4.QtCore import QBuffer, QIODevice\n elif qt_module == 'PySide':\n from PySide.QtGui import QImage, qRgba, QPixmap\n from PySide.QtCore import QBuffer, QIODevice\n except (ImportError, RuntimeError):\n continue\n qt_is_installed = True\n break\nelse:\n qt_is_installed = False\n qt_version = None\n\n\ndef rgb(r, g, b, a=255):\n \"\"\"(Internal) Turns an RGB color into a Qt compatible color integer.\"\"\"\n # use qRgb to pack the colors, and then turn the resulting long\n # into a negative integer with the same bitpattern.\n return (qRgba(r, g, b, a) & 0xffffffff)\n\n\ndef fromqimage(im):\n \"\"\"\n :param im: A PIL Image object, or a file name\n (given either as Python string or a PyQt string object)\n \"\"\"\n buffer = QBuffer()\n buffer.open(QIODevice.ReadWrite)\n # preserve alha channel with png\n # otherwise ppm is more friendly with Image.open\n if im.hasAlphaChannel():\n im.save(buffer, 'png')\n else:\n im.save(buffer, 'ppm')\n\n b = BytesIO()\n try:\n b.write(buffer.data())\n except TypeError:\n # workaround for Python 2\n b.write(str(buffer.data()))\n buffer.close()\n b.seek(0)\n\n return Image.open(b)\n\n\ndef fromqpixmap(im):\n return fromqimage(im)\n # buffer = QBuffer()\n # buffer.open(QIODevice.ReadWrite)\n # # im.save(buffer)\n # # What if png doesn't support some image features like animation?\n # im.save(buffer, 'ppm')\n # bytes_io = BytesIO()\n # bytes_io.write(buffer.data())\n # buffer.close()\n # bytes_io.seek(0)\n # return Image.open(bytes_io)\n\n\ndef align8to32(bytes, width, mode):\n \"\"\"\n converts each scanline of data from 8 bit to 32 bit aligned\n \"\"\"\n\n bits_per_pixel = {\n '1': 1,\n 'L': 8,\n 'P': 8,\n }[mode]\n\n # calculate bytes per line and the extra padding if needed\n bits_per_line = bits_per_pixel * width\n full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8)\n bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0)\n\n extra_padding = -bytes_per_line % 4\n\n # 
already 32 bit aligned by luck\n if not extra_padding:\n return bytes\n\n new_data = []\n for i in range(len(bytes) // bytes_per_line):\n new_data.append(bytes[i*bytes_per_line:(i+1)*bytes_per_line]\n + b'\\x00' * extra_padding)\n\n return b''.join(new_data)\n\n\ndef _toqclass_helper(im):\n data = None\n colortable = None\n\n # handle filename, if given instead of image name\n if hasattr(im, \"toUtf8\"):\n # FIXME - is this really the best way to do this?\n if py3:\n im = str(im.toUtf8(), \"utf-8\")\n else:\n im = unicode(im.toUtf8(), \"utf-8\")\n if isPath(im):\n im = Image.open(im)\n\n if im.mode == \"1\":\n format = QImage.Format_Mono\n elif im.mode == \"L\":\n format = QImage.Format_Indexed8\n colortable = []\n for i in range(256):\n colortable.append(rgb(i, i, i))\n elif im.mode == \"P\":\n format = QImage.Format_Indexed8\n colortable = []\n palette = im.getpalette()\n for i in range(0, len(palette), 3):\n colortable.append(rgb(*palette[i:i+3]))\n elif im.mode == \"RGB\":\n data = im.tobytes(\"raw\", \"BGRX\")\n format = QImage.Format_RGB32\n elif im.mode == \"RGBA\":\n try:\n data = im.tobytes(\"raw\", \"BGRA\")\n except SystemError:\n # workaround for earlier versions\n r, g, b, a = im.split()\n im = Image.merge(\"RGBA\", (b, g, r, a))\n format = QImage.Format_ARGB32\n else:\n raise ValueError(\"unsupported image mode %r\" % im.mode)\n\n __data = data or align8to32(im.tobytes(), im.size[0], im.mode)\n return {\n 'data': __data, 'im': im, 'format': format, 'colortable': colortable\n }\n\n\nif qt_is_installed:\n class ImageQt(QImage):\n\n def __init__(self, im):\n \"\"\"\n An PIL image wrapper for Qt. This is a subclass of PyQt's QImage\n class.\n\n :param im: A PIL Image object, or a file name (given either as Python\n string or a PyQt string object).\n \"\"\"\n im_data = _toqclass_helper(im)\n # must keep a reference, or Qt will crash!\n # All QImage constructors that take data operate on an existing\n # buffer, so this buffer has to hang on for the life of the image.\n # Fixes https://github.com/python-pillow/Pillow/issues/1370\n self.__data = im_data['data']\n QImage.__init__(self,\n self.__data, im_data['im'].size[0],\n im_data['im'].size[1], im_data['format'])\n if im_data['colortable']:\n self.setColorTable(im_data['colortable'])\n\n\ndef toqimage(im):\n return ImageQt(im)\n\n\ndef toqpixmap(im):\n # # This doesn't work. For now using a dumb approach.\n # im_data = _toqclass_helper(im)\n # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1])\n # result.loadFromData(im_data['data'])\n # Fix some strange bug that causes\n if im.mode == 'RGB':\n im = im.convert('RGBA')\n\n qimage = toqimage(im)\n return QPixmap.fromImage(qimage)\n", "path": "src/PIL/ImageQt.py"}]} | 2,605 | 222 |
gh_patches_debug_1801 | rasdani/github-patches | git_diff | mit-ll-responsible-ai__hydra-zen-355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`validates_with_beartype` considers `Partial` as `NoneType`
Hi @rsokl. I was having a blast using this fascinating library. But it seems that, when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`.
```python
from hydra_zen.typing import Partial
from hydra_zen.third_party.beartype import validates_with_beartype
def f(x: Partial[list]):
return x
val_f = validates_with_beartype(f)
val_f(3)
```
It raises the following error. Can you take a look?
```bash
beartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f()
parameter x=3 violates type hint None, as int 3 not instance of <class "builtins.NoneType">.
```
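
The hint collapsing to `None` is consistent with the runtime stub for `__class_getitem__` in `Partial` (see the file below): its body is just `...`, so subscripting the protocol returns `None` on Python 3.9+, and beartype then validates against `NoneType`. A minimal sketch of that behaviour, using a simplified stand-in class:

```python
# Simplified stand-in for the protocol's runtime behaviour on Python 3.9+.
class FakePartial:
    def __class_getitem__(cls, item):
        ...  # empty body -> implicitly returns None

hint = FakePartial[list]
print(hint)  # None, which beartype then treats as NoneType
```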
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/hydra_zen/typing/_implementations.py`
Content:
```
1 # Copyright (c) 2022 Massachusetts Institute of Technology
2 # SPDX-License-Identifier: MIT
3
4 # pyright: strict
5
6 import sys
7 import types
8 from enum import Enum
9 from pathlib import Path, PosixPath, WindowsPath
10 from typing import (
11 TYPE_CHECKING,
12 Any,
13 ByteString,
14 Callable,
15 ClassVar,
16 Dict,
17 FrozenSet,
18 Generic,
19 List,
20 Mapping,
21 NewType,
22 Optional,
23 Sequence,
24 Set,
25 Tuple,
26 Type,
27 TypeVar,
28 Union,
29 )
30
31 from omegaconf import DictConfig, ListConfig
32 from typing_extensions import (
33 Final,
34 Literal,
35 ParamSpec,
36 Protocol,
37 Self,
38 TypeAlias,
39 TypedDict,
40 runtime_checkable,
41 )
42
43 __all__ = [
44 "Just",
45 "Builds",
46 "PartialBuilds",
47 "Partial",
48 "Importable",
49 "SupportedPrimitive",
50 "ZenWrappers",
51 "ZenPartialBuilds",
52 "HydraPartialBuilds",
53 "ZenConvert",
54 ]
55
56 P = ParamSpec("P")
57 R = TypeVar("R")
58
59
60 class EmptyDict(TypedDict):
61 pass
62
63
64 T = TypeVar("T", covariant=True)
65 T2 = TypeVar("T2")
66 T3 = TypeVar("T3")
67
68 T4 = TypeVar("T4", bound=Callable[..., Any])
69
70
71 InstOrType: TypeAlias = Union[T, Type[T]]
72
73
74 if TYPE_CHECKING:
75 from dataclasses import Field # provided by typestub but not generic at runtime
76 else:
77
78 class Field(Protocol[T2]):
79 name: str
80 type: Type[T2]
81 default: T2
82 default_factory: Callable[[], T2]
83 repr: bool
84 hash: Optional[bool]
85 init: bool
86 compare: bool
87 metadata: Mapping[str, Any]
88
89
90 @runtime_checkable
91 class Partial(Protocol[T2]):
92 __call__: Callable[..., T2]
93
94 @property
95 def func(self) -> Callable[..., T2]:
96 ...
97
98 @property
99 def args(self) -> Tuple[Any, ...]:
100 ...
101
102 @property
103 def keywords(self) -> Dict[str, Any]:
104 ...
105
106 def __new__(
107 cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any
108 ) -> Self:
109 ...
110
111 if sys.version_info >= (3, 9): # pragma: no cover
112
113 def __class_getitem__(cls, item: Any) -> types.GenericAlias:
114 ...
115
116
117 InterpStr = NewType("InterpStr", str)
118
119
120 class DataClass_(Protocol):
121 # doesn't provide __init__, __getattribute__, etc.
122 __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]
123
124
125 class DataClass(DataClass_, Protocol):
126 def __init__(self, *args: Any, **kwargs: Any) -> None:
127 ...
128
129 def __getattribute__(self, __name: str) -> Any:
130 ...
131
132 def __setattr__(self, __name: str, __value: Any) -> None:
133 ...
134
135
136 @runtime_checkable
137 class Builds(DataClass, Protocol[T]):
138 _target_: ClassVar[str]
139
140
141 class BuildsWithSig(Builds[T], Protocol[T, P]):
142 def __init__(self, *args: P.args, **kwds: P.kwargs):
143 ...
144
145
146 @runtime_checkable
147 class Just(Builds[T], Protocol[T]):
148 path: ClassVar[str] # interpolated string for importing obj
149 _target_: ClassVar[Literal["hydra_zen.funcs.get_obj"]] = "hydra_zen.funcs.get_obj"
150
151
152 class ZenPartialMixin(Protocol[T]):
153 _zen_target: ClassVar[str]
154 _zen_partial: ClassVar[Literal[True]] = True
155
156
157 class HydraPartialMixin(Protocol[T]):
158 _partial_: ClassVar[Literal[True]] = True
159
160
161 @runtime_checkable
162 class ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):
163 _target_: ClassVar[
164 Literal["hydra_zen.funcs.zen_processing"]
165 ] = "hydra_zen.funcs.zen_processing"
166
167
168 @runtime_checkable
169 class HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):
170 ...
171
172
173 # Necessary, but not sufficient, check for PartialBuilds; useful for creating
174 # non-overlapping overloads
175 IsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]
176
177 PartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]]
178
179
180 @runtime_checkable
181 class HasTarget(Protocol):
182 _target_: str
183
184
185 Importable = TypeVar("Importable", bound=Callable[..., Any])
186
187 _HydraPrimitive: TypeAlias = Union[
188 bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath
189 ]
190
191 _SupportedViaBuilds = Union[
192 Partial[Any],
193 range,
194 Set[Any],
195 ]
196
197 _SupportedPrimitive: TypeAlias = Union[
198 _HydraPrimitive,
199 ListConfig,
200 DictConfig,
201 Callable[..., Any],
202 Enum,
203 DataClass_,
204 complex,
205 _SupportedViaBuilds,
206 EmptyDict, # not covered by Mapping[..., ...]]
207 ]
208
209 if TYPE_CHECKING:
210 SupportedPrimitive: TypeAlias = Union[
211 _SupportedPrimitive,
212 FrozenSet["SupportedPrimitive"],
213 # Even thought this is redundant with Sequence, it seems to
214 # be needed for pyright to do proper checking of tuple contents
215 Tuple["SupportedPrimitive", ...],
216 # Mutable generic containers need to be invariant, so
217 # we have to settle for Sequence/Mapping. While this
218 # is overly permissive in terms of sequence-type, it
219 # at least affords quality checking of sequence content
220 Sequence["SupportedPrimitive"],
221 # Mapping is covariant only in value
222 Mapping[Any, "SupportedPrimitive"],
223 ]
224 else:
225 # cleans up annotations for REPLs
226 SupportedPrimitive = TypeVar("SupportedPrimitive")
227
228
229 ZenWrapper: TypeAlias = Union[
230 None,
231 Builds[Callable[[T4], T4]],
232 PartialBuilds[Callable[[T4], T4]],
233 Just[Callable[[T4], T4]],
234 Type[Builds[Callable[[T4], T4]]],
235 Type[PartialBuilds[Callable[[T4], T4]]],
236 Type[Just[Callable[[T4], T4]]],
237 Callable[[T4], T4],
238 str,
239 ]
240 if TYPE_CHECKING:
241 ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]
242 else:
243 # cleans up annotations for REPLs
244 class ZenWrappers(Generic[T2]): # pragma: no cover
245 pass
246
247
248 DefaultsList = List[
249 Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]
250 ]
251
252
253 # Lists all zen-convert settings and their types. Not part of public API
254 class AllConvert(TypedDict, total=True):
255 dataclass: bool
256
257
258 # used for runtime type-checking
259 convert_types: Final = {"dataclass": bool}
260
261 GroupName: TypeAlias = Optional[str]
262 NodeName: TypeAlias = str
263 Node: TypeAlias = Any
264
265
266 # TODO: make immutable
267 class StoreEntry(TypedDict):
268 name: NodeName
269 group: GroupName
270 package: Optional[str]
271 provider: Optional[str]
272 node: Node
273
274
275 class ZenConvert(TypedDict, total=False):
276 """A TypedDict that provides a type-checked interface for specifying zen-convert
277 options that configure the hydra-zen config-creation functions (e.g., `builds`,
278 `just`, and `make_config`).
279
280 Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.
281
282 Parameters
283 ----------
284 dataclass : bool
285 If `True` any dataclass type/instance without a `_target_` field is
286 automatically converted to a targeted config that will instantiate to that type/
287 instance. Otherwise the dataclass type/instance will be passed through as-is.
288
289 Note that this only works with statically-defined dataclass types, whereas
290 :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`
291 dynamically generate dataclass types. Additionally, this feature is not
292 compatible with a dataclass instance whose type possesses an `InitVar` field.
293
294 Examples
295 --------
296 >>> from hydra_zen.typing import ZenConvert as zc
297 >>> zc()
298 {}
299 >>> zc(dataclass=True)
300 {"dataclass": True}
301 >>> # static type-checker will raise, but runtime will not
302 >>> zc(apple=1) # type: ignore
303 {"apple": 1}
304
305 **Configuring dataclass auto-config behaviors**
306
307 >>> from hydra_zen import instantiate as I
308 >>> from hydra_zen import builds, just
309 >>> from dataclasses import dataclass
310 >>> @dataclass
311 ... class B:
312 ... x: int
313 >>> b = B(x=1)
314
315 >>> I(just(b))
316 B(x=1)
317 >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig
318 {"x": 1}
319
320 >>> I(builds(dict, y=b))
321 {'y': B(x=1)}
322 >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig
323 {'y': {'x': 1}}
324
325 >>> I(make_config(y=b)) # returns omegaconf.DictConfig
326 {'y': {'x': 1}}
327 >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert="all"))
328 {'y': B(x=1)}
329
330 Auto-config support does not work with dynamically-generated dataclass types
331
332 >>> just(make_config(z=1))
333 HydraZenUnsupportedPrimitiveError: ...
334 >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))
335 {'z': 1}
336
337 A dataclass with a `_target_` field will not be converted:
338
339 >>> @dataclass
340 ... class BuildsStr:
341 ... _target_: str = 'builtins.str'
342 ...
343 >>> BuildsStr is just(BuildsStr)
344 True
345 >>> (builds_str := BuildsStr()) is just(builds_str)
346 True
347 """
348
349 dataclass: bool
350
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py
--- a/src/hydra_zen/typing/_implementations.py
+++ b/src/hydra_zen/typing/_implementations.py
@@ -108,7 +108,7 @@
) -> Self:
...
- if sys.version_info >= (3, 9): # pragma: no cover
+ if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover
def __class_getitem__(cls, item: Any) -> types.GenericAlias:
...
| {"golden_diff": "diff --git a/src/hydra_zen/typing/_implementations.py b/src/hydra_zen/typing/_implementations.py\n--- a/src/hydra_zen/typing/_implementations.py\n+++ b/src/hydra_zen/typing/_implementations.py\n@@ -108,7 +108,7 @@\n ) -> Self:\n ...\n \n- if sys.version_info >= (3, 9): # pragma: no cover\n+ if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover\n \n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n", "issue": "`validates_with_beartype` considers `Partial` as `NoneType`\nHi @rsokl. I was having a blast using this fascinating library. But It seems when used with `hydra_zen.third_party.validates_with_beartype`, it casts `hydra_zen.typing.Partial` as `NoneType`. \r\n\r\n\r\n```python\r\nfrom hydra_zen.typing import Partial\r\nfrom hydra_zen.third_party.beartype import validates_with_beartype\r\n\r\n\r\ndef f(x: Partial[list]):\r\n return x\r\n\r\n\r\nval_f = validates_with_beartype(f)\r\n\r\nval_f(3)\r\n```\r\n\r\nIt raises the following error. Can you take a look?\r\n\r\n```bash\r\nbeartype.roar.BeartypeCallHintParamViolation: @beartyped __main__.f() \r\nparameter x=3 violates type hint None, as int 3 not instance of <class \"builtins.NoneType\">.\r\n```\n", "before_files": [{"content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Generic,\n List,\n Mapping,\n NewType,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import (\n Final,\n Literal,\n ParamSpec,\n Protocol,\n Self,\n TypeAlias,\n TypedDict,\n runtime_checkable,\n)\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n \"ZenWrappers\",\n \"ZenPartialBuilds\",\n \"HydraPartialBuilds\",\n \"ZenConvert\",\n]\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\nclass EmptyDict(TypedDict):\n pass\n\n\nT = TypeVar(\"T\", covariant=True)\nT2 = TypeVar(\"T2\")\nT3 = TypeVar(\"T3\")\n\nT4 = TypeVar(\"T4\", bound=Callable[..., Any])\n\n\nInstOrType: TypeAlias = Union[T, Type[T]]\n\n\nif TYPE_CHECKING:\n from dataclasses import Field # provided by typestub but not generic at runtime\nelse:\n\n class Field(Protocol[T2]):\n name: str\n type: Type[T2]\n default: T2\n default_factory: Callable[[], T2]\n repr: bool\n hash: Optional[bool]\n init: bool\n compare: bool\n metadata: Mapping[str, Any]\n\n\n@runtime_checkable\nclass Partial(Protocol[T2]):\n __call__: Callable[..., T2]\n\n @property\n def func(self) -> Callable[..., T2]:\n ...\n\n @property\n def args(self) -> Tuple[Any, ...]:\n ...\n\n @property\n def keywords(self) -> Dict[str, Any]:\n ...\n\n def __new__(\n cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any\n ) -> Self:\n ...\n\n if sys.version_info >= (3, 9): # pragma: no cover\n\n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\n\nclass DataClass_(Protocol):\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]\n\n\nclass DataClass(DataClass_, Protocol):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n ...\n\n def __getattribute__(self, __name: str) -> Any:\n ...\n\n def __setattr__(self, __name: str, __value: 
Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[T]):\n _target_: ClassVar[str]\n\n\nclass BuildsWithSig(Builds[T], Protocol[T, P]):\n def __init__(self, *args: P.args, **kwds: P.kwargs):\n ...\n\n\n@runtime_checkable\nclass Just(Builds[T], Protocol[T]):\n path: ClassVar[str] # interpolated string for importing obj\n _target_: ClassVar[Literal[\"hydra_zen.funcs.get_obj\"]] = \"hydra_zen.funcs.get_obj\"\n\n\nclass ZenPartialMixin(Protocol[T]):\n _zen_target: ClassVar[str]\n _zen_partial: ClassVar[Literal[True]] = True\n\n\nclass HydraPartialMixin(Protocol[T]):\n _partial_: ClassVar[Literal[True]] = True\n\n\n@runtime_checkable\nclass ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):\n _target_: ClassVar[\n Literal[\"hydra_zen.funcs.zen_processing\"]\n ] = \"hydra_zen.funcs.zen_processing\"\n\n\n@runtime_checkable\nclass HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):\n ...\n\n\n# Necessary, but not sufficient, check for PartialBuilds; useful for creating\n# non-overlapping overloads\nIsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]\n\nPartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]]\n\n\n@runtime_checkable\nclass HasTarget(Protocol):\n _target_: str\n\n\nImportable = TypeVar(\"Importable\", bound=Callable[..., Any])\n\n_HydraPrimitive: TypeAlias = Union[\n bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath\n]\n\n_SupportedViaBuilds = Union[\n Partial[Any],\n range,\n Set[Any],\n]\n\n_SupportedPrimitive: TypeAlias = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n Callable[..., Any],\n Enum,\n DataClass_,\n complex,\n _SupportedViaBuilds,\n EmptyDict, # not covered by Mapping[..., ...]]\n]\n\nif TYPE_CHECKING:\n SupportedPrimitive: TypeAlias = Union[\n _SupportedPrimitive,\n FrozenSet[\"SupportedPrimitive\"],\n # Even thought this is redundant with Sequence, it seems to\n # be needed for pyright to do proper checking of tuple contents\n Tuple[\"SupportedPrimitive\", ...],\n # Mutable generic containers need to be invariant, so\n # we have to settle for Sequence/Mapping. While this\n # is overly permissive in terms of sequence-type, it\n # at least affords quality checking of sequence content\n Sequence[\"SupportedPrimitive\"],\n # Mapping is covariant only in value\n Mapping[Any, \"SupportedPrimitive\"],\n ]\nelse:\n # cleans up annotations for REPLs\n SupportedPrimitive = TypeVar(\"SupportedPrimitive\")\n\n\nZenWrapper: TypeAlias = Union[\n None,\n Builds[Callable[[T4], T4]],\n PartialBuilds[Callable[[T4], T4]],\n Just[Callable[[T4], T4]],\n Type[Builds[Callable[[T4], T4]]],\n Type[PartialBuilds[Callable[[T4], T4]]],\n Type[Just[Callable[[T4], T4]]],\n Callable[[T4], T4],\n str,\n]\nif TYPE_CHECKING:\n ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]\nelse:\n # cleans up annotations for REPLs\n class ZenWrappers(Generic[T2]): # pragma: no cover\n pass\n\n\nDefaultsList = List[\n Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]\n]\n\n\n# Lists all zen-convert settings and their types. 
Not part of public API\nclass AllConvert(TypedDict, total=True):\n dataclass: bool\n\n\n# used for runtime type-checking\nconvert_types: Final = {\"dataclass\": bool}\n\nGroupName: TypeAlias = Optional[str]\nNodeName: TypeAlias = str\nNode: TypeAlias = Any\n\n\n# TODO: make immutable\nclass StoreEntry(TypedDict):\n name: NodeName\n group: GroupName\n package: Optional[str]\n provider: Optional[str]\n node: Node\n\n\nclass ZenConvert(TypedDict, total=False):\n \"\"\"A TypedDict that provides a type-checked interface for specifying zen-convert\n options that configure the hydra-zen config-creation functions (e.g., `builds`,\n `just`, and `make_config`).\n\n Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.\n\n Parameters\n ----------\n dataclass : bool\n If `True` any dataclass type/instance without a `_target_` field is\n automatically converted to a targeted config that will instantiate to that type/\n instance. Otherwise the dataclass type/instance will be passed through as-is.\n\n Note that this only works with statically-defined dataclass types, whereas\n :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`\n dynamically generate dataclass types. Additionally, this feature is not\n compatible with a dataclass instance whose type possesses an `InitVar` field.\n\n Examples\n --------\n >>> from hydra_zen.typing import ZenConvert as zc\n >>> zc()\n {}\n >>> zc(dataclass=True)\n {\"dataclass\": True}\n >>> # static type-checker will raise, but runtime will not\n >>> zc(apple=1) # type: ignore\n {\"apple\": 1}\n\n **Configuring dataclass auto-config behaviors**\n\n >>> from hydra_zen import instantiate as I\n >>> from hydra_zen import builds, just\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class B:\n ... x: int\n >>> b = B(x=1)\n\n >>> I(just(b))\n B(x=1)\n >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {\"x\": 1}\n\n >>> I(builds(dict, y=b))\n {'y': B(x=1)}\n >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n\n >>> I(make_config(y=b)) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert=\"all\"))\n {'y': B(x=1)}\n\n Auto-config support does not work with dynamically-generated dataclass types\n\n >>> just(make_config(z=1))\n HydraZenUnsupportedPrimitiveError: ...\n >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))\n {'z': 1}\n\n A dataclass with a `_target_` field will not be converted:\n\n >>> @dataclass\n ... class BuildsStr:\n ... 
_target_: str = 'builtins.str'\n ...\n >>> BuildsStr is just(BuildsStr)\n True\n >>> (builds_str := BuildsStr()) is just(builds_str)\n True\n \"\"\"\n\n dataclass: bool\n", "path": "src/hydra_zen/typing/_implementations.py"}], "after_files": [{"content": "# Copyright (c) 2022 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\n# pyright: strict\n\nimport sys\nimport types\nfrom enum import Enum\nfrom pathlib import Path, PosixPath, WindowsPath\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ByteString,\n Callable,\n ClassVar,\n Dict,\n FrozenSet,\n Generic,\n List,\n Mapping,\n NewType,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom omegaconf import DictConfig, ListConfig\nfrom typing_extensions import (\n Final,\n Literal,\n ParamSpec,\n Protocol,\n Self,\n TypeAlias,\n TypedDict,\n runtime_checkable,\n)\n\n__all__ = [\n \"Just\",\n \"Builds\",\n \"PartialBuilds\",\n \"Partial\",\n \"Importable\",\n \"SupportedPrimitive\",\n \"ZenWrappers\",\n \"ZenPartialBuilds\",\n \"HydraPartialBuilds\",\n \"ZenConvert\",\n]\n\nP = ParamSpec(\"P\")\nR = TypeVar(\"R\")\n\n\nclass EmptyDict(TypedDict):\n pass\n\n\nT = TypeVar(\"T\", covariant=True)\nT2 = TypeVar(\"T2\")\nT3 = TypeVar(\"T3\")\n\nT4 = TypeVar(\"T4\", bound=Callable[..., Any])\n\n\nInstOrType: TypeAlias = Union[T, Type[T]]\n\n\nif TYPE_CHECKING:\n from dataclasses import Field # provided by typestub but not generic at runtime\nelse:\n\n class Field(Protocol[T2]):\n name: str\n type: Type[T2]\n default: T2\n default_factory: Callable[[], T2]\n repr: bool\n hash: Optional[bool]\n init: bool\n compare: bool\n metadata: Mapping[str, Any]\n\n\n@runtime_checkable\nclass Partial(Protocol[T2]):\n __call__: Callable[..., T2]\n\n @property\n def func(self) -> Callable[..., T2]:\n ...\n\n @property\n def args(self) -> Tuple[Any, ...]:\n ...\n\n @property\n def keywords(self) -> Dict[str, Any]:\n ...\n\n def __new__(\n cls: Type[Self], __func: Callable[..., T2], *args: Any, **kwargs: Any\n ) -> Self:\n ...\n\n if TYPE_CHECKING and sys.version_info >= (3, 9): # pragma: no cover\n\n def __class_getitem__(cls, item: Any) -> types.GenericAlias:\n ...\n\n\nInterpStr = NewType(\"InterpStr\", str)\n\n\nclass DataClass_(Protocol):\n # doesn't provide __init__, __getattribute__, etc.\n __dataclass_fields__: ClassVar[Dict[str, Field[Any]]]\n\n\nclass DataClass(DataClass_, Protocol):\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n ...\n\n def __getattribute__(self, __name: str) -> Any:\n ...\n\n def __setattr__(self, __name: str, __value: Any) -> None:\n ...\n\n\n@runtime_checkable\nclass Builds(DataClass, Protocol[T]):\n _target_: ClassVar[str]\n\n\nclass BuildsWithSig(Builds[T], Protocol[T, P]):\n def __init__(self, *args: P.args, **kwds: P.kwargs):\n ...\n\n\n@runtime_checkable\nclass Just(Builds[T], Protocol[T]):\n path: ClassVar[str] # interpolated string for importing obj\n _target_: ClassVar[Literal[\"hydra_zen.funcs.get_obj\"]] = \"hydra_zen.funcs.get_obj\"\n\n\nclass ZenPartialMixin(Protocol[T]):\n _zen_target: ClassVar[str]\n _zen_partial: ClassVar[Literal[True]] = True\n\n\nclass HydraPartialMixin(Protocol[T]):\n _partial_: ClassVar[Literal[True]] = True\n\n\n@runtime_checkable\nclass ZenPartialBuilds(Builds[T], ZenPartialMixin[T], Protocol[T]):\n _target_: ClassVar[\n Literal[\"hydra_zen.funcs.zen_processing\"]\n ] = \"hydra_zen.funcs.zen_processing\"\n\n\n@runtime_checkable\nclass HydraPartialBuilds(Builds[T], HydraPartialMixin[T], Protocol[T]):\n ...\n\n\n# Necessary, but not 
sufficient, check for PartialBuilds; useful for creating\n# non-overlapping overloads\nIsPartial: TypeAlias = Union[ZenPartialMixin[T], HydraPartialMixin[T]]\n\nPartialBuilds: TypeAlias = Union[ZenPartialBuilds[T], HydraPartialBuilds[T]]\n\n\n@runtime_checkable\nclass HasTarget(Protocol):\n _target_: str\n\n\nImportable = TypeVar(\"Importable\", bound=Callable[..., Any])\n\n_HydraPrimitive: TypeAlias = Union[\n bool, None, int, float, str, ByteString, Path, WindowsPath, PosixPath\n]\n\n_SupportedViaBuilds = Union[\n Partial[Any],\n range,\n Set[Any],\n]\n\n_SupportedPrimitive: TypeAlias = Union[\n _HydraPrimitive,\n ListConfig,\n DictConfig,\n Callable[..., Any],\n Enum,\n DataClass_,\n complex,\n _SupportedViaBuilds,\n EmptyDict, # not covered by Mapping[..., ...]]\n]\n\nif TYPE_CHECKING:\n SupportedPrimitive: TypeAlias = Union[\n _SupportedPrimitive,\n FrozenSet[\"SupportedPrimitive\"],\n # Even thought this is redundant with Sequence, it seems to\n # be needed for pyright to do proper checking of tuple contents\n Tuple[\"SupportedPrimitive\", ...],\n # Mutable generic containers need to be invariant, so\n # we have to settle for Sequence/Mapping. While this\n # is overly permissive in terms of sequence-type, it\n # at least affords quality checking of sequence content\n Sequence[\"SupportedPrimitive\"],\n # Mapping is covariant only in value\n Mapping[Any, \"SupportedPrimitive\"],\n ]\nelse:\n # cleans up annotations for REPLs\n SupportedPrimitive = TypeVar(\"SupportedPrimitive\")\n\n\nZenWrapper: TypeAlias = Union[\n None,\n Builds[Callable[[T4], T4]],\n PartialBuilds[Callable[[T4], T4]],\n Just[Callable[[T4], T4]],\n Type[Builds[Callable[[T4], T4]]],\n Type[PartialBuilds[Callable[[T4], T4]]],\n Type[Just[Callable[[T4], T4]]],\n Callable[[T4], T4],\n str,\n]\nif TYPE_CHECKING:\n ZenWrappers: TypeAlias = Union[ZenWrapper[T4], Sequence[ZenWrapper[T4]]]\nelse:\n # cleans up annotations for REPLs\n class ZenWrappers(Generic[T2]): # pragma: no cover\n pass\n\n\nDefaultsList = List[\n Union[str, DataClass_, Mapping[str, Union[None, str, Sequence[str]]]]\n]\n\n\n# Lists all zen-convert settings and their types. Not part of public API\nclass AllConvert(TypedDict, total=True):\n dataclass: bool\n\n\n# used for runtime type-checking\nconvert_types: Final = {\"dataclass\": bool}\n\nGroupName: TypeAlias = Optional[str]\nNodeName: TypeAlias = str\nNode: TypeAlias = Any\n\n\n# TODO: make immutable\nclass StoreEntry(TypedDict):\n name: NodeName\n group: GroupName\n package: Optional[str]\n provider: Optional[str]\n node: Node\n\n\nclass ZenConvert(TypedDict, total=False):\n \"\"\"A TypedDict that provides a type-checked interface for specifying zen-convert\n options that configure the hydra-zen config-creation functions (e.g., `builds`,\n `just`, and `make_config`).\n\n Note that, at runtime, `ZenConvert` is simply a dictionary with type-annotations. There is no enforced runtime validation of its keys and values.\n\n Parameters\n ----------\n dataclass : bool\n If `True` any dataclass type/instance without a `_target_` field is\n automatically converted to a targeted config that will instantiate to that type/\n instance. Otherwise the dataclass type/instance will be passed through as-is.\n\n Note that this only works with statically-defined dataclass types, whereas\n :func:`~hydra_zen.make_config` and :py:func:`dataclasses.make_dataclass`\n dynamically generate dataclass types. 
Additionally, this feature is not\n compatible with a dataclass instance whose type possesses an `InitVar` field.\n\n Examples\n --------\n >>> from hydra_zen.typing import ZenConvert as zc\n >>> zc()\n {}\n >>> zc(dataclass=True)\n {\"dataclass\": True}\n >>> # static type-checker will raise, but runtime will not\n >>> zc(apple=1) # type: ignore\n {\"apple\": 1}\n\n **Configuring dataclass auto-config behaviors**\n\n >>> from hydra_zen import instantiate as I\n >>> from hydra_zen import builds, just\n >>> from dataclasses import dataclass\n >>> @dataclass\n ... class B:\n ... x: int\n >>> b = B(x=1)\n\n >>> I(just(b))\n B(x=1)\n >>> I(just(b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {\"x\": 1}\n\n >>> I(builds(dict, y=b))\n {'y': B(x=1)}\n >>> I(builds(dict, y=b, zen_convert=zc(dataclass=False))) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n\n >>> I(make_config(y=b)) # returns omegaconf.DictConfig\n {'y': {'x': 1}}\n >>> I(make_config(y=b, zen_convert=zc(dataclass=True), hydra_convert=\"all\"))\n {'y': B(x=1)}\n\n Auto-config support does not work with dynamically-generated dataclass types\n\n >>> just(make_config(z=1))\n HydraZenUnsupportedPrimitiveError: ...\n >>> I(just(make_config(z=1), zen_convert=zc(dataclass=False)))\n {'z': 1}\n\n A dataclass with a `_target_` field will not be converted:\n\n >>> @dataclass\n ... class BuildsStr:\n ... _target_: str = 'builtins.str'\n ...\n >>> BuildsStr is just(BuildsStr)\n True\n >>> (builds_str := BuildsStr()) is just(builds_str)\n True\n \"\"\"\n\n dataclass: bool\n", "path": "src/hydra_zen/typing/_implementations.py"}]} | 3,740 | 146 |
gh_patches_debug_14548 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add time units to the stats graphs
This improvement was requested by an author:
> It would also be nice to put the units next to the time. Because right now I see 03:51, but I don't know whether that's min:sec or sec:ms.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/seconds_to_duration.py`
Content:
```
1 from django import template
2 import datetime
3
4 register = template.Library()
5
6
7 # TODO add unit test
8 @register.filter('seconds_to_duration')
9 def seconds_to_duration(value):
10 """
11 Display a human-readable reading-time (or any other duration)
12 from a duration in seconds.
13 """
14 if value <= 0:
15 return ''
16
17 duration = datetime.timedelta(seconds=value)
18 return str(duration)
19
```
--- END FILES ---
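Note that the filter above returns `str(datetime.timedelta(...))`, which renders values like `0:03:51` with no unit labels; that is exactly the ambiguity the reporter describes. As a point of comparison, here is a minimal sketch of unit-labelled formatting; the function name and exact output format are illustrative assumptions, not code from the zds repository.

```python
import datetime


def format_duration(seconds: int) -> str:
    """Format a duration in seconds as '3m51s' or '1h02m03s' so the units are explicit."""
    if seconds <= 0:
        return ''
    total = int(datetime.timedelta(seconds=seconds).total_seconds())
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return f'{hours}h{minutes:02d}m{secs:02d}s'
    return f'{minutes}m{secs:02d}s'


print(format_duration(231))   # -> 3m51s
print(format_duration(3723))  # -> 1h02m03s
```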
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py
--- a/zds/utils/templatetags/seconds_to_duration.py
+++ b/zds/utils/templatetags/seconds_to_duration.py
@@ -4,6 +4,14 @@
register = template.Library()
+# https://stackoverflow.com/a/8907269/2226755
+def strfdelta(tdelta, fmt):
+ d = {'days': tdelta.days}
+ d['hours'], rem = divmod(tdelta.seconds, 3600)
+ d['minutes'], d['seconds'] = divmod(rem, 60)
+ return fmt.format(**d)
+
+
# TODO add unit test
@register.filter('seconds_to_duration')
def seconds_to_duration(value):
@@ -15,4 +23,7 @@
return ''
duration = datetime.timedelta(seconds=value)
- return str(duration)
+ if duration < 3600
+ return strfdelta(duration, '{minutes}m{seconds}s')
+ else
+ return strfdelta(duration, '{hours}h{minutes}m{seconds}s')
| {"golden_diff": "diff --git a/zds/utils/templatetags/seconds_to_duration.py b/zds/utils/templatetags/seconds_to_duration.py\n--- a/zds/utils/templatetags/seconds_to_duration.py\n+++ b/zds/utils/templatetags/seconds_to_duration.py\n@@ -4,6 +4,14 @@\n register = template.Library()\n \n \n+# https://stackoverflow.com/a/8907269/2226755\n+def strfdelta(tdelta, fmt):\n+ d = {'days': tdelta.days}\n+ d['hours'], rem = divmod(tdelta.seconds, 3600)\n+ d['minutes'], d['seconds'] = divmod(rem, 60)\n+ return fmt.format(**d)\n+\n+\n # TODO add unit test\n @register.filter('seconds_to_duration')\n def seconds_to_duration(value):\n@@ -15,4 +23,7 @@\n return ''\n \n duration = datetime.timedelta(seconds=value)\n- return str(duration)\n+ if duration < 3600\n+ return strfdelta(duration, '{minutes}m{seconds}s')\n+ else\n+ return strfdelta(duration, '{hours}h{minutes}m{seconds}s')\n", "issue": "Ajouter les unit\u00e9s temporelles sur les graphs des stats\nCete am\u00e9lioration est demand\u00e9e par un auteur : \r\n\r\n> Ce serait bien, aussi, de mettre les unit\u00e9s \u00e0 c\u00f4t\u00e9 du temps. Parce que l\u00e0, j\u2019ai 03:51, mais je ne sais pas si c\u2019est min:sec ou sec:mil\n", "before_files": [{"content": "from django import template\nimport datetime\n\nregister = template.Library()\n\n\n# TODO add unit test\[email protected]('seconds_to_duration')\ndef seconds_to_duration(value):\n \"\"\"\n Display a human-readable reading-time (or any other duration)\n from a duration in seconds.\n \"\"\"\n if value <= 0:\n return ''\n\n duration = datetime.timedelta(seconds=value)\n return str(duration)\n", "path": "zds/utils/templatetags/seconds_to_duration.py"}], "after_files": [{"content": "from django import template\nimport datetime\n\nregister = template.Library()\n\n\n# https://stackoverflow.com/a/8907269/2226755\ndef strfdelta(tdelta, fmt):\n d = {'days': tdelta.days}\n d['hours'], rem = divmod(tdelta.seconds, 3600)\n d['minutes'], d['seconds'] = divmod(rem, 60)\n return fmt.format(**d)\n\n\n# TODO add unit test\[email protected]('seconds_to_duration')\ndef seconds_to_duration(value):\n \"\"\"\n Display a human-readable reading-time (or any other duration)\n from a duration in seconds.\n \"\"\"\n if value <= 0:\n return ''\n\n duration = datetime.timedelta(seconds=value)\n if duration < 3600\n return strfdelta(duration, '{minutes}m{seconds}s')\n else\n return strfdelta(duration, '{hours}h{minutes}m{seconds}s') \n", "path": "zds/utils/templatetags/seconds_to_duration.py"}]} | 457 | 276 |
gh_patches_debug_20717 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-865 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash when trying to delete a region
### Describe the Bug
Deleting a region via this page:

fails with a `FieldError`:

### Steps to Reproduce
1. Go to the regions tab
2. Edit any region
3. Scroll down and click on delete. Click ok on the warning.
4. See the error
### Expected Behavior
The server does not fail and deletes the region
### Actual Behavior
The server crashes and does not delete the region
### Additional Information
/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/views/regions/region_actions.py`
Content:
```
1 """
2 This module contains view actions for region objects.
3 """
4 import logging
5
6 from django.contrib import messages
7 from django.contrib.auth import get_user_model
8 from django.contrib.auth.decorators import login_required, permission_required
9 from django.shortcuts import redirect
10 from django.utils.translation import ugettext as _
11 from django.views.decorators.http import require_POST
12
13 from ...decorators import staff_required
14 from ...models import Region
15
16 logger = logging.getLogger(__name__)
17
18
19 @require_POST
20 @login_required
21 @staff_required
22 @permission_required("cms.manage_regions", raise_exception=True)
23 # pylint: disable=unused-argument
24 def delete_region(request, *args, **kwargs):
25 """
26 This view deletes a region. All content is cascade deleted. Region users, who are not assigned to any other region,
27 are manually removed.
28
29 :param request: The current request
30 :type request: ~django.http.HttpResponse
31
32 :param args: The supplied arguments
33 :type args: list
34
35 :param kwargs: The supplied keyword arguments
36 :type kwargs: dict
37
38 :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to manage regions
39
40 :return: A redirection to the media library
41 :rtype: ~django.http.HttpResponseRedirect
42 """
43
44 region = Region.get_current_region(request)
45 # Remove hierarchy to prevent ProtectedError when children get deleted before their parents
46 region.pages.update(parent=None)
47 region.language_tree_nodes.update(parent=None)
48 # Delete region and cascade delete all contents
49 deleted_objects = region.delete()
50 logger.info(
51 "%r deleted %r, cascade deleted objects: %r",
52 request.user.profile,
53 region,
54 deleted_objects,
55 )
56 # Get orphan users who aren't superuser or staff and don't have a region assigned
57 # (Creating users with these combination is impossible, so they were region users of the deleted region before)
58 orphan_users = get_user_model().objects.filter(
59 is_superuser=False, is_staff=False, profile__regions=None
60 )
61 if orphan_users.exists():
62 logger.info(
63 "Deleted orphan users: %r",
64 orphan_users,
65 )
66 orphan_users.delete()
67
68 messages.success(request, _("Region was successfully deleted"))
69
70 return redirect("regions")
71
```
Path: `src/cms/models/languages/language_tree_node.py`
Content:
```
1 from mptt.fields import TreeForeignKey
2 from mptt.models import MPTTModel, raise_if_unsaved
3
4 from django.db import models
5 from django.utils import timezone
6 from django.utils.translation import ugettext_lazy as _
7
8 from .language import Language
9 from ..regions.region import Region
10
11
12 class LanguageTreeNode(MPTTModel):
13 """
14 Data model representing a region's language tree. Each tree node is a single object instance and the whole tree is
15 identified by the root node. The base functionality inherits from the package `django-mptt
16 <https://django-mptt.readthedocs.io/en/latest/index.html>`_ (Modified Preorder Tree Traversal).
17 """
18
19 language = models.ForeignKey(
20 Language,
21 on_delete=models.PROTECT,
22 related_name="language_tree_nodes",
23 verbose_name=_("language"),
24 )
25 parent = TreeForeignKey(
26 "self",
27 blank=True,
28 null=True,
29 on_delete=models.PROTECT,
30 related_name="children",
31 verbose_name=_("source language"),
32 )
33 region = models.ForeignKey(
34 Region,
35 on_delete=models.CASCADE,
36 related_name="language_tree_nodes",
37 verbose_name=_("region"),
38 )
39 visible = models.BooleanField(
40 default=True,
41 verbose_name=_("visible"),
42 help_text=_("Defined if this language should be delivered via the API"),
43 )
44 active = models.BooleanField(
45 default=True,
46 verbose_name=_("active"),
47 help_text=_("Defined if content in this language can be created or edited"),
48 )
49 created_date = models.DateTimeField(
50 default=timezone.now,
51 verbose_name=_("creation date"),
52 )
53 last_updated = models.DateTimeField(
54 auto_now=True,
55 verbose_name=_("modification date"),
56 )
57
58 @property
59 def slug(self):
60 """
61 Returns the slug of this node's language
62
63 :return: The language slug of this language node
64 :rtype: str
65 """
66 return self.language.slug
67
68 @property
69 def native_name(self):
70 """
71 Returns the native name of this node's language
72
73 :return: The native name of this language node
74 :rtype: str
75 """
76 return self.language.native_name
77
78 @property
79 def english_name(self):
80 """
81 Returns the name of this node's language in English
82
83 :return: The English name of this language node
84 :rtype: str
85 """
86 return self.language.english_name
87
88 @property
89 def translated_name(self):
90 """
91 Returns the name of this node's language in the current backend language
92
93 :return: The translated name of this language node
94 :rtype: str
95 """
96 return self.language.translated_name
97
98 @property
99 def text_direction(self):
100 """
101 Returns the text direction (e.g. left-to-right) of this node's language
102
103 :return: The text direction name of this language node
104 :rtype: str
105 """
106 return self.language.text_direction
107
108 @property
109 def depth(self):
110 """
111 Counts how many ancestors the node has. If the node is the root node, its depth is `0`.
112
113 :return: The depth of this language node
114 :rtype: str
115 """
116 return len(self.get_ancestors())
117
118 # Explicitly define functions to show documentation of base model
119 @raise_if_unsaved
120 def get_ancestors(self, ascending=False, include_self=False):
121 return super().get_ancestors(ascending, include_self)
122
123 # pylint: disable=useless-super-delegation
124 @raise_if_unsaved
125 def get_family(self):
126 return super().get_family()
127
128 @raise_if_unsaved
129 def get_children(self):
130 return super().get_children()
131
132 @raise_if_unsaved
133 def get_descendants(self, include_self=False):
134 return super().get_descendants(include_self)
135
136 def get_descendant_count(self):
137 return super().get_descendant_count()
138
139 @raise_if_unsaved
140 def get_root(self):
141 return super().get_root()
142
143 # pylint: disable=useless-super-delegation
144 def insert_at(
145 self,
146 target,
147 position="first-child",
148 save=False,
149 allow_existing_pk=False,
150 refresh_target=True,
151 ):
152 return super().insert_at(
153 target, position, save, allow_existing_pk, refresh_target
154 )
155
156 # pylint: disable=useless-super-delegation
157 def move_to(self, target, position="first-child"):
158 return super().move_to(target, position)
159
160 def __str__(self):
161 """
162 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``LanguageTreeNode object (id)``.
163 It is used in the Django admin backend and as label for ModelChoiceFields.
164
165 :return: A readable string representation of the language node
166 :rtype: str
167 """
168 return self.translated_name
169
170 def __repr__(self):
171 """
172 This overwrites the default Django ``__repr__()`` method which would return ``<LanguageTreeNode: LanguageTreeNode object (id)>``.
173 It is used for logging.
174
175 :return: The canonical string representation of the language node
176 :rtype: str
177 """
178 return f"<LanguageTreeNode (id: {self.id}, language: {self.language.slug}, region: {self.region})>"
179
180 class Meta:
181 #: The verbose name of the model
182 verbose_name = _("language tree node")
183 #: The plural verbose name of the model
184 verbose_name_plural = _("language tree nodes")
185 #: There cannot be two language tree nodes with the same region and language
186 unique_together = (
187 (
188 "language",
189 "region",
190 ),
191 )
192 #: The default permissions for this model
193 default_permissions = ()
194 #: The custom permissions for this model
195 permissions = (("manage_language_tree", "Can manage language tree"),)
196 #: The fields which are used to sort the returned objects of a QuerySet
197 ordering = ["region", "level", "parent"]
198
```
--- END FILES ---
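For orientation: `delete_region` above already detaches the PROTECT-ed self-references (`pages` and `language_tree_nodes`) before calling `region.delete()`. The sketch below illustrates that detach-then-delete pattern with hypothetical models under an assumed app label `demo`; it is context for why those `update(parent=None)` calls exist, not a claim about what causes or fixes the `FieldError`.

```python
# Hypothetical models (assumes a configured Django project; the app label "demo" is made up).
from django.db import models


class Project(models.Model):
    class Meta:
        app_label = "demo"


class Task(models.Model):
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="tasks")
    parent = models.ForeignKey(
        "self", null=True, blank=True, on_delete=models.PROTECT, related_name="children"
    )

    class Meta:
        app_label = "demo"


def delete_project(project: Project) -> None:
    # Null the self-referencing PROTECT foreign key first; otherwise the cascade
    # delete can hit ProtectedError when child tasks are collected before their parents.
    project.tasks.update(parent=None)
    project.delete()  # cascades to Task via on_delete=CASCADE
```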
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/models/languages/language_tree_node.py b/src/cms/models/languages/language_tree_node.py
--- a/src/cms/models/languages/language_tree_node.py
+++ b/src/cms/models/languages/language_tree_node.py
@@ -194,4 +194,4 @@
#: The custom permissions for this model
permissions = (("manage_language_tree", "Can manage language tree"),)
#: The fields which are used to sort the returned objects of a QuerySet
- ordering = ["region", "level", "parent"]
+ ordering = ["region", "level", "parent__pk"]
diff --git a/src/cms/views/regions/region_actions.py b/src/cms/views/regions/region_actions.py
--- a/src/cms/views/regions/region_actions.py
+++ b/src/cms/views/regions/region_actions.py
@@ -45,6 +45,8 @@
# Remove hierarchy to prevent ProtectedError when children get deleted before their parents
region.pages.update(parent=None)
region.language_tree_nodes.update(parent=None)
+ # Prevent ProtectedError when location gets deleted before their events
+ region.events.update(location=None)
# Delete region and cascade delete all contents
deleted_objects = region.delete()
logger.info(
| {"golden_diff": "diff --git a/src/cms/models/languages/language_tree_node.py b/src/cms/models/languages/language_tree_node.py\n--- a/src/cms/models/languages/language_tree_node.py\n+++ b/src/cms/models/languages/language_tree_node.py\n@@ -194,4 +194,4 @@\n #: The custom permissions for this model\n permissions = ((\"manage_language_tree\", \"Can manage language tree\"),)\n #: The fields which are used to sort the returned objects of a QuerySet\n- ordering = [\"region\", \"level\", \"parent\"]\n+ ordering = [\"region\", \"level\", \"parent__pk\"]\ndiff --git a/src/cms/views/regions/region_actions.py b/src/cms/views/regions/region_actions.py\n--- a/src/cms/views/regions/region_actions.py\n+++ b/src/cms/views/regions/region_actions.py\n@@ -45,6 +45,8 @@\n # Remove hierarchy to prevent ProtectedError when children get deleted before their parents\n region.pages.update(parent=None)\n region.language_tree_nodes.update(parent=None)\n+ # Prevent ProtectedError when location gets deleted before their events\n+ region.events.update(location=None)\n # Delete region and cascade delete all contents\n deleted_objects = region.delete()\n logger.info(\n", "issue": "Crash when trying to delete a region\n### Describe the Bug\r\nDeleting a region via this page: \r\n\r\nfails with a `FieldError`:\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Go to the regions tab\r\n2. Edit any region\r\n3. Scroll down and click on delete. Click ok on the warning.\r\n4. See the error\r\n\r\n### Expected Behavior\r\nThe server does not fail and deletes the region\r\n\r\n\r\n### Actual Behavior\r\nThe server crashes and does not delete the region\r\n\r\n\r\n### Additional Information\r\n/\n", "before_files": [{"content": "\"\"\"\nThis module contains view actions for region objects.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom ...decorators import staff_required\nfrom ...models import Region\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@login_required\n@staff_required\n@permission_required(\"cms.manage_regions\", raise_exception=True)\n# pylint: disable=unused-argument\ndef delete_region(request, *args, **kwargs):\n \"\"\"\n This view deletes a region. All content is cascade deleted. 
Region users, who are not assigned to any other region,\n are manually removed.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to manage regions\n\n :return: A redirection to the media library\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = Region.get_current_region(request)\n # Remove hierarchy to prevent ProtectedError when children get deleted before their parents\n region.pages.update(parent=None)\n region.language_tree_nodes.update(parent=None)\n # Delete region and cascade delete all contents\n deleted_objects = region.delete()\n logger.info(\n \"%r deleted %r, cascade deleted objects: %r\",\n request.user.profile,\n region,\n deleted_objects,\n )\n # Get orphan users who aren't superuser or staff and don't have a region assigned\n # (Creating users with these combination is impossible, so they were region users of the deleted region before)\n orphan_users = get_user_model().objects.filter(\n is_superuser=False, is_staff=False, profile__regions=None\n )\n if orphan_users.exists():\n logger.info(\n \"Deleted orphan users: %r\",\n orphan_users,\n )\n orphan_users.delete()\n\n messages.success(request, _(\"Region was successfully deleted\"))\n\n return redirect(\"regions\")\n", "path": "src/cms/views/regions/region_actions.py"}, {"content": "from mptt.fields import TreeForeignKey\nfrom mptt.models import MPTTModel, raise_if_unsaved\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .language import Language\nfrom ..regions.region import Region\n\n\nclass LanguageTreeNode(MPTTModel):\n \"\"\"\n Data model representing a region's language tree. Each tree node is a single object instance and the whole tree is\n identified by the root node. 
The base functionality inherits from the package `django-mptt\n <https://django-mptt.readthedocs.io/en/latest/index.html>`_ (Modified Preorder Tree Traversal).\n \"\"\"\n\n language = models.ForeignKey(\n Language,\n on_delete=models.PROTECT,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"language\"),\n )\n parent = TreeForeignKey(\n \"self\",\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"children\",\n verbose_name=_(\"source language\"),\n )\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"region\"),\n )\n visible = models.BooleanField(\n default=True,\n verbose_name=_(\"visible\"),\n help_text=_(\"Defined if this language should be delivered via the API\"),\n )\n active = models.BooleanField(\n default=True,\n verbose_name=_(\"active\"),\n help_text=_(\"Defined if content in this language can be created or edited\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n @property\n def slug(self):\n \"\"\"\n Returns the slug of this node's language\n\n :return: The language slug of this language node\n :rtype: str\n \"\"\"\n return self.language.slug\n\n @property\n def native_name(self):\n \"\"\"\n Returns the native name of this node's language\n\n :return: The native name of this language node\n :rtype: str\n \"\"\"\n return self.language.native_name\n\n @property\n def english_name(self):\n \"\"\"\n Returns the name of this node's language in English\n\n :return: The English name of this language node\n :rtype: str\n \"\"\"\n return self.language.english_name\n\n @property\n def translated_name(self):\n \"\"\"\n Returns the name of this node's language in the current backend language\n\n :return: The translated name of this language node\n :rtype: str\n \"\"\"\n return self.language.translated_name\n\n @property\n def text_direction(self):\n \"\"\"\n Returns the text direction (e.g. left-to-right) of this node's language\n\n :return: The text direction name of this language node\n :rtype: str\n \"\"\"\n return self.language.text_direction\n\n @property\n def depth(self):\n \"\"\"\n Counts how many ancestors the node has. 
If the node is the root node, its depth is `0`.\n\n :return: The depth of this language node\n :rtype: str\n \"\"\"\n return len(self.get_ancestors())\n\n # Explicitly define functions to show documentation of base model\n @raise_if_unsaved\n def get_ancestors(self, ascending=False, include_self=False):\n return super().get_ancestors(ascending, include_self)\n\n # pylint: disable=useless-super-delegation\n @raise_if_unsaved\n def get_family(self):\n return super().get_family()\n\n @raise_if_unsaved\n def get_children(self):\n return super().get_children()\n\n @raise_if_unsaved\n def get_descendants(self, include_self=False):\n return super().get_descendants(include_self)\n\n def get_descendant_count(self):\n return super().get_descendant_count()\n\n @raise_if_unsaved\n def get_root(self):\n return super().get_root()\n\n # pylint: disable=useless-super-delegation\n def insert_at(\n self,\n target,\n position=\"first-child\",\n save=False,\n allow_existing_pk=False,\n refresh_target=True,\n ):\n return super().insert_at(\n target, position, save, allow_existing_pk, refresh_target\n )\n\n # pylint: disable=useless-super-delegation\n def move_to(self, target, position=\"first-child\"):\n return super().move_to(target, position)\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``LanguageTreeNode object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the language node\n :rtype: str\n \"\"\"\n return self.translated_name\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<LanguageTreeNode: LanguageTreeNode object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the language node\n :rtype: str\n \"\"\"\n return f\"<LanguageTreeNode (id: {self.id}, language: {self.language.slug}, region: {self.region})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"language tree node\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"language tree nodes\")\n #: There cannot be two language tree nodes with the same region and language\n unique_together = (\n (\n \"language\",\n \"region\",\n ),\n )\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_language_tree\", \"Can manage language tree\"),)\n #: The fields which are used to sort the returned objects of a QuerySet\n ordering = [\"region\", \"level\", \"parent\"]\n", "path": "src/cms/models/languages/language_tree_node.py"}], "after_files": [{"content": "\"\"\"\nThis module contains view actions for region objects.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.shortcuts import redirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.http import require_POST\n\nfrom ...decorators import staff_required\nfrom ...models import Region\n\nlogger = logging.getLogger(__name__)\n\n\n@require_POST\n@login_required\n@staff_required\n@permission_required(\"cms.manage_regions\", raise_exception=True)\n# pylint: disable=unused-argument\ndef delete_region(request, *args, **kwargs):\n \"\"\"\n This view deletes a region. All content is cascade deleted. 
Region users, who are not assigned to any other region,\n are manually removed.\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param args: The supplied arguments\n :type args: list\n\n :param kwargs: The supplied keyword arguments\n :type kwargs: dict\n\n :raises ~django.core.exceptions.PermissionDenied: If user does not have the permission to manage regions\n\n :return: A redirection to the media library\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n\n region = Region.get_current_region(request)\n # Remove hierarchy to prevent ProtectedError when children get deleted before their parents\n region.pages.update(parent=None)\n region.language_tree_nodes.update(parent=None)\n # Prevent ProtectedError when location gets deleted before their events\n region.events.update(location=None)\n # Delete region and cascade delete all contents\n deleted_objects = region.delete()\n logger.info(\n \"%r deleted %r, cascade deleted objects: %r\",\n request.user.profile,\n region,\n deleted_objects,\n )\n # Get orphan users who aren't superuser or staff and don't have a region assigned\n # (Creating users with these combination is impossible, so they were region users of the deleted region before)\n orphan_users = get_user_model().objects.filter(\n is_superuser=False, is_staff=False, profile__regions=None\n )\n if orphan_users.exists():\n logger.info(\n \"Deleted orphan users: %r\",\n orphan_users,\n )\n orphan_users.delete()\n\n messages.success(request, _(\"Region was successfully deleted\"))\n\n return redirect(\"regions\")\n", "path": "src/cms/views/regions/region_actions.py"}, {"content": "from mptt.fields import TreeForeignKey\nfrom mptt.models import MPTTModel, raise_if_unsaved\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .language import Language\nfrom ..regions.region import Region\n\n\nclass LanguageTreeNode(MPTTModel):\n \"\"\"\n Data model representing a region's language tree. Each tree node is a single object instance and the whole tree is\n identified by the root node. 
The base functionality inherits from the package `django-mptt\n <https://django-mptt.readthedocs.io/en/latest/index.html>`_ (Modified Preorder Tree Traversal).\n \"\"\"\n\n language = models.ForeignKey(\n Language,\n on_delete=models.PROTECT,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"language\"),\n )\n parent = TreeForeignKey(\n \"self\",\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"children\",\n verbose_name=_(\"source language\"),\n )\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"language_tree_nodes\",\n verbose_name=_(\"region\"),\n )\n visible = models.BooleanField(\n default=True,\n verbose_name=_(\"visible\"),\n help_text=_(\"Defined if this language should be delivered via the API\"),\n )\n active = models.BooleanField(\n default=True,\n verbose_name=_(\"active\"),\n help_text=_(\"Defined if content in this language can be created or edited\"),\n )\n created_date = models.DateTimeField(\n default=timezone.now,\n verbose_name=_(\"creation date\"),\n )\n last_updated = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"modification date\"),\n )\n\n @property\n def slug(self):\n \"\"\"\n Returns the slug of this node's language\n\n :return: The language slug of this language node\n :rtype: str\n \"\"\"\n return self.language.slug\n\n @property\n def native_name(self):\n \"\"\"\n Returns the native name of this node's language\n\n :return: The native name of this language node\n :rtype: str\n \"\"\"\n return self.language.native_name\n\n @property\n def english_name(self):\n \"\"\"\n Returns the name of this node's language in English\n\n :return: The English name of this language node\n :rtype: str\n \"\"\"\n return self.language.english_name\n\n @property\n def translated_name(self):\n \"\"\"\n Returns the name of this node's language in the current backend language\n\n :return: The translated name of this language node\n :rtype: str\n \"\"\"\n return self.language.translated_name\n\n @property\n def text_direction(self):\n \"\"\"\n Returns the text direction (e.g. left-to-right) of this node's language\n\n :return: The text direction name of this language node\n :rtype: str\n \"\"\"\n return self.language.text_direction\n\n @property\n def depth(self):\n \"\"\"\n Counts how many ancestors the node has. 
If the node is the root node, its depth is `0`.\n\n :return: The depth of this language node\n :rtype: str\n \"\"\"\n return len(self.get_ancestors())\n\n # Explicitly define functions to show documentation of base model\n @raise_if_unsaved\n def get_ancestors(self, ascending=False, include_self=False):\n return super().get_ancestors(ascending, include_self)\n\n # pylint: disable=useless-super-delegation\n @raise_if_unsaved\n def get_family(self):\n return super().get_family()\n\n @raise_if_unsaved\n def get_children(self):\n return super().get_children()\n\n @raise_if_unsaved\n def get_descendants(self, include_self=False):\n return super().get_descendants(include_self)\n\n def get_descendant_count(self):\n return super().get_descendant_count()\n\n @raise_if_unsaved\n def get_root(self):\n return super().get_root()\n\n # pylint: disable=useless-super-delegation\n def insert_at(\n self,\n target,\n position=\"first-child\",\n save=False,\n allow_existing_pk=False,\n refresh_target=True,\n ):\n return super().insert_at(\n target, position, save, allow_existing_pk, refresh_target\n )\n\n # pylint: disable=useless-super-delegation\n def move_to(self, target, position=\"first-child\"):\n return super().move_to(target, position)\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``LanguageTreeNode object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the language node\n :rtype: str\n \"\"\"\n return self.translated_name\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<LanguageTreeNode: LanguageTreeNode object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the language node\n :rtype: str\n \"\"\"\n return f\"<LanguageTreeNode (id: {self.id}, language: {self.language.slug}, region: {self.region})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"language tree node\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"language tree nodes\")\n #: There cannot be two language tree nodes with the same region and language\n unique_together = (\n (\n \"language\",\n \"region\",\n ),\n )\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_language_tree\", \"Can manage language tree\"),)\n #: The fields which are used to sort the returned objects of a QuerySet\n ordering = [\"region\", \"level\", \"parent__pk\"]\n", "path": "src/cms/models/languages/language_tree_node.py"}]} | 2,920 | 266 |
gh_patches_debug_7043 | rasdani/github-patches | git_diff | TabbycatDebate__tabbycat-1620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Summernote boxes don't show
I've had this happen a few times today; I'm not sure yet whether it's reliably reproducible. It only affects the develop branch; the master branch works fine. It is probably a dependency issue. Making a note so we remember to do something about it before Nebelung.


Browser shows error:
> Refused to display 'https://hidden-caverns-06472.herokuapp.com/summernote/editor/id_tournament_staff/' in a frame because it set 'X-Frame-Options' to 'deny'.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tabbycat/settings/core.py`
Content:
```
1 import os
2
3 from django.contrib.messages import constants as messages
4 from django.utils.translation import gettext_lazy as _
5
6
7 BASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))
8 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
9
10 # ==============================================================================
11 # Overwritten in local.py or heroku.py
12 # ==============================================================================
13
14 ADMINS = ('Philip and Chuan-Zheng', '[email protected]'),
15 MANAGERS = ADMINS
16 DEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False
17 ENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config
18 DISABLE_SENTRY = True # Overriden in Heroku config
19 SECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'
20
21 # ==============================================================================
22 # Version
23 # ==============================================================================
24
25 TABBYCAT_VERSION = '2.5.0-dev'
26 TABBYCAT_CODENAME = 'Nebelung'
27 READTHEDOCS_VERSION = 'v2.5.0-dev'
28
29 # ==============================================================================
30 # Internationalization and Localization
31 # ==============================================================================
32
33 USE_I18N = True
34 USE_TZ = True
35 USE_L10N = True
36 LANGUAGE_CODE = 'en'
37 TIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')
38
39 LOCALE_PATHS = [
40 os.path.join(BASE_DIR, 'locale'),
41 ]
42
43 # Languages that should be available in the switcher
44 EXTRA_LANG_INFO = {
45 'ms': {
46 'bidi': False,
47 'code': 'ms',
48 'name': 'Malay',
49 'name_local': 'Bahasa Melayu', #unicode codepoints here
50 },
51 }
52
53 # Add custom languages not provided by Django
54 import django.conf.locale
55 LANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)
56 django.conf.locale.LANG_INFO = LANG_INFO
57
58 LANGUAGES = [
59 ('ar', _('Arabic')),
60 ('bn', _('Bengali')),
61 ('en', _('English')),
62 ('es', _('Spanish')),
63 ('fr', _('French')),
64 ('ja', _('Japanese')),
65 ('ms', _('Malay')),
66 ('pt', _('Portuguese')),
67 ('ru', _('Russian')),
68 ('zh-hans', _('Simplified Chinese')),
69 ]
70
71 STATICI18N_ROOT = os.path.join(BASE_DIR, "locale")
72
73 FORMAT_MODULE_PATH = [
74 'utils.formats',
75 ]
76
77 # ==============================================================================
78 # Django-specific Modules
79 # ==============================================================================
80
81 MIDDLEWARE = [
82 'django.middleware.gzip.GZipMiddleware',
83 'django.middleware.security.SecurityMiddleware',
84 'django.contrib.sessions.middleware.SessionMiddleware',
85 # User language preferences; must be after Session
86 'django.middleware.locale.LocaleMiddleware',
87 # Set Etags; i.e. cached requests not on network; must precede Common
88 'django.middleware.http.ConditionalGetMiddleware',
89 'django.middleware.common.CommonMiddleware',
90 # Must be after SessionMiddleware
91 'django.contrib.auth.middleware.AuthenticationMiddleware',
92 'django.middleware.common.CommonMiddleware',
93 # Must be after SessionMiddleware
94 'django.contrib.messages.middleware.MessageMiddleware',
95 'django.middleware.clickjacking.XFrameOptionsMiddleware',
96 'utils.middleware.DebateMiddleware',
97 ]
98
99 TABBYCAT_APPS = (
100 'actionlog',
101 'adjallocation',
102 'adjfeedback',
103 'api',
104 'availability',
105 'breakqual',
106 'checkins',
107 'divisions', # obsolete
108 'draw',
109 'motions',
110 'options',
111 'participants',
112 'printing',
113 'privateurls',
114 'results',
115 'tournaments',
116 'venues',
117 'utils',
118 'users',
119 'standings',
120 'notifications',
121 'importer',
122 )
123
124 INSTALLED_APPS = (
125 'jet',
126 'django.contrib.admin',
127 'django.contrib.auth',
128 'django.contrib.contenttypes',
129 'django.contrib.sessions',
130 'channels', # For Websockets / real-time connections (above whitenoise)
131 'django.contrib.staticfiles',
132 'django.contrib.humanize',
133 'django_summernote', # Keep above our apps; as we unregister an admin model
134 'django.contrib.messages') \
135 + TABBYCAT_APPS + (
136 'dynamic_preferences',
137 'django_extensions', # For Secret Generation Command
138 'gfklookupwidget',
139 'formtools',
140 'statici18n', # Compile js translations as static file; saving requests
141 'polymorphic',
142 'rest_framework',
143 'rest_framework.authtoken',
144 )
145
146 ROOT_URLCONF = 'urls'
147 LOGIN_REDIRECT_URL = '/'
148 FIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )
149 SILENCED_SYSTEM_CHECKS = ('urls.W002',)
150
151 # ==============================================================================
152 # Templates
153 # ==============================================================================
154
155 TEMPLATES = [
156 {
157 'BACKEND': 'django.template.backends.django.DjangoTemplates',
158 'DIRS': [os.path.join(BASE_DIR, 'templates')],
159 'OPTIONS': {
160 'context_processors': [
161 'django.contrib.auth.context_processors.auth',
162 'django.contrib.messages.context_processors.messages',
163 'django.template.context_processors.debug',
164 'django.template.context_processors.i18n',
165 'django.template.context_processors.media',
166 'django.template.context_processors.static',
167 'django.template.context_processors.tz',
168 'django.template.context_processors.request', # for Jet
169 'utils.context_processors.debate_context', # for tournament config vars
170 'django.template.context_processors.i18n' # for serving static language translations,
171 ],
172 'loaders': [
173 ('django.template.loaders.cached.Loader', [
174 'django.template.loaders.filesystem.Loader',
175 'django.template.loaders.app_directories.Loader',
176 ]),
177 ],
178 }
179 },
180 ]
181
182 # ==============================================================================
183 # Caching
184 # ==============================================================================
185
186 PUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1))
187 PUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))
188 TAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))
189
190 # Default non-heroku cache is to use local memory
191 CACHES = {
192 'default': {
193 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
194 },
195 }
196
197 SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
198
199 # ==============================================================================
200 # Static Files and Compilation
201 # ==============================================================================
202
203 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
204 STATIC_URL = '/static/'
205
206 STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )
207
208 STATICFILES_FINDERS = (
209 'django.contrib.staticfiles.finders.FileSystemFinder',
210 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
211 )
212
213 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
214
215 # ==============================================================================
216 # Logging
217 # ==============================================================================
218
219 MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
220
221 LOGGING = {
222 'version': 1,
223 'disable_existing_loggers': False,
224 'handlers': {
225 'console': {
226 'class': 'logging.StreamHandler',
227 'formatter': 'standard',
228 },
229 },
230 'loggers': {
231 'django': {
232 'handlers': ['console'],
233 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
234 },
235 'sentry.errors': {
236 'level': 'INFO',
237 'handlers': ['console'],
238 'propagate': False,
239 },
240 },
241 'formatters': {
242 'standard': {
243 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',
244 },
245 },
246 }
247
248 for app in TABBYCAT_APPS:
249 LOGGING['loggers'][app] = {
250 'handlers': ['console'],
251 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
252 }
253
254 # ==============================================================================
255 # Messages
256 # ==============================================================================
257
258 MESSAGE_TAGS = {messages.ERROR: 'danger', }
259
260 # ==============================================================================
261 # Summernote (WYSWIG)
262 # ==============================================================================
263
264 SUMMERNOTE_THEME = 'bs4' # Bootstrap 4
265
266 SUMMERNOTE_CONFIG = {
267 'width': '100%',
268 'height': '480',
269 'toolbar': [
270 ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],
271 ['para', ['ul', 'ol']],
272 ['insert', ['link', 'picture']],
273 ['misc', ['undo', 'redo', 'codeview']],
274 ],
275 'disable_upload': True,
276 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,
277 }
278
279 # ==============================================================================
280 # Database
281 # ==============================================================================
282
283 DATABASES = {
284 'default': {
285 'ENGINE': 'django.db.backends.postgresql',
286 },
287 }
288
289 # ==============================================================================
290 # Channels
291 # ==============================================================================
292
293 ASGI_APPLICATION = "routing.application"
294
295 CHANNEL_LAYERS = {
296 "default": {
297 "BACKEND": "channels.layers.InMemoryChannelLayer",
298 },
299 }
300
301 # ==============================================================================
302 # Dynamic preferences
303 # ==============================================================================
304
305 DYNAMIC_PREFERENCES = {
306 'REGISTRY_MODULE': 'preferences',
307 }
308
309 # ==============================================================================
310 # REST Framework
311 # ==============================================================================
312
313 REST_FRAMEWORK = {
314 'DEFAULT_RENDERER_CLASSES': [
315 'rest_framework.renderers.JSONRenderer',
316 ],
317 'DEFAULT_PARSER_CLASSES': [
318 'rest_framework.parsers.JSONParser',
319 ],
320 'DEFAULT_AUTHENTICATION_CLASSES': [
321 'rest_framework.authentication.TokenAuthentication',
322 ],
323 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
324 }
325
```
--- END FILES ---
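One detail worth noting from the issue text: the browser reports `X-Frame-Options: deny`, and `SUMMERNOTE_CONFIG` above sets `'iframe': True`, so the editor is served inside an iframe that the header blocks. Django 3.0 changed the default `X_FRAME_OPTIONS` from `SAMEORIGIN` to `DENY`, which matches the "dependency issue" suspicion. The sketch below shows the two standard Django ways to relax that policy; it is illustrative only and not a statement of how Tabbycat ultimately resolved the issue.

```python
# settings.py: allow same-origin framing site-wide
# (assumes XFrameOptionsMiddleware is enabled, as in the MIDDLEWARE list above).
X_FRAME_OPTIONS = 'SAMEORIGIN'

# Or exempt individual views instead of changing the global default:
from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_sameorigin


@xframe_options_sameorigin
def editor_frame(request):
    return HttpResponse('rendered inside a same-origin <iframe>')
```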
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tabbycat/settings/core.py b/tabbycat/settings/core.py
--- a/tabbycat/settings/core.py
+++ b/tabbycat/settings/core.py
@@ -276,6 +276,8 @@
'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,
}
+X_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes
+
# ==============================================================================
# Database
# ==============================================================================
| {"golden_diff": "diff --git a/tabbycat/settings/core.py b/tabbycat/settings/core.py\n--- a/tabbycat/settings/core.py\n+++ b/tabbycat/settings/core.py\n@@ -276,6 +276,8 @@\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n }\n \n+X_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes\n+\n # ==============================================================================\n # Database\n # ==============================================================================\n", "issue": "Summernote boxes don't show\nI've had this happen a few times today, not sure if reliably reproducible yet. Only affects develop branch. Master branch works fine. Probably a dependency issue. Making a note so we remember to do something about it before Nebelung.\r\n\r\n\r\n\r\n\r\n\r\nBrowser shows error:\r\n\r\n> Refused to display 'https://hidden-caverns-06472.herokuapp.com/summernote/editor/id_tournament_staff/' in a frame because it set 'X-Frame-Options' to 'deny'.\r\n\n", "before_files": [{"content": "import os\n\nfrom django.contrib.messages import constants as messages\nfrom django.utils.translation import gettext_lazy as _\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# ==============================================================================\n# Overwritten in local.py or heroku.py\n# ==============================================================================\n\nADMINS = ('Philip and Chuan-Zheng', '[email protected]'),\nMANAGERS = ADMINS\nDEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False\nENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config\nDISABLE_SENTRY = True # Overriden in Heroku config\nSECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'\n\n# ==============================================================================\n# Version\n# ==============================================================================\n\nTABBYCAT_VERSION = '2.5.0-dev'\nTABBYCAT_CODENAME = 'Nebelung'\nREADTHEDOCS_VERSION = 'v2.5.0-dev'\n\n# ==============================================================================\n# Internationalization and Localization\n# ==============================================================================\n\nUSE_I18N = True\nUSE_TZ = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nTIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'locale'),\n]\n\n# Languages that should be available in the switcher\nEXTRA_LANG_INFO = {\n 'ms': {\n 'bidi': False,\n 'code': 'ms',\n 'name': 'Malay',\n 'name_local': 'Bahasa Melayu', #unicode codepoints here\n },\n}\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nLANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)\ndjango.conf.locale.LANG_INFO = LANG_INFO\n\nLANGUAGES = [\n ('ar', _('Arabic')),\n ('bn', _('Bengali')),\n ('en', _('English')),\n ('es', _('Spanish')),\n ('fr', _('French')),\n ('ja', _('Japanese')),\n ('ms', _('Malay')),\n ('pt', _('Portuguese')),\n ('ru', _('Russian')),\n ('zh-hans', _('Simplified Chinese')),\n]\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"locale\")\n\nFORMAT_MODULE_PATH = [\n 'utils.formats',\n]\n\n# ==============================================================================\n# Django-specific Modules\n# ==============================================================================\n\nMIDDLEWARE = [\n 
'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # User language preferences; must be after Session\n 'django.middleware.locale.LocaleMiddleware',\n # Set Etags; i.e. cached requests not on network; must precede Common\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'utils.middleware.DebateMiddleware',\n]\n\nTABBYCAT_APPS = (\n 'actionlog',\n 'adjallocation',\n 'adjfeedback',\n 'api',\n 'availability',\n 'breakqual',\n 'checkins',\n 'divisions', # obsolete\n 'draw',\n 'motions',\n 'options',\n 'participants',\n 'printing',\n 'privateurls',\n 'results',\n 'tournaments',\n 'venues',\n 'utils',\n 'users',\n 'standings',\n 'notifications',\n 'importer',\n)\n\nINSTALLED_APPS = (\n 'jet',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'channels', # For Websockets / real-time connections (above whitenoise)\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_summernote', # Keep above our apps; as we unregister an admin model\n 'django.contrib.messages') \\\n + TABBYCAT_APPS + (\n 'dynamic_preferences',\n 'django_extensions', # For Secret Generation Command\n 'gfklookupwidget',\n 'formtools',\n 'statici18n', # Compile js translations as static file; saving requests\n 'polymorphic',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\nROOT_URLCONF = 'urls'\nLOGIN_REDIRECT_URL = '/'\nFIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )\nSILENCED_SYSTEM_CHECKS = ('urls.W002',)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request', # for Jet\n 'utils.context_processors.debate_context', # for tournament config vars\n 'django.template.context_processors.i18n' # for serving static language translations,\n ],\n 'loaders': [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n }\n },\n]\n\n# ==============================================================================\n# Caching\n# ==============================================================================\n\nPUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1))\nPUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))\nTAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))\n\n# Default non-heroku cache is to use local memory\nCACHES = 
{\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# ==============================================================================\n# Static Files and Compilation\n# ==============================================================================\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# ==============================================================================\n# Logging\n# ==============================================================================\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'sentry.errors': {\n 'level': 'INFO',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',\n },\n },\n}\n\nfor app in TABBYCAT_APPS:\n LOGGING['loggers'][app] = {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n }\n\n# ==============================================================================\n# Messages\n# ==============================================================================\n\nMESSAGE_TAGS = {messages.ERROR: 'danger', }\n\n# ==============================================================================\n# Summernote (WYSWIG)\n# ==============================================================================\n\nSUMMERNOTE_THEME = 'bs4' # Bootstrap 4\n\nSUMMERNOTE_CONFIG = {\n 'width': '100%',\n 'height': '480',\n 'toolbar': [\n ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],\n ['para', ['ul', 'ol']],\n ['insert', ['link', 'picture']],\n ['misc', ['undo', 'redo', 'codeview']],\n ],\n 'disable_upload': True,\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n}\n\n# ==============================================================================\n# Database\n# ==============================================================================\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n },\n}\n\n# ==============================================================================\n# Channels\n# ==============================================================================\n\nASGI_APPLICATION = \"routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels.layers.InMemoryChannelLayer\",\n },\n}\n\n# ==============================================================================\n# Dynamic preferences\n# ==============================================================================\n\nDYNAMIC_PREFERENCES = {\n 'REGISTRY_MODULE': 'preferences',\n}\n\n# ==============================================================================\n# REST Framework\n# ==============================================================================\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': [\n 
'rest_framework.renderers.JSONRenderer',\n ],\n 'DEFAULT_PARSER_CLASSES': [\n 'rest_framework.parsers.JSONParser',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n}\n", "path": "tabbycat/settings/core.py"}], "after_files": [{"content": "import os\n\nfrom django.contrib.messages import constants as messages\nfrom django.utils.translation import gettext_lazy as _\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir)))\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# ==============================================================================\n# Overwritten in local.py or heroku.py\n# ==============================================================================\n\nADMINS = ('Philip and Chuan-Zheng', '[email protected]'),\nMANAGERS = ADMINS\nDEBUG = bool(int(os.environ['DEBUG'])) if 'DEBUG' in os.environ else False\nENABLE_DEBUG_TOOLBAR = False # Must default to false; overriden in Dev config\nDISABLE_SENTRY = True # Overriden in Heroku config\nSECRET_KEY = r'#2q43u&tp4((4&m3i8v%w-6z6pp7m(v0-6@w@i!j5n)n15epwc'\n\n# ==============================================================================\n# Version\n# ==============================================================================\n\nTABBYCAT_VERSION = '2.5.0-dev'\nTABBYCAT_CODENAME = 'Nebelung'\nREADTHEDOCS_VERSION = 'v2.5.0-dev'\n\n# ==============================================================================\n# Internationalization and Localization\n# ==============================================================================\n\nUSE_I18N = True\nUSE_TZ = True\nUSE_L10N = True\nLANGUAGE_CODE = 'en'\nTIME_ZONE = os.environ.get('TIME_ZONE', 'Australia/Melbourne')\n\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, 'locale'),\n]\n\n# Languages that should be available in the switcher\nEXTRA_LANG_INFO = {\n 'ms': {\n 'bidi': False,\n 'code': 'ms',\n 'name': 'Malay',\n 'name_local': 'Bahasa Melayu', #unicode codepoints here\n },\n}\n\n# Add custom languages not provided by Django\nimport django.conf.locale\nLANG_INFO = dict(django.conf.locale.LANG_INFO, **EXTRA_LANG_INFO)\ndjango.conf.locale.LANG_INFO = LANG_INFO\n\nLANGUAGES = [\n ('ar', _('Arabic')),\n ('bn', _('Bengali')),\n ('en', _('English')),\n ('es', _('Spanish')),\n ('fr', _('French')),\n ('ja', _('Japanese')),\n ('ms', _('Malay')),\n ('pt', _('Portuguese')),\n ('ru', _('Russian')),\n ('zh-hans', _('Simplified Chinese')),\n]\n\nSTATICI18N_ROOT = os.path.join(BASE_DIR, \"locale\")\n\nFORMAT_MODULE_PATH = [\n 'utils.formats',\n]\n\n# ==============================================================================\n# Django-specific Modules\n# ==============================================================================\n\nMIDDLEWARE = [\n 'django.middleware.gzip.GZipMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n # User language preferences; must be after Session\n 'django.middleware.locale.LocaleMiddleware',\n # Set Etags; i.e. 
cached requests not on network; must precede Common\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.middleware.common.CommonMiddleware',\n # Must be after SessionMiddleware\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'utils.middleware.DebateMiddleware',\n]\n\nTABBYCAT_APPS = (\n 'actionlog',\n 'adjallocation',\n 'adjfeedback',\n 'api',\n 'availability',\n 'breakqual',\n 'checkins',\n 'divisions', # obsolete\n 'draw',\n 'motions',\n 'options',\n 'participants',\n 'printing',\n 'privateurls',\n 'results',\n 'tournaments',\n 'venues',\n 'utils',\n 'users',\n 'standings',\n 'notifications',\n 'importer',\n)\n\nINSTALLED_APPS = (\n 'jet',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'channels', # For Websockets / real-time connections (above whitenoise)\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_summernote', # Keep above our apps; as we unregister an admin model\n 'django.contrib.messages') \\\n + TABBYCAT_APPS + (\n 'dynamic_preferences',\n 'django_extensions', # For Secret Generation Command\n 'gfklookupwidget',\n 'formtools',\n 'statici18n', # Compile js translations as static file; saving requests\n 'polymorphic',\n 'rest_framework',\n 'rest_framework.authtoken',\n)\n\nROOT_URLCONF = 'urls'\nLOGIN_REDIRECT_URL = '/'\nFIXTURE_DIRS = (os.path.join(os.path.dirname(BASE_DIR), 'data', 'fixtures'), )\nSILENCED_SYSTEM_CHECKS = ('urls.W002',)\n\n# ==============================================================================\n# Templates\n# ==============================================================================\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request', # for Jet\n 'utils.context_processors.debate_context', # for tournament config vars\n 'django.template.context_processors.i18n' # for serving static language translations,\n ],\n 'loaders': [\n ('django.template.loaders.cached.Loader', [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ]),\n ],\n }\n },\n]\n\n# ==============================================================================\n# Caching\n# ==============================================================================\n\nPUBLIC_FAST_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_FAST_CACHE_TIMEOUT', 60 * 1))\nPUBLIC_SLOW_CACHE_TIMEOUT = int(os.environ.get('PUBLIC_SLOW_CACHE_TIMEOUT', 60 * 3.5))\nTAB_PAGES_CACHE_TIMEOUT = int(os.environ.get('TAB_PAGES_CACHE_TIMEOUT', 60 * 120))\n\n# Default non-heroku cache is to use local memory\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n },\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# ==============================================================================\n# Static Files and Compilation\n# 
==============================================================================\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'\n\n# ==============================================================================\n# Logging\n# ==============================================================================\n\nMESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n 'sentry.errors': {\n 'level': 'INFO',\n 'handlers': ['console'],\n 'propagate': False,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s',\n },\n },\n}\n\nfor app in TABBYCAT_APPS:\n LOGGING['loggers'][app] = {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n }\n\n# ==============================================================================\n# Messages\n# ==============================================================================\n\nMESSAGE_TAGS = {messages.ERROR: 'danger', }\n\n# ==============================================================================\n# Summernote (WYSWIG)\n# ==============================================================================\n\nSUMMERNOTE_THEME = 'bs4' # Bootstrap 4\n\nSUMMERNOTE_CONFIG = {\n 'width': '100%',\n 'height': '480',\n 'toolbar': [\n ['style', ['bold', 'italic', 'underline', 'fontsize', 'color', 'clear']],\n ['para', ['ul', 'ol']],\n ['insert', ['link', 'picture']],\n ['misc', ['undo', 'redo', 'codeview']],\n ],\n 'disable_upload': True,\n 'iframe': True, # Necessary; if just to compartmentalise jQuery dependency,\n}\n\nX_FRAME_OPTIONS = 'SAMEORIGIN' # Necessary to get Django-Summernote working because of Django 3 changes\n\n# ==============================================================================\n# Database\n# ==============================================================================\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n },\n}\n\n# ==============================================================================\n# Channels\n# ==============================================================================\n\nASGI_APPLICATION = \"routing.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels.layers.InMemoryChannelLayer\",\n },\n}\n\n# ==============================================================================\n# Dynamic preferences\n# ==============================================================================\n\nDYNAMIC_PREFERENCES = {\n 'REGISTRY_MODULE': 'preferences',\n}\n\n# ==============================================================================\n# REST Framework\n# ==============================================================================\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': [\n 'rest_framework.renderers.JSONRenderer',\n ],\n 'DEFAULT_PARSER_CLASSES': [\n 'rest_framework.parsers.JSONParser',\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 
'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n}\n", "path": "tabbycat/settings/core.py"}]} | 3,454 | 106 |
gh_patches_debug_1553 | rasdani/github-patches | git_diff | feast-dev__feast-3756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value
I get `OperationalError: connection to server at "localhost" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when running `feast apply`.
This is caused by the `keepalives_idle=config.keepalives_idle` argument in the `_get_conn` function in `infra/utils/postgres/connection_utils.py`. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml, and 'keepalives_idle=0' is passed by the default setting in `infra/utils/postgres/postgres_config.py`. 
- Version: 0.33.1
- Platform: linux ubuntu 20.04
- Subsystem:
## Possible Solution
Check this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/infra/utils/postgres/postgres_config.py`
Content:
```
1 from enum import Enum
2 from typing import Optional
3
4 from pydantic import StrictStr
5
6 from feast.repo_config import FeastConfigBaseModel
7
8
9 class ConnectionType(Enum):
10 singleton = "singleton"
11 pool = "pool"
12
13
14 class PostgreSQLConfig(FeastConfigBaseModel):
15 min_conn: int = 1
16 max_conn: int = 10
17 conn_type: ConnectionType = ConnectionType.singleton
18 host: StrictStr
19 port: int = 5432
20 database: StrictStr
21 db_schema: StrictStr = "public"
22 user: StrictStr
23 password: StrictStr
24 sslmode: Optional[StrictStr] = None
25 sslkey_path: Optional[StrictStr] = None
26 sslcert_path: Optional[StrictStr] = None
27 sslrootcert_path: Optional[StrictStr] = None
28 keepalives_idle: int = 0
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py
--- a/sdk/python/feast/infra/utils/postgres/postgres_config.py
+++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py
@@ -25,4 +25,4 @@
sslkey_path: Optional[StrictStr] = None
sslcert_path: Optional[StrictStr] = None
sslrootcert_path: Optional[StrictStr] = None
- keepalives_idle: int = 0
+ keepalives_idle: Optional[int] = None
| {"golden_diff": "diff --git a/sdk/python/feast/infra/utils/postgres/postgres_config.py b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n--- a/sdk/python/feast/infra/utils/postgres/postgres_config.py\n+++ b/sdk/python/feast/infra/utils/postgres/postgres_config.py\n@@ -25,4 +25,4 @@\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n- keepalives_idle: int = 0\n+ keepalives_idle: Optional[int] = None\n", "issue": "Postgres engine default keepalives_idle value causes setsockopt(TCP_KEEPIDLE) invalid value\nGet `OperationalError: connection to server at \"localhost\" (127.0.0.1), port 5432 failed: setsockopt(TCP_KEEPIDLE) failed: Invalid argument` when run `feast apply`.\r\nBecause of `keepalives_idle=config.keepalives_idle` field in function '_get_conn' in `infra/utils/postgres/connection_utils.py` file. For example, to avoid this error I need to pass 'keepalives_idle=1', but that argument isn't parsed for the registry in feature_store.yaml and pass 'keepalives_idle=0' by default setting in `infra/utils/postgres/postgres_config.py`. \r\n\r\n- Version: 0.33.1\r\n- Platform: linux ubuntu 20.04\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nCheck this issue with the same problem https://github.com/TobikoData/sqlmesh/issues/750. I think you shouldn't pass 'keepalives_idle=0' by default.\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nfrom pydantic import StrictStr\n\nfrom feast.repo_config import FeastConfigBaseModel\n\n\nclass ConnectionType(Enum):\n singleton = \"singleton\"\n pool = \"pool\"\n\n\nclass PostgreSQLConfig(FeastConfigBaseModel):\n min_conn: int = 1\n max_conn: int = 10\n conn_type: ConnectionType = ConnectionType.singleton\n host: StrictStr\n port: int = 5432\n database: StrictStr\n db_schema: StrictStr = \"public\"\n user: StrictStr\n password: StrictStr\n sslmode: Optional[StrictStr] = None\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n keepalives_idle: int = 0\n", "path": "sdk/python/feast/infra/utils/postgres/postgres_config.py"}], "after_files": [{"content": "from enum import Enum\nfrom typing import Optional\n\nfrom pydantic import StrictStr\n\nfrom feast.repo_config import FeastConfigBaseModel\n\n\nclass ConnectionType(Enum):\n singleton = \"singleton\"\n pool = \"pool\"\n\n\nclass PostgreSQLConfig(FeastConfigBaseModel):\n min_conn: int = 1\n max_conn: int = 10\n conn_type: ConnectionType = ConnectionType.singleton\n host: StrictStr\n port: int = 5432\n database: StrictStr\n db_schema: StrictStr = \"public\"\n user: StrictStr\n password: StrictStr\n sslmode: Optional[StrictStr] = None\n sslkey_path: Optional[StrictStr] = None\n sslcert_path: Optional[StrictStr] = None\n sslrootcert_path: Optional[StrictStr] = None\n keepalives_idle: Optional[int] = None\n", "path": "sdk/python/feast/infra/utils/postgres/postgres_config.py"}]} | 750 | 145 |
gh_patches_debug_18869 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We should be able to create new admin users, upgrade existing users to admin
## Description
* `is_superuser` is currently a readonly property in Users APIs.
* We should be able to set them while an admin is editing a user.
* Users (including super-users) should not be able to modify its value for themselves.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/ui/serializers/users.py`
Content:
```
1 from django.contrib.auth.password_validation import validate_password
2 from rest_access_policy import FieldAccessMixin, PermittedPkRelatedField
3 from rest_framework import serializers
4
5 from mathesar.api.db.permissions.database import DatabaseAccessPolicy
6 from mathesar.api.db.permissions.schema import SchemaAccessPolicy
7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
8 from mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword
9 from mathesar.api.ui.permissions.users import UserAccessPolicy
10 from mathesar.models.base import Database, Schema
11 from mathesar.models.users import User, DatabaseRole, SchemaRole
12
13
14 class NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
15 class Meta:
16 model = DatabaseRole
17 fields = ['id', 'database', 'role']
18
19
20 class NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
21 class Meta:
22 model = SchemaRole
23 fields = ['id', 'schema', 'role']
24
25
26 class UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer):
27 database_roles = NestedDatabaseRoleSerializer(many=True, required=False)
28 schema_roles = NestedSchemaRoleSerializer(many=True, required=False)
29 access_policy = UserAccessPolicy
30
31 class Meta:
32 model = User
33 fields = [
34 'id',
35 'full_name',
36 'short_name',
37 'username',
38 'password',
39 'email',
40 'is_superuser',
41 'database_roles',
42 'schema_roles',
43 ]
44 extra_kwargs = {
45 'password': {'write_only': True},
46 'is_superuser': {'read_only': True},
47 'database_roles': {'read_only': True},
48 'schema_roles': {'read_only': True}
49 }
50
51 def create(self, validated_data):
52 password = validated_data.pop('password')
53 user = User(**validated_data)
54 user.password_change_needed = True
55 user.set_password(password)
56 user.save()
57 return user
58
59
60 class ChangePasswordSerializer(serializers.Serializer):
61 password = serializers.CharField(write_only=True, required=True, validators=[validate_password])
62 old_password = serializers.CharField(write_only=True, required=True)
63
64 def validate_old_password(self, value):
65 user = self.context['request'].user
66 if user.check_password(value) is True:
67 return value
68 raise IncorrectOldPassword(field='old_password')
69
70 def update(self, instance, validated_data):
71 instance.set_password(validated_data['password'])
72 instance.save()
73 return instance
74
75
76 class PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer):
77 password = serializers.CharField(write_only=True, required=True, validators=[validate_password])
78
79
80 class DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
81 class Meta:
82 model = DatabaseRole
83 fields = ['id', 'user', 'database', 'role']
84
85 # Restrict the list of databases to which the user has access to create a database role
86 # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField`
87 database = PermittedPkRelatedField(
88 access_policy=DatabaseAccessPolicy,
89 queryset=Database.current_objects.all()
90 )
91
92
93 class SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
94 class Meta:
95 model = SchemaRole
96 fields = ['id', 'user', 'schema', 'role']
97
98 schema = PermittedPkRelatedField(
99 access_policy=SchemaAccessPolicy,
100 queryset=Schema.current_objects.all()
101 )
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py
--- a/mathesar/api/ui/serializers/users.py
+++ b/mathesar/api/ui/serializers/users.py
@@ -43,11 +43,23 @@
]
extra_kwargs = {
'password': {'write_only': True},
- 'is_superuser': {'read_only': True},
'database_roles': {'read_only': True},
'schema_roles': {'read_only': True}
}
+ def get_fields(self):
+ fields = super().get_fields()
+ request = self.context.get("request", None)
+ if not hasattr(request, 'parser_context'):
+ return fields
+ kwargs = request.parser_context.get('kwargs')
+ if kwargs:
+ user_pk = kwargs.get('pk')
+ if user_pk:
+ if request.user.id == int(user_pk) or not request.user.is_superuser:
+ fields["is_superuser"].read_only = True
+ return fields
+
def create(self, validated_data):
password = validated_data.pop('password')
user = User(**validated_data)
| {"golden_diff": "diff --git a/mathesar/api/ui/serializers/users.py b/mathesar/api/ui/serializers/users.py\n--- a/mathesar/api/ui/serializers/users.py\n+++ b/mathesar/api/ui/serializers/users.py\n@@ -43,11 +43,23 @@\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n- 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n \n+ def get_fields(self):\n+ fields = super().get_fields()\n+ request = self.context.get(\"request\", None)\n+ if not hasattr(request, 'parser_context'):\n+ return fields\n+ kwargs = request.parser_context.get('kwargs')\n+ if kwargs:\n+ user_pk = kwargs.get('pk')\n+ if user_pk:\n+ if request.user.id == int(user_pk) or not request.user.is_superuser:\n+ fields[\"is_superuser\"].read_only = True\n+ return fields\n+\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n", "issue": "We should be able to create new admin users, upgrade existing users to admin\n## Description\r\n* `is_superuser` is currently a readonly property in Users APIs.\r\n* We should be able to set them while an admin is editing a user.\r\n* Users (including super-users) should not be able to modify it's value for themselves.\r\n\n", "before_files": [{"content": "from django.contrib.auth.password_validation import validate_password\nfrom rest_access_policy import FieldAccessMixin, PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.base import Database, Schema\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'database', 'role']\n\n\nclass NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'schema', 'role']\n\n\nclass UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer):\n database_roles = NestedDatabaseRoleSerializer(many=True, required=False)\n schema_roles = NestedSchemaRoleSerializer(many=True, required=False)\n access_policy = UserAccessPolicy\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'full_name',\n 'short_name',\n 'username',\n 'password',\n 'email',\n 'is_superuser',\n 'database_roles',\n 'schema_roles',\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n 'is_superuser': {'read_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n user.password_change_needed = True\n user.set_password(password)\n user.save()\n return user\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n old_password = serializers.CharField(write_only=True, required=True)\n\n def validate_old_password(self, value):\n user = self.context['request'].user\n if user.check_password(value) is True:\n return value\n raise 
IncorrectOldPassword(field='old_password')\n\n def update(self, instance, validated_data):\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\n\nclass PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n\n\nclass DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'user', 'database', 'role']\n\n # Restrict the list of databases to which the user has access to create a database role\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField`\n database = PermittedPkRelatedField(\n access_policy=DatabaseAccessPolicy,\n queryset=Database.current_objects.all()\n )\n\n\nclass SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'user', 'schema', 'role']\n\n schema = PermittedPkRelatedField(\n access_policy=SchemaAccessPolicy,\n queryset=Schema.current_objects.all()\n )\n", "path": "mathesar/api/ui/serializers/users.py"}], "after_files": [{"content": "from django.contrib.auth.password_validation import validate_password\nfrom rest_access_policy import FieldAccessMixin, PermittedPkRelatedField\nfrom rest_framework import serializers\n\nfrom mathesar.api.db.permissions.database import DatabaseAccessPolicy\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import IncorrectOldPassword\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.base import Database, Schema\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass NestedDatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'database', 'role']\n\n\nclass NestedSchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'schema', 'role']\n\n\nclass UserSerializer(MathesarErrorMessageMixin, FieldAccessMixin, serializers.ModelSerializer):\n database_roles = NestedDatabaseRoleSerializer(many=True, required=False)\n schema_roles = NestedSchemaRoleSerializer(many=True, required=False)\n access_policy = UserAccessPolicy\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'full_name',\n 'short_name',\n 'username',\n 'password',\n 'email',\n 'is_superuser',\n 'database_roles',\n 'schema_roles',\n ]\n extra_kwargs = {\n 'password': {'write_only': True},\n 'database_roles': {'read_only': True},\n 'schema_roles': {'read_only': True}\n }\n\n def get_fields(self):\n fields = super().get_fields()\n request = self.context.get(\"request\", None)\n if not hasattr(request, 'parser_context'):\n return fields\n kwargs = request.parser_context.get('kwargs')\n if kwargs:\n user_pk = kwargs.get('pk')\n if user_pk:\n if request.user.id == int(user_pk) or not request.user.is_superuser:\n fields[\"is_superuser\"].read_only = True\n return fields\n\n def create(self, validated_data):\n password = validated_data.pop('password')\n user = User(**validated_data)\n user.password_change_needed = True\n user.set_password(password)\n user.save()\n return user\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n password = 
serializers.CharField(write_only=True, required=True, validators=[validate_password])\n old_password = serializers.CharField(write_only=True, required=True)\n\n def validate_old_password(self, value):\n user = self.context['request'].user\n if user.check_password(value) is True:\n return value\n raise IncorrectOldPassword(field='old_password')\n\n def update(self, instance, validated_data):\n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\n\nclass PasswordResetSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n password = serializers.CharField(write_only=True, required=True, validators=[validate_password])\n\n\nclass DatabaseRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = DatabaseRole\n fields = ['id', 'user', 'database', 'role']\n\n # Restrict the list of databases to which the user has access to create a database role\n # Refer https://rsinger86.github.io/drf-access-policy/policy_reuse/ for the usage of `PermittedPkRelatedField`\n database = PermittedPkRelatedField(\n access_policy=DatabaseAccessPolicy,\n queryset=Database.current_objects.all()\n )\n\n\nclass SchemaRoleSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = SchemaRole\n fields = ['id', 'user', 'schema', 'role']\n\n schema = PermittedPkRelatedField(\n access_policy=SchemaAccessPolicy,\n queryset=Schema.current_objects.all()\n )\n", "path": "mathesar/api/ui/serializers/users.py"}]} | 1,261 | 253 |
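The `get_fields` override in the golden diff above is a general Django REST Framework technique for making a field conditionally read-only. A stripped-down sketch of the same idea, using illustrative field and class names rather than Mathesar's serializer:

```python
# Generic DRF sketch (field and class names are illustrative):
# expose is_superuser as writable only to admins editing *other* users.
from rest_framework import serializers


class AccountSerializer(serializers.Serializer):
    username = serializers.CharField()
    is_superuser = serializers.BooleanField(required=False)

    def get_fields(self):
        fields = super().get_fields()
        request = self.context.get("request")
        if request is None or not hasattr(request, "parser_context"):
            return fields
        target_pk = (request.parser_context.get("kwargs") or {}).get("pk")
        if target_pk is not None:
            editing_self = request.user.id == int(target_pk)
            if editing_self or not request.user.is_superuser:
                # Keep the field visible but reject writes from this requester.
                fields["is_superuser"].read_only = True
        return fields
```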
gh_patches_debug_22200 | rasdani/github-patches | git_diff | mozilla__bugbug-1722 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Submit logged warnings to Sentry
Currently, we only report exceptions to Sentry.
It'd be nice to report warnings too, so we can get a sense of how often they happen.
IIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of thing automatically.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `http_service/bugbug_http/worker.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # This Source Code Form is subject to the terms of the Mozilla Public
4 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
5 # You can obtain one at http://mozilla.org/MPL/2.0/.
6
7 import os
8 import sys
9
10 import sentry_sdk
11 from redis import Redis
12 from rq import Connection, Worker
13 from sentry_sdk.integrations.rq import RqIntegration
14
15 import bugbug_http.boot
16 from bugbug import get_bugbug_version
17
18 if os.environ.get("SENTRY_DSN"):
19 sentry_sdk.init(
20 os.environ.get("SENTRY_DSN"),
21 integrations=[RqIntegration()],
22 release=get_bugbug_version(),
23 )
24
25
26 def main():
27 # Bootstrap the worker assets
28 bugbug_http.boot.boot_worker()
29
30 # Provide queue names to listen to as arguments to this script,
31 # similar to rq worker
32 redis_url = os.environ.get("REDIS_URL", "redis://localhost/0")
33 redis_conn = Redis.from_url(redis_url)
34 with Connection(connection=redis_conn):
35 qs = sys.argv[1:] or ["default"]
36
37 w = Worker(qs)
38 w.work()
39
40
41 if __name__ == "__main__":
42 main()
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py
--- a/http_service/bugbug_http/worker.py
+++ b/http_service/bugbug_http/worker.py
@@ -4,21 +4,29 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
+import logging
import os
import sys
import sentry_sdk
from redis import Redis
from rq import Connection, Worker
+from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.rq import RqIntegration
import bugbug_http.boot
from bugbug import get_bugbug_version
if os.environ.get("SENTRY_DSN"):
+ logging_integration = LoggingIntegration(
+ # Default behaviour: INFO messages will be included as breadcrumbs
+ level=logging.INFO,
+ # Change default behaviour (ERROR messages events)
+ event_level=logging.WARNING,
+ )
sentry_sdk.init(
- os.environ.get("SENTRY_DSN"),
- integrations=[RqIntegration()],
+ dsn=os.environ.get("SENTRY_DSN"),
+ integrations=[RqIntegration(), logging_integration],
release=get_bugbug_version(),
)
| {"golden_diff": "diff --git a/http_service/bugbug_http/worker.py b/http_service/bugbug_http/worker.py\n--- a/http_service/bugbug_http/worker.py\n+++ b/http_service/bugbug_http/worker.py\n@@ -4,21 +4,29 @@\n # License, v. 2.0. If a copy of the MPL was not distributed with this file,\n # You can obtain one at http://mozilla.org/MPL/2.0/.\n \n+import logging\n import os\n import sys\n \n import sentry_sdk\n from redis import Redis\n from rq import Connection, Worker\n+from sentry_sdk.integrations.logging import LoggingIntegration\n from sentry_sdk.integrations.rq import RqIntegration\n \n import bugbug_http.boot\n from bugbug import get_bugbug_version\n \n if os.environ.get(\"SENTRY_DSN\"):\n+ logging_integration = LoggingIntegration(\n+ # Default behaviour: INFO messages will be included as breadcrumbs\n+ level=logging.INFO,\n+ # Change default behaviour (ERROR messages events)\n+ event_level=logging.WARNING,\n+ )\n sentry_sdk.init(\n- os.environ.get(\"SENTRY_DSN\"),\n- integrations=[RqIntegration()],\n+ dsn=os.environ.get(\"SENTRY_DSN\"),\n+ integrations=[RqIntegration(), logging_integration],\n release=get_bugbug_version(),\n )\n", "issue": "Submit logged warnings to Sentry\nCurrently, we only report exceptions to Sentry.\r\nIt'd be nice to report warnings too, so we can get a sense of how often they happen.\r\nIIRC in the code-review bot (https://github.com/mozilla/code-review), we are doing that kind of automatically.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\n\nimport sentry_sdk\nfrom redis import Redis\nfrom rq import Connection, Worker\nfrom sentry_sdk.integrations.rq import RqIntegration\n\nimport bugbug_http.boot\nfrom bugbug import get_bugbug_version\n\nif os.environ.get(\"SENTRY_DSN\"):\n sentry_sdk.init(\n os.environ.get(\"SENTRY_DSN\"),\n integrations=[RqIntegration()],\n release=get_bugbug_version(),\n )\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\nimport sys\n\nimport sentry_sdk\nfrom redis import Redis\nfrom rq import Connection, Worker\nfrom sentry_sdk.integrations.logging import LoggingIntegration\nfrom sentry_sdk.integrations.rq import RqIntegration\n\nimport bugbug_http.boot\nfrom bugbug import get_bugbug_version\n\nif os.environ.get(\"SENTRY_DSN\"):\n logging_integration = LoggingIntegration(\n # Default behaviour: INFO messages will be included as breadcrumbs\n level=logging.INFO,\n # Change default behaviour (ERROR messages events)\n event_level=logging.WARNING,\n )\n sentry_sdk.init(\n dsn=os.environ.get(\"SENTRY_DSN\"),\n integrations=[RqIntegration(), logging_integration],\n release=get_bugbug_version(),\n )\n\n\ndef main():\n # Bootstrap the worker assets\n bugbug_http.boot.boot_worker()\n\n # Provide queue names to listen to as arguments to this script,\n # similar to rq worker\n redis_url = os.environ.get(\"REDIS_URL\", \"redis://localhost/0\")\n redis_conn = Redis.from_url(redis_url)\n with Connection(connection=redis_conn):\n qs = sys.argv[1:] or [\"default\"]\n\n w = Worker(qs)\n w.work()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "http_service/bugbug_http/worker.py"}]} | 684 | 288 |
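The fix in the record above relies on `sentry_sdk`'s logging integration, which turns log records at or above `event_level` into Sentry events while lower levels become breadcrumbs. A minimal standalone sketch of that wiring (the DSN is a placeholder, not a real project key):

```python
# Minimal sketch of sending logged warnings to Sentry (placeholder DSN).
import logging

import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration

sentry_sdk.init(
    dsn="https://public@sentry.example.com/1",  # placeholder, not a real DSN
    integrations=[
        LoggingIntegration(
            level=logging.INFO,           # INFO and above become breadcrumbs
            event_level=logging.WARNING,  # WARNING and above become events
        )
    ],
)

logging.getLogger(__name__).warning("this warning is reported to Sentry")
```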
gh_patches_debug_10388 | rasdani/github-patches | git_diff | DistrictDataLabs__yellowbrick-766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Critical Vulnerability in np.load with NumPy v1.16 and earlier
There is a critical vulnerability with NumPy v1.16 and earlier that affects the new YB datasets module:
https://www.bleepingcomputer.com/news/security/numpy-is-awaiting-fix-for-critical-remote-code-execution-bug/
This does not affect any Yellowbrick user for version 0.9.1 or earlier and we will not release version 1.0 without a patch for this bug. When NumPy 1.17 is released (if it contains the fix), we will mark our minimum NumPy requirement to that version.
Currently, in the `develop` branch, we do use `np.load` when [loading a numpy dataset](https://github.com/DistrictDataLabs/yellowbrick/blob/develop/yellowbrick/datasets/base.py#L195), e.g. if Pandas is not available. We should update this to `np.load(allow_pickle=False)` as per the recommendation of the post above. Note that we do [ensure data downloaded from our repository matches an expected signature](https://github.com/DistrictDataLabs/yellowbrick/blob/708274289d66d9265f7ded03e3445bc2bd70f46e/yellowbrick/datasets/download.py#L106), which minimizes but does not eliminate the risk to Yellowbrick users.
Thanks @theagent for bringing this to our attention!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yellowbrick/datasets/base.py`
Content:
```
1 # yellowbrick.datasets.base
2 # Loading utilities for the yellowbrick datasets.
3 #
4 # Author: Rebecca Bilbro <[email protected]>
5 # Author: Benjamin Bengfort <[email protected]>
6 # Author: Raul Peralta <[email protected]>
7 # Created: Thu Jul 26 13:53:01 2018 -0400
8 #
9 # ID: base.py [] [email protected] $
10
11 """
12 Loading utilities for the yellowbrick datasets.
13 """
14
15 ##########################################################################
16 ## Imports
17 ##########################################################################
18
19 import os
20 import json
21 import numpy as np
22
23 from .download import download_data
24 from .path import find_dataset_path, dataset_exists
25
26 from yellowbrick.exceptions import DatasetsError
27 from yellowbrick.utils.decorators import memoized
28
29 try:
30 import pandas as pd
31 except ImportError:
32 pd = None
33
34
35 ##########################################################################
36 ## Dataset Object
37 ##########################################################################
38
39 class BaseDataset(object):
40 """
41 Base functionality for Dataset and Corpus objects.
42 """
43
44 def __init__(self, name, url=None, signature=None, data_home=None):
45 self.url = url
46 self.name = name
47 self.data_home = data_home
48 self.signature = signature
49
50 # Check if the dataset exists, and if not - download it!
51 if not dataset_exists(self.name, data_home=data_home):
52 self.download()
53
54 def download(self, replace=False):
55 """
56 Download the dataset from the hosted Yellowbrick data store and save
57 it to the location specified by ``get_data_home``. The downloader
58 verifies the download completed successfully and safely by comparing
59 the expected signature with the SHA 256 signature of the downloaded
60 archive file.
61
62 Parameters
63 ----------
64 replace : bool, default: False
65 If the data archive already exists, replace the dataset. If this is
66 False and the dataset exists, an exception is raised.
67 """
68 download_data(
69 self.url, self.signature, data_home=self.data_home,
70 replace=replace, extract=True
71 )
72
73 def contents(self):
74 """
75 Contents returns a list of the files in the data directory.
76 """
77 data = find_dataset_path(
78 self.name, data_home=self.data_home, ext=None
79 )
80 return os.listdir(data)
81
82 @memoized
83 def README(self):
84 """
85 Returns the contents of the README.md file that describes the dataset
86 in detail and contains attribution information.
87 """
88 path = find_dataset_path(
89 self.name, data_home=self.data_home, fname="README.md"
90 )
91 with open(path, 'r') as f:
92 return f.read()
93
94 @memoized
95 def meta(self):
96 """
97 Returns the contents of the meta.json file that describes important
98 attributes about the dataset and modifies the behavior of the loader.
99 """
100 path = find_dataset_path(
101 self.name, data_home=self.data_home, fname="meta.json", raises=False
102 )
103 if path is None:
104 return None
105
106 with open(path, 'r') as f:
107 return json.load(f)
108
109 @memoized
110 def citation(self):
111 """
112 Returns the contents of the citation.bib file that describes the source
113 and provenance of the dataset or to cite for academic work.
114 """
115 path = find_dataset_path(
116 self.name, data_home=self.data_home, fname="meta.json", raises=False
117 )
118 if path is None:
119 return None
120
121 with open(path, 'r') as f:
122 return f.read()
123
124
125 class Dataset(BaseDataset):
126 """
127 Datasets contain a reference to data on disk and provide utilities for
128 quickly loading files and objects into a variety of formats. The most
129 common use of the Dataset object is to load example datasets provided by
130 Yellowbrick to run the examples in the documentation.
131
132 The dataset by default will return the data as a numpy array, however if
133 Pandas is installed, it is possible to access the data as a DataFrame and
134 Series object. In either case, the data is represented by a features table,
135 X and a target vector, y.
136
137 Parameters
138 ----------
139 name : str
140 The name of the dataset; should either be a folder in data home or
141 specified in the yellowbrick.datasets.DATASETS variable. This name is
142 used to perform all lookups and identify the dataset externally.
143
144 data_home : str, optional
145 The path on disk where data is stored. If not passed in, it is looked
146 up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
147
148 url : str, optional
149 The web location where the archive file of the dataset can be
150 downloaded from.
151
152 signature : str, optional
153 The signature of the data archive file, used to verify that the latest
154 version of the data has been downloaded and that the download hasn't
155 been corrupted or modified in anyway.
156 """
157
158 def to_data(self):
159 """
160 Returns the data contained in the dataset as X and y where X is the
161 features matrix and y is the target vector. If pandas is installed,
162 the data will be returned as DataFrame and Series objects. Otherwise,
163 the data will be returned as two numpy arrays.
164
165 Returns
166 -------
167 X : array-like with shape (n_instances, n_features)
168 A pandas DataFrame or numpy array describing the instance features.
169
170 y : array-like with shape (n_instances,)
171 A pandas Series or numpy array describing the target vector.
172 """
173 if pd is not None:
174 return self.to_pandas()
175 return self.to_numpy()
176
177 def to_numpy(self):
178 """
179 Returns the dataset as two numpy arrays: X and y.
180
181 Returns
182 -------
183 X : array-like with shape (n_instances, n_features)
184 A numpy array describing the instance features.
185
186 y : array-like with shape (n_instances,)
187 A numpy array describing the target vector.
188 """
189 path = find_dataset_path(self.name, ext=".npz", data_home=self.data_home)
190 with np.load(path) as npf:
191 if "X" not in npf or "y" not in npf:
192 raise DatasetsError((
193 "the downloaded dataset was improperly packaged without numpy arrays "
194 "- please report this bug to the Yellowbrick maintainers!"
195 ))
196
197 # TODO: How to handle the case where y is None?
198 return npf["X"], npf["y"]
199
200 def to_pandas(self):
201 """
202 Returns the dataset as two pandas objects: X and y.
203
204 Returns
205 -------
206 X : DataFrame with shape (n_instances, n_features)
207 A pandas DataFrame containing feature data and named columns.
208
209 y : Series with shape (n_instances,)
210 A pandas Series containing target data and an index that matches
211 the feature DataFrame index.
212 """
213 # Ensure the metadata is valid before continuing
214 if self.meta is None:
215 raise DatasetsError((
216 "the downloaded dataset was improperly packaged without meta.json "
217 "- please report this bug to the Yellowbrick maintainers!"
218 ))
219
220 if "features" not in self.meta or "target" not in self.meta:
221 raise DatasetsError((
222 "the downloaded dataset was improperly packaged without features "
223 "or target - please report this bug to the Yellowbrick maintainers!"
224 ))
225
226 # Load data frame and return features and target
227 # TODO: Return y as None if there is no self.meta["target"]
228 df = self.to_dataframe()
229 return df[self.meta["features"]], df[self.meta["target"]]
230
231
232 def to_dataframe(self):
233 """
234 Returns the entire dataset as a single pandas DataFrame.
235
236 Returns
237 -------
238 df : DataFrame with shape (n_instances, n_columns)
239 A pandas DataFrame containing the complete original data table
240 including all targets (specified by the meta data) and all
241 features (including those that might have been filtered out).
242 """
243 if pd is None:
244 raise DatasetsError(
245 "pandas is required to load DataFrame, it can be installed with pip"
246 )
247
248 path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home)
249 return pd.read_csv(path, compression="gzip")
250
251
252 class Corpus(BaseDataset):
253 """
254 Corpus datasets contain a reference to documents on disk and provide
255 utilities for quickly loading text data for use in machine learning
256 workflows. The most common use of the corpus is to load the text analysis
257 examples from the Yellowbrick documentation.
258
259 Parameters
260 ----------
261 name : str
262 The name of the corpus; should either be a folder in data home or
263 specified in the yellowbrick.datasets.DATASETS variable. This name is
264 used to perform all lookups and identify the corpus externally.
265
266 data_home : str, optional
267 The path on disk where data is stored. If not passed in, it is looked
268 up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
269
270 url : str, optional
271 The web location where the archive file of the corpus can be
272 downloaded from.
273
274 signature : str, optional
275 The signature of the data archive file, used to verify that the latest
276 version of the data has been downloaded and that the download hasn't
277 been corrupted or modified in anyway.
278 """
279
280 @memoized
281 def root(self):
282 """
283 Discovers and caches the root directory of the corpus.
284 """
285 return find_dataset_path(self.name, data_home=self.data_home, ext=None)
286
287 @memoized
288 def labels(self):
289 """
290 Return the unique labels assigned to the documents.
291 """
292 return [
293 name for name in os.listdir(self.root)
294 if os.path.isdir(os.path.join(self.root, name))
295 ]
296
297 @property
298 def files(self):
299 """
300 Returns the list of file names for all documents.
301 """
302 return [
303 os.path.join(self.root, label, name)
304 for label in self.labels
305 for name in os.listdir(os.path.join(self.root, label))
306 ]
307
308 @property
309 def data(self):
310 """
311 Read all of the documents from disk into an in-memory list.
312 """
313 def read(path):
314 with open(path, 'r', encoding='UTF-8') as f:
315 return f.read()
316
317 return [
318 read(f) for f in self.files
319 ]
320
321 @property
322 def target(self):
323 """
324 Returns the label associated with each item in data.
325 """
326 return [
327 os.path.basename(os.path.dirname(f)) for f in self.files
328 ]
329
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yellowbrick/datasets/base.py b/yellowbrick/datasets/base.py
--- a/yellowbrick/datasets/base.py
+++ b/yellowbrick/datasets/base.py
@@ -187,7 +187,7 @@
A numpy array describing the target vector.
"""
path = find_dataset_path(self.name, ext=".npz", data_home=self.data_home)
- with np.load(path) as npf:
+ with np.load(path, allow_pickle=False) as npf:
if "X" not in npf or "y" not in npf:
raise DatasetsError((
"the downloaded dataset was improperly packaged without numpy arrays "
| {"golden_diff": "diff --git a/yellowbrick/datasets/base.py b/yellowbrick/datasets/base.py\n--- a/yellowbrick/datasets/base.py\n+++ b/yellowbrick/datasets/base.py\n@@ -187,7 +187,7 @@\n A numpy array describing the target vector.\n \"\"\"\n path = find_dataset_path(self.name, ext=\".npz\", data_home=self.data_home)\n- with np.load(path) as npf:\n+ with np.load(path, allow_pickle=False) as npf:\n if \"X\" not in npf or \"y\" not in npf:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without numpy arrays \"\n", "issue": "Critical Vulnerability in np.load with NumPy v1.16 and earlier\nThere is a critical vulnerability with NumPy v1.16 and earlier that affects the new YB datasets module:\r\n\r\nhttps://www.bleepingcomputer.com/news/security/numpy-is-awaiting-fix-for-critical-remote-code-execution-bug/\r\n\r\nThis does not affect any Yellowbrick user for version 0.9.1 or earlier and we will not release version 1.0 without a patch for this bug. When NumPy 1.17 is released (if it contains the fix), we will mark our minimum NumPy requirement to that version. \r\n\r\nCurrently, in the `develop` branch, we do use `np.load` when [loading a numpy dataset](https://github.com/DistrictDataLabs/yellowbrick/blob/develop/yellowbrick/datasets/base.py#L195), e.g. if Pandas is not available. We should update this to `np.load(allow_pickle=False)` as per the recommendation of the post above. Note that we do [ensure data downloaded from our repository matches an expected signature](https://github.com/DistrictDataLabs/yellowbrick/blob/708274289d66d9265f7ded03e3445bc2bd70f46e/yellowbrick/datasets/download.py#L106), which minimizes but does not eliminate the risk to Yellowbrick users. \r\n\r\nThanks @theagent for bringing this to our attention!\n", "before_files": [{"content": "# yellowbrick.datasets.base\n# Loading utilities for the yellowbrick datasets.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Raul Peralta <[email protected]>\n# Created: Thu Jul 26 13:53:01 2018 -0400\n#\n# ID: base.py [] [email protected] $\n\n\"\"\"\nLoading utilities for the yellowbrick datasets.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport json\nimport numpy as np\n\nfrom .download import download_data\nfrom .path import find_dataset_path, dataset_exists\n\nfrom yellowbrick.exceptions import DatasetsError\nfrom yellowbrick.utils.decorators import memoized\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\n\n##########################################################################\n## Dataset Object\n##########################################################################\n\nclass BaseDataset(object):\n \"\"\"\n Base functionality for Dataset and Corpus objects.\n \"\"\"\n\n def __init__(self, name, url=None, signature=None, data_home=None):\n self.url = url\n self.name = name\n self.data_home = data_home\n self.signature = signature\n\n # Check if the dataset exists, and if not - download it!\n if not dataset_exists(self.name, data_home=data_home):\n self.download()\n\n def download(self, replace=False):\n \"\"\"\n Download the dataset from the hosted Yellowbrick data store and save\n it to the location specified by ``get_data_home``. 
The downloader\n verifies the download completed successfully and safely by comparing\n the expected signature with the SHA 256 signature of the downloaded\n archive file.\n\n Parameters\n ----------\n replace : bool, default: False\n If the data archive already exists, replace the dataset. If this is\n False and the dataset exists, an exception is raised.\n \"\"\"\n download_data(\n self.url, self.signature, data_home=self.data_home,\n replace=replace, extract=True\n )\n\n def contents(self):\n \"\"\"\n Contents returns a list of the files in the data directory.\n \"\"\"\n data = find_dataset_path(\n self.name, data_home=self.data_home, ext=None\n )\n return os.listdir(data)\n\n @memoized\n def README(self):\n \"\"\"\n Returns the contents of the README.md file that describes the dataset\n in detail and contains attribution information.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"README.md\"\n )\n with open(path, 'r') as f:\n return f.read()\n\n @memoized\n def meta(self):\n \"\"\"\n Returns the contents of the meta.json file that describes important\n attributes about the dataset and modifies the behavior of the loader.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return json.load(f)\n\n @memoized\n def citation(self):\n \"\"\"\n Returns the contents of the citation.bib file that describes the source\n and provenance of the dataset or to cite for academic work.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return f.read()\n\n\nclass Dataset(BaseDataset):\n \"\"\"\n Datasets contain a reference to data on disk and provide utilities for\n quickly loading files and objects into a variety of formats. The most\n common use of the Dataset object is to load example datasets provided by\n Yellowbrick to run the examples in the documentation.\n\n The dataset by default will return the data as a numpy array, however if\n Pandas is installed, it is possible to access the data as a DataFrame and\n Series object. In either case, the data is represented by a features table,\n X and a target vector, y.\n\n Parameters\n ----------\n name : str\n The name of the dataset; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the dataset externally.\n\n data_home : str, optional\n The path on disk where data is stored. If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the dataset can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n def to_data(self):\n \"\"\"\n Returns the data contained in the dataset as X and y where X is the\n features matrix and y is the target vector. If pandas is installed,\n the data will be returned as DataFrame and Series objects. 
Otherwise,\n the data will be returned as two numpy arrays.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A pandas DataFrame or numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A pandas Series or numpy array describing the target vector.\n \"\"\"\n if pd is not None:\n return self.to_pandas()\n return self.to_numpy()\n\n def to_numpy(self):\n \"\"\"\n Returns the dataset as two numpy arrays: X and y.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A numpy array describing the target vector.\n \"\"\"\n path = find_dataset_path(self.name, ext=\".npz\", data_home=self.data_home)\n with np.load(path) as npf:\n if \"X\" not in npf or \"y\" not in npf:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without numpy arrays \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # TODO: How to handle the case where y is None?\n return npf[\"X\"], npf[\"y\"]\n\n def to_pandas(self):\n \"\"\"\n Returns the dataset as two pandas objects: X and y.\n\n Returns\n -------\n X : DataFrame with shape (n_instances, n_features)\n A pandas DataFrame containing feature data and named columns.\n\n y : Series with shape (n_instances,)\n A pandas Series containing target data and an index that matches\n the feature DataFrame index.\n \"\"\"\n # Ensure the metadata is valid before continuing\n if self.meta is None:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without meta.json \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n if \"features\" not in self.meta or \"target\" not in self.meta:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without features \"\n \"or target - please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # Load data frame and return features and target\n # TODO: Return y as None if there is no self.meta[\"target\"]\n df = self.to_dataframe()\n return df[self.meta[\"features\"]], df[self.meta[\"target\"]]\n\n\n def to_dataframe(self):\n \"\"\"\n Returns the entire dataset as a single pandas DataFrame.\n\n Returns\n -------\n df : DataFrame with shape (n_instances, n_columns)\n A pandas DataFrame containing the complete original data table\n including all targets (specified by the meta data) and all\n features (including those that might have been filtered out).\n \"\"\"\n if pd is None:\n raise DatasetsError(\n \"pandas is required to load DataFrame, it can be installed with pip\"\n )\n\n path = find_dataset_path(self.name, ext=\".csv.gz\", data_home=self.data_home)\n return pd.read_csv(path, compression=\"gzip\")\n\n\nclass Corpus(BaseDataset):\n \"\"\"\n Corpus datasets contain a reference to documents on disk and provide\n utilities for quickly loading text data for use in machine learning\n workflows. The most common use of the corpus is to load the text analysis\n examples from the Yellowbrick documentation.\n\n Parameters\n ----------\n name : str\n The name of the corpus; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the corpus externally.\n\n data_home : str, optional\n The path on disk where data is stored. 
If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the corpus can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n @memoized\n def root(self):\n \"\"\"\n Discovers and caches the root directory of the corpus.\n \"\"\"\n return find_dataset_path(self.name, data_home=self.data_home, ext=None)\n\n @memoized\n def labels(self):\n \"\"\"\n Return the unique labels assigned to the documents.\n \"\"\"\n return [\n name for name in os.listdir(self.root)\n if os.path.isdir(os.path.join(self.root, name))\n ]\n\n @property\n def files(self):\n \"\"\"\n Returns the list of file names for all documents.\n \"\"\"\n return [\n os.path.join(self.root, label, name)\n for label in self.labels\n for name in os.listdir(os.path.join(self.root, label))\n ]\n\n @property\n def data(self):\n \"\"\"\n Read all of the documents from disk into an in-memory list.\n \"\"\"\n def read(path):\n with open(path, 'r', encoding='UTF-8') as f:\n return f.read()\n\n return [\n read(f) for f in self.files\n ]\n\n @property\n def target(self):\n \"\"\"\n Returns the label associated with each item in data.\n \"\"\"\n return [\n os.path.basename(os.path.dirname(f)) for f in self.files\n ]\n", "path": "yellowbrick/datasets/base.py"}], "after_files": [{"content": "# yellowbrick.datasets.base\n# Loading utilities for the yellowbrick datasets.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Raul Peralta <[email protected]>\n# Created: Thu Jul 26 13:53:01 2018 -0400\n#\n# ID: base.py [] [email protected] $\n\n\"\"\"\nLoading utilities for the yellowbrick datasets.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport json\nimport numpy as np\n\nfrom .download import download_data\nfrom .path import find_dataset_path, dataset_exists\n\nfrom yellowbrick.exceptions import DatasetsError\nfrom yellowbrick.utils.decorators import memoized\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\n\n##########################################################################\n## Dataset Object\n##########################################################################\n\nclass BaseDataset(object):\n \"\"\"\n Base functionality for Dataset and Corpus objects.\n \"\"\"\n\n def __init__(self, name, url=None, signature=None, data_home=None):\n self.url = url\n self.name = name\n self.data_home = data_home\n self.signature = signature\n\n # Check if the dataset exists, and if not - download it!\n if not dataset_exists(self.name, data_home=data_home):\n self.download()\n\n def download(self, replace=False):\n \"\"\"\n Download the dataset from the hosted Yellowbrick data store and save\n it to the location specified by ``get_data_home``. The downloader\n verifies the download completed successfully and safely by comparing\n the expected signature with the SHA 256 signature of the downloaded\n archive file.\n\n Parameters\n ----------\n replace : bool, default: False\n If the data archive already exists, replace the dataset. 
If this is\n False and the dataset exists, an exception is raised.\n \"\"\"\n download_data(\n self.url, self.signature, data_home=self.data_home,\n replace=replace, extract=True\n )\n\n def contents(self):\n \"\"\"\n Contents returns a list of the files in the data directory.\n \"\"\"\n data = find_dataset_path(\n self.name, data_home=self.data_home, ext=None\n )\n return os.listdir(data)\n\n @memoized\n def README(self):\n \"\"\"\n Returns the contents of the README.md file that describes the dataset\n in detail and contains attribution information.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"README.md\"\n )\n with open(path, 'r') as f:\n return f.read()\n\n @memoized\n def meta(self):\n \"\"\"\n Returns the contents of the meta.json file that describes important\n attributes about the dataset and modifies the behavior of the loader.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return json.load(f)\n\n @memoized\n def citation(self):\n \"\"\"\n Returns the contents of the citation.bib file that describes the source\n and provenance of the dataset or to cite for academic work.\n \"\"\"\n path = find_dataset_path(\n self.name, data_home=self.data_home, fname=\"meta.json\", raises=False\n )\n if path is None:\n return None\n\n with open(path, 'r') as f:\n return f.read()\n\n\nclass Dataset(BaseDataset):\n \"\"\"\n Datasets contain a reference to data on disk and provide utilities for\n quickly loading files and objects into a variety of formats. The most\n common use of the Dataset object is to load example datasets provided by\n Yellowbrick to run the examples in the documentation.\n\n The dataset by default will return the data as a numpy array, however if\n Pandas is installed, it is possible to access the data as a DataFrame and\n Series object. In either case, the data is represented by a features table,\n X and a target vector, y.\n\n Parameters\n ----------\n name : str\n The name of the dataset; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the dataset externally.\n\n data_home : str, optional\n The path on disk where data is stored. If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the dataset can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n def to_data(self):\n \"\"\"\n Returns the data contained in the dataset as X and y where X is the\n features matrix and y is the target vector. If pandas is installed,\n the data will be returned as DataFrame and Series objects. 
Otherwise,\n the data will be returned as two numpy arrays.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A pandas DataFrame or numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A pandas Series or numpy array describing the target vector.\n \"\"\"\n if pd is not None:\n return self.to_pandas()\n return self.to_numpy()\n\n def to_numpy(self):\n \"\"\"\n Returns the dataset as two numpy arrays: X and y.\n\n Returns\n -------\n X : array-like with shape (n_instances, n_features)\n A numpy array describing the instance features.\n\n y : array-like with shape (n_instances,)\n A numpy array describing the target vector.\n \"\"\"\n path = find_dataset_path(self.name, ext=\".npz\", data_home=self.data_home)\n with np.load(path, allow_pickle=False) as npf:\n if \"X\" not in npf or \"y\" not in npf:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without numpy arrays \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # TODO: How to handle the case where y is None?\n return npf[\"X\"], npf[\"y\"]\n\n def to_pandas(self):\n \"\"\"\n Returns the dataset as two pandas objects: X and y.\n\n Returns\n -------\n X : DataFrame with shape (n_instances, n_features)\n A pandas DataFrame containing feature data and named columns.\n\n y : Series with shape (n_instances,)\n A pandas Series containing target data and an index that matches\n the feature DataFrame index.\n \"\"\"\n # Ensure the metadata is valid before continuing\n if self.meta is None:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without meta.json \"\n \"- please report this bug to the Yellowbrick maintainers!\"\n ))\n\n if \"features\" not in self.meta or \"target\" not in self.meta:\n raise DatasetsError((\n \"the downloaded dataset was improperly packaged without features \"\n \"or target - please report this bug to the Yellowbrick maintainers!\"\n ))\n\n # Load data frame and return features and target\n # TODO: Return y as None if there is no self.meta[\"target\"]\n df = self.to_dataframe()\n return df[self.meta[\"features\"]], df[self.meta[\"target\"]]\n\n\n def to_dataframe(self):\n \"\"\"\n Returns the entire dataset as a single pandas DataFrame.\n\n Returns\n -------\n df : DataFrame with shape (n_instances, n_columns)\n A pandas DataFrame containing the complete original data table\n including all targets (specified by the meta data) and all\n features (including those that might have been filtered out).\n \"\"\"\n if pd is None:\n raise DatasetsError(\n \"pandas is required to load DataFrame, it can be installed with pip\"\n )\n\n path = find_dataset_path(self.name, ext=\".csv.gz\", data_home=self.data_home)\n return pd.read_csv(path, compression=\"gzip\")\n\n\nclass Corpus(BaseDataset):\n \"\"\"\n Corpus datasets contain a reference to documents on disk and provide\n utilities for quickly loading text data for use in machine learning\n workflows. The most common use of the corpus is to load the text analysis\n examples from the Yellowbrick documentation.\n\n Parameters\n ----------\n name : str\n The name of the corpus; should either be a folder in data home or\n specified in the yellowbrick.datasets.DATASETS variable. This name is\n used to perform all lookups and identify the corpus externally.\n\n data_home : str, optional\n The path on disk where data is stored. 
If not passed in, it is looked\n up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.\n\n url : str, optional\n The web location where the archive file of the corpus can be\n downloaded from.\n\n signature : str, optional\n The signature of the data archive file, used to verify that the latest\n version of the data has been downloaded and that the download hasn't\n been corrupted or modified in anyway.\n \"\"\"\n\n @memoized\n def root(self):\n \"\"\"\n Discovers and caches the root directory of the corpus.\n \"\"\"\n return find_dataset_path(self.name, data_home=self.data_home, ext=None)\n\n @memoized\n def labels(self):\n \"\"\"\n Return the unique labels assigned to the documents.\n \"\"\"\n return [\n name for name in os.listdir(self.root)\n if os.path.isdir(os.path.join(self.root, name))\n ]\n\n @property\n def files(self):\n \"\"\"\n Returns the list of file names for all documents.\n \"\"\"\n return [\n os.path.join(self.root, label, name)\n for label in self.labels\n for name in os.listdir(os.path.join(self.root, label))\n ]\n\n @property\n def data(self):\n \"\"\"\n Read all of the documents from disk into an in-memory list.\n \"\"\"\n def read(path):\n with open(path, 'r', encoding='UTF-8') as f:\n return f.read()\n\n return [\n read(f) for f in self.files\n ]\n\n @property\n def target(self):\n \"\"\"\n Returns the label associated with each item in data.\n \"\"\"\n return [\n os.path.basename(os.path.dirname(f)) for f in self.files\n ]\n", "path": "yellowbrick/datasets/base.py"}]} | 3,759 | 147 |
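The fix captured in the record above is the single hardening change recommended in the linked advisory: pass `allow_pickle=False` to `np.load`. A minimal, self-contained sketch of the safer call; the file name and array names below are only examples:

```python
import numpy as np

# Round-trip a small archive with pickle loading disabled.
# "example.npz" and the array names are illustrative only.
np.savez("example.npz", X=np.arange(6).reshape(3, 2), y=np.array([0, 1, 0]))

with np.load("example.npz", allow_pickle=False) as npf:
    X, y = npf["X"], npf["y"]  # would raise ValueError for pickled object arrays

print(X.shape, y.shape)
```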
gh_patches_debug_31438 | rasdani/github-patches | git_diff | pyodide__pyodide-2507 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyodide_build buildpkg does not install Cython as a build dependency when it is spelled with a lower-case c
## 🐛 Bug
When trying to build [cftime](https://github.com/Unidata/cftime), the isolated env does not install cython.
### To Reproduce
`python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a)
### Expected behavior
Successful build :smile:
### Environment
- Pyodide Version<!-- (e.g. 1.8.1) -->:
- Browser version<!-- (e.g. Chrome 95.0.4638.54) -->:
- Any other relevant information:
### Additional context
A patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it, but we should not be case-sensitive with PyPI names.
xref.: https://github.com/pyodide/pyodide/pull/2504
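In other words, the matching needs to ignore case (and, ideally, the other spelling differences PEP 503 allows). A minimal sketch of normalising requirement names before comparing them, using `packaging.utils.canonicalize_name`; the exclusion list here is illustrative:

```python
from packaging.requirements import Requirement
from packaging.utils import canonicalize_name

# Illustrative exclusion list; the real one lives in pypabuild.py.
UNISOLATED = {canonicalize_name(n) for n in ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"]}

def is_unisolated(reqstr):
    # "Cython>=0.29", "cython" and "CYTHON" all normalise to "cython".
    return canonicalize_name(Requirement(reqstr).name) in UNISOLATED

print(is_unisolated("Cython>=0.29"))  # True
print(is_unisolated("setuptools"))    # False
```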
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide-build/pyodide_build/pypabuild.py`
Content:
```
1 import contextlib
2 import os
3 import sys
4 import traceback
5 from itertools import chain
6 from pathlib import Path
7 from typing import Mapping
8
9 from build import BuildBackendException, ProjectBuilder # type: ignore[import]
10 from build.__main__ import ( # type: ignore[import]
11 _STYLES,
12 _error,
13 _handle_build_error,
14 _IsolatedEnvBuilder,
15 _ProjectBuilder,
16 )
17 from build.env import IsolatedEnv # type: ignore[import]
18 from packaging.requirements import Requirement
19
20 from .common import get_hostsitepackages, get_pyversion
21
22 UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"]
23
24
25 def symlink_unisolated_packages(env: IsolatedEnv):
26 pyversion = get_pyversion()
27 site_packages_path = f"lib/{pyversion}/site-packages"
28 env_site_packages = Path(env._path) / site_packages_path
29 host_site_packages = Path(get_hostsitepackages())
30 for name in UNISOLATED_PACKAGES:
31 for path in chain(
32 host_site_packages.glob(f"{name}*"), host_site_packages.glob(f"_{name}*")
33 ):
34 (env_site_packages / path.name).unlink(missing_ok=True)
35 (env_site_packages / path.name).symlink_to(path)
36
37
38 def remove_unisolated_requirements(requires: set[str]) -> set[str]:
39 for reqstr in list(requires):
40 req = Requirement(reqstr)
41 for avoid_name in UNISOLATED_PACKAGES:
42 if avoid_name in req.name:
43 requires.remove(reqstr)
44 return requires
45
46
47 @contextlib.contextmanager
48 def replace_env(build_env: Mapping[str, str]):
49 old_environ = dict(os.environ)
50 os.environ.clear()
51 os.environ.update(build_env)
52 try:
53 yield
54 finally:
55 os.environ.clear()
56 os.environ.update(old_environ)
57
58
59 def install_reqs(env: IsolatedEnv, reqs: set[str]):
60 env.install(remove_unisolated_requirements(reqs))
61
62
63 def _build_in_isolated_env(
64 build_env: Mapping[str, str],
65 builder: ProjectBuilder,
66 outdir: str,
67 distribution: str,
68 ) -> str:
69 with _IsolatedEnvBuilder() as env:
70 builder.python_executable = env.executable
71 builder.scripts_dir = env.scripts_dir
72 # first install the build dependencies
73 symlink_unisolated_packages(env)
74 install_reqs(env, builder.build_system_requires)
75 installed_requires_for_build = False
76 try:
77 build_reqs = builder.get_requires_for_build(distribution)
78 except BuildBackendException:
79 pass
80 else:
81 install_reqs(env, build_reqs)
82 installed_requires_for_build = True
83
84 with replace_env(build_env):
85 if not installed_requires_for_build:
86 install_reqs(env, builder.get_requires_for_build(distribution))
87 return builder.build(distribution, outdir, {})
88
89
90 def build(build_env: Mapping[str, str]):
91 srcdir = Path.cwd()
92 outdir = srcdir / "dist"
93 builder = _ProjectBuilder(srcdir)
94 distribution = "wheel"
95 try:
96 with _handle_build_error():
97 built = _build_in_isolated_env(
98 build_env, builder, str(outdir), distribution
99 )
100 print("{bold}{green}Successfully built {}{reset}".format(built, **_STYLES))
101 except Exception as e: # pragma: no cover
102 tb = traceback.format_exc().strip("\n")
103 print("\n{dim}{}{reset}\n".format(tb, **_STYLES))
104 _error(str(e))
105 sys.exit(1)
106
```
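The suspect spot is line 42 above, `if avoid_name in req.name:`, a plain substring test that is case-sensitive and can also match unrelated names. A few quick checks make that concrete:

```python
# Behaviour of the plain substring test used on line 42.
print("cython" in "Cython")          # False: "Cython" is not treated as unisolated
print("cython" in "cython")          # True:  "cython" is stripped from the build requirements
print("numpy" in "numpy-financial")  # True:  an unrelated name would be stripped as well
```

That lines up with the workaround described in the issue: renaming the requirement to `Cython` sidesteps the case-sensitive match entirely.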
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py
--- a/pyodide-build/pyodide_build/pypabuild.py
+++ b/pyodide-build/pyodide_build/pypabuild.py
@@ -19,7 +19,7 @@
from .common import get_hostsitepackages, get_pyversion
-UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran", "cython"]
+UNISOLATED_PACKAGES = ["numpy", "scipy", "cffi", "pycparser", "pythran"]
def symlink_unisolated_packages(env: IsolatedEnv):
@@ -39,7 +39,7 @@
for reqstr in list(requires):
req = Requirement(reqstr)
for avoid_name in UNISOLATED_PACKAGES:
- if avoid_name in req.name:
+ if avoid_name in req.name.lower():
requires.remove(reqstr)
return requires
@@ -58,6 +58,11 @@
def install_reqs(env: IsolatedEnv, reqs: set[str]):
env.install(remove_unisolated_requirements(reqs))
+ # Some packages (numcodecs) don't declare cython as a build dependency and
+ # only recythonize if it is present. We need them to always recythonize so
+ # we always install cython. If the reqs included some cython version already
+ # then this won't do anything.
+ env.install(["cython"])
def _build_in_isolated_env(
@@ -66,6 +71,10 @@
outdir: str,
distribution: str,
) -> str:
+ # For debugging: The following line disables removal of the isolated venv.
+ # It will be left in the /tmp folder and can be inspected or entered as
+ # needed.
+ # _IsolatedEnvBuilder.__exit__ = lambda *args: None
with _IsolatedEnvBuilder() as env:
builder.python_executable = env.executable
builder.scripts_dir = env.scripts_dir
| {"golden_diff": "diff --git a/pyodide-build/pyodide_build/pypabuild.py b/pyodide-build/pyodide_build/pypabuild.py\n--- a/pyodide-build/pyodide_build/pypabuild.py\n+++ b/pyodide-build/pyodide_build/pypabuild.py\n@@ -19,7 +19,7 @@\n \n from .common import get_hostsitepackages, get_pyversion\n \n-UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n+UNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\"]\n \n \n def symlink_unisolated_packages(env: IsolatedEnv):\n@@ -39,7 +39,7 @@\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n- if avoid_name in req.name:\n+ if avoid_name in req.name.lower():\n requires.remove(reqstr)\n return requires\n \n@@ -58,6 +58,11 @@\n \n def install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n+ # Some packages (numcodecs) don't declare cython as a build dependency and\n+ # only recythonize if it is present. We need them to always recythonize so\n+ # we always install cython. If the reqs included some cython version already\n+ # then this won't do anything.\n+ env.install([\"cython\"])\n \n \n def _build_in_isolated_env(\n@@ -66,6 +71,10 @@\n outdir: str,\n distribution: str,\n ) -> str:\n+ # For debugging: The following line disables removal of the isolated venv.\n+ # It will be left in the /tmp folder and can be inspected or entered as\n+ # needed.\n+ # _IsolatedEnvBuilder.__exit__ = lambda *args: None\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n", "issue": "pyodide_build buildpkg does not install Cython as a build dependency when it is spell with a lower case c\n## \ud83d\udc1b Bug\r\n\r\nWhen trying to build [cftime](https://github.com/Unidata/cftime) the isolated env does not install cython.\r\n\r\n### To Reproduce\r\n\r\n`python -m pyodide_build buildpkg packages/cftime/meta.yaml` on [this meta.yaml](https://gist.github.com/ocefpaf/8b9a90bfa40d7dc27c63e3bf22ef335a)\r\n\r\n### Expected behavior\r\n\r\nSuccessful build :smile: \r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 1.8.1) -->:\r\n- Browser version<!-- (e.g. 
Chrome 95.0.4638.54) -->:\r\n- Any other relevant information:\r\n\r\n\r\n### Additional context\r\n\r\nA patch to rename `cython` to `Cython` in the cftime pyproject.toml fixed it but we should not be case sensitive with PyPI names.\r\n\r\nxref.: https://github.com/pyodide/pyodide/pull/2504\n", "before_files": [{"content": "import contextlib\nimport os\nimport sys\nimport traceback\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Mapping\n\nfrom build import BuildBackendException, ProjectBuilder # type: ignore[import]\nfrom build.__main__ import ( # type: ignore[import]\n _STYLES,\n _error,\n _handle_build_error,\n _IsolatedEnvBuilder,\n _ProjectBuilder,\n)\nfrom build.env import IsolatedEnv # type: ignore[import]\nfrom packaging.requirements import Requirement\n\nfrom .common import get_hostsitepackages, get_pyversion\n\nUNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\", \"cython\"]\n\n\ndef symlink_unisolated_packages(env: IsolatedEnv):\n pyversion = get_pyversion()\n site_packages_path = f\"lib/{pyversion}/site-packages\"\n env_site_packages = Path(env._path) / site_packages_path\n host_site_packages = Path(get_hostsitepackages())\n for name in UNISOLATED_PACKAGES:\n for path in chain(\n host_site_packages.glob(f\"{name}*\"), host_site_packages.glob(f\"_{name}*\")\n ):\n (env_site_packages / path.name).unlink(missing_ok=True)\n (env_site_packages / path.name).symlink_to(path)\n\n\ndef remove_unisolated_requirements(requires: set[str]) -> set[str]:\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n if avoid_name in req.name:\n requires.remove(reqstr)\n return requires\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]):\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n\n\ndef _build_in_isolated_env(\n build_env: Mapping[str, str],\n builder: ProjectBuilder,\n outdir: str,\n distribution: str,\n) -> str:\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n # first install the build dependencies\n symlink_unisolated_packages(env)\n install_reqs(env, builder.build_system_requires)\n installed_requires_for_build = False\n try:\n build_reqs = builder.get_requires_for_build(distribution)\n except BuildBackendException:\n pass\n else:\n install_reqs(env, build_reqs)\n installed_requires_for_build = True\n\n with replace_env(build_env):\n if not installed_requires_for_build:\n install_reqs(env, builder.get_requires_for_build(distribution))\n return builder.build(distribution, outdir, {})\n\n\ndef build(build_env: Mapping[str, str]):\n srcdir = Path.cwd()\n outdir = srcdir / \"dist\"\n builder = _ProjectBuilder(srcdir)\n distribution = \"wheel\"\n try:\n with _handle_build_error():\n built = _build_in_isolated_env(\n build_env, builder, str(outdir), distribution\n )\n print(\"{bold}{green}Successfully built {}{reset}\".format(built, **_STYLES))\n except Exception as e: # pragma: no cover\n tb = traceback.format_exc().strip(\"\\n\")\n print(\"\\n{dim}{}{reset}\\n\".format(tb, **_STYLES))\n _error(str(e))\n sys.exit(1)\n", "path": "pyodide-build/pyodide_build/pypabuild.py"}], "after_files": [{"content": "import contextlib\nimport os\nimport sys\nimport traceback\nfrom itertools 
import chain\nfrom pathlib import Path\nfrom typing import Mapping\n\nfrom build import BuildBackendException, ProjectBuilder # type: ignore[import]\nfrom build.__main__ import ( # type: ignore[import]\n _STYLES,\n _error,\n _handle_build_error,\n _IsolatedEnvBuilder,\n _ProjectBuilder,\n)\nfrom build.env import IsolatedEnv # type: ignore[import]\nfrom packaging.requirements import Requirement\n\nfrom .common import get_hostsitepackages, get_pyversion\n\nUNISOLATED_PACKAGES = [\"numpy\", \"scipy\", \"cffi\", \"pycparser\", \"pythran\"]\n\n\ndef symlink_unisolated_packages(env: IsolatedEnv):\n pyversion = get_pyversion()\n site_packages_path = f\"lib/{pyversion}/site-packages\"\n env_site_packages = Path(env._path) / site_packages_path\n host_site_packages = Path(get_hostsitepackages())\n for name in UNISOLATED_PACKAGES:\n for path in chain(\n host_site_packages.glob(f\"{name}*\"), host_site_packages.glob(f\"_{name}*\")\n ):\n (env_site_packages / path.name).unlink(missing_ok=True)\n (env_site_packages / path.name).symlink_to(path)\n\n\ndef remove_unisolated_requirements(requires: set[str]) -> set[str]:\n for reqstr in list(requires):\n req = Requirement(reqstr)\n for avoid_name in UNISOLATED_PACKAGES:\n if avoid_name in req.name.lower():\n requires.remove(reqstr)\n return requires\n\n\[email protected]\ndef replace_env(build_env: Mapping[str, str]):\n old_environ = dict(os.environ)\n os.environ.clear()\n os.environ.update(build_env)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)\n\n\ndef install_reqs(env: IsolatedEnv, reqs: set[str]):\n env.install(remove_unisolated_requirements(reqs))\n # Some packages (numcodecs) don't declare cython as a build dependency and\n # only recythonize if it is present. We need them to always recythonize so\n # we always install cython. 
If the reqs included some cython version already\n # then this won't do anything.\n env.install([\"cython\"])\n\n\ndef _build_in_isolated_env(\n build_env: Mapping[str, str],\n builder: ProjectBuilder,\n outdir: str,\n distribution: str,\n) -> str:\n # For debugging: The following line disables removal of the isolated venv.\n # It will be left in the /tmp folder and can be inspected or entered as\n # needed.\n # _IsolatedEnvBuilder.__exit__ = lambda *args: None\n with _IsolatedEnvBuilder() as env:\n builder.python_executable = env.executable\n builder.scripts_dir = env.scripts_dir\n # first install the build dependencies\n symlink_unisolated_packages(env)\n install_reqs(env, builder.build_system_requires)\n installed_requires_for_build = False\n try:\n build_reqs = builder.get_requires_for_build(distribution)\n except BuildBackendException:\n pass\n else:\n install_reqs(env, build_reqs)\n installed_requires_for_build = True\n\n with replace_env(build_env):\n if not installed_requires_for_build:\n install_reqs(env, builder.get_requires_for_build(distribution))\n return builder.build(distribution, outdir, {})\n\n\ndef build(build_env: Mapping[str, str]):\n srcdir = Path.cwd()\n outdir = srcdir / \"dist\"\n builder = _ProjectBuilder(srcdir)\n distribution = \"wheel\"\n try:\n with _handle_build_error():\n built = _build_in_isolated_env(\n build_env, builder, str(outdir), distribution\n )\n print(\"{bold}{green}Successfully built {}{reset}\".format(built, **_STYLES))\n except Exception as e: # pragma: no cover\n tb = traceback.format_exc().strip(\"\\n\")\n print(\"\\n{dim}{}{reset}\\n\".format(tb, **_STYLES))\n _error(str(e))\n sys.exit(1)\n", "path": "pyodide-build/pyodide_build/pypabuild.py"}]} | 1,521 | 485 |
gh_patches_debug_43929 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-71 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change the way the UI receives game updates
The UI currently polls the frontend server constantly for updates.
Implement a solution where the _game simulation_ server pushes updates instead - e.g. using web sockets (or some other better solution that you can think of!)
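For the web-socket route, the server side can stay very small. A minimal sketch, assuming Flask-SocketIO; the event name and the shape of the payload are placeholders:

```python
# Sketch only: assumes the flask-socketio package is installed.
import flask
from flask_socketio import SocketIO

app = flask.Flask(__name__)
socketio = SocketIO(app)

def broadcast_world_update(world_state):
    # Emitting on the SocketIO object (outside a request context) reaches all connected clients.
    socketio.emit('world-update', world_state)

if __name__ == '__main__':
    socketio.run(app)
```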
Possible server-side solutions:
1. client ==> proxy (on Kubernetes) ==> game simulation (on Kubernetes)
Here we only need one public IP address for the proxy, no matter how many game simulation servers we spawn.
1. client ==> game simulation (on Kubernetes)
We need a public IP address per game simulation. We need to investigate whether this would be a problem (e.g. can't be done dynamically, or is expensive).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aimmo-game/simulation/turn_manager.py`
Content:
```
1 import logging
2 import requests
3 import threading
4 import time
5 from threading import Lock
6 from simulation import world_map
7 from simulation.action import ACTIONS
8
9 LOGGER = logging.getLogger(__name__)
10
11
12 class WorldStateProvider:
13 """
14 Thread-safe container for the world state.
15
16 TODO: think about changing to snapshot rather than lock?
17 """
18
19 def __init__(self):
20 self._world_state = None
21 self._lock = Lock()
22
23 def lock_and_get_world(self):
24 self._lock.acquire()
25 return self._world_state
26
27 def release_lock(self):
28 self._lock.release()
29
30 def set_world(self, new_world_state):
31 self._lock.acquire()
32 self._world_state = new_world_state
33 self._lock.release()
34
35 world_state_provider = WorldStateProvider()
36
37
38 class TurnManager(threading.Thread):
39 """
40 Game loop
41 """
42 daemon = True
43
44 def __init__(self, game_state):
45 world_state_provider.set_world(game_state)
46 super(TurnManager, self).__init__()
47
48 def _update_environment(self, game_state):
49 num_avatars = len(game_state.avatar_manager.active_avatars)
50 game_state.world_map.reconstruct_interactive_state(num_avatars)
51
52 def run_turn(self):
53 try:
54 game_state = world_state_provider.lock_and_get_world()
55
56 for avatar in game_state.avatar_manager.active_avatars:
57 turn_state = game_state.get_state_for(avatar)
58 try:
59 data = requests.post(avatar.worker_url, json=turn_state).json()
60 except ValueError as err:
61 LOGGER.info("Failed to get turn result: %s", err)
62 else:
63 try:
64 action_data = data['action']
65 action = ACTIONS[action_data['action_type']](**action_data.get('options', {}))
66 except (KeyError, ValueError) as err:
67 LOGGER.info("Bad action data supplied: %s", err)
68 else:
69 action.apply(game_state, avatar)
70
71 self._update_environment(game_state)
72
73 finally:
74 world_state_provider.release_lock()
75
76 def run(self):
77 while True:
78 self.run_turn()
79 time.sleep(0.5)
80
```
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3
4 setup(name='aimmo',
5 packages=find_packages(),
6 include_package_data=True,
7 install_requires = [
8 'django >= 1.8.3, < 1.9.0',
9 'django-autoconfig >= 0.3.6, < 1.0.0',
10 'django-js-reverse',
11 'flask',
12 'flask-cors',
13 'requests',
14 ],
15 tests_require=[
16 'django-setuptest',
17 ],
18 test_suite='setuptest.setuptest.SetupTestSuite',
19 version='0.0.0',
20 zip_safe=False,
21 )
22
```
Path: `aimmo-game/service.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3
4 import flask
5 from flask.ext.cors import CORS
6
7 from simulation.turn_manager import world_state_provider
8 from simulation import map_generator
9 from simulation.avatar.avatar_manager import AvatarManager
10 from simulation.game_state import GameState
11 from simulation.turn_manager import TurnManager
12 from simulation.worker_manager import LocalWorkerManager
13
14 app = flask.Flask(__name__)
15 CORS(app)
16
17
18 def to_cell_type(cell):
19 if not cell.habitable:
20 return 1
21 if cell.generates_score:
22 return 2
23 return 0
24
25
26 def player_dict(avatar):
27 # TODO: implement better colour functionality: will eventually fall off end of numbers
28 colour = "#%06x" % (avatar.player_id * 4999)
29 return {
30 'id': avatar.player_id,
31 'x': avatar.location.x,
32 'y': avatar.location.y,
33 'health': avatar.health,
34 'score': avatar.score,
35 'rotation': 0,
36 "colours": {
37 "bodyStroke": "#0ff",
38 "bodyFill": colour,
39 "eyeStroke": "#aff",
40 "eyeFill": "#eff",
41 }
42 }
43
44
45 @app.route('/')
46 def get_world_state():
47 try:
48 world = world_state_provider.lock_and_get_world()
49 num_cols = len(world.world_map.grid)
50 num_rows = len(world.world_map.grid[0])
51 grid = [[None for x in xrange(num_cols)] for y in xrange(num_rows)]
52 for cell in world.world_map.all_cells():
53 grid[cell.location.x][cell.location.y] = to_cell_type(cell)
54 player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}
55 return flask.jsonify(
56 players=player_data,
57 score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],
58 pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],
59 map_changed=True, # TODO: experiment with only sending deltas (not if not required)
60 width=num_cols,
61 height=num_rows,
62 layout=grid,
63 )
64 finally:
65 world_state_provider.release_lock()
66
67
68 def run_game():
69 print("Running game...")
70 my_map = map_generator.generate_map(15, 15, 0.1)
71 player_manager = AvatarManager()
72 game_state = GameState(my_map, player_manager)
73 turn_manager = TurnManager(game_state=game_state)
74 worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')
75 worker_manager.start()
76 turn_manager.start()
77
78
79 if __name__ == '__main__':
80 logging.basicConfig(level=logging.DEBUG)
81
82 run_game()
83 app.run(debug=True, use_reloader=False)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aimmo-game/service.py b/aimmo-game/service.py
--- a/aimmo-game/service.py
+++ b/aimmo-game/service.py
@@ -1,8 +1,11 @@
#!/usr/bin/env python
import logging
+import eventlet
+eventlet.monkey_patch()
+
import flask
-from flask.ext.cors import CORS
+from flask.ext.socketio import SocketIO, emit
from simulation.turn_manager import world_state_provider
from simulation import map_generator
@@ -12,7 +15,7 @@
from simulation.worker_manager import LocalWorkerManager
app = flask.Flask(__name__)
-CORS(app)
+socketio = SocketIO(app)
def to_cell_type(cell):
@@ -42,7 +45,6 @@
}
[email protected]('/')
def get_world_state():
try:
world = world_state_provider.lock_and_get_world()
@@ -52,25 +54,41 @@
for cell in world.world_map.all_cells():
grid[cell.location.x][cell.location.y] = to_cell_type(cell)
player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}
- return flask.jsonify(
- players=player_data,
- score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],
- pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],
- map_changed=True, # TODO: experiment with only sending deltas (not if not required)
- width=num_cols,
- height=num_rows,
- layout=grid,
- )
+ return {
+ 'players': player_data,
+ 'score_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],
+ 'pickup_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],
+ 'map_changed': True, # TODO: experiment with only sending deltas (not if not required)
+ 'width': num_cols,
+ 'height': num_rows,
+ 'layout': grid,
+ }
finally:
world_state_provider.release_lock()
[email protected]('connect')
+def world_update_on_connect():
+ emit(
+ 'world-update',
+ get_world_state(),
+ )
+
+
+def send_world_update():
+ socketio.emit(
+ 'world-update',
+ get_world_state(),
+ broadcast=True,
+ )
+
+
def run_game():
print("Running game...")
my_map = map_generator.generate_map(15, 15, 0.1)
player_manager = AvatarManager()
game_state = GameState(my_map, player_manager)
- turn_manager = TurnManager(game_state=game_state)
+ turn_manager = TurnManager(game_state=game_state, end_turn_callback=send_world_update)
worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')
worker_manager.start()
turn_manager.start()
@@ -80,4 +98,4 @@
logging.basicConfig(level=logging.DEBUG)
run_game()
- app.run(debug=True, use_reloader=False)
+ socketio.run(app, debug=True, use_reloader=False)
diff --git a/aimmo-game/simulation/turn_manager.py b/aimmo-game/simulation/turn_manager.py
--- a/aimmo-game/simulation/turn_manager.py
+++ b/aimmo-game/simulation/turn_manager.py
@@ -41,8 +41,9 @@
"""
daemon = True
- def __init__(self, game_state):
+ def __init__(self, game_state, end_turn_callback):
world_state_provider.set_world(game_state)
+ self.end_turn_callback = end_turn_callback
super(TurnManager, self).__init__()
def _update_environment(self, game_state):
@@ -76,4 +77,5 @@
def run(self):
while True:
self.run_turn()
+ self.end_turn_callback()
time.sleep(0.5)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,8 +8,9 @@
'django >= 1.8.3, < 1.9.0',
'django-autoconfig >= 0.3.6, < 1.0.0',
'django-js-reverse',
+ 'eventlet',
'flask',
- 'flask-cors',
+ 'flask-socketio',
'requests',
],
tests_require=[
| {"golden_diff": "diff --git a/aimmo-game/service.py b/aimmo-game/service.py\n--- a/aimmo-game/service.py\n+++ b/aimmo-game/service.py\n@@ -1,8 +1,11 @@\n #!/usr/bin/env python\n import logging\n \n+import eventlet\n+eventlet.monkey_patch()\n+\n import flask\n-from flask.ext.cors import CORS\n+from flask.ext.socketio import SocketIO, emit\n \n from simulation.turn_manager import world_state_provider\n from simulation import map_generator\n@@ -12,7 +15,7 @@\n from simulation.worker_manager import LocalWorkerManager\n \n app = flask.Flask(__name__)\n-CORS(app)\n+socketio = SocketIO(app)\n \n \n def to_cell_type(cell):\n@@ -42,7 +45,6 @@\n }\n \n \[email protected]('/')\n def get_world_state():\n try:\n world = world_state_provider.lock_and_get_world()\n@@ -52,25 +54,41 @@\n for cell in world.world_map.all_cells():\n grid[cell.location.x][cell.location.y] = to_cell_type(cell)\n player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}\n- return flask.jsonify(\n- players=player_data,\n- score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n- pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n- map_changed=True, # TODO: experiment with only sending deltas (not if not required)\n- width=num_cols,\n- height=num_rows,\n- layout=grid,\n- )\n+ return {\n+ 'players': player_data,\n+ 'score_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n+ 'pickup_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n+ 'map_changed': True, # TODO: experiment with only sending deltas (not if not required)\n+ 'width': num_cols,\n+ 'height': num_rows,\n+ 'layout': grid,\n+ }\n finally:\n world_state_provider.release_lock()\n \n \[email protected]('connect')\n+def world_update_on_connect():\n+ emit(\n+ 'world-update',\n+ get_world_state(),\n+ )\n+\n+\n+def send_world_update():\n+ socketio.emit(\n+ 'world-update',\n+ get_world_state(),\n+ broadcast=True,\n+ )\n+\n+\n def run_game():\n print(\"Running game...\")\n my_map = map_generator.generate_map(15, 15, 0.1)\n player_manager = AvatarManager()\n game_state = GameState(my_map, player_manager)\n- turn_manager = TurnManager(game_state=game_state)\n+ turn_manager = TurnManager(game_state=game_state, end_turn_callback=send_world_update)\n worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')\n worker_manager.start()\n turn_manager.start()\n@@ -80,4 +98,4 @@\n logging.basicConfig(level=logging.DEBUG)\n \n run_game()\n- app.run(debug=True, use_reloader=False)\n+ socketio.run(app, debug=True, use_reloader=False)\ndiff --git a/aimmo-game/simulation/turn_manager.py b/aimmo-game/simulation/turn_manager.py\n--- a/aimmo-game/simulation/turn_manager.py\n+++ b/aimmo-game/simulation/turn_manager.py\n@@ -41,8 +41,9 @@\n \"\"\"\n daemon = True\n \n- def __init__(self, game_state):\n+ def __init__(self, game_state, end_turn_callback):\n world_state_provider.set_world(game_state)\n+ self.end_turn_callback = end_turn_callback\n super(TurnManager, self).__init__()\n \n def _update_environment(self, game_state):\n@@ -76,4 +77,5 @@\n def run(self):\n while True:\n self.run_turn()\n+ self.end_turn_callback()\n time.sleep(0.5)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,8 +8,9 @@\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-js-reverse',\n+ 'eventlet',\n 'flask',\n- 
'flask-cors',\n+ 'flask-socketio',\n 'requests',\n ],\n tests_require=[\n", "issue": "Change the way the UI receives game updates\nThe UI currently polls the frontend server constantly for updates.\n\nImplement a solution where the _game simulation_ server pushes updates instead - e.g. using web sockets (or some other better solution that you can think of!)\n\nPossible server-side solutions:\n1. client ==> proxy (on Kubernetes) ==> game simulation (on Kubernetes)\nHere we only need one public IP address for the proxy, no matter how many game simulation servers we spawn.\n1. client ==> game simulation (on Kubernetes)\n We need a public IP address per game simulation. We need to investigate whether this would be a problem (e.g. can't be done dynamically, or is expensive).\n\n", "before_files": [{"content": "import logging\nimport requests\nimport threading\nimport time\nfrom threading import Lock\nfrom simulation import world_map\nfrom simulation.action import ACTIONS\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass WorldStateProvider:\n \"\"\"\n Thread-safe container for the world state.\n\n TODO: think about changing to snapshot rather than lock?\n \"\"\"\n\n def __init__(self):\n self._world_state = None\n self._lock = Lock()\n\n def lock_and_get_world(self):\n self._lock.acquire()\n return self._world_state\n\n def release_lock(self):\n self._lock.release()\n\n def set_world(self, new_world_state):\n self._lock.acquire()\n self._world_state = new_world_state\n self._lock.release()\n\nworld_state_provider = WorldStateProvider()\n\n\nclass TurnManager(threading.Thread):\n \"\"\"\n Game loop\n \"\"\"\n daemon = True\n\n def __init__(self, game_state):\n world_state_provider.set_world(game_state)\n super(TurnManager, self).__init__()\n\n def _update_environment(self, game_state):\n num_avatars = len(game_state.avatar_manager.active_avatars)\n game_state.world_map.reconstruct_interactive_state(num_avatars)\n\n def run_turn(self):\n try:\n game_state = world_state_provider.lock_and_get_world()\n\n for avatar in game_state.avatar_manager.active_avatars:\n turn_state = game_state.get_state_for(avatar)\n try:\n data = requests.post(avatar.worker_url, json=turn_state).json()\n except ValueError as err:\n LOGGER.info(\"Failed to get turn result: %s\", err)\n else:\n try:\n action_data = data['action']\n action = ACTIONS[action_data['action_type']](**action_data.get('options', {}))\n except (KeyError, ValueError) as err:\n LOGGER.info(\"Bad action data supplied: %s\", err)\n else:\n action.apply(game_state, avatar)\n\n self._update_environment(game_state)\n\n finally:\n world_state_provider.release_lock()\n\n def run(self):\n while True:\n self.run_turn()\n time.sleep(0.5)\n", "path": "aimmo-game/simulation/turn_manager.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nsetup(name='aimmo',\n packages=find_packages(),\n include_package_data=True,\n install_requires = [\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-js-reverse',\n 'flask',\n 'flask-cors',\n 'requests',\n ],\n tests_require=[\n 'django-setuptest',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n version='0.0.0',\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport logging\n\nimport flask\nfrom flask.ext.cors import CORS\n\nfrom simulation.turn_manager import world_state_provider\nfrom simulation import map_generator\nfrom simulation.avatar.avatar_manager import AvatarManager\nfrom simulation.game_state import 
GameState\nfrom simulation.turn_manager import TurnManager\nfrom simulation.worker_manager import LocalWorkerManager\n\napp = flask.Flask(__name__)\nCORS(app)\n\n\ndef to_cell_type(cell):\n if not cell.habitable:\n return 1\n if cell.generates_score:\n return 2\n return 0\n\n\ndef player_dict(avatar):\n # TODO: implement better colour functionality: will eventually fall off end of numbers\n colour = \"#%06x\" % (avatar.player_id * 4999)\n return {\n 'id': avatar.player_id,\n 'x': avatar.location.x,\n 'y': avatar.location.y,\n 'health': avatar.health,\n 'score': avatar.score,\n 'rotation': 0,\n \"colours\": {\n \"bodyStroke\": \"#0ff\",\n \"bodyFill\": colour,\n \"eyeStroke\": \"#aff\",\n \"eyeFill\": \"#eff\",\n }\n }\n\n\[email protected]('/')\ndef get_world_state():\n try:\n world = world_state_provider.lock_and_get_world()\n num_cols = len(world.world_map.grid)\n num_rows = len(world.world_map.grid[0])\n grid = [[None for x in xrange(num_cols)] for y in xrange(num_rows)]\n for cell in world.world_map.all_cells():\n grid[cell.location.x][cell.location.y] = to_cell_type(cell)\n player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}\n return flask.jsonify(\n players=player_data,\n score_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n pickup_locations=[(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n map_changed=True, # TODO: experiment with only sending deltas (not if not required)\n width=num_cols,\n height=num_rows,\n layout=grid,\n )\n finally:\n world_state_provider.release_lock()\n\n\ndef run_game():\n print(\"Running game...\")\n my_map = map_generator.generate_map(15, 15, 0.1)\n player_manager = AvatarManager()\n game_state = GameState(my_map, player_manager)\n turn_manager = TurnManager(game_state=game_state)\n worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')\n worker_manager.start()\n turn_manager.start()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n run_game()\n app.run(debug=True, use_reloader=False)\n", "path": "aimmo-game/service.py"}], "after_files": [{"content": "import logging\nimport requests\nimport threading\nimport time\nfrom threading import Lock\nfrom simulation import world_map\nfrom simulation.action import ACTIONS\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass WorldStateProvider:\n \"\"\"\n Thread-safe container for the world state.\n\n TODO: think about changing to snapshot rather than lock?\n \"\"\"\n\n def __init__(self):\n self._world_state = None\n self._lock = Lock()\n\n def lock_and_get_world(self):\n self._lock.acquire()\n return self._world_state\n\n def release_lock(self):\n self._lock.release()\n\n def set_world(self, new_world_state):\n self._lock.acquire()\n self._world_state = new_world_state\n self._lock.release()\n\nworld_state_provider = WorldStateProvider()\n\n\nclass TurnManager(threading.Thread):\n \"\"\"\n Game loop\n \"\"\"\n daemon = True\n\n def __init__(self, game_state, end_turn_callback):\n world_state_provider.set_world(game_state)\n self.end_turn_callback = end_turn_callback\n super(TurnManager, self).__init__()\n\n def _update_environment(self, game_state):\n num_avatars = len(game_state.avatar_manager.active_avatars)\n game_state.world_map.reconstruct_interactive_state(num_avatars)\n\n def run_turn(self):\n try:\n game_state = world_state_provider.lock_and_get_world()\n\n for avatar in game_state.avatar_manager.active_avatars:\n 
turn_state = game_state.get_state_for(avatar)\n try:\n data = requests.post(avatar.worker_url, json=turn_state).json()\n except ValueError as err:\n LOGGER.info(\"Failed to get turn result: %s\", err)\n else:\n try:\n action_data = data['action']\n action = ACTIONS[action_data['action_type']](**action_data.get('options', {}))\n except (KeyError, ValueError) as err:\n LOGGER.info(\"Bad action data supplied: %s\", err)\n else:\n action.apply(game_state, avatar)\n\n self._update_environment(game_state)\n\n finally:\n world_state_provider.release_lock()\n\n def run(self):\n while True:\n self.run_turn()\n self.end_turn_callback()\n time.sleep(0.5)\n", "path": "aimmo-game/simulation/turn_manager.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nsetup(name='aimmo',\n packages=find_packages(),\n include_package_data=True,\n install_requires = [\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-js-reverse',\n 'eventlet',\n 'flask',\n 'flask-socketio',\n 'requests',\n ],\n tests_require=[\n 'django-setuptest',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n version='0.0.0',\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport logging\n\nimport eventlet\neventlet.monkey_patch()\n\nimport flask\nfrom flask.ext.socketio import SocketIO, emit\n\nfrom simulation.turn_manager import world_state_provider\nfrom simulation import map_generator\nfrom simulation.avatar.avatar_manager import AvatarManager\nfrom simulation.game_state import GameState\nfrom simulation.turn_manager import TurnManager\nfrom simulation.worker_manager import LocalWorkerManager\n\napp = flask.Flask(__name__)\nsocketio = SocketIO(app)\n\n\ndef to_cell_type(cell):\n if not cell.habitable:\n return 1\n if cell.generates_score:\n return 2\n return 0\n\n\ndef player_dict(avatar):\n # TODO: implement better colour functionality: will eventually fall off end of numbers\n colour = \"#%06x\" % (avatar.player_id * 4999)\n return {\n 'id': avatar.player_id,\n 'x': avatar.location.x,\n 'y': avatar.location.y,\n 'health': avatar.health,\n 'score': avatar.score,\n 'rotation': 0,\n \"colours\": {\n \"bodyStroke\": \"#0ff\",\n \"bodyFill\": colour,\n \"eyeStroke\": \"#aff\",\n \"eyeFill\": \"#eff\",\n }\n }\n\n\ndef get_world_state():\n try:\n world = world_state_provider.lock_and_get_world()\n num_cols = len(world.world_map.grid)\n num_rows = len(world.world_map.grid[0])\n grid = [[None for x in xrange(num_cols)] for y in xrange(num_rows)]\n for cell in world.world_map.all_cells():\n grid[cell.location.x][cell.location.y] = to_cell_type(cell)\n player_data = {p.player_id: player_dict(p) for p in world.avatar_manager.avatars}\n return {\n 'players': player_data,\n 'score_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.score_cells()],\n 'pickup_locations': [(cell.location.x, cell.location.y) for cell in world.world_map.pickup_cells()],\n 'map_changed': True, # TODO: experiment with only sending deltas (not if not required)\n 'width': num_cols,\n 'height': num_rows,\n 'layout': grid,\n }\n finally:\n world_state_provider.release_lock()\n\n\[email protected]('connect')\ndef world_update_on_connect():\n emit(\n 'world-update',\n get_world_state(),\n )\n\n\ndef send_world_update():\n socketio.emit(\n 'world-update',\n get_world_state(),\n broadcast=True,\n )\n\n\ndef run_game():\n print(\"Running game...\")\n my_map = map_generator.generate_map(15, 15, 0.1)\n player_manager = AvatarManager()\n game_state = 
GameState(my_map, player_manager)\n turn_manager = TurnManager(game_state=game_state, end_turn_callback=send_world_update)\n worker_manager = LocalWorkerManager(game_state=game_state, users_url='http://localhost:8000/players/api/games/')\n worker_manager.start()\n turn_manager.start()\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n run_game()\n socketio.run(app, debug=True, use_reloader=False)\n", "path": "aimmo-game/service.py"}]} | 1,995 | 1,018 |
gh_patches_debug_3703 | rasdani/github-patches | git_diff | wright-group__WrightTools-359 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
coverage
consider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/)
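A minimal sketch of using coverage programmatically; the measured package name is just an example:

```python
import coverage

cov = coverage.Coverage(source=["WrightTools"])
cov.start()
import WrightTools  # exercise whatever code should be measured here
cov.stop()
cov.save()
cov.report()  # prints per-file statement coverage
```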
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python3
2
3 import os
4 from setuptools import setup, find_packages
5
6
7 def package_files(directory):
8 paths = []
9 for (path, directories, filenames) in os.walk(directory):
10 for filename in filenames:
11 paths.append(os.path.join('..', path, filename))
12 return paths
13
14
15 here = os.path.abspath(os.path.dirname(__file__))
16
17 extra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))
18 extra_files.append(os.path.join(here, 'CONTRIBUTORS'))
19 extra_files.append(os.path.join(here, 'LICENSE'))
20 extra_files.append(os.path.join(here, 'README.rst'))
21 extra_files.append(os.path.join(here, 'requirements.txt'))
22 extra_files.append(os.path.join(here, 'VERSION'))
23 extra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))
24
25 with open(os.path.join(here, 'requirements.txt')) as f:
26 required = f.read().splitlines()
27
28 with open(os.path.join(here, 'VERSION')) as version_file:
29 version = version_file.read().strip()
30
31 setup(
32 name='WrightTools',
33 packages=find_packages(),
34 package_data={'': extra_files},
35 setup_requires=['pytest-runner'],
36 tests_require=['pytest'],
37 install_requires=required,
38 extras_require={'docs': ['sphinx-gallery>=0.1.9']},
39 version=version,
40 description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',
41 author='Blaise Thompson',
42 author_email='[email protected]',
43 license='MIT',
44 url='http://wright.tools',
45 keywords='spectroscopy science multidimensional visualization',
46 classifiers=['Development Status :: 5 - Production/Stable',
47 'Intended Audience :: Science/Research',
48 'License :: OSI Approved :: MIT License',
49 'Natural Language :: English',
50 'Programming Language :: Python :: 2',
51 'Programming Language :: Python :: 2.7',
52 'Programming Language :: Python :: 3',
53 'Programming Language :: Python :: 3.3',
54 'Programming Language :: Python :: 3.4',
55 'Programming Language :: Python :: 3.5',
56 'Topic :: Scientific/Engineering']
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
packages=find_packages(),
package_data={'': extra_files},
setup_requires=['pytest-runner'],
- tests_require=['pytest'],
+ tests_require=['pytest', 'pytest-cov'],
install_requires=required,
extras_require={'docs': ['sphinx-gallery>=0.1.9']},
version=version,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n- tests_require=['pytest'],\n+ tests_require=['pytest', 'pytest-cov'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n", "issue": "coverage\nconsider using [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/)\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\ndef package_files(directory):\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nextra_files = package_files(os.path.join(here, 'WrightTools', 'datasets'))\nextra_files.append(os.path.join(here, 'CONTRIBUTORS'))\nextra_files.append(os.path.join(here, 'LICENSE'))\nextra_files.append(os.path.join(here, 'README.rst'))\nextra_files.append(os.path.join(here, 'requirements.txt'))\nextra_files.append(os.path.join(here, 'VERSION'))\nextra_files.append(os.path.join(here, 'WrightTools', 'client_secrets.json'))\n\nwith open(os.path.join(here, 'requirements.txt')) as f:\n required = f.read().splitlines()\n\nwith open(os.path.join(here, 'VERSION')) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name='WrightTools',\n packages=find_packages(),\n package_data={'': extra_files},\n setup_requires=['pytest-runner'],\n tests_require=['pytest', 'pytest-cov'],\n install_requires=required,\n extras_require={'docs': ['sphinx-gallery>=0.1.9']},\n version=version,\n description='Tools for loading, processing, and plotting multidimensional spectroscopy data.',\n author='Blaise Thompson',\n author_email='[email protected]',\n license='MIT',\n url='http://wright.tools',\n keywords='spectroscopy science multidimensional visualization',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Scientific/Engineering']\n)\n", "path": "setup.py"}]} | 866 | 99 |
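For reference, the golden diff above only wires `pytest-cov` into `tests_require`; the actual measurement comes from coverage.py, typically invoked as `pytest --cov=WrightTools`. A minimal sketch of the same measurement through coverage.py's Python API, assuming `coverage` and `WrightTools` are installed locally:

```python
# Coverage sketch: importing the package stands in for running the real pytest
# suite, which is what `pytest --cov=WrightTools` would exercise and measure.
import coverage

cov = coverage.Coverage(source=["WrightTools"])
cov.start()
import WrightTools  # noqa: F401  # stand-in for exercising the test suite
cov.stop()
cov.save()
cov.report(show_missing=True)  # per-file coverage plus the uncovered line numbers
```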
gh_patches_debug_7544 | rasdani/github-patches | git_diff | liqd__a4-product-375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[partner landing page] unpublished and archived projects are shown
On the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden by default.
See: https://product-dev.liqd.net/teststadt/

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liqd_product/apps/partners/views.py`
Content:
```
1 from django.contrib.messages.views import SuccessMessageMixin
2 from django.utils.translation import ugettext_lazy as _
3 from django.views import generic
4 from django.views.generic import DetailView
5
6 from adhocracy4.actions.models import Action
7 from adhocracy4.projects.models import Project
8 from adhocracy4.rules import mixins as rules_mixins
9 from liqd_product.apps.partners.models import Partner
10
11 from . import forms
12
13
14 class PartnerView(DetailView):
15 template_name = 'partner_landing_page.html'
16 model = Partner
17 slug_url_kwarg = 'partner_slug'
18
19 def get_context_data(self, **kwargs):
20 context = super().get_context_data(**kwargs)
21
22 context['project_list'] = Project.objects\
23 .filter(organisation__partner=self.object)
24
25 context['action_list'] = Action.objects\
26 .filter(project__organisation__partner=self.object)\
27 .filter_public()\
28 .exclude_updates()[:4]
29
30 context['stats'] = {
31 'users': 1204,
32 'items': 3425,
33 'comments': 23234,
34 'ratings': 134234,
35 }
36
37 return context
38
39
40 class InformationView(DetailView):
41 template_name = 'partner_information.html'
42 model = Partner
43 slug_url_kwarg = 'partner_slug'
44
45
46 class ImprintView(DetailView):
47 template_name = 'partner_imprint.html'
48 model = Partner
49 slug_url_kwarg = 'partner_slug'
50
51
52 class PartnerUpdateView(rules_mixins.PermissionRequiredMixin,
53 SuccessMessageMixin,
54 generic.UpdateView):
55 model = Partner
56 form_class = forms.PartnerForm
57 slug_url_kwarg = 'partner_slug'
58 template_name = 'partner_form.html'
59 success_message = _('Municipality successfully updated.')
60 permission_required = 'liqd_product_partners.change_partner'
61 menu_item = 'partner'
62
63 def get_success_url(self):
64 return self.request.path
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py
--- a/liqd_product/apps/partners/views.py
+++ b/liqd_product/apps/partners/views.py
@@ -20,7 +20,9 @@
context = super().get_context_data(**kwargs)
context['project_list'] = Project.objects\
- .filter(organisation__partner=self.object)
+ .filter(organisation__partner=self.object,
+ is_archived=False,
+ is_draft=False)
context['action_list'] = Action.objects\
.filter(project__organisation__partner=self.object)\
| {"golden_diff": "diff --git a/liqd_product/apps/partners/views.py b/liqd_product/apps/partners/views.py\n--- a/liqd_product/apps/partners/views.py\n+++ b/liqd_product/apps/partners/views.py\n@@ -20,7 +20,9 @@\n context = super().get_context_data(**kwargs)\n \n context['project_list'] = Project.objects\\\n- .filter(organisation__partner=self.object)\n+ .filter(organisation__partner=self.object,\n+ is_archived=False,\n+ is_draft=False)\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n", "issue": "[partner landing page] unpublished and archived projects are shown\nOn the partner landing page, we show unpublished and archived projects. Unpublished projects should never be shown and archived projects should be hidden per default.\r\n\r\nSee: https://product-dev.liqd.net/teststadt/\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom liqd_product.apps.partners.models import Partner\n\nfrom . import forms\n\n\nclass PartnerView(DetailView):\n template_name = 'partner_landing_page.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['project_list'] = Project.objects\\\n .filter(organisation__partner=self.object)\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'partner_information.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'partner_imprint.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass PartnerUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Partner\n form_class = forms.PartnerForm\n slug_url_kwarg = 'partner_slug'\n template_name = 'partner_form.html'\n success_message = _('Municipality successfully updated.')\n permission_required = 'liqd_product_partners.change_partner'\n menu_item = 'partner'\n\n def get_success_url(self):\n return self.request.path\n", "path": "liqd_product/apps/partners/views.py"}], "after_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom liqd_product.apps.partners.models import Partner\n\nfrom . 
import forms\n\n\nclass PartnerView(DetailView):\n template_name = 'partner_landing_page.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context['project_list'] = Project.objects\\\n .filter(organisation__partner=self.object,\n is_archived=False,\n is_draft=False)\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation__partner=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'partner_information.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'partner_imprint.html'\n model = Partner\n slug_url_kwarg = 'partner_slug'\n\n\nclass PartnerUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Partner\n form_class = forms.PartnerForm\n slug_url_kwarg = 'partner_slug'\n template_name = 'partner_form.html'\n success_message = _('Municipality successfully updated.')\n permission_required = 'liqd_product_partners.change_partner'\n menu_item = 'partner'\n\n def get_success_url(self):\n return self.request.path\n", "path": "liqd_product/apps/partners/views.py"}]} | 953 | 135 |
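For reference, the golden diff above repeats the `is_draft`/`is_archived` filter inline in `PartnerView.get_context_data`. A hypothetical alternative (not the adhocracy4 API) keeps the visibility rule in one reusable queryset method; a minimal sketch, assuming the same boolean fields on the `Project` model:

```python
# Hypothetical helper mirroring the patched view's filter; names are illustrative.
from django.db import models


class VisibleProjectQuerySet(models.QuerySet):
    def visible(self):
        # Drafts are never shown; archived projects are hidden by default.
        return self.filter(is_draft=False, is_archived=False)


# Hypothetical wiring on the model:  objects = VisibleProjectQuerySet.as_manager()
# The view's queryset would then read:
#   Project.objects.visible().filter(organisation__partner=self.object)
```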
gh_patches_debug_15038 | rasdani/github-patches | git_diff | OpenEnergyPlatform__oeplatform-1454 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
toep: wrong FAQ link
## Description of the issue
On toep there is now a drop-down menu including a link to "FAQs". 
The link, however, goes to the main page of the OEA instead of going to the questions section: https://openenergyplatform.github.io/academy/questions/
## Steps to Reproduce
1. Visit toep and click FAQ in "About" drop-down
## Ideas of solution
Link directly to https://openenergyplatform.github.io/academy/questions/
FYI @wingechr
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `oeplatform/settings.py`
Content:
```
1 """
2 Django settings for oeplatform project.
3
4 Generated by 'django-admin startproject' using Django 1.8.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.8/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.8/ref/settings/
11 """
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14
15 try:
16 from .securitysettings import * # noqa
17 except ImportError:
18 import logging
19 import os
20
21 logging.error("No securitysettings found. Triggerd in oeplatform/settings.py")
22 SECRET_KEY = os.environ.get("SECRET_KEY", "0")
23 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL")
24 URL = os.environ.get("URL")
25
26 # Quick-start development settings - unsuitable for production
27 # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
28
29 # Application definition
30
31 INSTALLED_APPS = (
32 "django.contrib.sites",
33 "django.contrib.admin",
34 "django.contrib.auth",
35 "django.contrib.contenttypes",
36 "django.contrib.sessions",
37 "django.contrib.messages",
38 "django.contrib.staticfiles",
39 "django.contrib.sessions.backends.signed_cookies",
40 "django_bootstrap5",
41 "rest_framework",
42 "rest_framework.authtoken",
43 "modelview",
44 "modelview.templatetags.modelview_extras",
45 "login",
46 "base",
47 "base.templatetags.base_tags",
48 "widget_tweaks",
49 "dataedit",
50 "colorfield",
51 "api",
52 "ontology",
53 "axes",
54 "captcha",
55 "django.contrib.postgres",
56 "fontawesome_5",
57 "django_better_admin_arrayfield",
58 "oeo_viewer",
59 "factsheet",
60 "corsheaders",
61 "owlready2",
62 "compressor",
63 )
64
65 MIDDLEWARE = (
66 "django.contrib.sites.middleware.CurrentSiteMiddleware",
67 "django.contrib.sessions.middleware.SessionMiddleware",
68 "django.middleware.common.CommonMiddleware",
69 "django.middleware.csrf.CsrfViewMiddleware",
70 "django.contrib.auth.middleware.AuthenticationMiddleware",
71 "django.contrib.messages.middleware.MessageMiddleware",
72 "django.middleware.clickjacking.XFrameOptionsMiddleware",
73 "django.middleware.security.SecurityMiddleware",
74 "login.middleware.DetachMiddleware",
75 "axes.middleware.AxesMiddleware",
76 "corsheaders.middleware.CorsMiddleware",
77 "django.middleware.common.CommonMiddleware",
78 )
79
80 ROOT_URLCONF = "oeplatform.urls"
81
82 EXTERNAL_URLS = {
83 "tutorials_index": "https://openenergyplatform.github.io/academy/",
84 "tutorials_faq": "https://openenergyplatform.github.io/academy/",
85 "tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/", # noqa E501
86 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/", # noqa E501
87 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
88 "mkdocs": "https://openenergyplatform.github.io/oeplatform/",
89 "compendium": "https://openenergyplatform.github.io/organisation/",
90 }
91
92
93 def external_urls_context_processor(request):
94 """Define hard coded external urls here.
95 Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}
96 Also, you may want to add an icon indicating external links, e.g.
97 """
98 return {"EXTERNAL_URLS": EXTERNAL_URLS}
99
100
101 SITE_ID = 1
102
103 TEMPLATES = [
104 {
105 "BACKEND": "django.template.backends.django.DjangoTemplates",
106 "DIRS": [],
107 "APP_DIRS": True,
108 "OPTIONS": {
109 "context_processors": [
110 "django.template.context_processors.debug",
111 "django.template.context_processors.request",
112 "django.contrib.auth.context_processors.auth",
113 "django.contrib.messages.context_processors.messages",
114 "oeplatform.settings.external_urls_context_processor",
115 ]
116 },
117 }
118 ]
119
120 CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
121
122 GRAPHENE = {"SCHEMA": "factsheet.schema.schema"}
123
124 WSGI_APPLICATION = "oeplatform.wsgi.application"
125
126 try:
127 ONTOLOGY_FOLDER # noqa
128 except NameError:
129 ONTOLOGY_FOLDER = "/tmp"
130
131
132 # Internationalization
133 # https://docs.djangoproject.com/en/1.8/topics/i18n/
134
135 LANGUAGE_CODE = "en-us"
136
137 TIME_ZONE = "Europe/Berlin"
138
139 USE_I18N = True
140
141 USE_L10N = True
142
143 USE_TZ = True
144
145 # Static files (CSS, JavaScript, Images)
146 # https://docs.djangoproject.com/en/1.8/howto/static-files/
147
148 AUTH_USER_MODEL = "login.myuser"
149 LOGIN_URL = "/user/login"
150 LOGIN_REDIRECT_URL = "/"
151
152 REST_FRAMEWORK = {
153 "DEFAULT_AUTHENTICATION_CLASSES": (
154 "rest_framework.authentication.BasicAuthentication",
155 "rest_framework.authentication.SessionAuthentication",
156 "rest_framework.authentication.TokenAuthentication",
157 )
158 }
159
160 AUTHENTICATION_BACKENDS = [
161 # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.
162 "axes.backends.AxesBackend",
163 # custom class extenging Django ModelBackend for login with username OR email
164 "login.backends.ModelBackendWithEmail",
165 ]
166
167 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
168
169 STATICFILES_FINDERS = {
170 "django.contrib.staticfiles.finders.FileSystemFinder",
171 "django.contrib.staticfiles.finders.AppDirectoriesFinder",
172 "compressor.finders.CompressorFinder",
173 }
174
175
176 # https://django-compressor.readthedocs.io/en/stable/settings.html
177 COMPRESS_ENABLED = True
178 COMPRESS_OFFLINE = True
179 COMPRESS_REBUILD_TIMEOUT = 0
180 COMPRESS_MTIME_DELAY = 0
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/oeplatform/settings.py b/oeplatform/settings.py
--- a/oeplatform/settings.py
+++ b/oeplatform/settings.py
@@ -81,7 +81,7 @@
EXTERNAL_URLS = {
"tutorials_index": "https://openenergyplatform.github.io/academy/",
- "tutorials_faq": "https://openenergyplatform.github.io/academy/",
+ "tutorials_faq": "https://openenergyplatform.github.io/academy/questions/",
"tutorials_api1": "https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/", # noqa E501
"tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/", # noqa E501
"readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest",
| {"golden_diff": "diff --git a/oeplatform/settings.py b/oeplatform/settings.py\n--- a/oeplatform/settings.py\n+++ b/oeplatform/settings.py\n@@ -81,7 +81,7 @@\n \n EXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n- \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n+ \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/questions/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\", # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n", "issue": "toep: wrong FAQ link \n## Description of the issue\r\n\r\nOn toep there is now a drop down menu including a link to \"FAQs\". \r\nThe link however goes to the main page of the OEA instead of going to the questions section: https://openenergyplatform.github.io/academy/questions/\r\n\r\n## Steps to Reproduce\r\n1. Visit toep and click FAQ in \"About\" drop-down\r\n\r\n## Ideas of solution\r\n\r\nLink directly to https://openenergyplatform.github.io/academy/questions/\r\n\r\nFYI @wingechr \n", "before_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. 
Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n \"factsheet\",\n \"corsheaders\",\n \"owlready2\",\n \"compressor\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\", # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n \"mkdocs\": \"https://openenergyplatform.github.io/oeplatform/\",\n \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nSTATICFILES_FINDERS = {\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n}\n\n\n# https://django-compressor.readthedocs.io/en/stable/settings.html\nCOMPRESS_ENABLED = True\nCOMPRESS_OFFLINE = True\nCOMPRESS_REBUILD_TIMEOUT = 0\nCOMPRESS_MTIME_DELAY = 0\n", "path": "oeplatform/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. 
Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n \"factsheet\",\n \"corsheaders\",\n \"owlready2\",\n \"compressor\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/questions/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/01_api/01_api_download/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\", # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n \"mkdocs\": \"https://openenergyplatform.github.io/oeplatform/\",\n \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n\n# Internationalization\n# 
https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nSTATICFILES_FINDERS = {\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"compressor.finders.CompressorFinder\",\n}\n\n\n# https://django-compressor.readthedocs.io/en/stable/settings.html\nCOMPRESS_ENABLED = True\nCOMPRESS_OFFLINE = True\nCOMPRESS_REBUILD_TIMEOUT = 0\nCOMPRESS_MTIME_DELAY = 0\n", "path": "oeplatform/settings.py"}]} | 2,094 | 206 |
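For reference, `external_urls_context_processor` above simply hands `EXTERNAL_URLS` to every template (used as `{{ EXTERNAL_URLS.tutorials_faq }}`), so the corrected FAQ target can be checked directly. A minimal sketch, assuming the `oeplatform` package is importable:

```python
from oeplatform.settings import EXTERNAL_URLS, external_urls_context_processor

# The processor ignores the request and returns the shared dict unchanged.
assert external_urls_context_processor(request=None)["EXTERNAL_URLS"] is EXTERNAL_URLS
assert EXTERNAL_URLS["tutorials_faq"] == "https://openenergyplatform.github.io/academy/questions/"
```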
gh_patches_debug_25960 | rasdani/github-patches | git_diff | secdev__scapy-1126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Generating sets of packets" (as explained in the documentation) does not work for mixed sequence and range (e.g. IP(ttl=[1,2,(5,9)]))
Hi,
as explained in the [tutorial](http://scapy.readthedocs.io/en/latest/usage.html#generating-sets-of-packets)
these two lines
`b=IP(ttl=[1,2,(5,9)])`
`[p for p in b]`
should create a set of 7 packets, like
`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=5 |>, <IP ttl=6 |>, <IP ttl=7 |>, <IP ttl=8 |>, <IP ttl=9 |>]`.
But the result is just:
`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=(5, 9) |>]`
If I just use a "sequence" (`IP(ttl=[1,2,5,7,9])`) or "range" (`IP(ttl=(1,9)`), the result is that I expected.
So, there are recent changesin scpay and the documentation is outdated or Is this a bug or I'm just stupid ;) ?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/base_classes.py`
Content:
```
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 """
7 Generators and packet meta classes.
8 """
9
10 ###############
11 ## Generators ##
12 ################
13
14 from __future__ import absolute_import
15 import re,random,socket
16 import types
17 from scapy.modules.six.moves import range
18
19 class Gen(object):
20 __slots__ = []
21 def __iter__(self):
22 return iter([])
23
24 class SetGen(Gen):
25 def __init__(self, values, _iterpacket=1):
26 self._iterpacket=_iterpacket
27 if isinstance(values, (list, BasePacketList)):
28 self.values = list(values)
29 elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \
30 all(hasattr(i, "__int__") for i in values)):
31 # We use values[1] + 1 as stop value for (x)range to maintain
32 # the behavior of using tuples as field `values`
33 self.values = [range(*((int(values[0]), int(values[1]) + 1)
34 + tuple(int(v) for v in values[2:])))]
35 else:
36 self.values = [values]
37 def transf(self, element):
38 return element
39 def __iter__(self):
40 for i in self.values:
41 if (isinstance(i, Gen) and
42 (self._iterpacket or not isinstance(i,BasePacket))) or (
43 isinstance(i, (range, types.GeneratorType))):
44 for j in i:
45 yield j
46 else:
47 yield i
48 def __repr__(self):
49 return "<SetGen %r>" % self.values
50
51 class Net(Gen):
52 """Generate a list of IPs from a network address or a name"""
53 name = "ip"
54 ip_regex = re.compile(r"^(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\.(\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$")
55
56 @staticmethod
57 def _parse_digit(a,netmask):
58 netmask = min(8,max(netmask,0))
59 if a == "*":
60 a = (0,256)
61 elif a.find("-") >= 0:
62 x, y = [int(d) for d in a.split('-')]
63 if x > y:
64 y = x
65 a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)
66 else:
67 a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)
68 return a
69
70 @classmethod
71 def _parse_net(cls, net):
72 tmp=net.split('/')+["32"]
73 if not cls.ip_regex.match(net):
74 tmp[0]=socket.gethostbyname(tmp[0])
75 netmask = int(tmp[1])
76 ret_list = [cls._parse_digit(x, y-netmask) for (x, y) in zip(tmp[0].split('.'), [8, 16, 24, 32])]
77 return ret_list, netmask
78
79 def __init__(self, net):
80 self.repr=net
81 self.parsed,self.netmask = self._parse_net(net)
82
83 def __str__(self):
84 try:
85 return next(self.__iter__())
86 except StopIteration:
87 return None
88
89 def __iter__(self):
90 for d in range(*self.parsed[3]):
91 for c in range(*self.parsed[2]):
92 for b in range(*self.parsed[1]):
93 for a in range(*self.parsed[0]):
94 yield "%i.%i.%i.%i" % (a,b,c,d)
95 def choice(self):
96 ip = []
97 for v in self.parsed:
98 ip.append(str(random.randint(v[0],v[1]-1)))
99 return ".".join(ip)
100
101 def __repr__(self):
102 return "Net(%r)" % self.repr
103 def __eq__(self, other):
104 if hasattr(other, "parsed"):
105 p2 = other.parsed
106 else:
107 p2,nm2 = self._parse_net(other)
108 return self.parsed == p2
109 def __contains__(self, other):
110 if hasattr(other, "parsed"):
111 p2 = other.parsed
112 else:
113 p2,nm2 = self._parse_net(other)
114 for (a1,b1),(a2,b2) in zip(self.parsed,p2):
115 if a1 > a2 or b1 < b2:
116 return False
117 return True
118 def __rcontains__(self, other):
119 return self in self.__class__(other)
120
121
122 class OID(Gen):
123 name = "OID"
124 def __init__(self, oid):
125 self.oid = oid
126 self.cmpt = []
127 fmt = []
128 for i in oid.split("."):
129 if "-" in i:
130 fmt.append("%i")
131 self.cmpt.append(tuple(map(int, i.split("-"))))
132 else:
133 fmt.append(i)
134 self.fmt = ".".join(fmt)
135 def __repr__(self):
136 return "OID(%r)" % self.oid
137 def __iter__(self):
138 ii = [k[0] for k in self.cmpt]
139 while True:
140 yield self.fmt % tuple(ii)
141 i = 0
142 while True:
143 if i >= len(ii):
144 raise StopIteration
145 if ii[i] < self.cmpt[i][1]:
146 ii[i]+=1
147 break
148 else:
149 ii[i] = self.cmpt[i][0]
150 i += 1
151
152
153
154 ######################################
155 ## Packet abstract and base classes ##
156 ######################################
157
158 class Packet_metaclass(type):
159 def __new__(cls, name, bases, dct):
160 if "fields_desc" in dct: # perform resolution of references to other packets
161 current_fld = dct["fields_desc"]
162 resolved_fld = []
163 for f in current_fld:
164 if isinstance(f, Packet_metaclass): # reference to another fields_desc
165 for f2 in f.fields_desc:
166 resolved_fld.append(f2)
167 else:
168 resolved_fld.append(f)
169 else: # look for a fields_desc in parent classes
170 resolved_fld = None
171 for b in bases:
172 if hasattr(b,"fields_desc"):
173 resolved_fld = b.fields_desc
174 break
175
176 if resolved_fld: # perform default value replacements
177 final_fld = []
178 for f in resolved_fld:
179 if f.name in dct:
180 f = f.copy()
181 f.default = dct[f.name]
182 del(dct[f.name])
183 final_fld.append(f)
184
185 dct["fields_desc"] = final_fld
186
187 if "__slots__" not in dct:
188 dct["__slots__"] = []
189 for attr in ["name", "overload_fields"]:
190 try:
191 dct["_%s" % attr] = dct.pop(attr)
192 except KeyError:
193 pass
194 newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)
195 newcls.__all_slots__ = set(
196 attr
197 for cls in newcls.__mro__ if hasattr(cls, "__slots__")
198 for attr in cls.__slots__
199 )
200
201 if hasattr(newcls, "aliastypes"):
202 newcls.aliastypes = [newcls] + newcls.aliastypes
203 else:
204 newcls.aliastypes = [newcls]
205
206 if hasattr(newcls,"register_variant"):
207 newcls.register_variant()
208 for f in newcls.fields_desc:
209 if hasattr(f, "register_owner"):
210 f.register_owner(newcls)
211 from scapy import config
212 config.conf.layers.register(newcls)
213 return newcls
214
215 def __getattr__(self, attr):
216 for k in self.fields_desc:
217 if k.name == attr:
218 return k
219 raise AttributeError(attr)
220
221 def __call__(cls, *args, **kargs):
222 if "dispatch_hook" in cls.__dict__:
223 try:
224 cls = cls.dispatch_hook(*args, **kargs)
225 except:
226 from scapy import config
227 if config.conf.debug_dissector:
228 raise
229 cls = config.conf.raw_layer
230 i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)
231 i.__init__(*args, **kargs)
232 return i
233
234 class Field_metaclass(type):
235 def __new__(cls, name, bases, dct):
236 if "__slots__" not in dct:
237 dct["__slots__"] = []
238 newcls = super(Field_metaclass, cls).__new__(cls, name, bases, dct)
239 return newcls
240
241 class NewDefaultValues(Packet_metaclass):
242 """NewDefaultValues is deprecated (not needed anymore)
243
244 remove this:
245 __metaclass__ = NewDefaultValues
246 and it should still work.
247 """
248 def __new__(cls, name, bases, dct):
249 from scapy.error import log_loading
250 import traceback
251 try:
252 for tb in traceback.extract_stack()+[("??",-1,None,"")]:
253 f,l,_,line = tb
254 if line.startswith("class"):
255 break
256 except:
257 f,l="??",-1
258 raise
259 log_loading.warning("Deprecated (no more needed) use of NewDefaultValues (%s l. %i).", f, l)
260
261 return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)
262
263 class BasePacket(Gen):
264 __slots__ = []
265
266
267 #############################
268 ## Packet list base class ##
269 #############################
270
271 class BasePacketList(object):
272 __slots__ = []
273
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scapy/base_classes.py b/scapy/base_classes.py
--- a/scapy/base_classes.py
+++ b/scapy/base_classes.py
@@ -20,20 +20,29 @@
__slots__ = []
def __iter__(self):
return iter([])
-
+
+
+def _get_values(value):
+ """Generate a range object from (start, stop[, step]) tuples, or
+return value.
+
+ """
+ if (isinstance(value, tuple) and (2 <= len(value) <= 3) and \
+ all(hasattr(i, "__int__") for i in value)):
+ # We use values[1] + 1 as stop value for (x)range to maintain
+ # the behavior of using tuples as field `values`
+ return range(*((int(value[0]), int(value[1]) + 1)
+ + tuple(int(v) for v in value[2:])))
+ return value
+
+
class SetGen(Gen):
def __init__(self, values, _iterpacket=1):
self._iterpacket=_iterpacket
if isinstance(values, (list, BasePacketList)):
- self.values = list(values)
- elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \
- all(hasattr(i, "__int__") for i in values)):
- # We use values[1] + 1 as stop value for (x)range to maintain
- # the behavior of using tuples as field `values`
- self.values = [range(*((int(values[0]), int(values[1]) + 1)
- + tuple(int(v) for v in values[2:])))]
+ self.values = [_get_values(val) for val in values]
else:
- self.values = [values]
+ self.values = [_get_values(values)]
def transf(self, element):
return element
def __iter__(self):
| {"golden_diff": "diff --git a/scapy/base_classes.py b/scapy/base_classes.py\n--- a/scapy/base_classes.py\n+++ b/scapy/base_classes.py\n@@ -20,20 +20,29 @@\n __slots__ = []\n def __iter__(self):\n return iter([])\n- \n+\n+\n+def _get_values(value):\n+ \"\"\"Generate a range object from (start, stop[, step]) tuples, or\n+return value.\n+\n+ \"\"\"\n+ if (isinstance(value, tuple) and (2 <= len(value) <= 3) and \\\n+ all(hasattr(i, \"__int__\") for i in value)):\n+ # We use values[1] + 1 as stop value for (x)range to maintain\n+ # the behavior of using tuples as field `values`\n+ return range(*((int(value[0]), int(value[1]) + 1)\n+ + tuple(int(v) for v in value[2:])))\n+ return value\n+\n+\n class SetGen(Gen):\n def __init__(self, values, _iterpacket=1):\n self._iterpacket=_iterpacket\n if isinstance(values, (list, BasePacketList)):\n- self.values = list(values)\n- elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \\\n- all(hasattr(i, \"__int__\") for i in values)):\n- # We use values[1] + 1 as stop value for (x)range to maintain\n- # the behavior of using tuples as field `values`\n- self.values = [range(*((int(values[0]), int(values[1]) + 1)\n- + tuple(int(v) for v in values[2:])))]\n+ self.values = [_get_values(val) for val in values]\n else:\n- self.values = [values]\n+ self.values = [_get_values(values)]\n def transf(self, element):\n return element\n def __iter__(self):\n", "issue": "\"Generating sets of packets\" (as explained in the documentation) does not work for mixed sequence and range (e.g. IP(ttl=[1,2,(5,9)]))\nHi,\r\nas explained in the [tutorial](http://scapy.readthedocs.io/en/latest/usage.html#generating-sets-of-packets) \r\nthese two lines \r\n`b=IP(ttl=[1,2,(5,9)])`\r\n`[p for p in b]` \r\nshould create a set of 7 packets, like\r\n`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=5 |>, <IP ttl=6 |>, <IP ttl=7 |>, <IP ttl=8 |>, <IP ttl=9 |>]`.\r\n\r\nBut the result is just:\r\n`[<IP ttl=1 |>, <IP ttl=2 |>, <IP ttl=(5, 9) |>]`\r\n\r\nIf I just use a \"sequence\" (`IP(ttl=[1,2,5,7,9])`) or \"range\" (`IP(ttl=(1,9)`), the result is that I expected. 
\r\n\r\nSo, there are recent changesin scpay and the documentation is outdated or Is this a bug or I'm just stupid ;) ?\r\n\r\nThanks!\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nGenerators and packet meta classes.\n\"\"\"\n\n###############\n## Generators ##\n################\n\nfrom __future__ import absolute_import\nimport re,random,socket\nimport types\nfrom scapy.modules.six.moves import range\n\nclass Gen(object):\n __slots__ = []\n def __iter__(self):\n return iter([])\n \nclass SetGen(Gen):\n def __init__(self, values, _iterpacket=1):\n self._iterpacket=_iterpacket\n if isinstance(values, (list, BasePacketList)):\n self.values = list(values)\n elif (isinstance(values, tuple) and (2 <= len(values) <= 3) and \\\n all(hasattr(i, \"__int__\") for i in values)):\n # We use values[1] + 1 as stop value for (x)range to maintain\n # the behavior of using tuples as field `values`\n self.values = [range(*((int(values[0]), int(values[1]) + 1)\n + tuple(int(v) for v in values[2:])))]\n else:\n self.values = [values]\n def transf(self, element):\n return element\n def __iter__(self):\n for i in self.values:\n if (isinstance(i, Gen) and\n (self._iterpacket or not isinstance(i,BasePacket))) or (\n isinstance(i, (range, types.GeneratorType))):\n for j in i:\n yield j\n else:\n yield i\n def __repr__(self):\n return \"<SetGen %r>\" % self.values\n\nclass Net(Gen):\n \"\"\"Generate a list of IPs from a network address or a name\"\"\"\n name = \"ip\"\n ip_regex = re.compile(r\"^(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$\")\n\n @staticmethod\n def _parse_digit(a,netmask):\n netmask = min(8,max(netmask,0))\n if a == \"*\":\n a = (0,256)\n elif a.find(\"-\") >= 0:\n x, y = [int(d) for d in a.split('-')]\n if x > y:\n y = x\n a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)\n else:\n a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)\n return a\n\n @classmethod\n def _parse_net(cls, net):\n tmp=net.split('/')+[\"32\"]\n if not cls.ip_regex.match(net):\n tmp[0]=socket.gethostbyname(tmp[0])\n netmask = int(tmp[1])\n ret_list = [cls._parse_digit(x, y-netmask) for (x, y) in zip(tmp[0].split('.'), [8, 16, 24, 32])]\n return ret_list, netmask\n\n def __init__(self, net):\n self.repr=net\n self.parsed,self.netmask = self._parse_net(net)\n\n def __str__(self):\n try:\n return next(self.__iter__())\n except StopIteration:\n return None\n \n def __iter__(self):\n for d in range(*self.parsed[3]):\n for c in range(*self.parsed[2]):\n for b in range(*self.parsed[1]):\n for a in range(*self.parsed[0]):\n yield \"%i.%i.%i.%i\" % (a,b,c,d)\n def choice(self):\n ip = []\n for v in self.parsed:\n ip.append(str(random.randint(v[0],v[1]-1)))\n return \".\".join(ip) \n \n def __repr__(self):\n return \"Net(%r)\" % self.repr\n def __eq__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n p2,nm2 = self._parse_net(other)\n return self.parsed == p2\n def __contains__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n p2,nm2 = self._parse_net(other)\n for (a1,b1),(a2,b2) in zip(self.parsed,p2):\n if a1 > a2 or b1 < b2:\n return False\n return True\n def __rcontains__(self, other): \n return self 
in self.__class__(other)\n \n\nclass OID(Gen):\n name = \"OID\"\n def __init__(self, oid):\n self.oid = oid \n self.cmpt = []\n fmt = [] \n for i in oid.split(\".\"):\n if \"-\" in i:\n fmt.append(\"%i\")\n self.cmpt.append(tuple(map(int, i.split(\"-\"))))\n else:\n fmt.append(i)\n self.fmt = \".\".join(fmt)\n def __repr__(self):\n return \"OID(%r)\" % self.oid\n def __iter__(self): \n ii = [k[0] for k in self.cmpt]\n while True:\n yield self.fmt % tuple(ii)\n i = 0\n while True:\n if i >= len(ii):\n raise StopIteration\n if ii[i] < self.cmpt[i][1]:\n ii[i]+=1\n break\n else:\n ii[i] = self.cmpt[i][0]\n i += 1\n\n\n \n######################################\n## Packet abstract and base classes ##\n######################################\n\nclass Packet_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"fields_desc\" in dct: # perform resolution of references to other packets\n current_fld = dct[\"fields_desc\"]\n resolved_fld = []\n for f in current_fld:\n if isinstance(f, Packet_metaclass): # reference to another fields_desc\n for f2 in f.fields_desc:\n resolved_fld.append(f2)\n else:\n resolved_fld.append(f)\n else: # look for a fields_desc in parent classes\n resolved_fld = None\n for b in bases:\n if hasattr(b,\"fields_desc\"):\n resolved_fld = b.fields_desc\n break\n\n if resolved_fld: # perform default value replacements\n final_fld = []\n for f in resolved_fld:\n if f.name in dct:\n f = f.copy()\n f.default = dct[f.name]\n del(dct[f.name])\n final_fld.append(f)\n\n dct[\"fields_desc\"] = final_fld\n\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n for attr in [\"name\", \"overload_fields\"]:\n try:\n dct[\"_%s\" % attr] = dct.pop(attr)\n except KeyError:\n pass\n newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)\n newcls.__all_slots__ = set(\n attr\n for cls in newcls.__mro__ if hasattr(cls, \"__slots__\")\n for attr in cls.__slots__\n )\n\n if hasattr(newcls, \"aliastypes\"):\n newcls.aliastypes = [newcls] + newcls.aliastypes\n else:\n newcls.aliastypes = [newcls]\n\n if hasattr(newcls,\"register_variant\"):\n newcls.register_variant()\n for f in newcls.fields_desc:\n if hasattr(f, \"register_owner\"):\n f.register_owner(newcls)\n from scapy import config\n config.conf.layers.register(newcls)\n return newcls\n\n def __getattr__(self, attr):\n for k in self.fields_desc:\n if k.name == attr:\n return k\n raise AttributeError(attr)\n\n def __call__(cls, *args, **kargs):\n if \"dispatch_hook\" in cls.__dict__:\n try:\n cls = cls.dispatch_hook(*args, **kargs)\n except:\n from scapy import config\n if config.conf.debug_dissector:\n raise\n cls = config.conf.raw_layer\n i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)\n i.__init__(*args, **kargs)\n return i\n\nclass Field_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n newcls = super(Field_metaclass, cls).__new__(cls, name, bases, dct)\n return newcls\n\nclass NewDefaultValues(Packet_metaclass):\n \"\"\"NewDefaultValues is deprecated (not needed anymore)\n \n remove this:\n __metaclass__ = NewDefaultValues\n and it should still work.\n \"\"\" \n def __new__(cls, name, bases, dct):\n from scapy.error import log_loading\n import traceback\n try:\n for tb in traceback.extract_stack()+[(\"??\",-1,None,\"\")]:\n f,l,_,line = tb\n if line.startswith(\"class\"):\n break\n except:\n f,l=\"??\",-1\n raise\n log_loading.warning(\"Deprecated (no more needed) use of NewDefaultValues (%s l. 
%i).\", f, l)\n \n return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)\n\nclass BasePacket(Gen):\n __slots__ = []\n\n\n#############################\n## Packet list base class ##\n#############################\n\nclass BasePacketList(object):\n __slots__ = []\n", "path": "scapy/base_classes.py"}], "after_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nGenerators and packet meta classes.\n\"\"\"\n\n###############\n## Generators ##\n################\n\nfrom __future__ import absolute_import\nimport re,random,socket\nimport types\nfrom scapy.modules.six.moves import range\n\nclass Gen(object):\n __slots__ = []\n def __iter__(self):\n return iter([])\n\n\ndef _get_values(value):\n \"\"\"Generate a range object from (start, stop[, step]) tuples, or\nreturn value.\n\n \"\"\"\n if (isinstance(value, tuple) and (2 <= len(value) <= 3) and \\\n all(hasattr(i, \"__int__\") for i in value)):\n # We use values[1] + 1 as stop value for (x)range to maintain\n # the behavior of using tuples as field `values`\n return range(*((int(value[0]), int(value[1]) + 1)\n + tuple(int(v) for v in value[2:])))\n return value\n\n\nclass SetGen(Gen):\n def __init__(self, values, _iterpacket=1):\n self._iterpacket=_iterpacket\n if isinstance(values, (list, BasePacketList)):\n self.values = [_get_values(val) for val in values]\n else:\n self.values = [_get_values(values)]\n def transf(self, element):\n return element\n def __iter__(self):\n for i in self.values:\n if (isinstance(i, Gen) and\n (self._iterpacket or not isinstance(i,BasePacket))) or (\n isinstance(i, (range, types.GeneratorType))):\n for j in i:\n yield j\n else:\n yield i\n def __repr__(self):\n return \"<SetGen %r>\" % self.values\n\nclass Net(Gen):\n \"\"\"Generate a list of IPs from a network address or a name\"\"\"\n name = \"ip\"\n ip_regex = re.compile(r\"^(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)\\.(\\*|[0-2]?[0-9]?[0-9](-[0-2]?[0-9]?[0-9])?)(/[0-3]?[0-9])?$\")\n\n @staticmethod\n def _parse_digit(a,netmask):\n netmask = min(8,max(netmask,0))\n if a == \"*\":\n a = (0,256)\n elif a.find(\"-\") >= 0:\n x, y = [int(d) for d in a.split('-')]\n if x > y:\n y = x\n a = (x & (0xff<<netmask) , max(y, (x | (0xff>>(8-netmask))))+1)\n else:\n a = (int(a) & (0xff<<netmask),(int(a) | (0xff>>(8-netmask)))+1)\n return a\n\n @classmethod\n def _parse_net(cls, net):\n tmp=net.split('/')+[\"32\"]\n if not cls.ip_regex.match(net):\n tmp[0]=socket.gethostbyname(tmp[0])\n netmask = int(tmp[1])\n ret_list = [cls._parse_digit(x, y-netmask) for (x, y) in zip(tmp[0].split('.'), [8, 16, 24, 32])]\n return ret_list, netmask\n\n def __init__(self, net):\n self.repr=net\n self.parsed,self.netmask = self._parse_net(net)\n\n def __str__(self):\n try:\n return next(self.__iter__())\n except StopIteration:\n return None\n \n def __iter__(self):\n for d in range(*self.parsed[3]):\n for c in range(*self.parsed[2]):\n for b in range(*self.parsed[1]):\n for a in range(*self.parsed[0]):\n yield \"%i.%i.%i.%i\" % (a,b,c,d)\n def choice(self):\n ip = []\n for v in self.parsed:\n ip.append(str(random.randint(v[0],v[1]-1)))\n return \".\".join(ip) \n \n def __repr__(self):\n return \"Net(%r)\" % self.repr\n def __eq__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n 
p2,nm2 = self._parse_net(other)\n return self.parsed == p2\n def __contains__(self, other):\n if hasattr(other, \"parsed\"):\n p2 = other.parsed\n else:\n p2,nm2 = self._parse_net(other)\n for (a1,b1),(a2,b2) in zip(self.parsed,p2):\n if a1 > a2 or b1 < b2:\n return False\n return True\n def __rcontains__(self, other): \n return self in self.__class__(other)\n \n\nclass OID(Gen):\n name = \"OID\"\n def __init__(self, oid):\n self.oid = oid \n self.cmpt = []\n fmt = [] \n for i in oid.split(\".\"):\n if \"-\" in i:\n fmt.append(\"%i\")\n self.cmpt.append(tuple(map(int, i.split(\"-\"))))\n else:\n fmt.append(i)\n self.fmt = \".\".join(fmt)\n def __repr__(self):\n return \"OID(%r)\" % self.oid\n def __iter__(self): \n ii = [k[0] for k in self.cmpt]\n while True:\n yield self.fmt % tuple(ii)\n i = 0\n while True:\n if i >= len(ii):\n raise StopIteration\n if ii[i] < self.cmpt[i][1]:\n ii[i]+=1\n break\n else:\n ii[i] = self.cmpt[i][0]\n i += 1\n\n\n \n######################################\n## Packet abstract and base classes ##\n######################################\n\nclass Packet_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"fields_desc\" in dct: # perform resolution of references to other packets\n current_fld = dct[\"fields_desc\"]\n resolved_fld = []\n for f in current_fld:\n if isinstance(f, Packet_metaclass): # reference to another fields_desc\n for f2 in f.fields_desc:\n resolved_fld.append(f2)\n else:\n resolved_fld.append(f)\n else: # look for a fields_desc in parent classes\n resolved_fld = None\n for b in bases:\n if hasattr(b,\"fields_desc\"):\n resolved_fld = b.fields_desc\n break\n\n if resolved_fld: # perform default value replacements\n final_fld = []\n for f in resolved_fld:\n if f.name in dct:\n f = f.copy()\n f.default = dct[f.name]\n del(dct[f.name])\n final_fld.append(f)\n\n dct[\"fields_desc\"] = final_fld\n\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n for attr in [\"name\", \"overload_fields\"]:\n try:\n dct[\"_%s\" % attr] = dct.pop(attr)\n except KeyError:\n pass\n newcls = super(Packet_metaclass, cls).__new__(cls, name, bases, dct)\n newcls.__all_slots__ = set(\n attr\n for cls in newcls.__mro__ if hasattr(cls, \"__slots__\")\n for attr in cls.__slots__\n )\n\n if hasattr(newcls, \"aliastypes\"):\n newcls.aliastypes = [newcls] + newcls.aliastypes\n else:\n newcls.aliastypes = [newcls]\n\n if hasattr(newcls,\"register_variant\"):\n newcls.register_variant()\n for f in newcls.fields_desc:\n if hasattr(f, \"register_owner\"):\n f.register_owner(newcls)\n from scapy import config\n config.conf.layers.register(newcls)\n return newcls\n\n def __getattr__(self, attr):\n for k in self.fields_desc:\n if k.name == attr:\n return k\n raise AttributeError(attr)\n\n def __call__(cls, *args, **kargs):\n if \"dispatch_hook\" in cls.__dict__:\n try:\n cls = cls.dispatch_hook(*args, **kargs)\n except:\n from scapy import config\n if config.conf.debug_dissector:\n raise\n cls = config.conf.raw_layer\n i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__)\n i.__init__(*args, **kargs)\n return i\n\nclass Field_metaclass(type):\n def __new__(cls, name, bases, dct):\n if \"__slots__\" not in dct:\n dct[\"__slots__\"] = []\n newcls = super(Field_metaclass, cls).__new__(cls, name, bases, dct)\n return newcls\n\nclass NewDefaultValues(Packet_metaclass):\n \"\"\"NewDefaultValues is deprecated (not needed anymore)\n \n remove this:\n __metaclass__ = NewDefaultValues\n and it should still work.\n \"\"\" \n def __new__(cls, name, bases, dct):\n from 
scapy.error import log_loading\n import traceback\n try:\n for tb in traceback.extract_stack()+[(\"??\",-1,None,\"\")]:\n f,l,_,line = tb\n if line.startswith(\"class\"):\n break\n except:\n f,l=\"??\",-1\n raise\n log_loading.warning(\"Deprecated (no more needed) use of NewDefaultValues (%s l. %i).\", f, l)\n \n return super(NewDefaultValues, cls).__new__(cls, name, bases, dct)\n\nclass BasePacket(Gen):\n __slots__ = []\n\n\n#############################\n## Packet list base class ##\n#############################\n\nclass BasePacketList(object):\n __slots__ = []\n", "path": "scapy/base_classes.py"}]} | 3,527 | 436 |
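The Net generator in the scapy/base_classes.py listing above expands an address spec into concrete IP strings. A small usage sketch, assuming scapy is importable; the printed values follow from the per-octet parsing logic shown in the excerpt and are not asserted behaviour of any other scapy version:

```python
from scapy.base_classes import Net

# A /30 network expands to its four addresses, innermost octet varying fastest.
net = Net("192.168.0.0/30")
print(list(net))             # ['192.168.0.0', '192.168.0.1', '192.168.0.2', '192.168.0.3']
print("192.168.0.2" in net)  # True: __contains__ compares the parsed per-octet ranges

# Ranges per octet are also accepted; choice() draws one random address from the spec.
print(Net("10.0.0.1-3").choice())  # one of 10.0.0.1 .. 10.0.0.3
```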
gh_patches_debug_13696 | rasdani/github-patches | git_diff | enthought__chaco-634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't use traitsui.api as tui
https://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/plugin/plot_editor.py`
Content:
```
1 from chaco.shell.scaly_plot import ScalyPlot
2 from enable.component_editor import ComponentEditor
3 from pyface.workbench.api import TraitsUIEditor
4 from traits.api import Any, Enum, HasTraits, Property, Str
5 from traitsui import api as tui
6
7
8 class PlotUI(HasTraits):
9 """Simple Traits UI proxy for a Chaco plot."""
10
11 # The plot.
12 component = Any()
13
14 traits_view = tui.View(
15 tui.Item("component", editor=ComponentEditor(), show_label=False),
16 resizable=True,
17 )
18
19
20 class PlotEditor(TraitsUIEditor):
21 """A Workbench Editor showing a Chaco plot for the shell interface."""
22
23 bgcolor = Str("white")
24 image_default_origin = Enum(
25 "bottom left", "top left", "bottom right", "top right"
26 )
27
28 # The plot.
29 component = Property(Any)
30 container = Property(Any)
31
32 # The PlotData.
33 data = Any()
34
35 # The PlotSession of which we are a part. We need to know this in order
36 # to notify it of our being closed, etc.
37 session = Any()
38
39 def __init__(
40 self,
41 is_image=False,
42 bgcolor="white",
43 image_default_origin="top left",
44 *args,
45 **kw
46 ):
47
48 super(TraitsUIEditor, self).__init__(**kw)
49
50 # Some defaults which should be overridden by preferences.
51 self.bgcolor = bgcolor
52 self.image_default_origin = image_default_origin
53
54 # Create an empty top-level container
55 if is_image:
56 top_container = self._create_top_img_container()
57 else:
58 top_container = self._create_top_container()
59
60 self.obj = PlotUI(component=top_container)
61
62 #### PlotWindow interface ##################################################
63
64 def get_container(self):
65 return self.obj.component
66
67 def set_container(self, container):
68 self.obj.component = container
69
70 def iconize(self, iconize):
71 """Iconizes the window if *iconize* is True.
72
73 Do nothing in this implementation.
74 """
75
76 def maximize(self, maximize):
77 """If *maximize* is True, maximizes the window size; restores if False.
78
79 Do nothing in this implementation.
80 """
81
82 def set_size(self, width, height):
83 pass
84
85 def set_title(self, title):
86 self.name = title
87
88 def raise_window(self):
89 self.window.activate_editor(self)
90
91 #### Editor interface ######################################################
92
93 def destroy_control(self):
94 """Destroy the toolkit-specific control that represents the part."""
95 self._on_window_close()
96 super(TraitsUIEditor, self).destroy_control()
97
98 #### Private interface #####################################################
99
100 def _get_container(self):
101 return self.obj.component
102
103 def _set_container(self, value):
104 self.obj.component = value
105
106 def _get_component(self):
107 return self.obj.component
108
109 def _set_component(self, value):
110 self.obj.component = value
111
112 def _create_top_container(self):
113 plot = ScalyPlot(
114 padding=50,
115 fill_padding=True,
116 bgcolor=self.bgcolor,
117 use_backbuffer=True,
118 )
119 return plot
120
121 def _create_top_img_container(self):
122 plot = ScalyPlot(
123 padding=50,
124 fill_padding=True,
125 bgcolor=self.bgcolor,
126 use_backbuffer=True,
127 default_origin=self.image_default_origin,
128 )
129 return plot
130
131 def _on_window_close(self):
132 if self.session:
133 try:
134 ndx = self.session.windows.index(self)
135 self.session.del_window(ndx)
136 except ValueError:
137 pass
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py
--- a/chaco/plugin/plot_editor.py
+++ b/chaco/plugin/plot_editor.py
@@ -2,7 +2,7 @@
from enable.component_editor import ComponentEditor
from pyface.workbench.api import TraitsUIEditor
from traits.api import Any, Enum, HasTraits, Property, Str
-from traitsui import api as tui
+from traitsui.api import Item, View
class PlotUI(HasTraits):
@@ -11,8 +11,8 @@
# The plot.
component = Any()
- traits_view = tui.View(
- tui.Item("component", editor=ComponentEditor(), show_label=False),
+ traits_view = View(
+ Item("component", editor=ComponentEditor(), show_label=False),
resizable=True,
)
| {"golden_diff": "diff --git a/chaco/plugin/plot_editor.py b/chaco/plugin/plot_editor.py\n--- a/chaco/plugin/plot_editor.py\n+++ b/chaco/plugin/plot_editor.py\n@@ -2,7 +2,7 @@\n from enable.component_editor import ComponentEditor\n from pyface.workbench.api import TraitsUIEditor\n from traits.api import Any, Enum, HasTraits, Property, Str\n-from traitsui import api as tui\n+from traitsui.api import Item, View\n \n \n class PlotUI(HasTraits):\n@@ -11,8 +11,8 @@\n # The plot.\n component = Any()\n \n- traits_view = tui.View(\n- tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n+ traits_view = View(\n+ Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n", "issue": "Dont use traitsui.api as tui\nhttps://github.com/enthought/chaco/blob/3de7780561fa29e79c887432d3ce408ea82d1614/chaco/plugin/plot_editor.py makes use of the odd `import traitsui.api as tui` alias which needs to be updated and removed.\n", "before_files": [{"content": "from chaco.shell.scaly_plot import ScalyPlot\nfrom enable.component_editor import ComponentEditor\nfrom pyface.workbench.api import TraitsUIEditor\nfrom traits.api import Any, Enum, HasTraits, Property, Str\nfrom traitsui import api as tui\n\n\nclass PlotUI(HasTraits):\n \"\"\"Simple Traits UI proxy for a Chaco plot.\"\"\"\n\n # The plot.\n component = Any()\n\n traits_view = tui.View(\n tui.Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n\n\nclass PlotEditor(TraitsUIEditor):\n \"\"\"A Workbench Editor showing a Chaco plot for the shell interface.\"\"\"\n\n bgcolor = Str(\"white\")\n image_default_origin = Enum(\n \"bottom left\", \"top left\", \"bottom right\", \"top right\"\n )\n\n # The plot.\n component = Property(Any)\n container = Property(Any)\n\n # The PlotData.\n data = Any()\n\n # The PlotSession of which we are a part. 
We need to know this in order\n # to notify it of our being closed, etc.\n session = Any()\n\n def __init__(\n self,\n is_image=False,\n bgcolor=\"white\",\n image_default_origin=\"top left\",\n *args,\n **kw\n ):\n\n super(TraitsUIEditor, self).__init__(**kw)\n\n # Some defaults which should be overridden by preferences.\n self.bgcolor = bgcolor\n self.image_default_origin = image_default_origin\n\n # Create an empty top-level container\n if is_image:\n top_container = self._create_top_img_container()\n else:\n top_container = self._create_top_container()\n\n self.obj = PlotUI(component=top_container)\n\n #### PlotWindow interface ##################################################\n\n def get_container(self):\n return self.obj.component\n\n def set_container(self, container):\n self.obj.component = container\n\n def iconize(self, iconize):\n \"\"\"Iconizes the window if *iconize* is True.\n\n Do nothing in this implementation.\n \"\"\"\n\n def maximize(self, maximize):\n \"\"\"If *maximize* is True, maximizes the window size; restores if False.\n\n Do nothing in this implementation.\n \"\"\"\n\n def set_size(self, width, height):\n pass\n\n def set_title(self, title):\n self.name = title\n\n def raise_window(self):\n self.window.activate_editor(self)\n\n #### Editor interface ######################################################\n\n def destroy_control(self):\n \"\"\"Destroy the toolkit-specific control that represents the part.\"\"\"\n self._on_window_close()\n super(TraitsUIEditor, self).destroy_control()\n\n #### Private interface #####################################################\n\n def _get_container(self):\n return self.obj.component\n\n def _set_container(self, value):\n self.obj.component = value\n\n def _get_component(self):\n return self.obj.component\n\n def _set_component(self, value):\n self.obj.component = value\n\n def _create_top_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n )\n return plot\n\n def _create_top_img_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n default_origin=self.image_default_origin,\n )\n return plot\n\n def _on_window_close(self):\n if self.session:\n try:\n ndx = self.session.windows.index(self)\n self.session.del_window(ndx)\n except ValueError:\n pass\n", "path": "chaco/plugin/plot_editor.py"}], "after_files": [{"content": "from chaco.shell.scaly_plot import ScalyPlot\nfrom enable.component_editor import ComponentEditor\nfrom pyface.workbench.api import TraitsUIEditor\nfrom traits.api import Any, Enum, HasTraits, Property, Str\nfrom traitsui.api import Item, View\n\n\nclass PlotUI(HasTraits):\n \"\"\"Simple Traits UI proxy for a Chaco plot.\"\"\"\n\n # The plot.\n component = Any()\n\n traits_view = View(\n Item(\"component\", editor=ComponentEditor(), show_label=False),\n resizable=True,\n )\n\n\nclass PlotEditor(TraitsUIEditor):\n \"\"\"A Workbench Editor showing a Chaco plot for the shell interface.\"\"\"\n\n bgcolor = Str(\"white\")\n image_default_origin = Enum(\n \"bottom left\", \"top left\", \"bottom right\", \"top right\"\n )\n\n # The plot.\n component = Property(Any)\n container = Property(Any)\n\n # The PlotData.\n data = Any()\n\n # The PlotSession of which we are a part. 
We need to know this in order\n # to notify it of our being closed, etc.\n session = Any()\n\n def __init__(\n self,\n is_image=False,\n bgcolor=\"white\",\n image_default_origin=\"top left\",\n *args,\n **kw\n ):\n\n super(TraitsUIEditor, self).__init__(**kw)\n\n # Some defaults which should be overridden by preferences.\n self.bgcolor = bgcolor\n self.image_default_origin = image_default_origin\n\n # Create an empty top-level container\n if is_image:\n top_container = self._create_top_img_container()\n else:\n top_container = self._create_top_container()\n\n self.obj = PlotUI(component=top_container)\n\n #### PlotWindow interface ##################################################\n\n def get_container(self):\n return self.obj.component\n\n def set_container(self, container):\n self.obj.component = container\n\n def iconize(self, iconize):\n \"\"\"Iconizes the window if *iconize* is True.\n\n Do nothing in this implementation.\n \"\"\"\n\n def maximize(self, maximize):\n \"\"\"If *maximize* is True, maximizes the window size; restores if False.\n\n Do nothing in this implementation.\n \"\"\"\n\n def set_size(self, width, height):\n pass\n\n def set_title(self, title):\n self.name = title\n\n def raise_window(self):\n self.window.activate_editor(self)\n\n #### Editor interface ######################################################\n\n def destroy_control(self):\n \"\"\"Destroy the toolkit-specific control that represents the part.\"\"\"\n self._on_window_close()\n super(TraitsUIEditor, self).destroy_control()\n\n #### Private interface #####################################################\n\n def _get_container(self):\n return self.obj.component\n\n def _set_container(self, value):\n self.obj.component = value\n\n def _get_component(self):\n return self.obj.component\n\n def _set_component(self, value):\n self.obj.component = value\n\n def _create_top_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n )\n return plot\n\n def _create_top_img_container(self):\n plot = ScalyPlot(\n padding=50,\n fill_padding=True,\n bgcolor=self.bgcolor,\n use_backbuffer=True,\n default_origin=self.image_default_origin,\n )\n return plot\n\n def _on_window_close(self):\n if self.session:\n try:\n ndx = self.session.windows.index(self)\n self.session.del_window(ndx)\n except ValueError:\n pass\n", "path": "chaco/plugin/plot_editor.py"}]} | 1,441 | 188 |
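The patch above boils down to importing Item and View directly from traitsui.api instead of going through a module alias. A minimal sketch of that style, assuming traitsui and enable are installed; PlotView is an illustrative stand-in, not part of Chaco's API:

```python
from enable.api import Component
from enable.component_editor import ComponentEditor
from traits.api import Any, HasTraits
from traitsui.api import Item, View


class PlotView(HasTraits):
    """Illustrative stand-in for PlotUI: wraps any enable Component in a TraitsUI view."""

    component = Any()

    traits_view = View(
        Item("component", editor=ComponentEditor(), show_label=False),
        resizable=True,
    )


if __name__ == "__main__":
    # Requires a GUI backend (Qt/wx); a real caller would pass a Chaco plot component.
    PlotView(component=Component()).configure_traits()
```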
gh_patches_debug_35309 | rasdani/github-patches | git_diff | nvaccess__nvda-9119 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows Store apps: use app title as product name instead of wwahost
**Reported by nvdakor on 2014-07-07 13:10**
Hi,
Currently, when invoking appModule.productName for a Windows Store app, NVDA says "Windows operating system". Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName.
Before:
1. Go to Windows Store app such as Weather.
2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName.
After:
1. Open any Windows Store app.
2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app.
Implementation:
1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name).
2. wwahost.AppModule.productName will contain the actual title of the app.
Thanks.
**Blocked by #4569**
Load specific app modules for apps hosted by wwahost.exe
**Reported by jteh on 2014-10-23 07:35**
wwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name.
I think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a "!" character, so we'll need to check whether Python will cope with this and replace if not.
**Blocked by #4360**
**Blocking #4259**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/appModules/wwahost.py`
Content:
```
1 #A part of NonVisual Desktop Access (NVDA)
2 #This file is covered by the GNU General Public License.
3 #See the file COPYING for more details.
4 #Copyright (C) 2012 NV Access Limited
5
6 from comtypes import COMError
7 import IAccessibleHandler
8 from NVDAObjects.IAccessible.MSHTML import Body
9 import appModuleHandler
10 import controlTypes
11 import winUser
12
13 class AppModule(appModuleHandler.AppModule):
14
15 def event_NVDAObject_init(self,obj):
16 #The root document of HTML Metro Apps must be treeted as an application.
17 if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server":
18 try:
19 paccParent=obj.IAccessibleObject.accParent.accParent
20 identity=IAccessibleHandler.getIAccIdentity(paccParent,0)
21 except (COMError,AttributeError):
22 identity=None
23 if identity:
24 windowHandle=identity.get('windowHandle')
25 if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding":
26 obj.role=controlTypes.ROLE_APPLICATION
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py
--- a/source/appModules/wwahost.py
+++ b/source/appModules/wwahost.py
@@ -1,26 +1,40 @@
-#A part of NonVisual Desktop Access (NVDA)
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
-#Copyright (C) 2012 NV Access Limited
+# A part of NonVisual Desktop Access (NVDA)
+# This file is covered by the GNU General Public License.
+# See the file COPYING for more details.
+# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee
-from comtypes import COMError
-import IAccessibleHandler
-from NVDAObjects.IAccessible.MSHTML import Body
+"""App module host for Windows 8.x and 10 apps hosted by wwahost.exe.
+In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps.
+In Windows 10, progressive web apps (PWA) and friends are hosted inside this process.
+App modules wishing to support apps hosted inside this process must subclass the AppModule class.
+"""
+
+import ctypes
import appModuleHandler
-import controlTypes
-import winUser
+import winKernel
+
+
+def getAppNameFromHost(processId):
+ # Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe.
+ # App modules for these are named after the hosted app name.
+ processHandle = winKernel.openProcess(
+ winKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId
+ )
+ length = ctypes.c_uint()
+ winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None)
+ appModel = ctypes.create_unicode_buffer(length.value)
+ winKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel)
+ winKernel.closeHandle(processHandle)
+ # Sometimes app model might be empty, so raise errors and fall back to wwahost.
+ if not appModel.value:
+ raise LookupError
+ # App model is shown as familyName!appName,
+ # and importing files with the exclamation point in the middle of the name isn't supported.
+ # Therefore return only the app name portion.
+ # Convert this into lowercase to make the file name consistent with other NVDA app modules.
+ return appModel.value.split("!")[-1].lower()
class AppModule(appModuleHandler.AppModule):
- def event_NVDAObject_init(self,obj):
- #The root document of HTML Metro Apps must be treeted as an application.
- if isinstance(obj,Body) and obj.windowClassName=="Internet Explorer_Server":
- try:
- paccParent=obj.IAccessibleObject.accParent.accParent
- identity=IAccessibleHandler.getIAccIdentity(paccParent,0)
- except (COMError,AttributeError):
- identity=None
- if identity:
- windowHandle=identity.get('windowHandle')
- if windowHandle and winUser.getClassName(windowHandle)=="Web Platform Embedding":
- obj.role=controlTypes.ROLE_APPLICATION
+ # WWAHost app content is treated as part of an app, not a browse mode document.
+ disableBrowseModeByDefault = True
| {"golden_diff": "diff --git a/source/appModules/wwahost.py b/source/appModules/wwahost.py\n--- a/source/appModules/wwahost.py\n+++ b/source/appModules/wwahost.py\n@@ -1,26 +1,40 @@\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n-#Copyright (C) 2012 NV Access Limited\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n+# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee\r\n \r\n-from comtypes import COMError\r\n-import IAccessibleHandler\r\n-from NVDAObjects.IAccessible.MSHTML import Body\r\n+\"\"\"App module host for Windows 8.x and 10 apps hosted by wwahost.exe.\r\n+In Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps.\r\n+In Windows 10, progressive web apps (PWA) and friends are hosted inside this process.\r\n+App modules wishing to support apps hosted inside this process must subclass the AppModule class.\r\n+\"\"\"\r\n+\r\n+import ctypes\r\n import appModuleHandler\r\n-import controlTypes\r\n-import winUser\r\n+import winKernel\r\n+\r\n+\r\n+def getAppNameFromHost(processId):\r\n+\t# Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe.\r\n+\t# App modules for these are named after the hosted app name.\r\n+\tprocessHandle = winKernel.openProcess(\r\n+\t\twinKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId\r\n+\t)\r\n+\tlength = ctypes.c_uint()\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None)\r\n+\tappModel = ctypes.create_unicode_buffer(length.value)\r\n+\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel)\r\n+\twinKernel.closeHandle(processHandle)\r\n+\t# Sometimes app model might be empty, so raise errors and fall back to wwahost.\r\n+\tif not appModel.value:\r\n+\t\traise LookupError\r\n+\t# App model is shown as familyName!appName,\r\n+\t# and importing files with the exclamation point in the middle of the name isn't supported.\r\n+\t# Therefore return only the app name portion.\r\n+\t# Convert this into lowercase to make the file name consistent with other NVDA app modules.\r\n+\treturn appModel.value.split(\"!\")[-1].lower()\r\n \r\n class AppModule(appModuleHandler.AppModule):\r\n \r\n-\tdef event_NVDAObject_init(self,obj):\r\n-\t\t#The root document of HTML Metro Apps must be treeted as an application. \r\n-\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n-\t\t\ttry:\r\n-\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n-\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n-\t\t\texcept (COMError,AttributeError):\r\n-\t\t\t\tidentity=None\r\n-\t\t\tif identity:\r\n-\t\t\t\twindowHandle=identity.get('windowHandle')\r\n-\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n-\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n+\t# WWAHost app content is treated as part of an app, not a browse mode document.\r\n+\tdisableBrowseModeByDefault = True\n", "issue": "Windows Store apps: use app title as product name instead of wwahost\n**Reported by nvdakor on 2014-07-07 13:10**\nHi,\nCurrently, when invoking appModule.productName for a Windows Store app, NVDA says \"Windows operating system\". 
Although this is fine for built-in apps such as Bing Weather and Windows Store, this may confuse users and developers when they are testing accessibility of Store apps (both existing ones and apps to be created in the future). As a way of providing actual product name for debugging purposes and for people to tell us which app they are having issues with, I propose using app title for productName.\nBefore:\n1. Go to Windows Store app such as Weather.\n2. Once the app opens, press NVDA+F1 to show developer info. Look under appModule.productName.\nAfter:\n1. Open any Windows Store app.\n2. Press NVDA+F1 to open developer info. appModule.productName tells you the title of the app.\nImplementation:\n1. In appModules.wwahost.py, override _get_productName to return the title (api.getForegroundObject().name).\n2. wwahost.AppModule.productName will contain the actual title of the app.\nThanks.\n\n**Blocked by #4569**\n\nLoad specific app modules for apps hosted by wwahost.exe\n**Reported by jteh on 2014-10-23 07:35**\nwwahost.exe hosts many Windows Store apps (though it seems some do have their own executables). To allow for application specific customisation, we should identify the specific application in the app module name.\n\nI think we can use GetApplicationUserModelId (in kernel32) for this purpose. This includes a \"!\" character, so we'll need to check whether Python will cope with this and replace if not.\n**Blocked by #4360**\n**Blocking #4259**\n\n", "before_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n#Copyright (C) 2012 NV Access Limited\r\n\r\nfrom comtypes import COMError\r\nimport IAccessibleHandler\r\nfrom NVDAObjects.IAccessible.MSHTML import Body\r\nimport appModuleHandler\r\nimport controlTypes\r\nimport winUser\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\t#The root document of HTML Metro Apps must be treeted as an application. 
\r\n\t\tif isinstance(obj,Body) and obj.windowClassName==\"Internet Explorer_Server\":\r\n\t\t\ttry:\r\n\t\t\t\tpaccParent=obj.IAccessibleObject.accParent.accParent\r\n\t\t\t\tidentity=IAccessibleHandler.getIAccIdentity(paccParent,0)\r\n\t\t\texcept (COMError,AttributeError):\r\n\t\t\t\tidentity=None\r\n\t\t\tif identity:\r\n\t\t\t\twindowHandle=identity.get('windowHandle')\r\n\t\t\t\tif windowHandle and winUser.getClassName(windowHandle)==\"Web Platform Embedding\":\r\n\t\t\t\t\tobj.role=controlTypes.ROLE_APPLICATION\r\n", "path": "source/appModules/wwahost.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n# Copyright (C) 2012-2020 NV Access Limited, Joseph Lee\r\n\r\n\"\"\"App module host for Windows 8.x and 10 apps hosted by wwahost.exe.\r\nIn Windows 8, apps written in Javascript are executed inside WWAHost, including some WinRT apps.\r\nIn Windows 10, progressive web apps (PWA) and friends are hosted inside this process.\r\nApp modules wishing to support apps hosted inside this process must subclass the AppModule class.\r\n\"\"\"\r\n\r\nimport ctypes\r\nimport appModuleHandler\r\nimport winKernel\r\n\r\n\r\ndef getAppNameFromHost(processId):\r\n\t# Some apps that come with Windows 8 and 8.1 are hosted by wwahost.exe.\r\n\t# App modules for these are named after the hosted app name.\r\n\tprocessHandle = winKernel.openProcess(\r\n\t\twinKernel.SYNCHRONIZE | winKernel.PROCESS_QUERY_INFORMATION, False, processId\r\n\t)\r\n\tlength = ctypes.c_uint()\r\n\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), None)\r\n\tappModel = ctypes.create_unicode_buffer(length.value)\r\n\twinKernel.kernel32.GetApplicationUserModelId(processHandle, ctypes.byref(length), appModel)\r\n\twinKernel.closeHandle(processHandle)\r\n\t# Sometimes app model might be empty, so raise errors and fall back to wwahost.\r\n\tif not appModel.value:\r\n\t\traise LookupError\r\n\t# App model is shown as familyName!appName,\r\n\t# and importing files with the exclamation point in the middle of the name isn't supported.\r\n\t# Therefore return only the app name portion.\r\n\t# Convert this into lowercase to make the file name consistent with other NVDA app modules.\r\n\treturn appModel.value.split(\"!\")[-1].lower()\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\t# WWAHost app content is treated as part of an app, not a browse mode document.\r\n\tdisableBrowseModeByDefault = True\r\n", "path": "source/appModules/wwahost.py"}]} | 952 | 752 |
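The issue suggests querying GetApplicationUserModelId from kernel32 and keeping the part after the "!", which is what the patched wwahost.py above does through NVDA's winKernel wrapper. A standalone ctypes sketch of the same lookup, assuming Windows 8 or later; app_name_from_pid is an illustrative helper, not NVDA API:

```python
import ctypes

PROCESS_QUERY_LIMITED_INFORMATION = 0x1000


def app_name_from_pid(pid):
    """Return the lower-cased app-name portion of a process's AppUserModelId."""
    kernel32 = ctypes.windll.kernel32
    handle = kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pid)
    if not handle:
        raise OSError("could not open process %d" % pid)
    try:
        length = ctypes.c_uint(0)
        # First call fails with ERROR_INSUFFICIENT_BUFFER but reports the required length.
        kernel32.GetApplicationUserModelId(handle, ctypes.byref(length), None)
        buf = ctypes.create_unicode_buffer(length.value)
        kernel32.GetApplicationUserModelId(handle, ctypes.byref(length), buf)
    finally:
        kernel32.CloseHandle(handle)
    if not buf.value:
        raise LookupError("process %d has no AppUserModelId (not a packaged app?)" % pid)
    # An AppUserModelId looks like "Microsoft.BingWeather_8wekyb3d8bbwe!App";
    # keep only the app name so it can double as a Python module name.
    return buf.value.split("!")[-1].lower()
```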
gh_patches_debug_34722 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Transform BytesCodec into a general codec.
Currently the BytesCodec is specific to the MNIST dataset; it should be modified to encode/decode any dataset using the bytes format.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `record_codec/__init__.py`
Content:
```
1 from record_codec.tf_example_codec import TFExampleCodec
2
3 __all__=[TFExampleCodec]
4
```
Path: `record_codec/bytes_codec.py`
Content:
```
1 import numpy as np
2
3 N = 28
4
5
6 # TODO: maybe use TF variant tensor to do more flexible encoding.
7 class BytesCodec(object):
8 def encode(self, data):
9 values = []
10 for _, f_value in data:
11 values.append(f_value)
12 return np.concatenate(values, axis=None).tobytes()
13
14 def decode(self, record):
15 parsed = np.frombuffer(record, dtype="uint8")
16 assert len(parsed) == N * N + 1
17 label = parsed[-1]
18 parsed = np.resize(parsed[:-1], new_shape=(N, N))
19 return {'image': parsed, 'label': label}
20
21 def show(self, data, label):
22 """Print the image and label on terminal for debugging"""
23 assert data.shape == (N, N) and data.dtype == "uint8"
24 assert label >= 0 and label <= 9
25
26 def grey(x):
27 return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m"
28
29 for line in data:
30 s = "".join(grey(x) for x in line)
31 print(s)
32 print("label =", label)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/record_codec/__init__.py b/record_codec/__init__.py
--- a/record_codec/__init__.py
+++ b/record_codec/__init__.py
@@ -1,3 +1,4 @@
+from record_codec.bytes_codec import BytesCodec
from record_codec.tf_example_codec import TFExampleCodec
-__all__=[TFExampleCodec]
+__all__=[BytesCodec, TFExampleCodec]
diff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py
--- a/record_codec/bytes_codec.py
+++ b/record_codec/bytes_codec.py
@@ -1,32 +1,42 @@
import numpy as np
-N = 28
-
-# TODO: maybe use TF variant tensor to do more flexible encoding.
class BytesCodec(object):
+ def __init__(self, feature_columns):
+ self._feature_columns = feature_columns
+ self._col_id = {
+ c.name: order for order, c in enumerate(feature_columns)
+ }
+
def encode(self, data):
- values = []
- for _, f_value in data:
- values.append(f_value)
- return np.concatenate(values, axis=None).tobytes()
+ # Rearrange the data in order of the columns.
+ values = [None] * len(self._feature_columns)
+ for f_name, f_value in data:
+ col_id = self._col_id[f_name]
+ column = self._feature_columns[col_id]
+ if column.dtype != f_value.dtype or column.shape != f_value.shape:
+ raise ValueError(
+ "Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)" % (
+ f_name, column.dtype, column.shape, f_value.dtype, f_value.shape)
+ )
+ values[col_id] = f_value.tobytes()
+ for id, value in enumerate(values):
+ if value is None:
+ raise ValueError(
+ "Missing value for column: %s",
+ self._col_id[id].name
+ )
+ return b"".join(values)
def decode(self, record):
- parsed = np.frombuffer(record, dtype="uint8")
- assert len(parsed) == N * N + 1
- label = parsed[-1]
- parsed = np.resize(parsed[:-1], new_shape=(N, N))
- return {'image': parsed, 'label': label}
-
- def show(self, data, label):
- """Print the image and label on terminal for debugging"""
- assert data.shape == (N, N) and data.dtype == "uint8"
- assert label >= 0 and label <= 9
-
- def grey(x):
- return "\033[48;2;%d;%d;%dm" % (x, x, x) + " \033[0m"
-
- for line in data:
- s = "".join(grey(x) for x in line)
- print(s)
- print("label =", label)
+ offset = 0
+ res = {}
+ for c in self._feature_columns:
+ count = np.prod(c.shape)
+ res[c.name] = np.frombuffer(
+ record,
+ dtype=c.dtype.as_numpy_dtype,
+ count=count,
+ offset=offset).reshape(c.shape)
+ offset += count * c.dtype.size
+ return res
| {"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -1,3 +1,4 @@\n+from record_codec.bytes_codec import BytesCodec\n from record_codec.tf_example_codec import TFExampleCodec\n \n-__all__=[TFExampleCodec]\n+__all__=[BytesCodec, TFExampleCodec]\ndiff --git a/record_codec/bytes_codec.py b/record_codec/bytes_codec.py\n--- a/record_codec/bytes_codec.py\n+++ b/record_codec/bytes_codec.py\n@@ -1,32 +1,42 @@\n import numpy as np\n \n-N = 28\n \n-\n-# TODO: maybe use TF variant tensor to do more flexible encoding.\n class BytesCodec(object):\n+ def __init__(self, feature_columns):\n+ self._feature_columns = feature_columns\n+ self._col_id = {\n+ c.name: order for order, c in enumerate(feature_columns)\n+ }\n+\n def encode(self, data):\n- values = [] \n- for _, f_value in data:\n- values.append(f_value)\n- return np.concatenate(values, axis=None).tobytes()\n+ # Rearrange the data in order of the columns.\n+ values = [None] * len(self._feature_columns)\n+ for f_name, f_value in data:\n+ col_id = self._col_id[f_name]\n+ column = self._feature_columns[col_id]\n+ if column.dtype != f_value.dtype or column.shape != f_value.shape:\n+ raise ValueError(\n+ \"Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)\" % (\n+ f_name, column.dtype, column.shape, f_value.dtype, f_value.shape)\n+ )\n+ values[col_id] = f_value.tobytes()\n+ for id, value in enumerate(values):\n+ if value is None:\n+ raise ValueError(\n+ \"Missing value for column: %s\",\n+ self._col_id[id].name\n+ )\n+ return b\"\".join(values)\n \n def decode(self, record):\n- parsed = np.frombuffer(record, dtype=\"uint8\")\n- assert len(parsed) == N * N + 1\n- label = parsed[-1]\n- parsed = np.resize(parsed[:-1], new_shape=(N, N))\n- return {'image': parsed, 'label': label}\n-\n- def show(self, data, label):\n- \"\"\"Print the image and label on terminal for debugging\"\"\"\n- assert data.shape == (N, N) and data.dtype == \"uint8\"\n- assert label >= 0 and label <= 9\n-\n- def grey(x):\n- return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n-\n- for line in data:\n- s = \"\".join(grey(x) for x in line)\n- print(s)\n- print(\"label =\", label)\n+ offset = 0\n+ res = {}\n+ for c in self._feature_columns:\n+ count = np.prod(c.shape)\n+ res[c.name] = np.frombuffer(\n+ record,\n+ dtype=c.dtype.as_numpy_dtype,\n+ count=count,\n+ offset=offset).reshape(c.shape)\n+ offset += count * c.dtype.size\n+ return res\n", "issue": "Transform BytesCodec into a general codec.\nnow the BytesCodec is for mnist dataset specified, and it should be modified to encode/decode any dataset using bytes format.\n", "before_files": [{"content": "from record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "import numpy as np\n\nN = 28\n\n\n# TODO: maybe use TF variant tensor to do more flexible encoding.\nclass BytesCodec(object):\n def encode(self, data):\n values = [] \n for _, f_value in data:\n values.append(f_value)\n return np.concatenate(values, axis=None).tobytes()\n\n def decode(self, record):\n parsed = np.frombuffer(record, dtype=\"uint8\")\n assert len(parsed) == N * N + 1\n label = parsed[-1]\n parsed = np.resize(parsed[:-1], new_shape=(N, N))\n return {'image': parsed, 'label': label}\n\n def show(self, data, label):\n \"\"\"Print the image and label on terminal for debugging\"\"\"\n assert data.shape == (N, N) and data.dtype == \"uint8\"\n assert label >= 0 
and label <= 9\n\n def grey(x):\n return \"\\033[48;2;%d;%d;%dm\" % (x, x, x) + \" \\033[0m\"\n\n for line in data:\n s = \"\".join(grey(x) for x in line)\n print(s)\n print(\"label =\", label)\n", "path": "record_codec/bytes_codec.py"}], "after_files": [{"content": "from record_codec.bytes_codec import BytesCodec\nfrom record_codec.tf_example_codec import TFExampleCodec\n\n__all__=[BytesCodec, TFExampleCodec]\n", "path": "record_codec/__init__.py"}, {"content": "import numpy as np\n\n\nclass BytesCodec(object):\n def __init__(self, feature_columns):\n self._feature_columns = feature_columns\n self._col_id = {\n c.name: order for order, c in enumerate(feature_columns)\n }\n\n def encode(self, data):\n # Rearrange the data in order of the columns.\n values = [None] * len(self._feature_columns)\n for f_name, f_value in data:\n col_id = self._col_id[f_name]\n column = self._feature_columns[col_id]\n if column.dtype != f_value.dtype or column.shape != f_value.shape:\n raise ValueError(\n \"Input data doesn't match column %s definition: column: (%s, %s) data: (%s, %s)\" % (\n f_name, column.dtype, column.shape, f_value.dtype, f_value.shape)\n )\n values[col_id] = f_value.tobytes()\n for id, value in enumerate(values):\n if value is None:\n raise ValueError(\n \"Missing value for column: %s\",\n self._col_id[id].name\n )\n return b\"\".join(values)\n\n def decode(self, record):\n offset = 0\n res = {}\n for c in self._feature_columns:\n count = np.prod(c.shape)\n res[c.name] = np.frombuffer(\n record,\n dtype=c.dtype.as_numpy_dtype,\n count=count,\n offset=offset).reshape(c.shape)\n offset += count * c.dtype.size\n return res\n", "path": "record_codec/bytes_codec.py"}]} | 658 | 765 |
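Assuming the patched record_codec package above is importable, a hypothetical round trip might look like the sketch below. Column is a stand-in namedtuple for whatever feature-column objects the project passes in; the generalized codec only relies on .name, .shape and a TensorFlow-style dtype exposing as_numpy_dtype and size:

```python
import collections

import numpy as np
import tensorflow as tf

from record_codec import BytesCodec

Column = collections.namedtuple("Column", ["name", "dtype", "shape"])

columns = [
    Column(name="image", dtype=tf.uint8, shape=(28, 28)),
    Column(name="label", dtype=tf.int64, shape=(1,)),
]

codec = BytesCodec(columns)

# encode() takes (name, value) pairs in any order and packs them in column order.
record = codec.encode([
    ("image", np.zeros((28, 28), dtype=np.uint8)),
    ("label", np.array([7], dtype=np.int64)),
])

# decode() walks the columns, reading count * dtype.size bytes per field.
decoded = codec.decode(record)
assert decoded["image"].shape == (28, 28)
assert decoded["label"][0] == 7
```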
gh_patches_debug_13513 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-2109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EB_PYTHON startup behaviour
The documentation at https://easybuild.readthedocs.io/en/latest/Python-2-3-compatibility.html?#controlling-which-python-command-easybuild-will-use-via-eb-python suggests that one can control which version of Python EasyBuild will use by setting EB_PYTHON.
Since https://github.com/easybuilders/easybuild-easyblocks/commit/40b76bef5a1f4f149e0c13cce913c051b54da5a3 running `module load EasyBuild` will now overwrite any existing EB_PYTHON environment variable.
While it is still possible to configure EB_PYTHON after running `module load EasyBuild`, it is no longer possible to configure EB_PYTHON first (e.g. in one's shell startup), since it is overwritten when EasyBuild loads.
If this is the desired behaviour then it should be documented. If this is not the desired behaviour, then it is a bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/e/easybuildmeta.py`
Content:
```
1 # #
2 # Copyright 2013-2020 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 # #
25 """
26 EasyBuild support for installing EasyBuild, implemented as an easyblock
27
28 @author: Kenneth Hoste (UGent)
29 """
30 import copy
31 import os
32 import re
33 from distutils.version import LooseVersion
34
35 from easybuild.easyblocks.generic.pythonpackage import PythonPackage
36 from easybuild.tools.build_log import EasyBuildError
37 from easybuild.tools.filetools import read_file
38 from easybuild.tools.modules import get_software_root_env_var_name
39 from easybuild.tools.py2vs3 import OrderedDict
40 from easybuild.tools.utilities import flatten
41
42
43 # note: we can't use EB_EasyBuild as easyblock name, as that would require an easyblock named 'easybuild.py',
44 # which would screw up namespacing and create all kinds of problems (e.g. easyblocks not being found anymore)
45 class EB_EasyBuildMeta(PythonPackage):
46 """Support for install EasyBuild."""
47
48 def __init__(self, *args, **kwargs):
49 """Initialize custom class variables."""
50 super(EB_EasyBuildMeta, self).__init__(*args, **kwargs)
51 self.real_initial_environ = None
52
53 self.easybuild_pkgs = ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs']
54 if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) <= LooseVersion('3.999'):
55 # deliberately include vsc-install & vsc-base twice;
56 # first time to ensure the specified vsc-install/vsc-base package is available when framework gets installed
57 self.easybuild_pkgs.insert(0, 'vsc-base')
58 self.easybuild_pkgs.insert(0, 'vsc-install')
59 # second time as last package to be installed, to ensure that the vsc-base version listed
60 # in easy-install.pth is the one specified;
61 # when installing the easybuild-* packages, the vsc-base version in easy-install.pth may be 'bumped'
62 # if a newer vsc-base version is found somewhere (e.g. provided by the OS)
63 self.easybuild_pkgs.extend(['vsc-base', 'vsc-install'])
64 # consider setuptools first, in case it is listed as a sources
65 self.easybuild_pkgs.insert(0, 'setuptools')
66
67 def check_readiness_step(self):
68 """Make sure EasyBuild can be installed with a loaded EasyBuild module."""
69 env_var_name = get_software_root_env_var_name(self.name)
70 if env_var_name in os.environ:
71 os.environ.pop(env_var_name)
72 self.log.debug("$%s is unset so EasyBuild can be installed with a loaded EasyBuild module" % env_var_name)
73 else:
74 self.log.debug("Not unsetting $%s since it's not set" % env_var_name)
75
76 super(EB_EasyBuildMeta, self).check_readiness_step()
77
78 def build_step(self):
79 """No building for EasyBuild packages."""
80 pass
81
82 def install_step(self):
83 """Install EasyBuild packages one by one."""
84 try:
85 subdirs = os.listdir(self.builddir)
86 for pkg in self.easybuild_pkgs:
87 seldirs = [x for x in subdirs if x.startswith(pkg)]
88 if len(seldirs) != 1:
89 # setuptools is optional since it may be available in the OS;
90 # vsc-install and vsc-base sources are optional,
91 # they can be pulled in from PyPi when installing easybuild-framework too
92 if pkg not in ['setuptools', 'vsc-base', 'vsc-install']:
93 raise EasyBuildError("Failed to find required EasyBuild package %s (subdirs: %s, seldirs: %s)",
94 pkg, subdirs, seldirs)
95
96 else:
97 self.log.info("Installing package %s", pkg)
98 os.chdir(os.path.join(self.builddir, seldirs[0]))
99 super(EB_EasyBuildMeta, self).install_step()
100
101 except OSError as err:
102 raise EasyBuildError("Failed to install EasyBuild packages: %s", err)
103
104 def post_install_step(self):
105 """Remove setuptools.pth file that hard includes a system-wide (site-packages) path, if it is there."""
106
107 setuptools_pth = os.path.join(self.installdir, self.pylibdir, 'setuptools.pth')
108 if os.path.exists(setuptools_pth):
109 setuptools_pth_txt = read_file(setuptools_pth)
110 # any line that starts with '/' is a sign of trouble
111 sys_path_regex = re.compile('^/', re.M)
112 if sys_path_regex.search(setuptools_pth_txt):
113 self.log.warning("Found %s, and includes one or more absolute system paths. Removing it.",
114 setuptools_pth)
115 try:
116 os.remove(setuptools_pth)
117 except OSError as err:
118 raise EasyBuildError("Failed to remove %s: %s", setuptools_pth, err)
119
120 def sanity_check_step(self):
121 """Custom sanity check for EasyBuild."""
122
123 # check whether easy-install.pth contains correct entries
124 easy_install_pth = os.path.join(self.installdir, self.pylibdir, 'easy-install.pth')
125 if os.path.exists(easy_install_pth):
126 easy_install_pth_txt = read_file(easy_install_pth)
127
128 ignore_pkgs = ['setuptools', 'vsc-install']
129 if LooseVersion(self.version) > LooseVersion('3.999'):
130 ignore_pkgs.append('vsc-base')
131
132 for pkg in [p for p in self.easybuild_pkgs if p not in ignore_pkgs]:
133 if pkg == 'vsc-base':
134 # don't include strict version check for vsc-base
135 pkg_regex = re.compile(r"^\./%s" % pkg.replace('-', '_'), re.M)
136 else:
137 major_minor_version = '.'.join(self.version.split('.')[:2])
138 pkg_regex = re.compile(r"^\./%s-%s" % (pkg.replace('-', '_'), major_minor_version), re.M)
139
140 if not pkg_regex.search(easy_install_pth_txt):
141 raise EasyBuildError("Failed to find pattern '%s' in %s: %s",
142 pkg_regex.pattern, easy_install_pth, easy_install_pth_txt)
143
144 # list of dirs to check, by package
145 # boolean indicates whether dir is expected to reside in Python lib/pythonX/site-packages dir
146 subdirs_by_pkg = {
147 'easybuild-framework': [('easybuild/framework', True), ('easybuild/tools', True)],
148 'easybuild-easyblocks': [('easybuild/easyblocks', True)],
149 'easybuild-easyconfigs': [('easybuild/easyconfigs', False)],
150 }
151 if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) < LooseVersion('3.999'):
152 subdirs_by_pkg.update({
153 'vsc-base': [('vsc/utils', True)],
154 })
155
156 # final list of directories to check, by setup tool
157 # order matters, e.g. setuptools before distutils
158 eb_dirs = OrderedDict()
159 eb_dirs['setuptools'] = []
160 eb_dirs['distutils.core'] = flatten([x for x in subdirs_by_pkg.values()])
161
162 # determine setup tool (setuptools or distutils)
163 setup_tool = None
164 for tool in eb_dirs.keys():
165 self.log.debug("Trying %s.." % tool)
166 try:
167 exec("from %s import setup" % tool)
168 setup_tool = tool
169 break
170 except ImportError:
171 pass
172 self.log.debug('setup_tool: %s' % setup_tool)
173
174 # for a setuptools installation, we need to figure out the egg dirs,
175 # since we don't know the individual package versions
176 if setup_tool == 'setuptools':
177 try:
178 installed_dirs = os.listdir(os.path.join(self.installdir, self.pylibdir))
179 for (pkg, subdirs) in subdirs_by_pkg.items():
180 sel_dirs = [x for x in installed_dirs if x.startswith(pkg.replace('-', '_'))]
181 if not len(sel_dirs) == 1:
182 raise EasyBuildError("Failed to isolate installed egg dir for %s", pkg)
183
184 for (subdir, _) in subdirs:
185 # eggs always go in Python lib/pythonX/site-packages dir with setuptools
186 eb_dirs['setuptools'].append((os.path.join(sel_dirs[0], subdir), True))
187 except OSError as err:
188 raise EasyBuildError("Failed to determine sanity check dir paths: %s", err)
189
190 # set of sanity check paths to check for EasyBuild
191 custom_paths = {
192 'files': ['bin/eb'],
193 'dirs': [self.pylibdir] + [[x, os.path.join(self.pylibdir, x)][y] for (x, y) in eb_dirs[setup_tool]],
194 }
195
196 # make sure we don't trip over deprecated behavior in old EasyBuild versions
197 eb_cmd = 'eb'
198 if LooseVersion(self.version) <= LooseVersion('1.16.0'):
199 eb_cmd = 'EASYBUILD_DEPRECATED=1.0 eb'
200
201 # set of sanity check commands to run for EasyBuild
202 custom_commands = [
203 # this may spit out a wrong version, but that should be safe to ignore
204 # occurs when the EasyBuild being used is newer than the EasyBuild being installed
205 (eb_cmd, '--version'),
206 (eb_cmd, '-a'),
207 (eb_cmd, '-e ConfigureMake -a'),
208 ]
209
210 # (temporary) cleanse copy of initial environment to avoid conflict with (potentially) loaded EasyBuild module
211 self.real_initial_environ = copy.deepcopy(self.initial_environ)
212 for env_var in ['_LMFILES_', 'LOADEDMODULES']:
213 if env_var in self.initial_environ:
214 self.initial_environ.pop(env_var)
215 os.environ.pop(env_var)
216 self.log.debug("Unset $%s in current env and copy of original env to make sanity check work" % env_var)
217
218 super(EB_EasyBuildMeta, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
219
220 def make_module_extra(self):
221 """
222 Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.
223 """
224 txt = super(EB_EasyBuildMeta, self).make_module_extra()
225 txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)
226 return txt
227
228 def make_module_step(self, fake=False):
229 """Create module file, before copy of original environment that was tampered with is restored."""
230 modpath = super(EB_EasyBuildMeta, self).make_module_step(fake=fake)
231
232 if not fake:
233 # restore copy of original environment
234 self.initial_environ = copy.deepcopy(self.real_initial_environ)
235 self.log.debug("Restored copy of original environment")
236
237 return modpath
238
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/easybuild/easyblocks/e/easybuildmeta.py b/easybuild/easyblocks/e/easybuildmeta.py
--- a/easybuild/easyblocks/e/easybuildmeta.py
+++ b/easybuild/easyblocks/e/easybuildmeta.py
@@ -219,10 +219,11 @@
def make_module_extra(self):
"""
- Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.
+ Set $EB_INSTALLPYTHON to ensure that this EasyBuild installation uses the same Python executable it was
+ installed with (which can still be overridden by the user with $EB_PYTHON).
"""
txt = super(EB_EasyBuildMeta, self).make_module_extra()
- txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)
+ txt += self.module_generator.set_environment('EB_INSTALLPYTHON', self.python_cmd)
return txt
def make_module_step(self, fake=False):
| {"golden_diff": "diff --git a/easybuild/easyblocks/e/easybuildmeta.py b/easybuild/easyblocks/e/easybuildmeta.py\n--- a/easybuild/easyblocks/e/easybuildmeta.py\n+++ b/easybuild/easyblocks/e/easybuildmeta.py\n@@ -219,10 +219,11 @@\n \n def make_module_extra(self):\n \"\"\"\n- Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.\n+ Set $EB_INSTALLPYTHON to ensure that this EasyBuild installation uses the same Python executable it was\n+ installed with (which can still be overridden by the user with $EB_PYTHON).\n \"\"\"\n txt = super(EB_EasyBuildMeta, self).make_module_extra()\n- txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)\n+ txt += self.module_generator.set_environment('EB_INSTALLPYTHON', self.python_cmd)\n return txt\n \n def make_module_step(self, fake=False):\n", "issue": "EB_PYTHON startup behaviour\nThe documentation at https://easybuild.readthedocs.io/en/latest/Python-2-3-compatibility.html?#controlling-which-python-command-easybuild-will-use-via-eb-python suggests that one can control which version of Python EasyBuild will use by setting EB_PYTHON.\r\n\r\nSince https://github.com/easybuilders/easybuild-easyblocks/commit/40b76bef5a1f4f149e0c13cce913c051b54da5a3 running `module load EasyBuild` will now overwrite any existing EB_PYTHON environment variable.\r\n\r\nWhile it is still possible to configure EB_PYTHON after running `module load EasyBuild`, it is no longer possible to configure EB_PYTHON first (e.g. in one's shell startup), since it is overwritten when EasyBuild loads.\r\n\r\nIf this is the desired behaviour then it should be documented. If this is not the desired behaviour, then it is a bug.\n", "before_files": [{"content": "# #\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nEasyBuild support for installing EasyBuild, implemented as an easyblock\n\n@author: Kenneth Hoste (UGent)\n\"\"\"\nimport copy\nimport os\nimport re\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import read_file\nfrom easybuild.tools.modules import get_software_root_env_var_name\nfrom easybuild.tools.py2vs3 import OrderedDict\nfrom easybuild.tools.utilities import flatten\n\n\n# note: we can't use EB_EasyBuild as easyblock name, as that would require an easyblock named 'easybuild.py',\n# which would screw up namespacing and create all kinds of problems (e.g. easyblocks not being found anymore)\nclass EB_EasyBuildMeta(PythonPackage):\n \"\"\"Support for install EasyBuild.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize custom class variables.\"\"\"\n super(EB_EasyBuildMeta, self).__init__(*args, **kwargs)\n self.real_initial_environ = None\n\n self.easybuild_pkgs = ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs']\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) <= LooseVersion('3.999'):\n # deliberately include vsc-install & vsc-base twice;\n # first time to ensure the specified vsc-install/vsc-base package is available when framework gets installed\n self.easybuild_pkgs.insert(0, 'vsc-base')\n self.easybuild_pkgs.insert(0, 'vsc-install')\n # second time as last package to be installed, to ensure that the vsc-base version listed\n # in easy-install.pth is the one specified;\n # when installing the easybuild-* packages, the vsc-base version in easy-install.pth may be 'bumped'\n # if a newer vsc-base version is found somewhere (e.g. 
provided by the OS)\n self.easybuild_pkgs.extend(['vsc-base', 'vsc-install'])\n # consider setuptools first, in case it is listed as a sources\n self.easybuild_pkgs.insert(0, 'setuptools')\n\n def check_readiness_step(self):\n \"\"\"Make sure EasyBuild can be installed with a loaded EasyBuild module.\"\"\"\n env_var_name = get_software_root_env_var_name(self.name)\n if env_var_name in os.environ:\n os.environ.pop(env_var_name)\n self.log.debug(\"$%s is unset so EasyBuild can be installed with a loaded EasyBuild module\" % env_var_name)\n else:\n self.log.debug(\"Not unsetting $%s since it's not set\" % env_var_name)\n\n super(EB_EasyBuildMeta, self).check_readiness_step()\n\n def build_step(self):\n \"\"\"No building for EasyBuild packages.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install EasyBuild packages one by one.\"\"\"\n try:\n subdirs = os.listdir(self.builddir)\n for pkg in self.easybuild_pkgs:\n seldirs = [x for x in subdirs if x.startswith(pkg)]\n if len(seldirs) != 1:\n # setuptools is optional since it may be available in the OS;\n # vsc-install and vsc-base sources are optional,\n # they can be pulled in from PyPi when installing easybuild-framework too\n if pkg not in ['setuptools', 'vsc-base', 'vsc-install']:\n raise EasyBuildError(\"Failed to find required EasyBuild package %s (subdirs: %s, seldirs: %s)\",\n pkg, subdirs, seldirs)\n\n else:\n self.log.info(\"Installing package %s\", pkg)\n os.chdir(os.path.join(self.builddir, seldirs[0]))\n super(EB_EasyBuildMeta, self).install_step()\n\n except OSError as err:\n raise EasyBuildError(\"Failed to install EasyBuild packages: %s\", err)\n\n def post_install_step(self):\n \"\"\"Remove setuptools.pth file that hard includes a system-wide (site-packages) path, if it is there.\"\"\"\n\n setuptools_pth = os.path.join(self.installdir, self.pylibdir, 'setuptools.pth')\n if os.path.exists(setuptools_pth):\n setuptools_pth_txt = read_file(setuptools_pth)\n # any line that starts with '/' is a sign of trouble\n sys_path_regex = re.compile('^/', re.M)\n if sys_path_regex.search(setuptools_pth_txt):\n self.log.warning(\"Found %s, and includes one or more absolute system paths. 
Removing it.\",\n setuptools_pth)\n try:\n os.remove(setuptools_pth)\n except OSError as err:\n raise EasyBuildError(\"Failed to remove %s: %s\", setuptools_pth, err)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for EasyBuild.\"\"\"\n\n # check whether easy-install.pth contains correct entries\n easy_install_pth = os.path.join(self.installdir, self.pylibdir, 'easy-install.pth')\n if os.path.exists(easy_install_pth):\n easy_install_pth_txt = read_file(easy_install_pth)\n\n ignore_pkgs = ['setuptools', 'vsc-install']\n if LooseVersion(self.version) > LooseVersion('3.999'):\n ignore_pkgs.append('vsc-base')\n\n for pkg in [p for p in self.easybuild_pkgs if p not in ignore_pkgs]:\n if pkg == 'vsc-base':\n # don't include strict version check for vsc-base\n pkg_regex = re.compile(r\"^\\./%s\" % pkg.replace('-', '_'), re.M)\n else:\n major_minor_version = '.'.join(self.version.split('.')[:2])\n pkg_regex = re.compile(r\"^\\./%s-%s\" % (pkg.replace('-', '_'), major_minor_version), re.M)\n\n if not pkg_regex.search(easy_install_pth_txt):\n raise EasyBuildError(\"Failed to find pattern '%s' in %s: %s\",\n pkg_regex.pattern, easy_install_pth, easy_install_pth_txt)\n\n # list of dirs to check, by package\n # boolean indicates whether dir is expected to reside in Python lib/pythonX/site-packages dir\n subdirs_by_pkg = {\n 'easybuild-framework': [('easybuild/framework', True), ('easybuild/tools', True)],\n 'easybuild-easyblocks': [('easybuild/easyblocks', True)],\n 'easybuild-easyconfigs': [('easybuild/easyconfigs', False)],\n }\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) < LooseVersion('3.999'):\n subdirs_by_pkg.update({\n 'vsc-base': [('vsc/utils', True)],\n })\n\n # final list of directories to check, by setup tool\n # order matters, e.g. 
setuptools before distutils\n eb_dirs = OrderedDict()\n eb_dirs['setuptools'] = []\n eb_dirs['distutils.core'] = flatten([x for x in subdirs_by_pkg.values()])\n\n # determine setup tool (setuptools or distutils)\n setup_tool = None\n for tool in eb_dirs.keys():\n self.log.debug(\"Trying %s..\" % tool)\n try:\n exec(\"from %s import setup\" % tool)\n setup_tool = tool\n break\n except ImportError:\n pass\n self.log.debug('setup_tool: %s' % setup_tool)\n\n # for a setuptools installation, we need to figure out the egg dirs,\n # since we don't know the individual package versions\n if setup_tool == 'setuptools':\n try:\n installed_dirs = os.listdir(os.path.join(self.installdir, self.pylibdir))\n for (pkg, subdirs) in subdirs_by_pkg.items():\n sel_dirs = [x for x in installed_dirs if x.startswith(pkg.replace('-', '_'))]\n if not len(sel_dirs) == 1:\n raise EasyBuildError(\"Failed to isolate installed egg dir for %s\", pkg)\n\n for (subdir, _) in subdirs:\n # eggs always go in Python lib/pythonX/site-packages dir with setuptools\n eb_dirs['setuptools'].append((os.path.join(sel_dirs[0], subdir), True))\n except OSError as err:\n raise EasyBuildError(\"Failed to determine sanity check dir paths: %s\", err)\n\n # set of sanity check paths to check for EasyBuild\n custom_paths = {\n 'files': ['bin/eb'],\n 'dirs': [self.pylibdir] + [[x, os.path.join(self.pylibdir, x)][y] for (x, y) in eb_dirs[setup_tool]],\n }\n\n # make sure we don't trip over deprecated behavior in old EasyBuild versions\n eb_cmd = 'eb'\n if LooseVersion(self.version) <= LooseVersion('1.16.0'):\n eb_cmd = 'EASYBUILD_DEPRECATED=1.0 eb'\n\n # set of sanity check commands to run for EasyBuild\n custom_commands = [\n # this may spit out a wrong version, but that should be safe to ignore\n # occurs when the EasyBuild being used is newer than the EasyBuild being installed\n (eb_cmd, '--version'),\n (eb_cmd, '-a'),\n (eb_cmd, '-e ConfigureMake -a'),\n ]\n\n # (temporary) cleanse copy of initial environment to avoid conflict with (potentially) loaded EasyBuild module\n self.real_initial_environ = copy.deepcopy(self.initial_environ)\n for env_var in ['_LMFILES_', 'LOADEDMODULES']:\n if env_var in self.initial_environ:\n self.initial_environ.pop(env_var)\n os.environ.pop(env_var)\n self.log.debug(\"Unset $%s in current env and copy of original env to make sanity check work\" % env_var)\n\n super(EB_EasyBuildMeta, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self):\n \"\"\"\n Set $EB_PYTHON to ensure that this EasyBuild installation uses the same Python executable it was installed with.\n \"\"\"\n txt = super(EB_EasyBuildMeta, self).make_module_extra()\n txt += self.module_generator.set_environment('EB_PYTHON', self.python_cmd)\n return txt\n\n def make_module_step(self, fake=False):\n \"\"\"Create module file, before copy of original environment that was tampered with is restored.\"\"\"\n modpath = super(EB_EasyBuildMeta, self).make_module_step(fake=fake)\n\n if not fake:\n # restore copy of original environment\n self.initial_environ = copy.deepcopy(self.real_initial_environ)\n self.log.debug(\"Restored copy of original environment\")\n\n return modpath\n", "path": "easybuild/easyblocks/e/easybuildmeta.py"}], "after_files": [{"content": "# #\n# Copyright 2013-2020 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish 
Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n# #\n\"\"\"\nEasyBuild support for installing EasyBuild, implemented as an easyblock\n\n@author: Kenneth Hoste (UGent)\n\"\"\"\nimport copy\nimport os\nimport re\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import read_file\nfrom easybuild.tools.modules import get_software_root_env_var_name\nfrom easybuild.tools.py2vs3 import OrderedDict\nfrom easybuild.tools.utilities import flatten\n\n\n# note: we can't use EB_EasyBuild as easyblock name, as that would require an easyblock named 'easybuild.py',\n# which would screw up namespacing and create all kinds of problems (e.g. easyblocks not being found anymore)\nclass EB_EasyBuildMeta(PythonPackage):\n \"\"\"Support for install EasyBuild.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize custom class variables.\"\"\"\n super(EB_EasyBuildMeta, self).__init__(*args, **kwargs)\n self.real_initial_environ = None\n\n self.easybuild_pkgs = ['easybuild-framework', 'easybuild-easyblocks', 'easybuild-easyconfigs']\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) <= LooseVersion('3.999'):\n # deliberately include vsc-install & vsc-base twice;\n # first time to ensure the specified vsc-install/vsc-base package is available when framework gets installed\n self.easybuild_pkgs.insert(0, 'vsc-base')\n self.easybuild_pkgs.insert(0, 'vsc-install')\n # second time as last package to be installed, to ensure that the vsc-base version listed\n # in easy-install.pth is the one specified;\n # when installing the easybuild-* packages, the vsc-base version in easy-install.pth may be 'bumped'\n # if a newer vsc-base version is found somewhere (e.g. 
provided by the OS)\n self.easybuild_pkgs.extend(['vsc-base', 'vsc-install'])\n # consider setuptools first, in case it is listed as a sources\n self.easybuild_pkgs.insert(0, 'setuptools')\n\n def check_readiness_step(self):\n \"\"\"Make sure EasyBuild can be installed with a loaded EasyBuild module.\"\"\"\n env_var_name = get_software_root_env_var_name(self.name)\n if env_var_name in os.environ:\n os.environ.pop(env_var_name)\n self.log.debug(\"$%s is unset so EasyBuild can be installed with a loaded EasyBuild module\" % env_var_name)\n else:\n self.log.debug(\"Not unsetting $%s since it's not set\" % env_var_name)\n\n super(EB_EasyBuildMeta, self).check_readiness_step()\n\n def build_step(self):\n \"\"\"No building for EasyBuild packages.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install EasyBuild packages one by one.\"\"\"\n try:\n subdirs = os.listdir(self.builddir)\n for pkg in self.easybuild_pkgs:\n seldirs = [x for x in subdirs if x.startswith(pkg)]\n if len(seldirs) != 1:\n # setuptools is optional since it may be available in the OS;\n # vsc-install and vsc-base sources are optional,\n # they can be pulled in from PyPi when installing easybuild-framework too\n if pkg not in ['setuptools', 'vsc-base', 'vsc-install']:\n raise EasyBuildError(\"Failed to find required EasyBuild package %s (subdirs: %s, seldirs: %s)\",\n pkg, subdirs, seldirs)\n\n else:\n self.log.info(\"Installing package %s\", pkg)\n os.chdir(os.path.join(self.builddir, seldirs[0]))\n super(EB_EasyBuildMeta, self).install_step()\n\n except OSError as err:\n raise EasyBuildError(\"Failed to install EasyBuild packages: %s\", err)\n\n def post_install_step(self):\n \"\"\"Remove setuptools.pth file that hard includes a system-wide (site-packages) path, if it is there.\"\"\"\n\n setuptools_pth = os.path.join(self.installdir, self.pylibdir, 'setuptools.pth')\n if os.path.exists(setuptools_pth):\n setuptools_pth_txt = read_file(setuptools_pth)\n # any line that starts with '/' is a sign of trouble\n sys_path_regex = re.compile('^/', re.M)\n if sys_path_regex.search(setuptools_pth_txt):\n self.log.warning(\"Found %s, and includes one or more absolute system paths. 
Removing it.\",\n setuptools_pth)\n try:\n os.remove(setuptools_pth)\n except OSError as err:\n raise EasyBuildError(\"Failed to remove %s: %s\", setuptools_pth, err)\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for EasyBuild.\"\"\"\n\n # check whether easy-install.pth contains correct entries\n easy_install_pth = os.path.join(self.installdir, self.pylibdir, 'easy-install.pth')\n if os.path.exists(easy_install_pth):\n easy_install_pth_txt = read_file(easy_install_pth)\n\n ignore_pkgs = ['setuptools', 'vsc-install']\n if LooseVersion(self.version) > LooseVersion('3.999'):\n ignore_pkgs.append('vsc-base')\n\n for pkg in [p for p in self.easybuild_pkgs if p not in ignore_pkgs]:\n if pkg == 'vsc-base':\n # don't include strict version check for vsc-base\n pkg_regex = re.compile(r\"^\\./%s\" % pkg.replace('-', '_'), re.M)\n else:\n major_minor_version = '.'.join(self.version.split('.')[:2])\n pkg_regex = re.compile(r\"^\\./%s-%s\" % (pkg.replace('-', '_'), major_minor_version), re.M)\n\n if not pkg_regex.search(easy_install_pth_txt):\n raise EasyBuildError(\"Failed to find pattern '%s' in %s: %s\",\n pkg_regex.pattern, easy_install_pth, easy_install_pth_txt)\n\n # list of dirs to check, by package\n # boolean indicates whether dir is expected to reside in Python lib/pythonX/site-packages dir\n subdirs_by_pkg = {\n 'easybuild-framework': [('easybuild/framework', True), ('easybuild/tools', True)],\n 'easybuild-easyblocks': [('easybuild/easyblocks', True)],\n 'easybuild-easyconfigs': [('easybuild/easyconfigs', False)],\n }\n if LooseVersion(self.version) >= LooseVersion('2.0') and LooseVersion(self.version) < LooseVersion('3.999'):\n subdirs_by_pkg.update({\n 'vsc-base': [('vsc/utils', True)],\n })\n\n # final list of directories to check, by setup tool\n # order matters, e.g. 
setuptools before distutils\n eb_dirs = OrderedDict()\n eb_dirs['setuptools'] = []\n eb_dirs['distutils.core'] = flatten([x for x in subdirs_by_pkg.values()])\n\n # determine setup tool (setuptools or distutils)\n setup_tool = None\n for tool in eb_dirs.keys():\n self.log.debug(\"Trying %s..\" % tool)\n try:\n exec(\"from %s import setup\" % tool)\n setup_tool = tool\n break\n except ImportError:\n pass\n self.log.debug('setup_tool: %s' % setup_tool)\n\n # for a setuptools installation, we need to figure out the egg dirs,\n # since we don't know the individual package versions\n if setup_tool == 'setuptools':\n try:\n installed_dirs = os.listdir(os.path.join(self.installdir, self.pylibdir))\n for (pkg, subdirs) in subdirs_by_pkg.items():\n sel_dirs = [x for x in installed_dirs if x.startswith(pkg.replace('-', '_'))]\n if not len(sel_dirs) == 1:\n raise EasyBuildError(\"Failed to isolate installed egg dir for %s\", pkg)\n\n for (subdir, _) in subdirs:\n # eggs always go in Python lib/pythonX/site-packages dir with setuptools\n eb_dirs['setuptools'].append((os.path.join(sel_dirs[0], subdir), True))\n except OSError as err:\n raise EasyBuildError(\"Failed to determine sanity check dir paths: %s\", err)\n\n # set of sanity check paths to check for EasyBuild\n custom_paths = {\n 'files': ['bin/eb'],\n 'dirs': [self.pylibdir] + [[x, os.path.join(self.pylibdir, x)][y] for (x, y) in eb_dirs[setup_tool]],\n }\n\n # make sure we don't trip over deprecated behavior in old EasyBuild versions\n eb_cmd = 'eb'\n if LooseVersion(self.version) <= LooseVersion('1.16.0'):\n eb_cmd = 'EASYBUILD_DEPRECATED=1.0 eb'\n\n # set of sanity check commands to run for EasyBuild\n custom_commands = [\n # this may spit out a wrong version, but that should be safe to ignore\n # occurs when the EasyBuild being used is newer than the EasyBuild being installed\n (eb_cmd, '--version'),\n (eb_cmd, '-a'),\n (eb_cmd, '-e ConfigureMake -a'),\n ]\n\n # (temporary) cleanse copy of initial environment to avoid conflict with (potentially) loaded EasyBuild module\n self.real_initial_environ = copy.deepcopy(self.initial_environ)\n for env_var in ['_LMFILES_', 'LOADEDMODULES']:\n if env_var in self.initial_environ:\n self.initial_environ.pop(env_var)\n os.environ.pop(env_var)\n self.log.debug(\"Unset $%s in current env and copy of original env to make sanity check work\" % env_var)\n\n super(EB_EasyBuildMeta, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self):\n \"\"\"\n Set $EB_INSTALLPYTHON to ensure that this EasyBuild installation uses the same Python executable it was\n installed with (which can still be overridden by the user with $EB_PYTHON).\n \"\"\"\n txt = super(EB_EasyBuildMeta, self).make_module_extra()\n txt += self.module_generator.set_environment('EB_INSTALLPYTHON', self.python_cmd)\n return txt\n\n def make_module_step(self, fake=False):\n \"\"\"Create module file, before copy of original environment that was tampered with is restored.\"\"\"\n modpath = super(EB_EasyBuildMeta, self).make_module_step(fake=fake)\n\n if not fake:\n # restore copy of original environment\n self.initial_environ = copy.deepcopy(self.real_initial_environ)\n self.log.debug(\"Restored copy of original environment\")\n\n return modpath\n", "path": "easybuild/easyblocks/e/easybuildmeta.py"}]} | 3,702 | 220 |
gh_patches_debug_38766 | rasdani/github-patches | git_diff | PrefectHQ__prefect-1582 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Parameter Specification to run CLI command
## Current behavior
Currently the CLI command doesn't allow for the specification of parameters for a flow run
## Proposed behavior
A couple of options we could take here:
- Multiple `--parameter NAME=VALUE` options (e.g. each `--parameter` works like `--build-arg` does for `docker build`)
- A single `--parameters '{name: value}'` option that takes a dictionary
In addition, we should have a file option so that parameters can be provided as a JSON file (see the sketch below).
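For illustration only, a rough Click-based sketch of how the file and string options could combine (option names such as `--parameters-file`/`--parameters-string` are placeholders, not a final design):

```python
# Hypothetical sketch only -- option names and merge behavior are open questions here.
import json

import click


@click.command()
@click.option(
    "--parameters-file", "-pf", type=click.Path(exists=True),
    help="Path to a JSON file of flow run parameters.",
)
@click.option(
    "--parameters-string", "-ps",
    help="A JSON string of flow run parameters.",
)
def cloud(parameters_file, parameters_string):
    """Collect parameters from a file and/or an inline JSON string."""
    params = {}
    if parameters_file:
        # Start from the file...
        with open(parameters_file) as f:
            params.update(json.load(f))
    if parameters_string:
        # ...then let individual keys from the string override it.
        params.update(json.loads(parameters_string))
    click.echo(params)


if __name__ == "__main__":
    cloud()
```

A run might then look like `prefect run cloud --name Test-Flow --project My-Project -pf params.json -ps '{"a": 3}'`, with values from the JSON string overriding values from the file.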
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/cli/run.py`
Content:
```
1 import time
2
3 import click
4 from tabulate import tabulate
5
6 from prefect.client import Client
7 from prefect.utilities.graphql import EnumValue, with_args
8
9
10 @click.group(hidden=True)
11 def run():
12 """
13 Run Prefect flows.
14
15 \b
16 Usage:
17 $ prefect run [STORAGE/PLATFORM]
18
19 \b
20 Arguments:
21 cloud Run flows in Prefect Cloud
22
23 \b
24 Examples:
25 $ prefect run cloud --name Test-Flow --project My-Project
26 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
27
28 \b
29 $ prefect run cloud --name Test-Flow --project My-Project --watch
30 Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
31 Scheduled -> Submitted -> Running -> Success
32 """
33 pass
34
35
36 @run.command(hidden=True)
37 @click.option(
38 "--name", "-n", required=True, help="The name of a flow to run.", hidden=True
39 )
40 @click.option(
41 "--project",
42 "-p",
43 required=True,
44 help="The project that contains the flow.",
45 hidden=True,
46 )
47 @click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True)
48 @click.option(
49 "--watch",
50 "-w",
51 is_flag=True,
52 help="Watch current state of the flow run.",
53 hidden=True,
54 )
55 @click.option(
56 "--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True
57 )
58 def cloud(name, project, version, watch, logs):
59 """
60 Run a deployed flow in Prefect Cloud.
61
62 \b
63 Options:
64 --name, -n TEXT The name of a flow to run [required]
65 --project, -p TEXT The name of a project that contains the flow [required]
66 --version, -v INTEGER A flow version to run
67 --watch, -w Watch current state of the flow run, stream output to stdout
68 --logs, -l Get logs of the flow run, stream output to stdout
69 """
70
71 if watch and logs:
72 click.secho(
73 "Streaming state and logs not currently supported together.", fg="red"
74 )
75 return
76
77 query = {
78 "query": {
79 with_args(
80 "flow",
81 {
82 "where": {
83 "_and": {
84 "name": {"_eq": name},
85 "version": {"_eq": version},
86 "project": {"name": {"_eq": project}},
87 }
88 },
89 "order_by": {
90 "name": EnumValue("asc"),
91 "version": EnumValue("desc"),
92 },
93 "distinct_on": EnumValue("name"),
94 },
95 ): {"id": True}
96 }
97 }
98
99 client = Client()
100 result = client.graphql(query)
101
102 flow_data = result.data.flow
103
104 if flow_data:
105 flow_id = flow_data[0].id
106 else:
107 click.secho("{} not found".format(name), fg="red")
108 return
109
110 flow_run_id = client.create_flow_run(flow_id=flow_id)
111 click.echo("Flow Run ID: {}".format(flow_run_id))
112
113 if watch:
114 current_states = []
115 while True:
116 query = {
117 "query": {
118 with_args("flow_run_by_pk", {"id": flow_run_id}): {
119 with_args(
120 "states",
121 {"order_by": {EnumValue("timestamp"): EnumValue("asc")}},
122 ): {"state": True, "timestamp": True}
123 }
124 }
125 }
126
127 result = client.graphql(query)
128
129 # Filter through retrieved states and output in order
130 for state_index in result.data.flow_run_by_pk.states:
131 state = state_index.state
132 if state not in current_states:
133 if state != "Success" and state != "Failed":
134 click.echo("{} -> ".format(state), nl=False)
135 else:
136 click.echo(state)
137 return
138
139 current_states.append(state)
140
141 time.sleep(3)
142
143 if logs:
144 all_logs = []
145
146 log_query = {
147 with_args(
148 "logs", {"order_by": {EnumValue("timestamp"): EnumValue("asc")}}
149 ): {"timestamp": True, "message": True, "level": True},
150 "start_time": True,
151 }
152
153 query = {
154 "query": {
155 with_args(
156 "flow_run",
157 {
158 "where": {"id": {"_eq": flow_run_id}},
159 "order_by": {EnumValue("start_time"): EnumValue("desc")},
160 },
161 ): log_query
162 }
163 }
164
165 while True:
166 result = Client().graphql(query)
167
168 flow_run = result.data.flow_run
169 if not flow_run:
170 click.secho("{} not found".format(flow_run_id), fg="red")
171 return
172
173 new_run = flow_run[0]
174 logs = new_run.logs
175 output = []
176
177 for i in logs:
178 if [i.timestamp, i.level, i.message] not in all_logs:
179
180 if not len(all_logs):
181 click.echo(
182 tabulate(
183 [[i.timestamp, i.level, i.message]],
184 headers=["TIMESTAMP", "LEVEL", "MESSAGE"],
185 tablefmt="plain",
186 numalign="left",
187 stralign="left",
188 )
189 )
190 all_logs.append([i.timestamp, i.level, i.message])
191 continue
192
193 output.append([i.timestamp, i.level, i.message])
194 all_logs.append([i.timestamp, i.level, i.message])
195
196 if output:
197 click.echo(
198 tabulate(output, tablefmt="plain", numalign="left", stralign="left")
199 )
200
201 # Check if state is either Success or Failed, exit if it is
202 pk_query = {
203 "query": {
204 with_args("flow_run_by_pk", {"id": flow_run_id}): {"state": True}
205 }
206 }
207 result = client.graphql(pk_query)
208
209 if (
210 result.data.flow_run_by_pk.state == "Success"
211 or result.data.flow_run_by_pk.state == "Failed"
212 ):
213 return
214
215 time.sleep(3)
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py
--- a/src/prefect/cli/run.py
+++ b/src/prefect/cli/run.py
@@ -1,3 +1,4 @@
+import json
import time
import click
@@ -45,6 +46,16 @@
hidden=True,
)
@click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True)
[email protected](
+ "--parameters-file",
+ "-pf",
+ help="A parameters JSON file.",
+ hidden=True,
+ type=click.Path(exists=True),
+)
[email protected](
+ "--parameters-string", "--ps", help="A parameters JSON string.", hidden=True
+)
@click.option(
"--watch",
"-w",
@@ -55,17 +66,29 @@
@click.option(
"--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True
)
-def cloud(name, project, version, watch, logs):
+def cloud(name, project, version, parameters_file, parameters_string, watch, logs):
"""
Run a deployed flow in Prefect Cloud.
\b
Options:
- --name, -n TEXT The name of a flow to run [required]
- --project, -p TEXT The name of a project that contains the flow [required]
- --version, -v INTEGER A flow version to run
- --watch, -w Watch current state of the flow run, stream output to stdout
- --logs, -l Get logs of the flow run, stream output to stdout
+ --name, -n TEXT The name of a flow to run [required]
+ --project, -p TEXT The name of a project that contains the flow [required]
+ --version, -v INTEGER A flow version to run
+ --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters
+ --parameters-string, -ps TEXT A string of JSON parameters
+ --watch, -w Watch current state of the flow run, stream output to stdout
+ --logs, -l Get logs of the flow run, stream output to stdout
+
+ \b
+ If both `--parameters-file` and `--parameters-string` are provided then the values passed
+ in through the string will override the values provided from the file.
+
+ \b
+ e.g.
+ File contains: {"a": 1, "b": 2}
+ String: '{"a": 3}'
+ Parameters passed to the flow run: {"a": 3, "b": 2}
"""
if watch and logs:
@@ -107,7 +130,20 @@
click.secho("{} not found".format(name), fg="red")
return
- flow_run_id = client.create_flow_run(flow_id=flow_id)
+ # Load parameters from file if provided
+ file_params = {}
+ if parameters_file:
+ with open(parameters_file) as params_file:
+ file_params = json.load(params_file)
+
+ # Load parameters from string if provided
+ string_params = {}
+ if parameters_string:
+ string_params = json.loads(parameters_string)
+
+ flow_run_id = client.create_flow_run(
+ flow_id=flow_id, parameters={**file_params, **string_params}
+ )
click.echo("Flow Run ID: {}".format(flow_run_id))
if watch:
| {"golden_diff": "diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py\n--- a/src/prefect/cli/run.py\n+++ b/src/prefect/cli/run.py\n@@ -1,3 +1,4 @@\n+import json\n import time\n \n import click\n@@ -45,6 +46,16 @@\n hidden=True,\n )\n @click.option(\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n+ \"--parameters-file\",\n+ \"-pf\",\n+ help=\"A parameters JSON file.\",\n+ hidden=True,\n+ type=click.Path(exists=True),\n+)\[email protected](\n+ \"--parameters-string\", \"--ps\", help=\"A parameters JSON string.\", hidden=True\n+)\n @click.option(\n \"--watch\",\n \"-w\",\n@@ -55,17 +66,29 @@\n @click.option(\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n )\n-def cloud(name, project, version, watch, logs):\n+def cloud(name, project, version, parameters_file, parameters_string, watch, logs):\n \"\"\"\n Run a deployed flow in Prefect Cloud.\n \n \\b\n Options:\n- --name, -n TEXT The name of a flow to run [required]\n- --project, -p TEXT The name of a project that contains the flow [required]\n- --version, -v INTEGER A flow version to run\n- --watch, -w Watch current state of the flow run, stream output to stdout\n- --logs, -l Get logs of the flow run, stream output to stdout\n+ --name, -n TEXT The name of a flow to run [required]\n+ --project, -p TEXT The name of a project that contains the flow [required]\n+ --version, -v INTEGER A flow version to run\n+ --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters\n+ --parameters-string, -ps TEXT A string of JSON parameters\n+ --watch, -w Watch current state of the flow run, stream output to stdout\n+ --logs, -l Get logs of the flow run, stream output to stdout\n+\n+ \\b\n+ If both `--parameters-file` and `--parameters-string` are provided then the values passed\n+ in through the string will override the values provided from the file.\n+\n+ \\b\n+ e.g.\n+ File contains: {\"a\": 1, \"b\": 2}\n+ String: '{\"a\": 3}'\n+ Parameters passed to the flow run: {\"a\": 3, \"b\": 2}\n \"\"\"\n \n if watch and logs:\n@@ -107,7 +130,20 @@\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n \n- flow_run_id = client.create_flow_run(flow_id=flow_id)\n+ # Load parameters from file if provided\n+ file_params = {}\n+ if parameters_file:\n+ with open(parameters_file) as params_file:\n+ file_params = json.load(params_file)\n+\n+ # Load parameters from string if provided\n+ string_params = {}\n+ if parameters_string:\n+ string_params = json.loads(parameters_string)\n+\n+ flow_run_id = client.create_flow_run(\n+ flow_id=flow_id, parameters={**file_params, **string_params}\n+ )\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n \n if watch:\n", "issue": "Add Parameter Specification to run CLI command\n## Current behavior\r\nCurrently the CLI command doesn't allow for the specification of parameters for a flow run\r\n\r\n\r\n\r\n## Proposed behavior\r\nCouple options we could take here:\r\n\r\nMultiple --parameter NAME=VALUE options (e.g. 
each --parameter similar to how docker build does --build-arg)\r\n\r\nSingle --parameters {name: value} that takes a dictionary\r\n\r\nAlso we should have a file option where you can provide a json file\n", "before_files": [{"content": "import time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\[email protected](hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STORAGE/PLATFORM]\n\n \\b\n Arguments:\n cloud Run flows in Prefect Cloud\n\n \\b\n Examples:\n $ prefect run cloud --name Test-Flow --project My-Project\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n\n \\b\n $ prefect run cloud --name Test-Flow --project My-Project --watch\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n Scheduled -> Submitted -> Running -> Success\n \"\"\"\n pass\n\n\[email protected](hidden=True)\[email protected](\n \"--name\", \"-n\", required=True, help=\"The name of a flow to run.\", hidden=True\n)\[email protected](\n \"--project\",\n \"-p\",\n required=True,\n help=\"The project that contains the flow.\",\n hidden=True,\n)\[email protected](\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n \"--watch\",\n \"-w\",\n is_flag=True,\n help=\"Watch current state of the flow run.\",\n hidden=True,\n)\[email protected](\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n)\ndef cloud(name, project, version, watch, logs):\n \"\"\"\n Run a deployed flow in Prefect Cloud.\n\n \\b\n Options:\n --name, -n TEXT The name of a flow to run [required]\n --project, -p TEXT The name of a project that contains the flow [required]\n --version, -v INTEGER A flow version to run\n --watch, -w Watch current state of the flow run, stream output to stdout\n --logs, -l Get logs of the flow run, stream output to stdout\n \"\"\"\n\n if watch and logs:\n click.secho(\n \"Streaming state and logs not currently supported together.\", fg=\"red\"\n )\n return\n\n query = {\n \"query\": {\n with_args(\n \"flow\",\n {\n \"where\": {\n \"_and\": {\n \"name\": {\"_eq\": name},\n \"version\": {\"_eq\": version},\n \"project\": {\"name\": {\"_eq\": project}},\n }\n },\n \"order_by\": {\n \"name\": EnumValue(\"asc\"),\n \"version\": EnumValue(\"desc\"),\n },\n \"distinct_on\": EnumValue(\"name\"),\n },\n ): {\"id\": True}\n }\n }\n\n client = Client()\n result = client.graphql(query)\n\n flow_data = result.data.flow\n\n if flow_data:\n flow_id = flow_data[0].id\n else:\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n\n flow_run_id = client.create_flow_run(flow_id=flow_id)\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n\n if watch:\n current_states = []\n while True:\n query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\n with_args(\n \"states\",\n {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}},\n ): {\"state\": True, \"timestamp\": True}\n }\n }\n }\n\n result = client.graphql(query)\n\n # Filter through retrieved states and output in order\n for state_index in result.data.flow_run_by_pk.states:\n state = state_index.state\n if state not in current_states:\n if state != \"Success\" and state != \"Failed\":\n click.echo(\"{} -> \".format(state), nl=False)\n else:\n click.echo(state)\n return\n\n current_states.append(state)\n\n time.sleep(3)\n\n if logs:\n all_logs = []\n\n log_query = {\n with_args(\n \"logs\", {\"order_by\": 
{EnumValue(\"timestamp\"): EnumValue(\"asc\")}}\n ): {\"timestamp\": True, \"message\": True, \"level\": True},\n \"start_time\": True,\n }\n\n query = {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\"id\": {\"_eq\": flow_run_id}},\n \"order_by\": {EnumValue(\"start_time\"): EnumValue(\"desc\")},\n },\n ): log_query\n }\n }\n\n while True:\n result = Client().graphql(query)\n\n flow_run = result.data.flow_run\n if not flow_run:\n click.secho(\"{} not found\".format(flow_run_id), fg=\"red\")\n return\n\n new_run = flow_run[0]\n logs = new_run.logs\n output = []\n\n for i in logs:\n if [i.timestamp, i.level, i.message] not in all_logs:\n\n if not len(all_logs):\n click.echo(\n tabulate(\n [[i.timestamp, i.level, i.message]],\n headers=[\"TIMESTAMP\", \"LEVEL\", \"MESSAGE\"],\n tablefmt=\"plain\",\n numalign=\"left\",\n stralign=\"left\",\n )\n )\n all_logs.append([i.timestamp, i.level, i.message])\n continue\n\n output.append([i.timestamp, i.level, i.message])\n all_logs.append([i.timestamp, i.level, i.message])\n\n if output:\n click.echo(\n tabulate(output, tablefmt=\"plain\", numalign=\"left\", stralign=\"left\")\n )\n\n # Check if state is either Success or Failed, exit if it is\n pk_query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\"state\": True}\n }\n }\n result = client.graphql(pk_query)\n\n if (\n result.data.flow_run_by_pk.state == \"Success\"\n or result.data.flow_run_by_pk.state == \"Failed\"\n ):\n return\n\n time.sleep(3)\n", "path": "src/prefect/cli/run.py"}], "after_files": [{"content": "import json\nimport time\n\nimport click\nfrom tabulate import tabulate\n\nfrom prefect.client import Client\nfrom prefect.utilities.graphql import EnumValue, with_args\n\n\[email protected](hidden=True)\ndef run():\n \"\"\"\n Run Prefect flows.\n\n \\b\n Usage:\n $ prefect run [STORAGE/PLATFORM]\n\n \\b\n Arguments:\n cloud Run flows in Prefect Cloud\n\n \\b\n Examples:\n $ prefect run cloud --name Test-Flow --project My-Project\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n\n \\b\n $ prefect run cloud --name Test-Flow --project My-Project --watch\n Flow Run ID: 2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9\n Scheduled -> Submitted -> Running -> Success\n \"\"\"\n pass\n\n\[email protected](hidden=True)\[email protected](\n \"--name\", \"-n\", required=True, help=\"The name of a flow to run.\", hidden=True\n)\[email protected](\n \"--project\",\n \"-p\",\n required=True,\n help=\"The project that contains the flow.\",\n hidden=True,\n)\[email protected](\"--version\", \"-v\", type=int, help=\"A flow version to run.\", hidden=True)\[email protected](\n \"--parameters-file\",\n \"-pf\",\n help=\"A parameters JSON file.\",\n hidden=True,\n type=click.Path(exists=True),\n)\[email protected](\n \"--parameters-string\", \"--ps\", help=\"A parameters JSON string.\", hidden=True\n)\[email protected](\n \"--watch\",\n \"-w\",\n is_flag=True,\n help=\"Watch current state of the flow run.\",\n hidden=True,\n)\[email protected](\n \"--logs\", \"-l\", is_flag=True, help=\"Live logs of the flow run.\", hidden=True\n)\ndef cloud(name, project, version, parameters_file, parameters_string, watch, logs):\n \"\"\"\n Run a deployed flow in Prefect Cloud.\n\n \\b\n Options:\n --name, -n TEXT The name of a flow to run [required]\n --project, -p TEXT The name of a project that contains the flow [required]\n --version, -v INTEGER A flow version to run\n --parameters-file, -pf FILE PATH A filepath of a JSON file containing parameters\n --parameters-string, -ps TEXT A 
string of JSON parameters\n --watch, -w Watch current state of the flow run, stream output to stdout\n --logs, -l Get logs of the flow run, stream output to stdout\n\n \\b\n If both `--parameters-file` and `--parameters-string` are provided then the values passed\n in through the string will override the values provided from the file.\n\n \\b\n e.g.\n File contains: {\"a\": 1, \"b\": 2}\n String: '{\"a\": 3}'\n Parameters passed to the flow run: {\"a\": 3, \"b\": 2}\n \"\"\"\n\n if watch and logs:\n click.secho(\n \"Streaming state and logs not currently supported together.\", fg=\"red\"\n )\n return\n\n query = {\n \"query\": {\n with_args(\n \"flow\",\n {\n \"where\": {\n \"_and\": {\n \"name\": {\"_eq\": name},\n \"version\": {\"_eq\": version},\n \"project\": {\"name\": {\"_eq\": project}},\n }\n },\n \"order_by\": {\n \"name\": EnumValue(\"asc\"),\n \"version\": EnumValue(\"desc\"),\n },\n \"distinct_on\": EnumValue(\"name\"),\n },\n ): {\"id\": True}\n }\n }\n\n client = Client()\n result = client.graphql(query)\n\n flow_data = result.data.flow\n\n if flow_data:\n flow_id = flow_data[0].id\n else:\n click.secho(\"{} not found\".format(name), fg=\"red\")\n return\n\n # Load parameters from file if provided\n file_params = {}\n if parameters_file:\n with open(parameters_file) as params_file:\n file_params = json.load(params_file)\n\n # Load parameters from string if provided\n string_params = {}\n if parameters_string:\n string_params = json.loads(parameters_string)\n\n flow_run_id = client.create_flow_run(\n flow_id=flow_id, parameters={**file_params, **string_params}\n )\n click.echo(\"Flow Run ID: {}\".format(flow_run_id))\n\n if watch:\n current_states = []\n while True:\n query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\n with_args(\n \"states\",\n {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}},\n ): {\"state\": True, \"timestamp\": True}\n }\n }\n }\n\n result = client.graphql(query)\n\n # Filter through retrieved states and output in order\n for state_index in result.data.flow_run_by_pk.states:\n state = state_index.state\n if state not in current_states:\n if state != \"Success\" and state != \"Failed\":\n click.echo(\"{} -> \".format(state), nl=False)\n else:\n click.echo(state)\n return\n\n current_states.append(state)\n\n time.sleep(3)\n\n if logs:\n all_logs = []\n\n log_query = {\n with_args(\n \"logs\", {\"order_by\": {EnumValue(\"timestamp\"): EnumValue(\"asc\")}}\n ): {\"timestamp\": True, \"message\": True, \"level\": True},\n \"start_time\": True,\n }\n\n query = {\n \"query\": {\n with_args(\n \"flow_run\",\n {\n \"where\": {\"id\": {\"_eq\": flow_run_id}},\n \"order_by\": {EnumValue(\"start_time\"): EnumValue(\"desc\")},\n },\n ): log_query\n }\n }\n\n while True:\n result = Client().graphql(query)\n\n flow_run = result.data.flow_run\n if not flow_run:\n click.secho(\"{} not found\".format(flow_run_id), fg=\"red\")\n return\n\n new_run = flow_run[0]\n logs = new_run.logs\n output = []\n\n for i in logs:\n if [i.timestamp, i.level, i.message] not in all_logs:\n\n if not len(all_logs):\n click.echo(\n tabulate(\n [[i.timestamp, i.level, i.message]],\n headers=[\"TIMESTAMP\", \"LEVEL\", \"MESSAGE\"],\n tablefmt=\"plain\",\n numalign=\"left\",\n stralign=\"left\",\n )\n )\n all_logs.append([i.timestamp, i.level, i.message])\n continue\n\n output.append([i.timestamp, i.level, i.message])\n all_logs.append([i.timestamp, i.level, i.message])\n\n if output:\n click.echo(\n tabulate(output, tablefmt=\"plain\", 
numalign=\"left\", stralign=\"left\")\n )\n\n # Check if state is either Success or Failed, exit if it is\n pk_query = {\n \"query\": {\n with_args(\"flow_run_by_pk\", {\"id\": flow_run_id}): {\"state\": True}\n }\n }\n result = client.graphql(pk_query)\n\n if (\n result.data.flow_run_by_pk.state == \"Success\"\n or result.data.flow_run_by_pk.state == \"Failed\"\n ):\n return\n\n time.sleep(3)\n", "path": "src/prefect/cli/run.py"}]} | 2,280 | 803 |
gh_patches_debug_1859 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The "show_stats" command got broken
The "show_stats server" and "show_stats client" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162):
> show_stats server
Error: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict'
Done [7269 usecs] 2022-02-08 17:26:12.865006
>
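For context, the error suggests the reply handling now calls a `process_dict` hook that the base reply processor never defines. A minimal sketch of such a no-op hook is below; the patch later in this entry adds a hook of exactly this shape to `ReplyProcessor`.

```python
# Illustrative no-op hook on the ReplyProcessor base class, so replies that carry a
# dict payload (such as show_stats results) have something to dispatch to.
class ReplyProcessor:
    def process_dict(self, api, data: dict):
        """Default handler for dict replies; concrete processors override this."""
        pass
```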
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/fuel/hci/client/api_spec.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import annotations
16
17 from abc import ABC, abstractmethod
18 from typing import Optional
19
20 from nvflare.fuel.hci.table import Table
21
22
23 class ReplyProcessor:
24 """A base class for parsing server's response."""
25
26 def reply_start(self, api: AdminAPISpec, reply_json):
27 pass
28
29 def process_string(self, api: AdminAPISpec, item: str):
30 pass
31
32 def process_success(self, api: AdminAPISpec, item: str):
33 pass
34
35 def process_error(self, api: AdminAPISpec, err: str):
36 pass
37
38 def process_table(self, api: AdminAPISpec, table: Table):
39 pass
40
41 def process_shutdown(self, api: AdminAPISpec, msg: str):
42 pass
43
44 def process_token(self, api: AdminAPISpec, token: str):
45 pass
46
47 def protocol_error(self, api: AdminAPISpec, err: str):
48 pass
49
50 def reply_done(self, api: AdminAPISpec):
51 pass
52
53
54 class AdminAPISpec(ABC):
55 def __init__(self):
56 self.reply_processor = None
57 self.command_result = None
58
59 @abstractmethod
60 def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):
61 """Executes a command on server side.
62
63 Args:
64 command: The command to be executed.
65 reply_processor: Reply callback to use.
66 """
67 pass
68
69 def set_command_result(self, result):
70 """Sets the result returning from executing the command."""
71 self.command_result = result
72
73 def get_command_result(self):
74 """Gets the result returning from executing the command."""
75 return self.command_result
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py
--- a/nvflare/fuel/hci/client/api_spec.py
+++ b/nvflare/fuel/hci/client/api_spec.py
@@ -38,6 +38,9 @@
def process_table(self, api: AdminAPISpec, table: Table):
pass
+ def process_dict(self, api: AdminAPISpec, data: dict):
+ pass
+
def process_shutdown(self, api: AdminAPISpec, msg: str):
pass
| {"golden_diff": "diff --git a/nvflare/fuel/hci/client/api_spec.py b/nvflare/fuel/hci/client/api_spec.py\n--- a/nvflare/fuel/hci/client/api_spec.py\n+++ b/nvflare/fuel/hci/client/api_spec.py\n@@ -38,6 +38,9 @@\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n \n+ def process_dict(self, api: AdminAPISpec, data: dict):\n+ pass\n+\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n", "issue": "The \"show_stats\" command got broken\nThe \"show_stats server\" and \"show_stats client\" command got the following error. This is caused by this PR change (https://github.com/NVIDIA/NVFlare/pull/162):\r\n\r\n> show_stats server\r\nError: Failed to communicate with Admin Server localhost on 8003: '_DefaultReplyProcessor' object has no attribute 'process_dict'\r\nDone [7269 usecs] 2022-02-08 17:26:12.865006\r\n> \r\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional\n\nfrom nvflare.fuel.hci.table import Table\n\n\nclass ReplyProcessor:\n \"\"\"A base class for parsing server's response.\"\"\"\n\n def reply_start(self, api: AdminAPISpec, reply_json):\n pass\n\n def process_string(self, api: AdminAPISpec, item: str):\n pass\n\n def process_success(self, api: AdminAPISpec, item: str):\n pass\n\n def process_error(self, api: AdminAPISpec, err: str):\n pass\n\n def process_table(self, api: AdminAPISpec, table: Table):\n pass\n\n def process_dict(self, api: AdminAPISpec, data: dict):\n pass\n\n def process_shutdown(self, api: AdminAPISpec, msg: str):\n pass\n\n def process_token(self, api: AdminAPISpec, token: str):\n pass\n\n def protocol_error(self, api: AdminAPISpec, err: str):\n pass\n\n def reply_done(self, api: AdminAPISpec):\n pass\n\n\nclass AdminAPISpec(ABC):\n def __init__(self):\n self.reply_processor = None\n self.command_result = None\n\n @abstractmethod\n def server_execute(self, command: str, reply_processor: Optional[ReplyProcessor] = None):\n \"\"\"Executes a command on server side.\n\n Args:\n command: The command to be executed.\n reply_processor: Reply callback to use.\n \"\"\"\n pass\n\n def set_command_result(self, result):\n \"\"\"Sets the result returning from executing the command.\"\"\"\n self.command_result = result\n\n def get_command_result(self):\n \"\"\"Gets the result returning from executing the command.\"\"\"\n return self.command_result\n", "path": "nvflare/fuel/hci/client/api_spec.py"}]} | 1,060 | 131 |
gh_patches_debug_39747 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-359 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor inconsistency between study config generation tool and study spec
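For illustration, the inconsistency is in how a study is described: the spec's field names and types do not match what the study config generation tool appears to emit. A hedged sketch of the aligned `Study` shape, with names taken from the patch later in this entry (not a complete definition):

```python
# Sketch only: Study fields aligned with the study config tooling, per the patch below
# (participating_clients/participating_admins, contact, start/end dates).
from datetime import date
from typing import List, Optional


class Study:
    def __init__(
        self,
        name: str,
        description: str,
        contact: str,
        participating_clients: List[str],
        participating_admins: List[str],
        start_date: date,
        end_date: date,
        reviewers: Optional[List[str]] = None,
    ):
        self.name = name
        self.description = description
        self.contact = contact
        self.participating_clients = participating_clients
        self.participating_admins = participating_admins
        self.start_date = start_date
        self.end_date = end_date
        self.reviewers = reviewers
```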
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/apis/study_manager_spec.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import datetime
16
17
18 class Study:
19 def __init__(
20 self,
21 name: str,
22 description: str,
23 sites: [str],
24 users: [str],
25 start_time: datetime.datetime,
26 end_time: datetime.datetime,
27 reviewers=None,
28 ):
29 self.name = name
30 self.description = description
31 self.sites = sites
32 self.users = users
33 self.start_time = start_time
34 self.end_time = end_time
35 self.reviewers = reviewers
36 self.create_time = None
37
38
39 class StudyManagerSpec(object):
40 def create_study(self, study: Study) -> Study:
41 """Create the study object permanently
42
43 The caller must have validated the sites and users of the study.
44
45 Validate the study before saving:
46 The name of the study must be unique;
47 Sites and users must be defined;
48 Start and end time must make sense.
49
50 Args:
51 study: the caller-provided study info
52
53 Returns: updated study info (e.g. create_time is set)
54
55 """
56 pass
57
58 def list_studies(self) -> [str]:
59 """
60 List names of all defined studies
61
62 Returns: list of study names
63
64 """
65 pass
66
67 def list_active_studies(self) -> [str]:
68 """
69 List names of all active studies (started but not ended)
70
71 Returns: list of study names
72
73 """
74 pass
75
76 def get_study(self, name: str) -> Study:
77 """Get the Study object for the specified name.
78
79 Args:
80 name: unique name of the study
81
82 Returns: the Study object
83
84 """
85 pass
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py
--- a/nvflare/apis/study_manager_spec.py
+++ b/nvflare/apis/study_manager_spec.py
@@ -12,7 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import datetime
+from abc import ABC, abstractmethod
+from typing import Dict, List
+from datetime import datetime
+
+from .fl_context import FLContext
class Study:
@@ -20,32 +24,35 @@
self,
name: str,
description: str,
- sites: [str],
- users: [str],
- start_time: datetime.datetime,
- end_time: datetime.datetime,
+ contact: str,
+ participating_clients: List[str],
+ participating_admins: List[str],
+ start_date: datetime.date,
+ end_date: datetime.date,
reviewers=None,
):
self.name = name
self.description = description
- self.sites = sites
- self.users = users
- self.start_time = start_time
- self.end_time = end_time
+ self.contact = contact
+ self.participating_clients = participating_clients
+ self.participating_admins = participating_admins
+ self.start_date = start_date
+ self.end_date = end_date
self.reviewers = reviewers
- self.create_time = None
+ self.created_at = datetime.utcnow().isoformat()
-class StudyManagerSpec(object):
- def create_study(self, study: Study) -> Study:
- """Create the study object permanently
+class StudyManagerSpec(ABC):
+ @abstractmethod
+ def add_study(self, study: Study, fl_ctx: FLContext) -> Study:
+ """Add the study object permanently
- The caller must have validated the sites and users of the study.
+ The caller must have validated the participating_clients and participating_admins of the study.
Validate the study before saving:
The name of the study must be unique;
- Sites and users must be defined;
- Start and end time must make sense.
+ participating_clients and participating_admins must be defined;
+ Start and end date must make sense.
Args:
study: the caller-provided study info
@@ -55,7 +62,8 @@
"""
pass
- def list_studies(self) -> [str]:
+ @abstractmethod
+ def list_studies(self, fl_ctx: FLContext) -> List[str]:
"""
List names of all defined studies
@@ -64,7 +72,8 @@
"""
pass
- def list_active_studies(self) -> [str]:
+ @abstractmethod
+ def list_active_studies(self, fl_ctx: FLContext) -> List[str]:
"""
List names of all active studies (started but not ended)
@@ -73,7 +82,8 @@
"""
pass
- def get_study(self, name: str) -> Study:
+ @abstractmethod
+ def get_study(self, name: str, fl_ctx: FLContext) -> Study:
"""Get the Study object for the specified name.
Args:
| {"golden_diff": "diff --git a/nvflare/apis/study_manager_spec.py b/nvflare/apis/study_manager_spec.py\n--- a/nvflare/apis/study_manager_spec.py\n+++ b/nvflare/apis/study_manager_spec.py\n@@ -12,7 +12,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import datetime\n+from abc import ABC, abstractmethod\n+from typing import Dict, List\n+from datetime import datetime\n+\n+from .fl_context import FLContext\n \n \n class Study:\n@@ -20,32 +24,35 @@\n self,\n name: str,\n description: str,\n- sites: [str],\n- users: [str],\n- start_time: datetime.datetime,\n- end_time: datetime.datetime,\n+ contact: str,\n+ participating_clients: List[str],\n+ participating_admins: List[str],\n+ start_date: datetime.date,\n+ end_date: datetime.date,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n- self.sites = sites\n- self.users = users\n- self.start_time = start_time\n- self.end_time = end_time\n+ self.contact = contact\n+ self.participating_clients = participating_clients\n+ self.participating_admins = participating_admins\n+ self.start_date = start_date\n+ self.end_date = end_date\n self.reviewers = reviewers\n- self.create_time = None\n+ self.created_at = datetime.utcnow().isoformat()\n \n \n-class StudyManagerSpec(object):\n- def create_study(self, study: Study) -> Study:\n- \"\"\"Create the study object permanently\n+class StudyManagerSpec(ABC):\n+ @abstractmethod\n+ def add_study(self, study: Study, fl_ctx: FLContext) -> Study:\n+ \"\"\"Add the study object permanently\n \n- The caller must have validated the sites and users of the study.\n+ The caller must have validated the participating_clients and participating_admins of the study.\n \n Validate the study before saving:\n The name of the study must be unique;\n- Sites and users must be defined;\n- Start and end time must make sense.\n+ participating_clients and participating_admins must be defined;\n+ Start and end date must make sense.\n \n Args:\n study: the caller-provided study info\n@@ -55,7 +62,8 @@\n \"\"\"\n pass\n \n- def list_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all defined studies\n \n@@ -64,7 +72,8 @@\n \"\"\"\n pass\n \n- def list_active_studies(self) -> [str]:\n+ @abstractmethod\n+ def list_active_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all active studies (started but not ended)\n \n@@ -73,7 +82,8 @@\n \"\"\"\n pass\n \n- def get_study(self, name: str) -> Study:\n+ @abstractmethod\n+ def get_study(self, name: str, fl_ctx: FLContext) -> Study:\n \"\"\"Get the Study object for the specified name.\n \n Args:\n", "issue": "Minor inconsistency between study config generation tool and study spec\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\n\n\nclass Study:\n def __init__(\n self,\n name: str,\n description: str,\n sites: [str],\n users: [str],\n start_time: datetime.datetime,\n end_time: datetime.datetime,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n self.sites = sites\n self.users = users\n self.start_time = start_time\n self.end_time = end_time\n self.reviewers = reviewers\n self.create_time = None\n\n\nclass StudyManagerSpec(object):\n def create_study(self, study: Study) -> Study:\n \"\"\"Create the study object permanently\n\n The caller must have validated the sites and users of the study.\n\n Validate the study before saving:\n The name of the study must be unique;\n Sites and users must be defined;\n Start and end time must make sense.\n\n Args:\n study: the caller-provided study info\n\n Returns: updated study info (e.g. create_time is set)\n\n \"\"\"\n pass\n\n def list_studies(self) -> [str]:\n \"\"\"\n List names of all defined studies\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def list_active_studies(self) -> [str]:\n \"\"\"\n List names of all active studies (started but not ended)\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n def get_study(self, name: str) -> Study:\n \"\"\"Get the Study object for the specified name.\n\n Args:\n name: unique name of the study\n\n Returns: the Study object\n\n \"\"\"\n pass\n", "path": "nvflare/apis/study_manager_spec.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import Dict, List\nfrom datetime import datetime\n\nfrom .fl_context import FLContext\n\n\nclass Study:\n def __init__(\n self,\n name: str,\n description: str,\n contact: str,\n participating_clients: List[str],\n participating_admins: List[str],\n start_date: datetime.date,\n end_date: datetime.date,\n reviewers=None,\n ):\n self.name = name\n self.description = description\n self.contact = contact\n self.participating_clients = participating_clients\n self.participating_admins = participating_admins\n self.start_date = start_date\n self.end_date = end_date\n self.reviewers = reviewers\n self.created_at = datetime.utcnow().isoformat()\n\n\nclass StudyManagerSpec(ABC):\n @abstractmethod\n def add_study(self, study: Study, fl_ctx: FLContext) -> Study:\n \"\"\"Add the study object permanently\n\n The caller must have validated the participating_clients and participating_admins of the study.\n\n Validate the study before saving:\n The name of the study must be unique;\n participating_clients and participating_admins must be defined;\n Start and end date must make sense.\n\n Args:\n study: the caller-provided study info\n\n Returns: updated study info (e.g. create_time is set)\n\n \"\"\"\n pass\n\n @abstractmethod\n def list_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all defined studies\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n @abstractmethod\n def list_active_studies(self, fl_ctx: FLContext) -> List[str]:\n \"\"\"\n List names of all active studies (started but not ended)\n\n Returns: list of study names\n\n \"\"\"\n pass\n\n @abstractmethod\n def get_study(self, name: str, fl_ctx: FLContext) -> Study:\n \"\"\"Get the Study object for the specified name.\n\n Args:\n name: unique name of the study\n\n Returns: the Study object\n\n \"\"\"\n pass\n", "path": "nvflare/apis/study_manager_spec.py"}]} | 941 | 724 |
gh_patches_debug_7650 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2830 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Moldova parser fails due to expired SSL certificate
Certificate expired on Friday, 20 November 2020 at 11:15:53
--- END ISSUE ---
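For context, an expired server certificate surfaces in the Python `requests` library as a `requests.exceptions.SSLError` raised during the TLS handshake. The sketch below shows the common stop-gap of skipping certificate verification; it is an illustrative assumption rather than the project's actual patch, the HTTPS form of the parser's URL is assumed, and disabling verification gives up man-in-the-middle protection, so it is only suitable as a temporary measure.

```python
# Sketch of a temporary workaround for an expired certificate (assumption,
# not the parser's real fix): disable certificate verification in requests.
import requests
import urllib3

# Silence the InsecureRequestWarning that urllib3 emits when verify=False is used.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

session = requests.Session()
try:
    # verify=False skips TLS certificate validation, so the expired
    # certificate no longer raises requests.exceptions.SSLError.
    response = session.get('https://www.moldelectrica.md/ro/activity/system_state',
                           verify=False, timeout=30)
    response.raise_for_status()
    print(response.status_code)
except requests.RequestException as error:
    print('Request still failed:', error)
```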
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/MD.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 """Parser for Moldova."""
5
6 import arrow
7 from operator import itemgetter
8 import requests
9
10 TYPE_MAPPING = {
11 u'tmva476': 'hydro', # NHE Costeşti (run-of-river) #2 index
12 u'tmva112': 'hydro', # NHE Dubăsari (run-of-river) #4 index
13 u'tmva367': 'gas', # CET Nord (CHPP) #3 index
14 u'tmva42': 'gas', # CET-1 Chişinău (CHPP) #6 index
15 u'tmva378': 'gas', # CET-2 Chişinău (CHPP) #5 index
16 u'tmva1024': 'gas', # CERS Moldovenească (fuel mix 2017 99.92% gas, 0.08% oil) #7 index
17 }
18
19 display_url = 'http://www.moldelectrica.md/ro/activity/system_state'
20 data_url = 'http://www.moldelectrica.md/utils/load4.php'
21
22
23 def get_data(session=None):
24 """ Returns generation data as a list of floats."""
25
26 s = session or requests.Session()
27
28 #In order for the data url to return data, cookies from the display url must be obtained then reused.
29 response = s.get(display_url)
30 data_response = s.get(data_url)
31 raw_data = data_response.text
32 try:
33 data = [float(i) for i in raw_data.split(',')]
34 except:
35 raise Exception("Not able to parse received data. Check that the specifed URL returns correct data.")
36
37 return data
38
39
40 def fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):
41 """Requests the last known production mix (in MW) of a given country
42
43 Arguments:
44 zone_key (optional) -- used in case a parser is able to fetch multiple countries
45 session (optional) -- request session passed in order to re-use an existing session
46
47 Return:
48 A dictionary in the form:
49 {
50 'zoneKey': 'FR',
51 'datetime': '2017-01-01T00:00:00Z',
52 'production': {
53 'biomass': 0.0,
54 'coal': 0.0,
55 'gas': 0.0,
56 'hydro': 0.0,
57 'nuclear': null,
58 'oil': 0.0,
59 'solar': 0.0,
60 'wind': 0.0,
61 'geothermal': 0.0,
62 'unknown': 0.0
63 },
64 'storage': {
65 'hydro': -10.0,
66 },
67 'source': 'mysource.com'
68 }
69 """
70 if target_datetime:
71 raise NotImplementedError('This parser is not yet able to parse past dates')
72
73 grid_status = get_data(session=session)
74 production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}
75
76 production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))
77 production['hydro'] = sum(itemgetter(2, 4)(grid_status))
78 production['unknown'] = grid_status[7]
79
80 consumption = grid_status[-5]
81
82 dt = arrow.now('Europe/Chisinau').datetime
83
84 datapoint = {
85 'zoneKey': zone_key,
86 'datetime': dt,
87 'consumption': consumption,
88 'production': production,
89 'storage': {},
90 'source': 'moldelectrica.md'
91 }
92
93 return datapoint
94
95
96 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
97 """Requests the last known power exchange (in MW) between two countries
98 Arguments:
99 zone_key1 -- the first country code
100 zone_key2 -- the second country code; order of the two codes in params doesn't matter
101 session (optional) -- request session passed in order to re-use an existing session
102 Return:
103 A dictionary in the form:
104 {
105 'sortedZoneKeys': 'DK->NO',
106 'datetime': '2017-01-01T00:00:00Z',
107 'netFlow': 0.0,
108 'source': 'mysource.com'
109 }
110 where net flow is from DK into NO
111 """
112 if target_datetime:
113 raise NotImplementedError('This parser is not yet able to parse past dates')
114
115 sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))
116
117 exchange_status = get_data(session=session)
118
119 if sortedZoneKeys == 'MD->UA':
120 netflow = -1 * exchange_status[-3]
121 elif sortedZoneKeys == 'MD->RO':
122 netflow = -1 * exchange_status[-2]
123 else:
124 raise NotImplementedError('This exchange pair is not implemented')
125
126 dt = arrow.now('Europe/Chisinau').datetime
127
128 exchange = {
129 'sortedZoneKeys': sortedZoneKeys,
130 'datetime': dt,
131 'netFlow': netflow,
132 'source': 'moldelectrica.md'
133 }
134
135 return exchange
136
137
138 if __name__ == '__main__':
139 """Main method, never used by the Electricity Map backend, but handy for testing."""
140
141 print('fetch_production() ->')
142 print(fetch_production())
143 print('fetch_exchange(MD, UA) ->')
144 print(fetch_exchange('MD', 'UA'))
145 print('fetch_exchange(MD, RO) ->')
146 print(fetch_exchange('MD', 'RO'))
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/MD.py b/parsers/MD.py
--- a/parsers/MD.py
+++ b/parsers/MD.py
@@ -26,8 +26,8 @@
s = session or requests.Session()
#In order for the data url to return data, cookies from the display url must be obtained then reused.
- response = s.get(display_url)
- data_response = s.get(data_url)
+ response = s.get(display_url, verify=False)
+ data_response = s.get(data_url, verify=False)
raw_data = data_response.text
try:
data = [float(i) for i in raw_data.split(',')]
| {"golden_diff": "diff --git a/parsers/MD.py b/parsers/MD.py\n--- a/parsers/MD.py\n+++ b/parsers/MD.py\n@@ -26,8 +26,8 @@\n s = session or requests.Session()\n \n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n- response = s.get(display_url)\n- data_response = s.get(data_url)\n+ response = s.get(display_url, verify=False)\n+ data_response = s.get(data_url, verify=False)\n raw_data = data_response.text\n try:\n data = [float(i) for i in raw_data.split(',')]\n", "issue": "Moldova parser fails due to expired SSL certificate\nCertificate expired on Friday, 20 November 2020 at 11:15:53\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 'gas', # CERS Moldoveneasc\u0103 (fuel mix 2017 99.92% gas, 0.08% oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4.php'\n\n\ndef get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n response = s.get(display_url)\n data_response = s.get(data_url)\n raw_data = data_response.text\n try:\n data = [float(i) for i in raw_data.split(',')]\n except:\n raise Exception(\"Not able to parse received data. 
Check that the specifed URL returns correct data.\")\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 
'gas', # CERS Moldoveneasc\u0103 (fuel mix 2017 99.92% gas, 0.08% oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4.php'\n\n\ndef get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n response = s.get(display_url, verify=False)\n data_response = s.get(data_url, verify=False)\n raw_data = data_response.text\n try:\n data = [float(i) for i in raw_data.split(',')]\n except:\n raise Exception(\"Not able to parse received data. Check that the specifed URL returns correct data.\")\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n 
print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}]} | 1,908 | 146 |
gh_patches_debug_420 | rasdani/github-patches | git_diff | fidals__shopelectro-209 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move CI to our server
Travis doesn't build well on the free tier.
Right now one Travis CI run takes ~20 minutes. Given that we will be working with tasks estimated at 15-30 minutes, that is too long.
On top of that, Travis often falls over for lack of resources, which produces random errors in our CI. And that, of course, is very expensive.
Look into the different options, for example Travis, Jenkins, TeamCity and so on.
We need something simple; later we will deploy the box on our own server.
The outcome of this task: a research report here in the comments, a decision made on the tool, and a new task created for rolling the tool out.
Get the access details for our host from @duker33
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/settings/base.py`
Content:
```
1 """
2 Django settings for shopelectro project.
3
4 Generated by 'django-admin startproject' using Django 1.9.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.9/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.9/ref/settings/
11 """
12
13 import os
14 from datetime import datetime
15
16 import dj_database_url
17
18 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
19 BASE_DIR = os.path.dirname(os.path.dirname(
20 os.path.dirname(os.path.abspath(__file__))))
21
22 # Quick-start development settings - unsuitable for production
23 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
24
25 # SECURITY WARNING: keep the secret key used in production secret!
26 SECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = True
30
31 # http://bit.ly/sorl-thumbnail-docs
32 THUMBNAIL_DEBUG = False
33
34 ALLOWED_HOSTS = ['*']
35
36 if os.environ.get('TEST_ENV', False):
37 # disable https in CI
38 # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
39 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')
40
41 # Enable in frame loading for Ya.Metric
42 # https://docs.djangoproject.com/es/1.10/ref/clickjacking/
43 # https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page
44 X_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'
45
46 # Application definition
47 INSTALLED_APPS = [
48 # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover
49 'django.contrib.contenttypes',
50 'django.contrib.auth',
51 'django.contrib.messages',
52 'django.contrib.redirects',
53 'django.contrib.sessions',
54 'django.contrib.sitemaps',
55 'django.contrib.sites',
56 'django.contrib.staticfiles',
57 'django.contrib.humanize',
58 'django_user_agents',
59 'generic_admin',
60 'django.contrib.admin.apps.SimpleAdminConfig',
61 'debug_toolbar',
62 'mptt',
63 'widget_tweaks',
64 'sorl.thumbnail',
65 'images',
66 'pages',
67 'catalog',
68 'ecommerce',
69 'shopelectro',
70 ]
71
72 MIDDLEWARE = [
73 'django.middleware.security.SecurityMiddleware',
74 'django.contrib.sessions.middleware.SessionMiddleware',
75 'django.middleware.common.CommonMiddleware',
76 'django.middleware.csrf.CsrfViewMiddleware',
77 'django.contrib.auth.middleware.AuthenticationMiddleware',
78 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
79 'django.contrib.messages.middleware.MessageMiddleware',
80 'django.middleware.clickjacking.XFrameOptionsMiddleware',
81 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
82 'django.middleware.locale.LocaleMiddleware',
83 'django_user_agents.middleware.UserAgentMiddleware',
84 'debug_toolbar.middleware.DebugToolbarMiddleware',
85 ]
86
87 ROOT_URLCONF = 'shopelectro.urls'
88
89 TEMPLATES = [
90 {
91 'BACKEND': 'django.template.backends.django.DjangoTemplates',
92 'DIRS': [os.path.join(BASE_DIR, 'templates')],
93 'APP_DIRS': True,
94 'OPTIONS': {
95 'context_processors': [
96 'django.template.context_processors.debug',
97 'django.template.context_processors.media',
98 'django.template.context_processors.request',
99 'django.template.context_processors.static',
100 'django.contrib.auth.context_processors.auth',
101 'django.contrib.messages.context_processors.messages',
102 'ecommerce.context_processors.cart',
103 'shopelectro.context_processors.shop',
104 ],
105 },
106 },
107 ]
108
109 WSGI_APPLICATION = 'shopelectro.wsgi.application'
110
111 # Password validation
112 # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
113
114 AUTH_PASSWORD_VALIDATORS = [
115 {
116 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
117 },
118 {
119 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
120 },
121 {
122 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
123 },
124 {
125 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
126 },
127 ]
128
129 # Internationalization
130 # https://docs.djangoproject.com/en/1.9/topics/i18n/
131
132 LOCALE_NAME = 'en_US'
133 TIME_ZONE = 'UTC'
134
135 USE_I18N = True
136 USE_L10N = True
137 USE_TZ = True
138
139 LOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]
140 FORMAT_MODULE_PATH = [
141 'shopelectro.formats',
142 ]
143
144 # Static files (CSS, JavaScript, Images)
145 # https://docs.djangoproject.com/en/1.9/howto/static-files/
146 STATIC_URL = '/static/'
147 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
148 ASSETS_DIR = os.path.join(BASE_DIR, 'assets')
149
150 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
151
152 STATICFILES_DIRS = [
153 os.path.join(BASE_DIR, 'front/build'),
154 ASSETS_DIR,
155 ]
156
157 MEDIA_URL = '/media/'
158 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
159
160 # It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`
161 DATABASE_URL = 'postgres://user:pass@db_name/table'
162 DATABASES = {
163 'default': dj_database_url.config(
164 env='DATABASE_URL',
165 default=DATABASE_URL,
166 )
167 }
168
169 LOGGING = {
170 'version': 1,
171 'disable_existing_loggers': False,
172 'handlers': {
173 'console': {
174 'class': 'logging.StreamHandler',
175 },
176 },
177 'loggers': {
178 'django': {
179 'handlers': ['console'],
180 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
181 },
182 },
183 }
184
185 SITE_CREATED = datetime(2013, 1, 1)
186
187 LOCALHOST = 'http://127.0.0.1:8000/'
188 BASE_URL = 'https://www.shopelectro.ru'
189
190 PLACEHOLDER_IMAGE = 'images/logo.png'
191 PLACEHOLDER_ALT = 'Логотип компании Shopelectro'
192
193 # Autocomplete and search settings
194 SEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'
195
196 # For sitemaps and sites framework
197 SITE_ID = 1
198 SITE_DOMAIN_NAME = 'www.shopelectro.ru'
199
200 # Used to retrieve instances in ecommerce.Cart
201 CART_ID = 'cart'
202
203 # Used to define choices attr in definition of Order.payment_type field
204 PAYMENT_OPTIONS = (
205 ('cash', 'Наличные'),
206 ('cashless', 'Безналичные и денежные переводы'),
207 ('AC', 'Банковская карта'),
208 ('PC', 'Яндекс.Деньги'),
209 ('GP', 'Связной (терминал)'),
210 ('AB', 'Альфа-Клик'),
211 )
212
213 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`
214 YANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')
215
216 # Used for order's email in ecommerce app
217 FAKE_ORDER_NUMBER = 6000
218
219 # Subjects for different types of emails sent from SE.
220 EMAIL_SUBJECTS = {
221 'call': 'Обратный звонок',
222 'order': 'Заказ №{0.fake_order_number}',
223 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',
224 'one_click': 'Заказ в один клик №{0.fake_order_number}',
225 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',
226 }
227
228 # Email configs
229 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`
230 EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')
231 EMAIL_HOST_USER = '[email protected]'
232 EMAIL_USE_TLS = True
233 EMAIL_HOST = 'smtp.yandex.ru'
234 EMAIL_PORT = 587
235 EMAIL_SENDER = '[email protected]'
236 EMAIL_RECIPIENT = '[email protected]'
237 SHOP_EMAIL = '[email protected]'
238
239 # FTP configs
240 FTP_USER = os.environ.get('FTP_USER', 'user')
241 FTP_PASS = os.environ.get('FTP_PASS', 'pass')
242 FTP_IP = os.environ.get('FTP_IP', '0.0.0.0')
243
244 # Used in admin image uploads
245 MODEL_TYPES = {
246 'Product': {
247 'app_name': 'shopelectro',
248 'dir_name': 'products',
249 },
250 'Category': {
251 'app_name': 'shopelectro',
252 'dir_name': 'categories',
253 }
254 }
255
256 # This need for using {% debug %} variable in templates.
257 INTERNAL_IPS = (
258 '127.0.0.1',
259 )
260
261 TOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]
262
263 SHOP = {
264 'id': '69886',
265 'scid': '64788',
266 'success_url': BASE_URL + '/shop/order-success/',
267 'fail_url': BASE_URL + '/',
268 'cps_phone': '+78124163200',
269 'cps_email': '[email protected]',
270 'local_delivery_cost': 300,
271 'local_delivery_cost_threshold': 5000,
272 }
273
274 # used in data-migrations and tests
275 CUSTOM_PAGES = {
276 'index': {
277 'slug': '',
278 'name': 'Интернет-магазин элементов питания "ShopElectro"',
279 'menu_title': 'Главная',
280 'title': 'Интернет-магазин Элементов питания с доставкой по России',
281 },
282 'sitemap': {
283 'slug': 'sitemap',
284 'h1': 'Карта сайта',
285 'name': 'Карта сайта',
286 },
287 'order': {
288 'slug': 'order',
289 'name': 'Оформление заказа',
290 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',
291 },
292 'search': {
293 'slug': 'search',
294 'name': 'Результаты поиска',
295 },
296 'catalog': {
297 'slug': 'catalog',
298 'name': 'Каталог товаров',
299 'menu_title': 'Каталог',
300 },
301 'order_success': {
302 'slug': 'order-success',
303 'name': 'Заказ принят',
304 }
305 }
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -182,6 +182,8 @@
},
}
+SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')
+
SITE_CREATED = datetime(2013, 1, 1)
LOCALHOST = 'http://127.0.0.1:8000/'
| {"golden_diff": "diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py\n--- a/shopelectro/settings/base.py\n+++ b/shopelectro/settings/base.py\n@@ -182,6 +182,8 @@\n },\n }\n \n+SELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')\n+\n SITE_CREATED = datetime(2013, 1, 1)\n \n LOCALHOST = 'http://127.0.0.1:8000/'\n", "issue": "Move CI to our server\n\u0422\u0440\u044d\u0432\u0438\u0441 \u043d\u0430 \u0431\u0435\u0441\u043f\u043b\u0430\u043d\u043e\u043c \u0441\u0435\u0440\u0432\u0435 \u0431\u0438\u043b\u0434\u0438\u0442 \u043d\u0435 \u043e\u0447\u0435\u043d\u044c.\r\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0443 \u043d\u0430\u0441 \u043e\u0434\u0438\u043d travis ci \u0438\u0434\u0451\u0442 ~20 \u043c\u0438\u043d. \u0423\u0447\u0438\u0442\u044b\u0432\u0430\u044f, \u0447\u0442\u043e \u043c\u044b \u0431\u0443\u0434\u0435\u043c \u043e\u043f\u0435\u0440\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u0437\u0430\u0434\u0430\u0447\u0430\u043c\u0438 \u0441 \u043e\u0446\u0435\u043d\u043a\u043e\u0439 \u043f\u043e 15-30 \u043c\u0438\u043d, \u044d\u0442\u043e \u0441\u043b\u0438\u0448\u043a\u043e\u043c \u0434\u043e\u043b\u0433\u043e.\r\n\u041a\u0440\u043e\u043c\u0435 \u0442\u043e\u0433\u043e, \u0422\u0440\u044d\u0432\u0438\u0441 \u0447\u0430\u0441\u0442\u043e \u043e\u0442\u0432\u0430\u043b\u0438\u0432\u0430\u0435\u0442\u0441\u044f \u043f\u043e \u0440\u0435\u0441\u0443\u0440\u0441\u0430\u043c, \u0447\u0442\u043e \u043f\u043b\u043e\u0434\u0438\u0442 \u043d\u0430\u043c \u0432 ci \u0440\u0430\u043d\u0434\u043e\u043c\u043d\u044b\u0435 \u043e\u0448\u0438\u0431\u043a\u0438. \u0410 \u044d\u0442\u043e \u043e\u0447\u0435\u043d\u044c \u0434\u043e\u0440\u043e\u0433\u043e, \u043a\u043e\u043d\u0435\u0447\u043d\u043e \u0436\u0435.\r\n\r\n\u0418\u0441\u0441\u043b\u0435\u0434\u0443\u0439 \u0440\u0430\u0437\u043d\u044b\u0435 \u043a\u043e\u0440\u043e\u0431\u043a\u0438. 
\u041d\u0430\u043f\u0440\u0438\u043c\u0435\u0440 travis, jenkins, team-city \u0438 \u0442\u0434.\r\n\u041d\u0430\u043c \u043d\u0443\u0436\u043d\u043e \u0447\u0442\u043e-\u0442\u043e \u043f\u0440\u043e\u0441\u0442\u0435\u043d\u044c\u043a\u043e\u0435, \u043f\u043e\u0437\u0436\u0435 \u0440\u0430\u0437\u0432\u0435\u0440\u043d\u0451\u043c \u043a\u043e\u0440\u043e\u0431\u043a\u0443 \u043d\u0430 \u0441\u0432\u043e\u0451\u043c \u0441\u0435\u0440\u0432\u0435.\r\n\r\n\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442 \u0437\u0430\u0434\u0430\u0447\u0438 - \u043e\u0442\u0447\u0451\u0442 \u043f\u043e \u0438\u0441\u0441\u043b\u0435\u0434\u043e\u0432\u0430\u043d\u0438\u044e \u0437\u0434\u0435\u0441\u044c \u0432 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0445, \u0440\u0435\u0448\u0435\u043d\u0438\u0435 \u043f\u043e \u0438\u043d\u0441\u0442\u0440\u0443\u043c\u0435\u043d\u0442\u0443 \u043f\u0440\u0438\u043d\u044f\u0442\u043e, \u0441\u043e\u0437\u0434\u0430\u043d\u0430 \u043d\u043e\u0432\u0430\u044f \u0437\u0430\u0434\u0430\u0447\u0430 \u043f\u043e \u0432\u043d\u0435\u0434\u0440\u0435\u043d\u0438\u044e \u0438\u043d\u0441\u0442\u0440\u0443\u043c\u0435\u043d\u0442\u0430\r\n\r\n\u041a\u043e\u043d\u0446\u044b \u043e\u0442 \u043d\u0430\u0448\u0435\u0433\u043e \u0445\u043e\u0441\u0442\u0430 \u0431\u0435\u0440\u0438 \u0443 @duker33\n", "before_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. 
Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n 
os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSELENIUM_URL = os.environ.get('SELENIUM_URL', 'http://se-selenium:4444/wd/hub')\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b 
\u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}]} | 3,666 | 130 |
gh_patches_debug_42518 | rasdani/github-patches | git_diff | keras-team__autokeras-479 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feature_request: Support multicore/multiprocess for read_images function
### Feature Description
Support multi-core processing for the `read_images` function in `image/image_supervised.py`.
### Reason
The [read_images function in image_supervised.py](https://github.com/jhfjhfj1/autokeras/blob/36752fcbce58bd1a26c17144637cc0ecbef83da0/autokeras/image/image_supervised.py#L14) does not support multi-core processing (it just reads files one by one), so it is **too slow** when reading more than 10k image files.
### Solution
Use multiprocessing (or an equivalent approach) in the `read_images` function, either as the default or as an option.
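A rough sketch of the idea (illustrative only — `read_images_parallel` and `_load_one` are hypothetical names, and the PIL call stands in for whatever image reader the project already uses):
```python
import os
from multiprocessing import Pool, cpu_count

import numpy as np
from PIL import Image  # assumption: any reader that returns an ndarray would do


def _load_one(img_path):
    # Must be a top-level function so worker processes can pickle it.
    img = np.asarray(Image.open(img_path))
    if img.ndim < 3:
        img = img[..., np.newaxis]  # keep a channel axis for grayscale images
    return img


def read_images_parallel(img_file_names, images_dir_path, processes=None):
    paths = [os.path.join(images_dir_path, name) for name in img_file_names]
    with Pool(processes=processes or cpu_count()) as pool:
        return np.asanyarray(pool.map(_load_one, paths))
```
Exposing the process count (or a simple `parallel` on/off flag) would keep the current single-process behaviour available as a fallback.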
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/image/image_supervised.py`
Content:
```
1 import os
2 from abc import ABC
3 import numpy as np
4
5 from autokeras.constant import Constant
6 from autokeras.nn.loss_function import classification_loss, regression_loss
7 from autokeras.nn.metric import Accuracy, MSE
8 from autokeras.preprocessor import OneHotEncoder, ImageDataTransformer
9 from autokeras.supervised import PortableDeepSupervised, DeepSupervised
10 from autokeras.utils import pickle_to_file, \
11 read_csv_file, read_image, compute_image_resize_params, resize_image_data
12
13
14 def read_images(img_file_names, images_dir_path):
15 """Read the images from the path and return their numpy.ndarray instance.
16 Return a numpy.ndarray instance containing the training data.
17
18 Args:
19 img_file_names: List containing images names.
20 images_dir_path: Path to the directory containing images.
21 """
22 x_train = []
23 if os.path.isdir(images_dir_path):
24 for img_file in img_file_names:
25 img_path = os.path.join(images_dir_path, img_file)
26 if os.path.exists(img_path):
27 img = read_image(img_path)
28 if len(img.shape) < 3:
29 img = img[..., np.newaxis]
30 x_train.append(img)
31 else:
32 raise ValueError("%s image does not exist" % img_file)
33 else:
34 raise ValueError("Directory containing images does not exist")
35 return np.asanyarray(x_train)
36
37
38 def load_image_dataset(csv_file_path, images_path):
39 """Load images from the files and labels from a csv file.
40
41 Second, the dataset is a set of images and the labels are in a CSV file.
42 The CSV file should contain two columns whose names are 'File Name' and 'Label'.
43 The file names in the first column should match the file names of the images with extensions,
44 e.g., .jpg, .png.
45 The path to the CSV file should be passed through the `csv_file_path`.
46 The path to the directory containing all the images should be passed through `image_path`.
47
48 Args:
49 csv_file_path: CSV file path.
50 images_path: Path where images exist.
51
52 Returns:
53 x: Four dimensional numpy.ndarray. The channel dimension is the last dimension.
54 y: The labels.
55 """
56 img_file_name, y = read_csv_file(csv_file_path)
57 x = read_images(img_file_name, images_path)
58 return np.array(x), np.array(y)
59
60
61 class ImageSupervised(DeepSupervised, ABC):
62 """Abstract image supervised class.
63
64 Attributes:
65 path: A path to the directory to save the classifier as well as intermediate results.
66 cnn: CNN module from net_module.py.
67 y_encoder: Label encoder, used in transform_y or inverse_transform_y for encode the label. For example,
68 if one hot encoder needed, y_encoder can be OneHotEncoder.
69 data_transformer: A transformer class to process the data. See example as ImageDataTransformer.
70 verbose: A boolean value indicating the verbosity mode which determines whether the search process
71 will be printed to stdout.
72 augment: A boolean value indicating whether the data needs augmentation. If not define, then it
73 will use the value of Constant.DATA_AUGMENTATION which is True by default.
74 searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
75 resize_height: resize image height.
76 resize_width: resize image width.
77 """
78
79 def __init__(self, augment=None, **kwargs):
80 """Initialize the instance.
81 The classifier will be loaded from the files in 'path' if parameter 'resume' is True.
82 Otherwise it would create a new one.
83 Args:
84 verbose: A boolean of whether the search process will be printed to stdout.
85 path: A string. The path to a directory, where the intermediate results are saved.
86 resume: A boolean. If True, the classifier will continue to previous work saved in path.
87 Otherwise, the classifier will start a new search.
88 searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
89 augment: A boolean value indicating whether the data needs augmentation. If not define, then it
90 will use the value of Constant.DATA_AUGMENTATION which is True by default.
91 """
92 self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION
93 self.resize_shape = []
94
95 super().__init__(**kwargs)
96
97 def fit(self, x, y, time_limit=None):
98 x = np.array(x)
99 y = np.array(y)
100
101 if self.verbose:
102 print("Preprocessing the images.")
103
104 self.resize_shape = compute_image_resize_params(x)
105
106 x = resize_image_data(x, self.resize_shape)
107
108 if self.verbose:
109 print("Preprocessing finished.")
110
111 super().fit(x, y, time_limit)
112
113 def init_transformer(self, x):
114 if self.data_transformer is None:
115 self.data_transformer = ImageDataTransformer(x, augment=self.augment)
116
117 def preprocess(self, x):
118 return resize_image_data(x, self.resize_shape)
119
120
121 class ImageClassifier(ImageSupervised):
122 """ImageClassifier class.
123
124 It is used for image classification. It searches convolutional neural network architectures
125 for the best configuration for the image dataset.
126 """
127
128 @property
129 def loss(self):
130 return classification_loss
131
132 @property
133 def metric(self):
134 return Accuracy
135
136 def transform_y(self, y_train):
137 # Transform y_train.
138 if self.y_encoder is None:
139 self.y_encoder = OneHotEncoder()
140 self.y_encoder.fit(y_train)
141 y_train = self.y_encoder.transform(y_train)
142 return y_train
143
144 def inverse_transform_y(self, output):
145 return self.y_encoder.inverse_transform(output)
146
147 def get_n_output_node(self):
148 return self.y_encoder.n_classes
149
150 def export_autokeras_model(self, model_file_name):
151 """ Creates and Exports the AutoKeras model to the given filename. """
152 portable_model = PortableImageClassifier(graph=self.cnn.best_model,
153 y_encoder=self.y_encoder,
154 data_transformer=self.data_transformer,
155 resize_params=self.resize_shape,
156 path=self.path)
157 pickle_to_file(portable_model, model_file_name)
158
159
160 class ImageClassifier1D(ImageClassifier):
161 """ ImageClassifier1D class.
162
163 It is used for 1D image classification. It searches convolutional neural network architectures
164 for the best configuration for the 1D image dataset.
165 """
166
167 def __init__(self, **kwargs):
168 kwargs['augment'] = False
169 super().__init__(**kwargs)
170
171
172 class ImageClassifier3D(ImageClassifier):
173 """ ImageClassifier3D class.
174
175 It is used for 3D image classification. It searches convolutional neural network architectures
176 for the best configuration for the 1D image dataset.
177 """
178
179 def __init__(self, **kwargs):
180 kwargs['augment'] = False
181 super().__init__(**kwargs)
182
183
184 class ImageRegressor(ImageSupervised):
185 """ImageRegressor class.
186
187 It is used for image regression. It searches convolutional neural network architectures
188 for the best configuration for the image dataset.
189 """
190
191 @property
192 def loss(self):
193 return regression_loss
194
195 @property
196 def metric(self):
197 return MSE
198
199 def get_n_output_node(self):
200 return 1
201
202 def transform_y(self, y_train):
203 return y_train.flatten().reshape(len(y_train), 1)
204
205 def inverse_transform_y(self, output):
206 return output.flatten()
207
208 def export_autokeras_model(self, model_file_name):
209 """ Creates and Exports the AutoKeras model to the given filename. """
210 portable_model = PortableImageRegressor(graph=self.cnn.best_model,
211 y_encoder=self.y_encoder,
212 data_transformer=self.data_transformer,
213 resize_params=self.resize_shape,
214 path=self.path)
215 pickle_to_file(portable_model, model_file_name)
216
217
218 class ImageRegressor1D(ImageRegressor):
219 """ ImageRegressor1D class.
220
221 It is used for 1D image regression. It searches convolutional neural network architectures
222 for the best configuration for the 1D image dataset.
223 """
224
225 def __init__(self, **kwargs):
226 kwargs['augment'] = False
227 super().__init__(**kwargs)
228
229
230 class ImageRegressor3D(ImageRegressor):
231 """ ImageRegressor3D class.
232
233 It is used for 3D image regression. It searches convolutional neural network architectures
234 for the best configuration for the 1D image dataset.
235 """
236
237 def __init__(self, **kwargs):
238 kwargs['augment'] = False
239 super().__init__(**kwargs)
240
241
242 class PortableImageSupervised(PortableDeepSupervised):
243 def __init__(self, graph, y_encoder, data_transformer, resize_params, verbose=False, path=None):
244 """Initialize the instance.
245 Args:
246 graph: The graph form of the learned model
247 """
248 super().__init__(graph, y_encoder, data_transformer, verbose, path)
249 self.resize_shape = resize_params
250
251 def preprocess(self, x):
252 return resize_image_data(x, self.resize_shape)
253
254
255 class PortableImageClassifier(PortableImageSupervised):
256 @property
257 def loss(self):
258 return classification_loss
259
260 @property
261 def metric(self):
262 return Accuracy
263
264 def transform_y(self, y_train):
265 return self.y_encoder.transform(y_train)
266
267 def inverse_transform_y(self, output):
268 return self.y_encoder.inverse_transform(output)
269
270
271 class PortableImageRegressor(PortableImageSupervised):
272 @property
273 def loss(self):
274 return regression_loss
275
276 @property
277 def metric(self):
278 return MSE
279
280 def transform_y(self, y_train):
281 return y_train.flatten().reshape(len(y_train), 1)
282
283 def inverse_transform_y(self, output):
284 return output.flatten()
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/autokeras/image/image_supervised.py b/autokeras/image/image_supervised.py
--- a/autokeras/image/image_supervised.py
+++ b/autokeras/image/image_supervised.py
@@ -1,6 +1,7 @@
import os
from abc import ABC
import numpy as np
+from multiprocessing import Pool, cpu_count
from autokeras.constant import Constant
from autokeras.nn.loss_function import classification_loss, regression_loss
@@ -11,31 +12,48 @@
read_csv_file, read_image, compute_image_resize_params, resize_image_data
-def read_images(img_file_names, images_dir_path):
+def _image_to_array(img_path):
+ """Read the image from the path and return image object.
+ Return an image object.
+
+ Args:
+ img_path: image file name in images_dir_path.
+ """
+ if os.path.exists(img_path):
+ img = read_image(img_path)
+ if len(img.shape) < 3:
+ img = img[..., np.newaxis]
+ return img
+ else:
+ raise ValueError("%s image does not exist" % img_path)
+
+
+def read_images(img_file_names, images_dir_path, parallel=True):
"""Read the images from the path and return their numpy.ndarray instance.
Return a numpy.ndarray instance containing the training data.
Args:
img_file_names: List containing images names.
images_dir_path: Path to the directory containing images.
+ parallel: (Default: True) Run _image_to_array will use multiprocessing.
"""
- x_train = []
+ img_paths = [os.path.join(images_dir_path, img_file)
+ for img_file in img_file_names]
+
if os.path.isdir(images_dir_path):
- for img_file in img_file_names:
- img_path = os.path.join(images_dir_path, img_file)
- if os.path.exists(img_path):
- img = read_image(img_path)
- if len(img.shape) < 3:
- img = img[..., np.newaxis]
- x_train.append(img)
- else:
- raise ValueError("%s image does not exist" % img_file)
+ if parallel:
+ pool = Pool(processes=cpu_count())
+ x_train = pool.map(_image_to_array, img_paths)
+ pool.close()
+ pool.join()
+ else:
+ x_train = [_image_to_array(img_path) for img_path in img_paths]
else:
raise ValueError("Directory containing images does not exist")
return np.asanyarray(x_train)
-def load_image_dataset(csv_file_path, images_path):
+def load_image_dataset(csv_file_path, images_path, parallel=True):
"""Load images from the files and labels from a csv file.
Second, the dataset is a set of images and the labels are in a CSV file.
@@ -48,13 +66,14 @@
Args:
csv_file_path: CSV file path.
images_path: Path where images exist.
+ parallel: (Default: True) Load dataset with multiprocessing.
Returns:
x: Four dimensional numpy.ndarray. The channel dimension is the last dimension.
y: The labels.
"""
img_file_name, y = read_csv_file(csv_file_path)
- x = read_images(img_file_name, images_path)
+ x = read_images(img_file_name, images_path, parallel)
return np.array(x), np.array(y)
@@ -112,7 +131,8 @@
def init_transformer(self, x):
if self.data_transformer is None:
- self.data_transformer = ImageDataTransformer(x, augment=self.augment)
+ self.data_transformer = ImageDataTransformer(
+ x, augment=self.augment)
def preprocess(self, x):
return resize_image_data(x, self.resize_shape)
| {"golden_diff": "diff --git a/autokeras/image/image_supervised.py b/autokeras/image/image_supervised.py\n--- a/autokeras/image/image_supervised.py\n+++ b/autokeras/image/image_supervised.py\n@@ -1,6 +1,7 @@\n import os\n from abc import ABC\n import numpy as np\n+from multiprocessing import Pool, cpu_count\n \n from autokeras.constant import Constant\n from autokeras.nn.loss_function import classification_loss, regression_loss\n@@ -11,31 +12,48 @@\n read_csv_file, read_image, compute_image_resize_params, resize_image_data\n \n \n-def read_images(img_file_names, images_dir_path):\n+def _image_to_array(img_path):\n+ \"\"\"Read the image from the path and return image object.\n+ Return an image object.\n+\n+ Args:\n+ img_path: image file name in images_dir_path.\n+ \"\"\"\n+ if os.path.exists(img_path):\n+ img = read_image(img_path)\n+ if len(img.shape) < 3:\n+ img = img[..., np.newaxis]\n+ return img\n+ else:\n+ raise ValueError(\"%s image does not exist\" % img_path)\n+\n+\n+def read_images(img_file_names, images_dir_path, parallel=True):\n \"\"\"Read the images from the path and return their numpy.ndarray instance.\n Return a numpy.ndarray instance containing the training data.\n \n Args:\n img_file_names: List containing images names.\n images_dir_path: Path to the directory containing images.\n+ parallel: (Default: True) Run _image_to_array will use multiprocessing.\n \"\"\"\n- x_train = []\n+ img_paths = [os.path.join(images_dir_path, img_file)\n+ for img_file in img_file_names]\n+\n if os.path.isdir(images_dir_path):\n- for img_file in img_file_names:\n- img_path = os.path.join(images_dir_path, img_file)\n- if os.path.exists(img_path):\n- img = read_image(img_path)\n- if len(img.shape) < 3:\n- img = img[..., np.newaxis]\n- x_train.append(img)\n- else:\n- raise ValueError(\"%s image does not exist\" % img_file)\n+ if parallel:\n+ pool = Pool(processes=cpu_count())\n+ x_train = pool.map(_image_to_array, img_paths)\n+ pool.close()\n+ pool.join()\n+ else:\n+ x_train = [_image_to_array(img_path) for img_path in img_paths]\n else:\n raise ValueError(\"Directory containing images does not exist\")\n return np.asanyarray(x_train)\n \n \n-def load_image_dataset(csv_file_path, images_path):\n+def load_image_dataset(csv_file_path, images_path, parallel=True):\n \"\"\"Load images from the files and labels from a csv file.\n \n Second, the dataset is a set of images and the labels are in a CSV file.\n@@ -48,13 +66,14 @@\n Args:\n csv_file_path: CSV file path.\n images_path: Path where images exist.\n+ parallel: (Default: True) Load dataset with multiprocessing.\n \n Returns:\n x: Four dimensional numpy.ndarray. 
The channel dimension is the last dimension.\n y: The labels.\n \"\"\"\n img_file_name, y = read_csv_file(csv_file_path)\n- x = read_images(img_file_name, images_path)\n+ x = read_images(img_file_name, images_path, parallel)\n return np.array(x), np.array(y)\n \n \n@@ -112,7 +131,8 @@\n \n def init_transformer(self, x):\n if self.data_transformer is None:\n- self.data_transformer = ImageDataTransformer(x, augment=self.augment)\n+ self.data_transformer = ImageDataTransformer(\n+ x, augment=self.augment)\n \n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n", "issue": "feature_request: Support multicore/multiprocess for read_images function\n### Feature Description\r\nSupport multicore for `read_images` function in `image/image_supervised.py`.\r\n\r\n### Reason\r\nAs [read_images function in image_supervised.py](https://github.com/jhfjhfj1/autokeras/blob/36752fcbce58bd1a26c17144637cc0ecbef83da0/autokeras/image/image_supervised.py#L14) does not support multi-core(just read files one by one), so it's **too slow** when reading image files more than 10k files.\r\n\r\n### Solution\r\nUse multiprocessing or equivalent one in `read_images` function as a default, or optional.\r\n\n", "before_files": [{"content": "import os\nfrom abc import ABC\nimport numpy as np\n\nfrom autokeras.constant import Constant\nfrom autokeras.nn.loss_function import classification_loss, regression_loss\nfrom autokeras.nn.metric import Accuracy, MSE\nfrom autokeras.preprocessor import OneHotEncoder, ImageDataTransformer\nfrom autokeras.supervised import PortableDeepSupervised, DeepSupervised\nfrom autokeras.utils import pickle_to_file, \\\n read_csv_file, read_image, compute_image_resize_params, resize_image_data\n\n\ndef read_images(img_file_names, images_dir_path):\n \"\"\"Read the images from the path and return their numpy.ndarray instance.\n Return a numpy.ndarray instance containing the training data.\n\n Args:\n img_file_names: List containing images names.\n images_dir_path: Path to the directory containing images.\n \"\"\"\n x_train = []\n if os.path.isdir(images_dir_path):\n for img_file in img_file_names:\n img_path = os.path.join(images_dir_path, img_file)\n if os.path.exists(img_path):\n img = read_image(img_path)\n if len(img.shape) < 3:\n img = img[..., np.newaxis]\n x_train.append(img)\n else:\n raise ValueError(\"%s image does not exist\" % img_file)\n else:\n raise ValueError(\"Directory containing images does not exist\")\n return np.asanyarray(x_train)\n\n\ndef load_image_dataset(csv_file_path, images_path):\n \"\"\"Load images from the files and labels from a csv file.\n\n Second, the dataset is a set of images and the labels are in a CSV file.\n The CSV file should contain two columns whose names are 'File Name' and 'Label'.\n The file names in the first column should match the file names of the images with extensions,\n e.g., .jpg, .png.\n The path to the CSV file should be passed through the `csv_file_path`.\n The path to the directory containing all the images should be passed through `image_path`.\n\n Args:\n csv_file_path: CSV file path.\n images_path: Path where images exist.\n\n Returns:\n x: Four dimensional numpy.ndarray. 
The channel dimension is the last dimension.\n y: The labels.\n \"\"\"\n img_file_name, y = read_csv_file(csv_file_path)\n x = read_images(img_file_name, images_path)\n return np.array(x), np.array(y)\n\n\nclass ImageSupervised(DeepSupervised, ABC):\n \"\"\"Abstract image supervised class.\n\n Attributes:\n path: A path to the directory to save the classifier as well as intermediate results.\n cnn: CNN module from net_module.py.\n y_encoder: Label encoder, used in transform_y or inverse_transform_y for encode the label. For example,\n if one hot encoder needed, y_encoder can be OneHotEncoder.\n data_transformer: A transformer class to process the data. See example as ImageDataTransformer.\n verbose: A boolean value indicating the verbosity mode which determines whether the search process\n will be printed to stdout.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n resize_height: resize image height.\n resize_width: resize image width.\n \"\"\"\n\n def __init__(self, augment=None, **kwargs):\n \"\"\"Initialize the instance.\n The classifier will be loaded from the files in 'path' if parameter 'resume' is True.\n Otherwise it would create a new one.\n Args:\n verbose: A boolean of whether the search process will be printed to stdout.\n path: A string. The path to a directory, where the intermediate results are saved.\n resume: A boolean. If True, the classifier will continue to previous work saved in path.\n Otherwise, the classifier will start a new search.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n \"\"\"\n self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION\n self.resize_shape = []\n\n super().__init__(**kwargs)\n\n def fit(self, x, y, time_limit=None):\n x = np.array(x)\n y = np.array(y)\n\n if self.verbose:\n print(\"Preprocessing the images.\")\n\n self.resize_shape = compute_image_resize_params(x)\n\n x = resize_image_data(x, self.resize_shape)\n\n if self.verbose:\n print(\"Preprocessing finished.\")\n\n super().fit(x, y, time_limit)\n\n def init_transformer(self, x):\n if self.data_transformer is None:\n self.data_transformer = ImageDataTransformer(x, augment=self.augment)\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass ImageClassifier(ImageSupervised):\n \"\"\"ImageClassifier class.\n\n It is used for image classification. It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n # Transform y_train.\n if self.y_encoder is None:\n self.y_encoder = OneHotEncoder()\n self.y_encoder.fit(y_train)\n y_train = self.y_encoder.transform(y_train)\n return y_train\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n def get_n_output_node(self):\n return self.y_encoder.n_classes\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. 
\"\"\"\n portable_model = PortableImageClassifier(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageClassifier1D(ImageClassifier):\n \"\"\" ImageClassifier1D class.\n\n It is used for 1D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageClassifier3D(ImageClassifier):\n \"\"\" ImageClassifier3D class.\n\n It is used for 3D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor(ImageSupervised):\n \"\"\"ImageRegressor class.\n\n It is used for image regression. It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def get_n_output_node(self):\n return 1\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. \"\"\"\n portable_model = PortableImageRegressor(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageRegressor1D(ImageRegressor):\n \"\"\" ImageRegressor1D class.\n\n It is used for 1D image regression. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor3D(ImageRegressor):\n \"\"\" ImageRegressor3D class.\n\n It is used for 3D image regression. 
It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass PortableImageSupervised(PortableDeepSupervised):\n def __init__(self, graph, y_encoder, data_transformer, resize_params, verbose=False, path=None):\n \"\"\"Initialize the instance.\n Args:\n graph: The graph form of the learned model\n \"\"\"\n super().__init__(graph, y_encoder, data_transformer, verbose, path)\n self.resize_shape = resize_params\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass PortableImageClassifier(PortableImageSupervised):\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n return self.y_encoder.transform(y_train)\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n\nclass PortableImageRegressor(PortableImageSupervised):\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n", "path": "autokeras/image/image_supervised.py"}], "after_files": [{"content": "import os\nfrom abc import ABC\nimport numpy as np\nfrom multiprocessing import Pool, cpu_count\n\nfrom autokeras.constant import Constant\nfrom autokeras.nn.loss_function import classification_loss, regression_loss\nfrom autokeras.nn.metric import Accuracy, MSE\nfrom autokeras.preprocessor import OneHotEncoder, ImageDataTransformer\nfrom autokeras.supervised import PortableDeepSupervised, DeepSupervised\nfrom autokeras.utils import pickle_to_file, \\\n read_csv_file, read_image, compute_image_resize_params, resize_image_data\n\n\ndef _image_to_array(img_path):\n \"\"\"Read the image from the path and return image object.\n Return an image object.\n\n Args:\n img_path: image file name in images_dir_path.\n \"\"\"\n if os.path.exists(img_path):\n img = read_image(img_path)\n if len(img.shape) < 3:\n img = img[..., np.newaxis]\n return img\n else:\n raise ValueError(\"%s image does not exist\" % img_path)\n\n\ndef read_images(img_file_names, images_dir_path, parallel=True):\n \"\"\"Read the images from the path and return their numpy.ndarray instance.\n Return a numpy.ndarray instance containing the training data.\n\n Args:\n img_file_names: List containing images names.\n images_dir_path: Path to the directory containing images.\n parallel: (Default: True) Run _image_to_array will use multiprocessing.\n \"\"\"\n img_paths = [os.path.join(images_dir_path, img_file)\n for img_file in img_file_names]\n\n if os.path.isdir(images_dir_path):\n if parallel:\n pool = Pool(processes=cpu_count())\n x_train = pool.map(_image_to_array, img_paths)\n pool.close()\n pool.join()\n else:\n x_train = [_image_to_array(img_path) for img_path in img_paths]\n else:\n raise ValueError(\"Directory containing images does not exist\")\n return np.asanyarray(x_train)\n\n\ndef load_image_dataset(csv_file_path, images_path, parallel=True):\n \"\"\"Load images from the files and labels from a csv file.\n\n Second, the dataset is a set of images and the labels are in a CSV file.\n The CSV file should contain two columns whose names are 'File Name' and 'Label'.\n The file names in the first column should match the file names of the images with 
extensions,\n e.g., .jpg, .png.\n The path to the CSV file should be passed through the `csv_file_path`.\n The path to the directory containing all the images should be passed through `image_path`.\n\n Args:\n csv_file_path: CSV file path.\n images_path: Path where images exist.\n parallel: (Default: True) Load dataset with multiprocessing.\n\n Returns:\n x: Four dimensional numpy.ndarray. The channel dimension is the last dimension.\n y: The labels.\n \"\"\"\n img_file_name, y = read_csv_file(csv_file_path)\n x = read_images(img_file_name, images_path, parallel)\n return np.array(x), np.array(y)\n\n\nclass ImageSupervised(DeepSupervised, ABC):\n \"\"\"Abstract image supervised class.\n\n Attributes:\n path: A path to the directory to save the classifier as well as intermediate results.\n cnn: CNN module from net_module.py.\n y_encoder: Label encoder, used in transform_y or inverse_transform_y for encode the label. For example,\n if one hot encoder needed, y_encoder can be OneHotEncoder.\n data_transformer: A transformer class to process the data. See example as ImageDataTransformer.\n verbose: A boolean value indicating the verbosity mode which determines whether the search process\n will be printed to stdout.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n resize_height: resize image height.\n resize_width: resize image width.\n \"\"\"\n\n def __init__(self, augment=None, **kwargs):\n \"\"\"Initialize the instance.\n The classifier will be loaded from the files in 'path' if parameter 'resume' is True.\n Otherwise it would create a new one.\n Args:\n verbose: A boolean of whether the search process will be printed to stdout.\n path: A string. The path to a directory, where the intermediate results are saved.\n resume: A boolean. If True, the classifier will continue to previous work saved in path.\n Otherwise, the classifier will start a new search.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n augment: A boolean value indicating whether the data needs augmentation. If not define, then it\n will use the value of Constant.DATA_AUGMENTATION which is True by default.\n \"\"\"\n self.augment = augment if augment is not None else Constant.DATA_AUGMENTATION\n self.resize_shape = []\n\n super().__init__(**kwargs)\n\n def fit(self, x, y, time_limit=None):\n x = np.array(x)\n y = np.array(y)\n\n if self.verbose:\n print(\"Preprocessing the images.\")\n\n self.resize_shape = compute_image_resize_params(x)\n\n x = resize_image_data(x, self.resize_shape)\n\n if self.verbose:\n print(\"Preprocessing finished.\")\n\n super().fit(x, y, time_limit)\n\n def init_transformer(self, x):\n if self.data_transformer is None:\n self.data_transformer = ImageDataTransformer(\n x, augment=self.augment)\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass ImageClassifier(ImageSupervised):\n \"\"\"ImageClassifier class.\n\n It is used for image classification. 
It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n # Transform y_train.\n if self.y_encoder is None:\n self.y_encoder = OneHotEncoder()\n self.y_encoder.fit(y_train)\n y_train = self.y_encoder.transform(y_train)\n return y_train\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n def get_n_output_node(self):\n return self.y_encoder.n_classes\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. \"\"\"\n portable_model = PortableImageClassifier(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageClassifier1D(ImageClassifier):\n \"\"\" ImageClassifier1D class.\n\n It is used for 1D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageClassifier3D(ImageClassifier):\n \"\"\" ImageClassifier3D class.\n\n It is used for 3D image classification. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor(ImageSupervised):\n \"\"\"ImageRegressor class.\n\n It is used for image regression. It searches convolutional neural network architectures\n for the best configuration for the image dataset.\n \"\"\"\n\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def get_n_output_node(self):\n return 1\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n\n def export_autokeras_model(self, model_file_name):\n \"\"\" Creates and Exports the AutoKeras model to the given filename. \"\"\"\n portable_model = PortableImageRegressor(graph=self.cnn.best_model,\n y_encoder=self.y_encoder,\n data_transformer=self.data_transformer,\n resize_params=self.resize_shape,\n path=self.path)\n pickle_to_file(portable_model, model_file_name)\n\n\nclass ImageRegressor1D(ImageRegressor):\n \"\"\" ImageRegressor1D class.\n\n It is used for 1D image regression. It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass ImageRegressor3D(ImageRegressor):\n \"\"\" ImageRegressor3D class.\n\n It is used for 3D image regression. 
It searches convolutional neural network architectures\n for the best configuration for the 1D image dataset.\n \"\"\"\n\n def __init__(self, **kwargs):\n kwargs['augment'] = False\n super().__init__(**kwargs)\n\n\nclass PortableImageSupervised(PortableDeepSupervised):\n def __init__(self, graph, y_encoder, data_transformer, resize_params, verbose=False, path=None):\n \"\"\"Initialize the instance.\n Args:\n graph: The graph form of the learned model\n \"\"\"\n super().__init__(graph, y_encoder, data_transformer, verbose, path)\n self.resize_shape = resize_params\n\n def preprocess(self, x):\n return resize_image_data(x, self.resize_shape)\n\n\nclass PortableImageClassifier(PortableImageSupervised):\n @property\n def loss(self):\n return classification_loss\n\n @property\n def metric(self):\n return Accuracy\n\n def transform_y(self, y_train):\n return self.y_encoder.transform(y_train)\n\n def inverse_transform_y(self, output):\n return self.y_encoder.inverse_transform(output)\n\n\nclass PortableImageRegressor(PortableImageSupervised):\n @property\n def loss(self):\n return regression_loss\n\n @property\n def metric(self):\n return MSE\n\n def transform_y(self, y_train):\n return y_train.flatten().reshape(len(y_train), 1)\n\n def inverse_transform_y(self, output):\n return output.flatten()\n", "path": "autokeras/image/image_supervised.py"}]} | 3,301 | 847 |
gh_patches_debug_28340 | rasdani/github-patches | git_diff | facebookresearch__fairscale-175 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add mixed precision autocast to benchmarking script
## 🚀 Feature
It would be nice to make sure OSS/ShardedDDP plays nicely with torch autocast! Adding it to the oss.py benchmark will let us ensure there are no glaring issues. I don't mind working on this!
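For reference, a minimal sketch of what the requested change could look like in a training step (illustrative only — `model`, `loss_fn`, `batch`, and the `use_amp` flag stand in for whatever the benchmark script actually exposes):
```python
import torch


def training_step(model, loss_fn, batch, use_amp: bool) -> torch.Tensor:
    """One benchmark step; only the forward pass runs under autocast."""
    if use_amp and torch.cuda.is_available():
        # Mixed-precision forward pass using PyTorch native AMP.
        with torch.cuda.amp.autocast():
            outputs = model(batch["inputs"])
            loss = loss_fn(outputs, batch["label"])
    else:
        outputs = model(batch["inputs"])
        loss = loss_fn(outputs, batch["label"])
    loss.backward()  # backward stays outside the autocast context
    return loss
```
A complete AMP setup would usually pair this with `torch.cuda.amp.GradScaler` for loss scaling; whether the benchmark needs that is part of the work described here.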
## Motivation
Integrating OSS/ShardedDDP into PL (PyTorch Lightning) :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benchmarks/oss.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2
3
4 import argparse
5 from enum import Enum
6 import importlib
7 import logging
8 import math
9 import shutil
10 import tempfile
11 import time
12 from typing import Any, List, Optional, cast
13
14 import numpy as np
15 import torch
16 import torch.autograd.profiler as profiler
17 import torch.distributed as dist
18 import torch.multiprocessing as mp
19 import torch.nn as nn
20 from torch.nn.parallel import DistributedDataParallel as DDP
21 from torch.utils.data import BatchSampler, DataLoader, Sampler
22 from torch.utils.data.distributed import DistributedSampler
23 from torchvision.datasets import MNIST
24 from torchvision.transforms import ToTensor
25
26 from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
27 from fairscale.optim import OSS
28
29 OPTIM = torch.optim.RMSprop
30 TEMPDIR = tempfile.gettempdir()
31
32
33 def dist_init(rank, world_size, backend):
34 logging.info(f"Using backend: {backend}")
35 dist.init_process_group(backend=backend, init_method="tcp://localhost:29501", rank=rank, world_size=world_size)
36
37
38 def get_problem(rank, world_size, batch_size, device, model_name: str):
39 # Select the desired model on the fly
40 logging.info(f"Using {model_name} for benchmarking")
41 model = getattr(importlib.import_module("torchvision.models"), model_name)(pretrained=False).to(device)
42
43 # Data setup, duplicate the grey channels to get pseudo color
44 def collate(inputs: List[Any]):
45 return {
46 "inputs": torch.stack([i[0] for i in inputs]).repeat(1, 3, 1, 1).to(device),
47 "label": torch.tensor([i[1] for i in inputs]).to(device),
48 }
49
50 dataset = MNIST(transform=ToTensor(), download=False, root=TEMPDIR)
51 sampler: Sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
52 batch_sampler = BatchSampler(sampler, batch_size, drop_last=True)
53 dataloader = DataLoader(dataset=dataset, batch_sampler=batch_sampler, collate_fn=collate)
54
55 loss_fn = nn.CrossEntropyLoss()
56 return model, dataloader, loss_fn
57
58
59 class OptimType(str, Enum):
60 vanilla = "pytorch"
61 oss_ddp = "oss_ddp"
62 oss_sharded_ddp = "oss_sharded_ddp"
63 everyone = "everyone"
64
65
66 def train(
67 rank: int,
68 args: argparse.Namespace,
69 backend: str = "gloo",
70 optim_type: OptimType = OptimType.vanilla,
71 check_regression: bool = True,
72 ):
73 logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
74
75 # DDP
76 dist_init(rank=rank, world_size=args.world_size, backend=backend)
77
78 # Setup
79 if not args.cpu:
80 torch.cuda.set_device(rank)
81 torch.cuda.manual_seed(0)
82 torch.manual_seed(0) # also sets the cuda seed
83 np.random.seed(0)
84
85 if backend == "nccl":
86 torch.backends.cudnn.deterministic = True
87 torch.backends.cudnn.benchmark = False
88
89 device = torch.device("cpu") if args.cpu else torch.device(rank)
90 model, dataloader, loss_fn = get_problem(rank, args.world_size, args.batch_size, device, args.torchvision_model)
91
92 # Shard the optimizer
93 optimizer: Optional[torch.optim.Optimizer] = None
94 model = cast(nn.Module, model)
95
96 if optim_type == OptimType.oss_sharded_ddp:
97 model = ShardedDDP(
98 model,
99 optimizer=OPTIM,
100 optimizer_params={"lr": 1e-4, "momentum": 0.9},
101 world_size=args.world_size,
102 broadcast_buffers=True,
103 )
104 optimizer = model.sharded_optimizer
105
106 else:
107 model = DDP(model, device_ids=[rank], find_unused_parameters=False) # type: ignore
108 optimizer = (
109 OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)
110 if optim_type == OptimType.oss_ddp
111 else OPTIM(model.parameters(), lr=1e-4, momentum=0.9)
112 )
113 optimizer = cast(torch.optim.Optimizer, optimizer)
114
115 # Reset the memory use counter
116 if not args.cpu:
117 torch.cuda.reset_peak_memory_stats(rank)
118 torch.cuda.synchronize(rank)
119
120 # Standard training loop
121 training_start = time.monotonic()
122 model.train()
123
124 measurements = []
125 final_loss: Optional[float] = -1.0
126 need_profiling = args.profile
127
128 for epoch in range(args.epochs):
129 n_items = 0
130 epoch_runtime = 0.0
131
132 for batch in dataloader:
133 batch__start = time.monotonic()
134
135 def closure():
136 model.zero_grad()
137 if args.debug and rank == 0 and next(model.parameters()).grad is not None:
138 logging.debug(
139 "\nbefore: param {} -- grad {}".format(
140 next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
141 )
142 )
143
144 outputs = model(batch["inputs"])
145 loss = loss_fn(outputs, batch["label"])
146 loss.backward()
147
148 if optim_type == OptimType.oss_sharded_ddp:
149 model.reduce()
150
151 if args.debug and rank == 0 and next(model.parameters()).grad is not None:
152 logging.debug(
153 "after BW: param {} -- grad {}".format(
154 next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
155 )
156 )
157 return loss
158
159 if need_profiling and not args.cpu:
160 logging.info("Profiling the run")
161 with profiler.profile(use_cuda=True, record_shapes=True, profile_memory=True) as prof: # type: ignore
162 with profiler.record_function("batch"):
163 final_loss = optimizer.step(closure)
164 logging.info("profiling done")
165
166 if rank == 0:
167 prof.export_chrome_trace(f"{optim_type}_trace.json")
168
169 need_profiling = False # only profile once
170
171 else:
172 final_loss = optimizer.step(closure)
173
174 if args.debug and rank == 0:
175 logging.debug("buffer: {}".format(next(model.buffers()).norm().item()))
176 logging.debug(
177 "after update: param {} -- grad {}".format(
178 next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
179 )
180 )
181
182 n_items += args.batch_size
183
184 batch_end = time.monotonic()
185 epoch_runtime += batch_end - batch__start
186
187 if optim_type == OptimType.oss_ddp or optim_type == OptimType.oss_sharded_ddp:
188 # Check the checkpointing in the case of the OSS optimizer
189 # Memory usage could spill over from there
190 optimizer = cast(OSS, optimizer)
191 optimizer.consolidate_state_dict()
192 if dist.get_rank() == 0:
193 _ = optimizer.state_dict()
194 logging.info("... State dict collected")
195
196 measurements.append(n_items / epoch_runtime)
197 if dist.get_rank() == 0:
198 logging.info(f"Epoch {epoch} - processed {measurements[-1]:.2f} img per sec. Loss {final_loss:.3f}")
199
200 max_memory = -1.0
201 if not args.cpu:
202 torch.cuda.synchronize(rank)
203 max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20
204 logging.info(f"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB")
205
206 training_stop = time.monotonic()
207 img_per_sec = n_items / (training_stop - training_start) * args.epochs
208 max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20
209
210 logging.info(f"[{dist.get_rank()}] : Training done. {img_per_sec:.2f} img per sec inc. checkpoint")
211 logging.info(f"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB")
212
213 # Compute the mean and average img per second
214 mean = sum(measurements) / len(measurements)
215 diff = map(lambda x: pow(x - mean, 2.0), measurements)
216 std = math.sqrt(sum(diff) / (len(measurements) - 1)) if args.epochs > 2 else -1
217 logging.info(f"[{dist.get_rank()}] : Mean speed: {mean:.2f} +/- {std:.2f}")
218
219 if check_regression and dist.get_rank() == 0:
220 assert (mean + 3.0 * std) > args.reference_speed, "Speed regression detected"
221 assert max_memory < 1.05 * args.reference_memory, "Memory use regression detected"
222 assert abs(cast(float, final_loss) - args.reference_loss) < 1e-3, "Loss regression detected"
223
224 logging.info("[Regression Test] VALID")
225
226 dist.destroy_process_group() # type: ignore
227
228
229 if __name__ == "__main__":
230 parser = argparse.ArgumentParser(
231 description="Benchmark the optimizer state sharding, on a typical computer vision workload"
232 )
233 parser.add_argument("--world_size", action="store", default=2, type=int)
234 parser.add_argument("--epochs", action="store", default=10, type=int)
235 parser.add_argument("--batch_size", action="store", default=256, type=int)
236 parser.add_argument("--check_regression", action="store_true", default=False)
237 parser.add_argument("--reference_speed", action="store", default=1430, type=float)
238 parser.add_argument("--reference_memory", action="store", default=1220, type=float)
239 parser.add_argument("--reference_loss", action="store", default=0.006, type=float)
240 parser.add_argument(
241 "--optim_type", type=OptimType, choices=[o.value for o in OptimType], default=OptimType.everyone
242 )
243 parser.add_argument("--gloo", action="store_true", default=False)
244 parser.add_argument("--profile", action="store_true", default=False)
245 parser.add_argument("--cpu", action="store_true", default=False)
246 parser.add_argument("--torchvision_model", type=str, help="Any torchvision model name (str)", default="resnet101")
247 parser.add_argument("--debug", action="store_true", default=False)
248
249 args = parser.parse_args()
250
251 logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)
252 logging.info(f"Benchmark arguments: {args}")
253
254 backend = "nccl" if (not args.gloo or not torch.cuda.is_available()) and not args.cpu else "gloo"
255
256 # Download dataset once for all processes
257 dataset, tentatives = None, 0
258 while dataset is None and tentatives < 5:
259 try:
260 dataset = MNIST(transform=None, download=True, root=TEMPDIR)
261 except (RuntimeError, EOFError) as e:
262 if isinstance(e, RuntimeError):
263 # Corrupted data, erase and restart
264 shutil.rmtree(TEMPDIR + "/MNIST")
265
266 logging.warning("Failed loading dataset: ", e)
267 tentatives += 1
268
269 if dataset is None:
270 logging.error("Could not download MNIST dataset")
271 exit(-1)
272 else:
273 logging.info("Dataset downloaded")
274
275 # Benchmark the different configurations, via multiple processes
276 if args.optim_type == OptimType.vanilla or args.optim_type == OptimType.everyone:
277 logging.info("\n*** Benchmark vanilla optimizer")
278 mp.spawn(
279 train,
280 args=(args, backend, OptimType.vanilla, False,), # no regression check
281 nprocs=args.world_size,
282 join=True,
283 )
284
285 if args.optim_type == OptimType.oss_ddp or args.optim_type == OptimType.everyone:
286 logging.info("\n*** Benchmark OSS with DDP")
287 mp.spawn(
288 train, args=(args, backend, OptimType.oss_ddp, args.check_regression), nprocs=args.world_size, join=True,
289 )
290
291 if args.optim_type == OptimType.oss_sharded_ddp or args.optim_type == OptimType.everyone:
292 logging.info("\n*** Benchmark OSS with ShardedDDP")
293 mp.spawn(
294 train,
295 args=(
296 args,
297 backend,
298 OptimType.oss_sharded_ddp,
299 False,
300 ), # FIXME: @lefaudeux - SDP should give the same results
301 nprocs=args.world_size,
302 join=True,
303 )
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benchmarks/oss.py b/benchmarks/oss.py
--- a/benchmarks/oss.py
+++ b/benchmarks/oss.py
@@ -140,9 +140,15 @@
next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()
)
)
+ if not args.cpu and args.amp:
+ # Automatically computes the FW pass in half precision
+ with torch.cuda.amp.autocast():
+ outputs = model(batch["inputs"])
+ loss = loss_fn(outputs, batch["label"])
+ else:
+ outputs = model(batch["inputs"])
+ loss = loss_fn(outputs, batch["label"])
- outputs = model(batch["inputs"])
- loss = loss_fn(outputs, batch["label"])
loss.backward()
if optim_type == OptimType.oss_sharded_ddp:
@@ -244,7 +250,8 @@
parser.add_argument("--profile", action="store_true", default=False)
parser.add_argument("--cpu", action="store_true", default=False)
parser.add_argument("--torchvision_model", type=str, help="Any torchvision model name (str)", default="resnet101")
- parser.add_argument("--debug", action="store_true", default=False)
+ parser.add_argument("--debug", action="store_true", default=False, help="Display additional debug information")
+ parser.add_argument("--amp", action="store_true", default=False, help="Activate torch AMP")
args = parser.parse_args()
| {"golden_diff": "diff --git a/benchmarks/oss.py b/benchmarks/oss.py\n--- a/benchmarks/oss.py\n+++ b/benchmarks/oss.py\n@@ -140,9 +140,15 @@\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n+ if not args.cpu and args.amp:\n+ # Automatically computes the FW pass in half precision\n+ with torch.cuda.amp.autocast():\n+ outputs = model(batch[\"inputs\"])\n+ loss = loss_fn(outputs, batch[\"label\"])\n+ else:\n+ outputs = model(batch[\"inputs\"])\n+ loss = loss_fn(outputs, batch[\"label\"])\n \n- outputs = model(batch[\"inputs\"])\n- loss = loss_fn(outputs, batch[\"label\"])\n loss.backward()\n \n if optim_type == OptimType.oss_sharded_ddp:\n@@ -244,7 +250,8 @@\n parser.add_argument(\"--profile\", action=\"store_true\", default=False)\n parser.add_argument(\"--cpu\", action=\"store_true\", default=False)\n parser.add_argument(\"--torchvision_model\", type=str, help=\"Any torchvision model name (str)\", default=\"resnet101\")\n- parser.add_argument(\"--debug\", action=\"store_true\", default=False)\n+ parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Display additional debug information\")\n+ parser.add_argument(\"--amp\", action=\"store_true\", default=False, help=\"Activate torch AMP\")\n \n args = parser.parse_args()\n", "issue": "Add mixed precision autocast to benchmarking script \n## \ud83d\ude80 Feature\r\n\r\nIt would be nice to make sure OSS/ShardedDDP plays nice with torch autocast! Adding it to the oss.py benchmark will allow us to ensure there is no glaring issues. I don't mind working on this!\r\n\r\n## Motivation\r\n\r\nIntegrating OSS/ShardedDDP into PL :)\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\n\nimport argparse\nfrom enum import Enum\nimport importlib\nimport logging\nimport math\nimport shutil\nimport tempfile\nimport time\nfrom typing import Any, List, Optional, cast\n\nimport numpy as np\nimport torch\nimport torch.autograd.profiler as profiler\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import BatchSampler, DataLoader, Sampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import ToTensor\n\nfrom fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\nfrom fairscale.optim import OSS\n\nOPTIM = torch.optim.RMSprop\nTEMPDIR = tempfile.gettempdir()\n\n\ndef dist_init(rank, world_size, backend):\n logging.info(f\"Using backend: {backend}\")\n dist.init_process_group(backend=backend, init_method=\"tcp://localhost:29501\", rank=rank, world_size=world_size)\n\n\ndef get_problem(rank, world_size, batch_size, device, model_name: str):\n # Select the desired model on the fly\n logging.info(f\"Using {model_name} for benchmarking\")\n model = getattr(importlib.import_module(\"torchvision.models\"), model_name)(pretrained=False).to(device)\n\n # Data setup, duplicate the grey channels to get pseudo color\n def collate(inputs: List[Any]):\n return {\n \"inputs\": torch.stack([i[0] for i in inputs]).repeat(1, 3, 1, 1).to(device),\n \"label\": torch.tensor([i[1] for i in inputs]).to(device),\n }\n\n dataset = MNIST(transform=ToTensor(), download=False, root=TEMPDIR)\n sampler: Sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last=True)\n 
dataloader = DataLoader(dataset=dataset, batch_sampler=batch_sampler, collate_fn=collate)\n\n loss_fn = nn.CrossEntropyLoss()\n return model, dataloader, loss_fn\n\n\nclass OptimType(str, Enum):\n vanilla = \"pytorch\"\n oss_ddp = \"oss_ddp\"\n oss_sharded_ddp = \"oss_sharded_ddp\"\n everyone = \"everyone\"\n\n\ndef train(\n rank: int,\n args: argparse.Namespace,\n backend: str = \"gloo\",\n optim_type: OptimType = OptimType.vanilla,\n check_regression: bool = True,\n):\n logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)\n\n # DDP\n dist_init(rank=rank, world_size=args.world_size, backend=backend)\n\n # Setup\n if not args.cpu:\n torch.cuda.set_device(rank)\n torch.cuda.manual_seed(0)\n torch.manual_seed(0) # also sets the cuda seed\n np.random.seed(0)\n\n if backend == \"nccl\":\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n device = torch.device(\"cpu\") if args.cpu else torch.device(rank)\n model, dataloader, loss_fn = get_problem(rank, args.world_size, args.batch_size, device, args.torchvision_model)\n\n # Shard the optimizer\n optimizer: Optional[torch.optim.Optimizer] = None\n model = cast(nn.Module, model)\n\n if optim_type == OptimType.oss_sharded_ddp:\n model = ShardedDDP(\n model,\n optimizer=OPTIM,\n optimizer_params={\"lr\": 1e-4, \"momentum\": 0.9},\n world_size=args.world_size,\n broadcast_buffers=True,\n )\n optimizer = model.sharded_optimizer\n\n else:\n model = DDP(model, device_ids=[rank], find_unused_parameters=False) # type: ignore\n optimizer = (\n OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)\n if optim_type == OptimType.oss_ddp\n else OPTIM(model.parameters(), lr=1e-4, momentum=0.9)\n )\n optimizer = cast(torch.optim.Optimizer, optimizer)\n\n # Reset the memory use counter\n if not args.cpu:\n torch.cuda.reset_peak_memory_stats(rank)\n torch.cuda.synchronize(rank)\n\n # Standard training loop\n training_start = time.monotonic()\n model.train()\n\n measurements = []\n final_loss: Optional[float] = -1.0\n need_profiling = args.profile\n\n for epoch in range(args.epochs):\n n_items = 0\n epoch_runtime = 0.0\n\n for batch in dataloader:\n batch__start = time.monotonic()\n\n def closure():\n model.zero_grad()\n if args.debug and rank == 0 and next(model.parameters()).grad is not None:\n logging.debug(\n \"\\nbefore: param {} -- grad {}\".format(\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n\n outputs = model(batch[\"inputs\"])\n loss = loss_fn(outputs, batch[\"label\"])\n loss.backward()\n\n if optim_type == OptimType.oss_sharded_ddp:\n model.reduce()\n\n if args.debug and rank == 0 and next(model.parameters()).grad is not None:\n logging.debug(\n \"after BW: param {} -- grad {}\".format(\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n return loss\n\n if need_profiling and not args.cpu:\n logging.info(\"Profiling the run\")\n with profiler.profile(use_cuda=True, record_shapes=True, profile_memory=True) as prof: # type: ignore\n with profiler.record_function(\"batch\"):\n final_loss = optimizer.step(closure)\n logging.info(\"profiling done\")\n\n if rank == 0:\n prof.export_chrome_trace(f\"{optim_type}_trace.json\")\n\n need_profiling = False # only profile once\n\n else:\n final_loss = optimizer.step(closure)\n\n if args.debug and rank == 0:\n logging.debug(\"buffer: {}\".format(next(model.buffers()).norm().item()))\n logging.debug(\n \"after update: param {} -- grad {}\".format(\n 
next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n\n n_items += args.batch_size\n\n batch_end = time.monotonic()\n epoch_runtime += batch_end - batch__start\n\n if optim_type == OptimType.oss_ddp or optim_type == OptimType.oss_sharded_ddp:\n # Check the checkpointing in the case of the OSS optimizer\n # Memory usage could spill over from there\n optimizer = cast(OSS, optimizer)\n optimizer.consolidate_state_dict()\n if dist.get_rank() == 0:\n _ = optimizer.state_dict()\n logging.info(\"... State dict collected\")\n\n measurements.append(n_items / epoch_runtime)\n if dist.get_rank() == 0:\n logging.info(f\"Epoch {epoch} - processed {measurements[-1]:.2f} img per sec. Loss {final_loss:.3f}\")\n\n max_memory = -1.0\n if not args.cpu:\n torch.cuda.synchronize(rank)\n max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20\n logging.info(f\"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB\")\n\n training_stop = time.monotonic()\n img_per_sec = n_items / (training_stop - training_start) * args.epochs\n max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20\n\n logging.info(f\"[{dist.get_rank()}] : Training done. {img_per_sec:.2f} img per sec inc. checkpoint\")\n logging.info(f\"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB\")\n\n # Compute the mean and average img per second\n mean = sum(measurements) / len(measurements)\n diff = map(lambda x: pow(x - mean, 2.0), measurements)\n std = math.sqrt(sum(diff) / (len(measurements) - 1)) if args.epochs > 2 else -1\n logging.info(f\"[{dist.get_rank()}] : Mean speed: {mean:.2f} +/- {std:.2f}\")\n\n if check_regression and dist.get_rank() == 0:\n assert (mean + 3.0 * std) > args.reference_speed, \"Speed regression detected\"\n assert max_memory < 1.05 * args.reference_memory, \"Memory use regression detected\"\n assert abs(cast(float, final_loss) - args.reference_loss) < 1e-3, \"Loss regression detected\"\n\n logging.info(\"[Regression Test] VALID\")\n\n dist.destroy_process_group() # type: ignore\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Benchmark the optimizer state sharding, on a typical computer vision workload\"\n )\n parser.add_argument(\"--world_size\", action=\"store\", default=2, type=int)\n parser.add_argument(\"--epochs\", action=\"store\", default=10, type=int)\n parser.add_argument(\"--batch_size\", action=\"store\", default=256, type=int)\n parser.add_argument(\"--check_regression\", action=\"store_true\", default=False)\n parser.add_argument(\"--reference_speed\", action=\"store\", default=1430, type=float)\n parser.add_argument(\"--reference_memory\", action=\"store\", default=1220, type=float)\n parser.add_argument(\"--reference_loss\", action=\"store\", default=0.006, type=float)\n parser.add_argument(\n \"--optim_type\", type=OptimType, choices=[o.value for o in OptimType], default=OptimType.everyone\n )\n parser.add_argument(\"--gloo\", action=\"store_true\", default=False)\n parser.add_argument(\"--profile\", action=\"store_true\", default=False)\n parser.add_argument(\"--cpu\", action=\"store_true\", default=False)\n parser.add_argument(\"--torchvision_model\", type=str, help=\"Any torchvision model name (str)\", default=\"resnet101\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)\n logging.info(f\"Benchmark arguments: {args}\")\n\n backend = \"nccl\" if (not args.gloo or not 
torch.cuda.is_available()) and not args.cpu else \"gloo\"\n\n # Download dataset once for all processes\n dataset, tentatives = None, 0\n while dataset is None and tentatives < 5:\n try:\n dataset = MNIST(transform=None, download=True, root=TEMPDIR)\n except (RuntimeError, EOFError) as e:\n if isinstance(e, RuntimeError):\n # Corrupted data, erase and restart\n shutil.rmtree(TEMPDIR + \"/MNIST\")\n\n logging.warning(\"Failed loading dataset: \", e)\n tentatives += 1\n\n if dataset is None:\n logging.error(\"Could not download MNIST dataset\")\n exit(-1)\n else:\n logging.info(\"Dataset downloaded\")\n\n # Benchmark the different configurations, via multiple processes\n if args.optim_type == OptimType.vanilla or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark vanilla optimizer\")\n mp.spawn(\n train,\n args=(args, backend, OptimType.vanilla, False,), # no regression check\n nprocs=args.world_size,\n join=True,\n )\n\n if args.optim_type == OptimType.oss_ddp or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark OSS with DDP\")\n mp.spawn(\n train, args=(args, backend, OptimType.oss_ddp, args.check_regression), nprocs=args.world_size, join=True,\n )\n\n if args.optim_type == OptimType.oss_sharded_ddp or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark OSS with ShardedDDP\")\n mp.spawn(\n train,\n args=(\n args,\n backend,\n OptimType.oss_sharded_ddp,\n False,\n ), # FIXME: @lefaudeux - SDP should give the same results\n nprocs=args.world_size,\n join=True,\n )\n", "path": "benchmarks/oss.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n\n\nimport argparse\nfrom enum import Enum\nimport importlib\nimport logging\nimport math\nimport shutil\nimport tempfile\nimport time\nfrom typing import Any, List, Optional, cast\n\nimport numpy as np\nimport torch\nimport torch.autograd.profiler as profiler\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import BatchSampler, DataLoader, Sampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import ToTensor\n\nfrom fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\nfrom fairscale.optim import OSS\n\nOPTIM = torch.optim.RMSprop\nTEMPDIR = tempfile.gettempdir()\n\n\ndef dist_init(rank, world_size, backend):\n logging.info(f\"Using backend: {backend}\")\n dist.init_process_group(backend=backend, init_method=\"tcp://localhost:29501\", rank=rank, world_size=world_size)\n\n\ndef get_problem(rank, world_size, batch_size, device, model_name: str):\n # Select the desired model on the fly\n logging.info(f\"Using {model_name} for benchmarking\")\n model = getattr(importlib.import_module(\"torchvision.models\"), model_name)(pretrained=False).to(device)\n\n # Data setup, duplicate the grey channels to get pseudo color\n def collate(inputs: List[Any]):\n return {\n \"inputs\": torch.stack([i[0] for i in inputs]).repeat(1, 3, 1, 1).to(device),\n \"label\": torch.tensor([i[1] for i in inputs]).to(device),\n }\n\n dataset = MNIST(transform=ToTensor(), download=False, root=TEMPDIR)\n sampler: Sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last=True)\n dataloader = DataLoader(dataset=dataset, batch_sampler=batch_sampler, 
collate_fn=collate)\n\n loss_fn = nn.CrossEntropyLoss()\n return model, dataloader, loss_fn\n\n\nclass OptimType(str, Enum):\n vanilla = \"pytorch\"\n oss_ddp = \"oss_ddp\"\n oss_sharded_ddp = \"oss_sharded_ddp\"\n everyone = \"everyone\"\n\n\ndef train(\n rank: int,\n args: argparse.Namespace,\n backend: str = \"gloo\",\n optim_type: OptimType = OptimType.vanilla,\n check_regression: bool = True,\n):\n logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)\n\n # DDP\n dist_init(rank=rank, world_size=args.world_size, backend=backend)\n\n # Setup\n if not args.cpu:\n torch.cuda.set_device(rank)\n torch.cuda.manual_seed(0)\n torch.manual_seed(0) # also sets the cuda seed\n np.random.seed(0)\n\n if backend == \"nccl\":\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n device = torch.device(\"cpu\") if args.cpu else torch.device(rank)\n model, dataloader, loss_fn = get_problem(rank, args.world_size, args.batch_size, device, args.torchvision_model)\n\n # Shard the optimizer\n optimizer: Optional[torch.optim.Optimizer] = None\n model = cast(nn.Module, model)\n\n if optim_type == OptimType.oss_sharded_ddp:\n model = ShardedDDP(\n model,\n optimizer=OPTIM,\n optimizer_params={\"lr\": 1e-4, \"momentum\": 0.9},\n world_size=args.world_size,\n broadcast_buffers=True,\n )\n optimizer = model.sharded_optimizer\n\n else:\n model = DDP(model, device_ids=[rank], find_unused_parameters=False) # type: ignore\n optimizer = (\n OSS(params=model.parameters(), optim=OPTIM, lr=1e-4, momentum=0.9)\n if optim_type == OptimType.oss_ddp\n else OPTIM(model.parameters(), lr=1e-4, momentum=0.9)\n )\n optimizer = cast(torch.optim.Optimizer, optimizer)\n\n # Reset the memory use counter\n if not args.cpu:\n torch.cuda.reset_peak_memory_stats(rank)\n torch.cuda.synchronize(rank)\n\n # Standard training loop\n training_start = time.monotonic()\n model.train()\n\n measurements = []\n final_loss: Optional[float] = -1.0\n need_profiling = args.profile\n\n for epoch in range(args.epochs):\n n_items = 0\n epoch_runtime = 0.0\n\n for batch in dataloader:\n batch__start = time.monotonic()\n\n def closure():\n model.zero_grad()\n if args.debug and rank == 0 and next(model.parameters()).grad is not None:\n logging.debug(\n \"\\nbefore: param {} -- grad {}\".format(\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n if not args.cpu and args.amp:\n # Automatically computes the FW pass in half precision\n with torch.cuda.amp.autocast():\n outputs = model(batch[\"inputs\"])\n loss = loss_fn(outputs, batch[\"label\"])\n else:\n outputs = model(batch[\"inputs\"])\n loss = loss_fn(outputs, batch[\"label\"])\n\n loss.backward()\n\n if optim_type == OptimType.oss_sharded_ddp:\n model.reduce()\n\n if args.debug and rank == 0 and next(model.parameters()).grad is not None:\n logging.debug(\n \"after BW: param {} -- grad {}\".format(\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n return loss\n\n if need_profiling and not args.cpu:\n logging.info(\"Profiling the run\")\n with profiler.profile(use_cuda=True, record_shapes=True, profile_memory=True) as prof: # type: ignore\n with profiler.record_function(\"batch\"):\n final_loss = optimizer.step(closure)\n logging.info(\"profiling done\")\n\n if rank == 0:\n prof.export_chrome_trace(f\"{optim_type}_trace.json\")\n\n need_profiling = False # only profile once\n\n else:\n final_loss = optimizer.step(closure)\n\n if args.debug and rank == 0:\n 
logging.debug(\"buffer: {}\".format(next(model.buffers()).norm().item()))\n logging.debug(\n \"after update: param {} -- grad {}\".format(\n next(model.parameters()).norm().item(), next(model.parameters()).grad.norm().item()\n )\n )\n\n n_items += args.batch_size\n\n batch_end = time.monotonic()\n epoch_runtime += batch_end - batch__start\n\n if optim_type == OptimType.oss_ddp or optim_type == OptimType.oss_sharded_ddp:\n # Check the checkpointing in the case of the OSS optimizer\n # Memory usage could spill over from there\n optimizer = cast(OSS, optimizer)\n optimizer.consolidate_state_dict()\n if dist.get_rank() == 0:\n _ = optimizer.state_dict()\n logging.info(\"... State dict collected\")\n\n measurements.append(n_items / epoch_runtime)\n if dist.get_rank() == 0:\n logging.info(f\"Epoch {epoch} - processed {measurements[-1]:.2f} img per sec. Loss {final_loss:.3f}\")\n\n max_memory = -1.0\n if not args.cpu:\n torch.cuda.synchronize(rank)\n max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20\n logging.info(f\"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB\")\n\n training_stop = time.monotonic()\n img_per_sec = n_items / (training_stop - training_start) * args.epochs\n max_memory = torch.cuda.max_memory_allocated(rank) / 2 ** 20\n\n logging.info(f\"[{dist.get_rank()}] : Training done. {img_per_sec:.2f} img per sec inc. checkpoint\")\n logging.info(f\"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB\")\n\n # Compute the mean and average img per second\n mean = sum(measurements) / len(measurements)\n diff = map(lambda x: pow(x - mean, 2.0), measurements)\n std = math.sqrt(sum(diff) / (len(measurements) - 1)) if args.epochs > 2 else -1\n logging.info(f\"[{dist.get_rank()}] : Mean speed: {mean:.2f} +/- {std:.2f}\")\n\n if check_regression and dist.get_rank() == 0:\n assert (mean + 3.0 * std) > args.reference_speed, \"Speed regression detected\"\n assert max_memory < 1.05 * args.reference_memory, \"Memory use regression detected\"\n assert abs(cast(float, final_loss) - args.reference_loss) < 1e-3, \"Loss regression detected\"\n\n logging.info(\"[Regression Test] VALID\")\n\n dist.destroy_process_group() # type: ignore\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Benchmark the optimizer state sharding, on a typical computer vision workload\"\n )\n parser.add_argument(\"--world_size\", action=\"store\", default=2, type=int)\n parser.add_argument(\"--epochs\", action=\"store\", default=10, type=int)\n parser.add_argument(\"--batch_size\", action=\"store\", default=256, type=int)\n parser.add_argument(\"--check_regression\", action=\"store_true\", default=False)\n parser.add_argument(\"--reference_speed\", action=\"store\", default=1430, type=float)\n parser.add_argument(\"--reference_memory\", action=\"store\", default=1220, type=float)\n parser.add_argument(\"--reference_loss\", action=\"store\", default=0.006, type=float)\n parser.add_argument(\n \"--optim_type\", type=OptimType, choices=[o.value for o in OptimType], default=OptimType.everyone\n )\n parser.add_argument(\"--gloo\", action=\"store_true\", default=False)\n parser.add_argument(\"--profile\", action=\"store_true\", default=False)\n parser.add_argument(\"--cpu\", action=\"store_true\", default=False)\n parser.add_argument(\"--torchvision_model\", type=str, help=\"Any torchvision model name (str)\", default=\"resnet101\")\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Display additional debug information\")\n parser.add_argument(\"--amp\", 
action=\"store_true\", default=False, help=\"Activate torch AMP\")\n\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO if not args.debug else logging.DEBUG)\n logging.info(f\"Benchmark arguments: {args}\")\n\n backend = \"nccl\" if (not args.gloo or not torch.cuda.is_available()) and not args.cpu else \"gloo\"\n\n # Download dataset once for all processes\n dataset, tentatives = None, 0\n while dataset is None and tentatives < 5:\n try:\n dataset = MNIST(transform=None, download=True, root=TEMPDIR)\n except (RuntimeError, EOFError) as e:\n if isinstance(e, RuntimeError):\n # Corrupted data, erase and restart\n shutil.rmtree(TEMPDIR + \"/MNIST\")\n\n logging.warning(\"Failed loading dataset: \", e)\n tentatives += 1\n\n if dataset is None:\n logging.error(\"Could not download MNIST dataset\")\n exit(-1)\n else:\n logging.info(\"Dataset downloaded\")\n\n # Benchmark the different configurations, via multiple processes\n if args.optim_type == OptimType.vanilla or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark vanilla optimizer\")\n mp.spawn(\n train,\n args=(args, backend, OptimType.vanilla, False,), # no regression check\n nprocs=args.world_size,\n join=True,\n )\n\n if args.optim_type == OptimType.oss_ddp or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark OSS with DDP\")\n mp.spawn(\n train, args=(args, backend, OptimType.oss_ddp, args.check_regression), nprocs=args.world_size, join=True,\n )\n\n if args.optim_type == OptimType.oss_sharded_ddp or args.optim_type == OptimType.everyone:\n logging.info(\"\\n*** Benchmark OSS with ShardedDDP\")\n mp.spawn(\n train,\n args=(\n args,\n backend,\n OptimType.oss_sharded_ddp,\n False,\n ), # FIXME: @lefaudeux - SDP should give the same results\n nprocs=args.world_size,\n join=True,\n )\n", "path": "benchmarks/oss.py"}]} | 3,893 | 336 |
gh_patches_debug_40015 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1091 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement display options for Duration type
## Problem
The Duration Mathesar type supports the following display options:
- Min unit
- Max unit
- Whether to show unit labels
We will combine this into a single `format` string.
## Solution
(1) We should store these display options in the following format in the `display_options` field of the corresponding column.
```
{
"format": "HH:mm:ss.SSS"
}
```
(2) We should also validate these so that:
- Only columns of this type can have these display options. They should not be able to be set if the column is of a different type.
- `format` should be a valid JavaScript duration format.
(3) If the column type is changed, the display options should be deleted.
(4) We should add supported display options to the `types` endpoint.
## Additional Context
- [Design of Duration type options on Figma](https://www.figma.com/proto/Uaf1ntcldzK2U41Jhw6vS2/Mathesar-MVP?page-id=4260%3A37440&node-id=4270%3A41231&viewport=324%2C48%2C0.29&scaling=contain&starting-point-node-id=4270%3A41231&show-proto-sidebar=1)
- Blocked by #658
- #392
--- END ISSUE ---
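For orientation, here is a minimal sketch (not the project's actual implementation) of how such a `format` string could be validated with `arrow`, mirroring the datetime format validators that already exist in `shared_serializers.py` below; the helper name `validate_duration_format` and the error messages are illustrative assumptions:
```python
import arrow
from rest_framework import serializers


def validate_duration_format(value):
    """Hypothetical helper: reject format strings that arrow cannot round-trip."""
    try:
        sample = arrow.get('2013-09-30T15:34:00.000-07:00')
        rendered = sample.format(value)   # e.g. "15:34:00.000" for "HH:mm:ss.SSS"
        arrow.get(rendered, value)        # parse it back to confirm the tokens are valid
    except ValueError:
        raise serializers.ValidationError(
            f"{value} is not a valid format for a duration column."
        )
    if 'z' in value.lower():              # a duration has no timezone component
        raise serializers.ValidationError(
            "Duration columns cannot use a timezone display format."
        )
```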
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/display_options.py`
Content:
```
1 from mathesar.database.types import MathesarTypeIdentifier
2
3 DISPLAY_OPTIONS_BY_TYPE_IDENTIFIER = {
4 MathesarTypeIdentifier.BOOLEAN.value:
5 {
6 "options": [{"name": "input", "type": "string",
7 "enum": ['dropdown', 'checkbox']},
8 {'name': "custom_labels", "type": "object",
9 "items": [{"name": "TRUE", "type": "string"},
10 {'name': "FALSE", "type": "string"}]}]
11
12 },
13 MathesarTypeIdentifier.NUMBER.value:
14 {
15 "options": [{"name": "show_as_percentage", "type": "boolean"},
16 {"name": "locale", "type": "string"}]
17 },
18 MathesarTypeIdentifier.DATETIME.value:
19 {
20 "options": [{"name": "format", "type": "string"}]
21 }
22 }
23
```
Path: `mathesar/api/serializers/shared_serializers.py`
Content:
```
1 from abc import ABC, abstractmethod
2
3 import arrow
4 from django.core.exceptions import ImproperlyConfigured
5 from rest_framework import serializers
6
7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
8 from mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type
9
10
11 class ReadOnlyPolymorphicSerializerMappingMixin:
12 """
13 This serializer mixin is helpful in serializing polymorphic models,
14 by switching to correct serializer based on the mapping field value.
15 """
16
17 def __new__(cls, *args, **kwargs):
18 if cls.serializers_mapping is None:
19 raise ImproperlyConfigured(
20 '`{cls}` is missing a '
21 '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)
22 )
23 return super().__new__(cls, *args, **kwargs)
24
25 def __init__(self, *args, **kwargs):
26 super().__init__(*args, **kwargs)
27 self.serializers_cls_mapping = {}
28 serializers_mapping = self.serializers_mapping
29 self.serializers_mapping = {}
30 for identifier, serializer_cls in serializers_mapping.items():
31 if callable(serializer_cls):
32 serializer = serializer_cls(*args, **kwargs)
33 serializer.parent = self
34 else:
35 serializer = serializer_cls
36 self.serializers_mapping[identifier] = serializer
37 self.serializers_cls_mapping[identifier] = serializer_cls
38
39 def to_representation(self, instance):
40 serializer = self.serializers_mapping.get(self.get_mapping_field(), None)
41 if serializer is not None:
42 self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())
43 return serializer.to_representation(instance)
44 else:
45 raise Exception(f"Cannot find a matching serializer for the specified type {self.get_mapping_field()}")
46
47 def get_mapping_field(self):
48 mapping_field = getattr(self, "mapping_field", None)
49 if mapping_field is None:
50 raise Exception(
51 "Add a `mapping_field` to be used as a identifier"
52 "or override this method to return a identifier to identify a proper serializer"
53 )
54 return mapping_field
55
56
57 class ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):
58 def to_internal_value(self, data):
59 serializer = self.serializers_mapping.get(self.get_mapping_field())
60 if serializer is not None:
61 self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())
62 return serializer.to_internal_value(data=data)
63 else:
64 raise Exception(f"Cannot find a matching serializer for the specified type {self.get_mapping_field()}")
65
66
67 class MonkeyPatchPartial:
68 """
69 Work around bug #3847 in djangorestframework by monkey-patching the partial
70 attribute of the root serializer during the call to validate_empty_values.
71 https://github.com/encode/django-rest-framework/issues/3847
72 """
73
74 def __init__(self, root):
75 self._root = root
76
77 def __enter__(self):
78 self._old = getattr(self._root, 'partial')
79 setattr(self._root, 'partial', False)
80
81 def __exit__(self, *args):
82 setattr(self._root, 'partial', self._old)
83
84
85 class OverrideRootPartialMixin:
86 """
87 This mixin is used to convert a serializer into a partial serializer,
88 based on the serializer `partial` property rather than the parent's `partial` property.
89 Refer to the issue
90 https://github.com/encode/django-rest-framework/issues/3847
91 """
92
93 def run_validation(self, *args, **kwargs):
94 if not self.partial:
95 with MonkeyPatchPartial(self.root):
96 return super().run_validation(*args, **kwargs)
97 return super().run_validation(*args, **kwargs)
98
99
100 class CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):
101 TRUE = serializers.CharField()
102 FALSE = serializers.CharField()
103
104
105 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'mathesar_type'
106
107
108 class BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
109 input = serializers.ChoiceField(choices=[("dropdown", 1), ("checkbox", 2)])
110 custom_labels = CustomBooleanLabelSerializer(required=False)
111
112
113 class NumberDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
114 show_as_percentage = serializers.BooleanField(default=False)
115 locale = serializers.CharField(required=False)
116
117
118 class AbstractDateTimeFormatValidator(ABC):
119 requires_context = True
120
121 def __init__(self):
122 pass
123
124 def __call__(self, value, serializer_field):
125 self.date_format_validator(value, serializer_field)
126
127 def date_format_validator(self, value, serializer_field):
128 try:
129 timestamp_with_tz_obj = arrow.get('2013-09-30T15:34:00.000-07:00')
130 parsed_datetime_str = timestamp_with_tz_obj.format(value)
131 datetime_object = arrow.get(parsed_datetime_str, value)
132 except ValueError:
133 raise serializers.ValidationError(f"{value} is not a valid format used for parsing a datetime.")
134 else:
135 self.validate(datetime_object, value, serializer_field)
136
137 @abstractmethod
138 def validate(self, datetime_obj, display_format, serializer_field):
139 pass
140
141
142 class TimestampWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):
143
144 def validate(self, datetime_obj, display_format, serializer_field):
145 pass
146
147
148 class TimestampWithoutTimeZoneFormatValidator(AbstractDateTimeFormatValidator):
149
150 def validate(self, datetime_obj, display_format, serializer_field):
151 if 'z' in display_format.lower():
152 raise serializers.ValidationError(
153 "Timestamp without timezone column cannot contain timezone display format"
154 )
155
156
157 class DateFormatValidator(AbstractDateTimeFormatValidator):
158
159 def validate(self, datetime_obj, display_format, serializer_field):
160 date_obj = arrow.get('2013-09-30')
161 if datetime_obj.time() != date_obj.time():
162 raise serializers.ValidationError("Date column cannot contain time or timezone display format")
163
164
165 class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):
166
167 def validate(self, datetime_obj, display_format, serializer_field):
168 time_only_format = 'HH:mm:ssZZ'
169 time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)
170 parsed_time_str = arrow.get(time_str, time_only_format)
171 if parsed_time_str.date() != datetime_obj.date():
172 raise serializers.ValidationError("Time column cannot contain date display format")
173
174
175 class TimeWithoutTimeZoneFormatValidator(TimeWithTimeZoneFormatValidator):
176
177 def validate(self, datetime_obj, display_format, serializer_field):
178 if 'z' in display_format.lower():
179 raise serializers.ValidationError("Time without timezone column cannot contain timezone display format")
180 return super().validate(datetime_obj, display_format, serializer_field)
181
182
183 class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
184 format = serializers.CharField(validators=[DateFormatValidator()])
185
186
187 class TimestampWithoutTimezoneDisplayOptionSerializer(
188 MathesarErrorMessageMixin,
189 OverrideRootPartialMixin,
190 serializers.Serializer
191 ):
192 format = serializers.CharField(validators=[TimestampWithoutTimeZoneFormatValidator()])
193
194
195 class TimestampWithTimezoneDisplayOptionSerializer(
196 MathesarErrorMessageMixin,
197 OverrideRootPartialMixin,
198 serializers.Serializer
199 ):
200 format = serializers.CharField(validators=[TimestampWithTimeZoneFormatValidator()])
201
202
203 class TimeWithTimezoneDisplayOptionSerializer(
204 MathesarErrorMessageMixin,
205 OverrideRootPartialMixin,
206 serializers.Serializer
207 ):
208 format = serializers.CharField(validators=[TimeWithTimeZoneFormatValidator()])
209
210
211 class TimeWithoutTimezoneDisplayOptionSerializer(
212 MathesarErrorMessageMixin,
213 OverrideRootPartialMixin,
214 serializers.Serializer
215 ):
216 format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])
217
218
219 class DisplayOptionsMappingSerializer(
220 MathesarErrorMessageMixin,
221 ReadWritePolymorphicSerializerMappingMixin,
222 serializers.Serializer
223 ):
224 serializers_mapping = {
225 MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,
226 MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,
227 ('timestamp with time zone',
228 MathesarTypeIdentifier.DATETIME.value): TimestampWithTimezoneDisplayOptionSerializer,
229 ('timestamp without time zone',
230 MathesarTypeIdentifier.DATETIME.value): TimestampWithoutTimezoneDisplayOptionSerializer,
231 ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,
232 ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,
233 ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,
234 }
235
236 def get_mapping_field(self):
237 mathesar_type = get_mathesar_type_from_db_type(self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY])
238 if mathesar_type == MathesarTypeIdentifier.DATETIME.value:
239 return self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY].lower(), mathesar_type
240 else:
241 return mathesar_type
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/display_options.py b/mathesar/api/display_options.py
--- a/mathesar/api/display_options.py
+++ b/mathesar/api/display_options.py
@@ -16,6 +16,10 @@
{"name": "locale", "type": "string"}]
},
MathesarTypeIdentifier.DATETIME.value:
+ {
+ "options": [{"name": "format", "type": "string"}]
+ },
+ MathesarTypeIdentifier.DURATION.value:
{
"options": [{"name": "format", "type": "string"}]
}
diff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py
--- a/mathesar/api/serializers/shared_serializers.py
+++ b/mathesar/api/serializers/shared_serializers.py
@@ -165,7 +165,7 @@
class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):
def validate(self, datetime_obj, display_format, serializer_field):
- time_only_format = 'HH:mm:ssZZ'
+ time_only_format = 'HH:mm:ss.SSSZZ'
time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)
parsed_time_str = arrow.get(time_str, time_only_format)
if parsed_time_str.date() != datetime_obj.date():
@@ -180,6 +180,15 @@
return super().validate(datetime_obj, display_format, serializer_field)
+class DurationFormatValidator(AbstractDateTimeFormatValidator):
+
+ def validate(self, datetime_obj, display_format, serializer_field):
+ if 'z' in display_format.lower():
+ raise serializers.ValidationError(
+ "Duration column cannot contain timezone display format"
+ )
+
+
class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
format = serializers.CharField(validators=[DateFormatValidator()])
@@ -216,6 +225,10 @@
format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])
+class DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):
+ format = serializers.CharField(validators=[DurationFormatValidator()])
+
+
class DisplayOptionsMappingSerializer(
MathesarErrorMessageMixin,
ReadWritePolymorphicSerializerMappingMixin,
@@ -231,6 +244,7 @@
('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,
('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,
('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,
+ MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,
}
def get_mapping_field(self):
| {"golden_diff": "diff --git a/mathesar/api/display_options.py b/mathesar/api/display_options.py\n--- a/mathesar/api/display_options.py\n+++ b/mathesar/api/display_options.py\n@@ -16,6 +16,10 @@\n {\"name\": \"locale\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DATETIME.value:\n+ {\n+ \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n+ },\n+ MathesarTypeIdentifier.DURATION.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n }\ndiff --git a/mathesar/api/serializers/shared_serializers.py b/mathesar/api/serializers/shared_serializers.py\n--- a/mathesar/api/serializers/shared_serializers.py\n+++ b/mathesar/api/serializers/shared_serializers.py\n@@ -165,7 +165,7 @@\n class TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n \n def validate(self, datetime_obj, display_format, serializer_field):\n- time_only_format = 'HH:mm:ssZZ'\n+ time_only_format = 'HH:mm:ss.SSSZZ'\n time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)\n parsed_time_str = arrow.get(time_str, time_only_format)\n if parsed_time_str.date() != datetime_obj.date():\n@@ -180,6 +180,15 @@\n return super().validate(datetime_obj, display_format, serializer_field)\n \n \n+class DurationFormatValidator(AbstractDateTimeFormatValidator):\n+\n+ def validate(self, datetime_obj, display_format, serializer_field):\n+ if 'z' in display_format.lower():\n+ raise serializers.ValidationError(\n+ \"Duration column cannot contain timezone display format\"\n+ )\n+\n+\n class DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DateFormatValidator()])\n \n@@ -216,6 +225,10 @@\n format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])\n \n \n+class DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n+ format = serializers.CharField(validators=[DurationFormatValidator()])\n+\n+\n class DisplayOptionsMappingSerializer(\n MathesarErrorMessageMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n@@ -231,6 +244,7 @@\n ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,\n ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,\n ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,\n+ MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,\n }\n \n def get_mapping_field(self):\n", "issue": "Implement display options for Duration type\n## Problem\r\nThe Duration Mathesar type supports the following display options:\r\n- Min unit\r\n- Max unit\r\n- Whether to show unit labels\r\n\r\nWe will combine this into a single `format` string.\r\n\r\n## Solution\r\n(1) We should store these display options in the following format in the `display_options` field of the corresponding column.\r\n```\r\n{\r\n \"format\": \"HH:mm:ss.SSS\"\r\n}\r\n```\r\n\r\n(2) We should also validate these so that:\r\n- Only columns of this type can have these display options. 
They should not be able to be set if the column is of a different type.\r\n- `format` should be a valid JavaScript duration format.\r\n\r\n(3) If the column type is changed, the display options should be deleted.\r\n\r\n(4) We should add supported display options to the `types` endpoint.\r\n\r\n## Additional Context\r\n- [Design of Duration type options on Figma](https://www.figma.com/proto/Uaf1ntcldzK2U41Jhw6vS2/Mathesar-MVP?page-id=4260%3A37440&node-id=4270%3A41231&viewport=324%2C48%2C0.29&scaling=contain&starting-point-node-id=4270%3A41231&show-proto-sidebar=1)\r\n- Blocked by #658\r\n- #392 \n", "before_files": [{"content": "from mathesar.database.types import MathesarTypeIdentifier\n\nDISPLAY_OPTIONS_BY_TYPE_IDENTIFIER = {\n MathesarTypeIdentifier.BOOLEAN.value:\n {\n \"options\": [{\"name\": \"input\", \"type\": \"string\",\n \"enum\": ['dropdown', 'checkbox']},\n {'name': \"custom_labels\", \"type\": \"object\",\n \"items\": [{\"name\": \"TRUE\", \"type\": \"string\"},\n {'name': \"FALSE\", \"type\": \"string\"}]}]\n\n },\n MathesarTypeIdentifier.NUMBER.value:\n {\n \"options\": [{\"name\": \"show_as_percentage\", \"type\": \"boolean\"},\n {\"name\": \"locale\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DATETIME.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n }\n}\n", "path": "mathesar/api/display_options.py"}, {"content": "from abc import ABC, abstractmethod\n\nimport arrow\nfrom django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n for identifier, serializer_cls in serializers_mapping.items():\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def to_representation(self, instance):\n serializer = self.serializers_mapping.get(self.get_mapping_field(), None)\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_representation(instance)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n def get_mapping_field(self):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = 
self.serializers_mapping.get(self.get_mapping_field())\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_internal_value(data=data)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'mathesar_type'\n\n\nclass BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", 1), (\"checkbox\", 2)])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nclass NumberDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n show_as_percentage = serializers.BooleanField(default=False)\n locale = serializers.CharField(required=False)\n\n\nclass AbstractDateTimeFormatValidator(ABC):\n requires_context = True\n\n def __init__(self):\n pass\n\n def __call__(self, value, serializer_field):\n self.date_format_validator(value, serializer_field)\n\n def date_format_validator(self, value, serializer_field):\n try:\n timestamp_with_tz_obj = arrow.get('2013-09-30T15:34:00.000-07:00')\n parsed_datetime_str = timestamp_with_tz_obj.format(value)\n datetime_object = arrow.get(parsed_datetime_str, value)\n except ValueError:\n raise serializers.ValidationError(f\"{value} is not a valid format used for parsing a datetime.\")\n else:\n self.validate(datetime_object, value, serializer_field)\n\n @abstractmethod\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithoutTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\n \"Timestamp without timezone column cannot contain timezone display format\"\n )\n\n\nclass DateFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n date_obj = arrow.get('2013-09-30')\n if datetime_obj.time() != date_obj.time():\n raise serializers.ValidationError(\"Date column cannot 
contain time or timezone display format\")\n\n\nclass TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n time_only_format = 'HH:mm:ssZZ'\n time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)\n parsed_time_str = arrow.get(time_str, time_only_format)\n if parsed_time_str.date() != datetime_obj.date():\n raise serializers.ValidationError(\"Time column cannot contain date display format\")\n\n\nclass TimeWithoutTimeZoneFormatValidator(TimeWithTimeZoneFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\"Time without timezone column cannot contain timezone display format\")\n return super().validate(datetime_obj, display_format, serializer_field)\n\n\nclass DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DateFormatValidator()])\n\n\nclass TimestampWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithoutTimeZoneFormatValidator()])\n\n\nclass TimestampWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithTimeZoneFormatValidator()])\n\n\nclass TimeWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithTimeZoneFormatValidator()])\n\n\nclass TimeWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])\n\n\nclass DisplayOptionsMappingSerializer(\n MathesarErrorMessageMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,\n MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,\n ('timestamp with time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithTimezoneDisplayOptionSerializer,\n ('timestamp without time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithoutTimezoneDisplayOptionSerializer,\n ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,\n ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,\n ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,\n }\n\n def get_mapping_field(self):\n mathesar_type = get_mathesar_type_from_db_type(self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY])\n if mathesar_type == MathesarTypeIdentifier.DATETIME.value:\n return self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY].lower(), mathesar_type\n else:\n return mathesar_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}], "after_files": [{"content": "from mathesar.database.types import MathesarTypeIdentifier\n\nDISPLAY_OPTIONS_BY_TYPE_IDENTIFIER = {\n MathesarTypeIdentifier.BOOLEAN.value:\n {\n \"options\": [{\"name\": \"input\", \"type\": \"string\",\n \"enum\": ['dropdown', 'checkbox']},\n {'name': \"custom_labels\", \"type\": \"object\",\n \"items\": 
[{\"name\": \"TRUE\", \"type\": \"string\"},\n {'name': \"FALSE\", \"type\": \"string\"}]}]\n\n },\n MathesarTypeIdentifier.NUMBER.value:\n {\n \"options\": [{\"name\": \"show_as_percentage\", \"type\": \"boolean\"},\n {\"name\": \"locale\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DATETIME.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n },\n MathesarTypeIdentifier.DURATION.value:\n {\n \"options\": [{\"name\": \"format\", \"type\": \"string\"}]\n }\n}\n", "path": "mathesar/api/display_options.py"}, {"content": "from abc import ABC, abstractmethod\n\nimport arrow\nfrom django.core.exceptions import ImproperlyConfigured\nfrom rest_framework import serializers\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.database.types import MathesarTypeIdentifier, get_mathesar_type_from_db_type\n\n\nclass ReadOnlyPolymorphicSerializerMappingMixin:\n \"\"\"\n This serializer mixin is helpful in serializing polymorphic models,\n by switching to correct serializer based on the mapping field value.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls.serializers_mapping is None:\n raise ImproperlyConfigured(\n '`{cls}` is missing a '\n '`{cls}.model_serializer_mapping` attribute'.format(cls=cls.__name__)\n )\n return super().__new__(cls, *args, **kwargs)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.serializers_cls_mapping = {}\n serializers_mapping = self.serializers_mapping\n self.serializers_mapping = {}\n for identifier, serializer_cls in serializers_mapping.items():\n if callable(serializer_cls):\n serializer = serializer_cls(*args, **kwargs)\n serializer.parent = self\n else:\n serializer = serializer_cls\n self.serializers_mapping[identifier] = serializer\n self.serializers_cls_mapping[identifier] = serializer_cls\n\n def to_representation(self, instance):\n serializer = self.serializers_mapping.get(self.get_mapping_field(), None)\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_representation(instance)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n def get_mapping_field(self):\n mapping_field = getattr(self, \"mapping_field\", None)\n if mapping_field is None:\n raise Exception(\n \"Add a `mapping_field` to be used as a identifier\"\n \"or override this method to return a identifier to identify a proper serializer\"\n )\n return mapping_field\n\n\nclass ReadWritePolymorphicSerializerMappingMixin(ReadOnlyPolymorphicSerializerMappingMixin):\n def to_internal_value(self, data):\n serializer = self.serializers_mapping.get(self.get_mapping_field())\n if serializer is not None:\n self.__class__ = self.serializers_cls_mapping.get(self.get_mapping_field())\n return serializer.to_internal_value(data=data)\n else:\n raise Exception(f\"Cannot find a matching serializer for the specified type {self.get_mapping_field()}\")\n\n\nclass MonkeyPatchPartial:\n \"\"\"\n Work around bug #3847 in djangorestframework by monkey-patching the partial\n attribute of the root serializer during the call to validate_empty_values.\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def __init__(self, root):\n self._root = root\n\n def __enter__(self):\n self._old = getattr(self._root, 'partial')\n setattr(self._root, 'partial', False)\n\n def __exit__(self, *args):\n setattr(self._root, 'partial', self._old)\n\n\nclass 
OverrideRootPartialMixin:\n \"\"\"\n This mixin is used to convert a serializer into a partial serializer,\n based on the serializer `partial` property rather than the parent's `partial` property.\n Refer to the issue\n https://github.com/encode/django-rest-framework/issues/3847\n \"\"\"\n\n def run_validation(self, *args, **kwargs):\n if not self.partial:\n with MonkeyPatchPartial(self.root):\n return super().run_validation(*args, **kwargs)\n return super().run_validation(*args, **kwargs)\n\n\nclass CustomBooleanLabelSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n TRUE = serializers.CharField()\n FALSE = serializers.CharField()\n\n\nDISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY = 'mathesar_type'\n\n\nclass BooleanDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n input = serializers.ChoiceField(choices=[(\"dropdown\", 1), (\"checkbox\", 2)])\n custom_labels = CustomBooleanLabelSerializer(required=False)\n\n\nclass NumberDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n show_as_percentage = serializers.BooleanField(default=False)\n locale = serializers.CharField(required=False)\n\n\nclass AbstractDateTimeFormatValidator(ABC):\n requires_context = True\n\n def __init__(self):\n pass\n\n def __call__(self, value, serializer_field):\n self.date_format_validator(value, serializer_field)\n\n def date_format_validator(self, value, serializer_field):\n try:\n timestamp_with_tz_obj = arrow.get('2013-09-30T15:34:00.000-07:00')\n parsed_datetime_str = timestamp_with_tz_obj.format(value)\n datetime_object = arrow.get(parsed_datetime_str, value)\n except ValueError:\n raise serializers.ValidationError(f\"{value} is not a valid format used for parsing a datetime.\")\n else:\n self.validate(datetime_object, value, serializer_field)\n\n @abstractmethod\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n pass\n\n\nclass TimestampWithoutTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\n \"Timestamp without timezone column cannot contain timezone display format\"\n )\n\n\nclass DateFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n date_obj = arrow.get('2013-09-30')\n if datetime_obj.time() != date_obj.time():\n raise serializers.ValidationError(\"Date column cannot contain time or timezone display format\")\n\n\nclass TimeWithTimeZoneFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n time_only_format = 'HH:mm:ss.SSSZZ'\n time_str = arrow.get('2013-09-30T15:34:00.000-07:00').format(time_only_format)\n parsed_time_str = arrow.get(time_str, time_only_format)\n if parsed_time_str.date() != datetime_obj.date():\n raise serializers.ValidationError(\"Time column cannot contain date display format\")\n\n\nclass TimeWithoutTimeZoneFormatValidator(TimeWithTimeZoneFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\"Time without timezone column cannot contain timezone display format\")\n return 
super().validate(datetime_obj, display_format, serializer_field)\n\n\nclass DurationFormatValidator(AbstractDateTimeFormatValidator):\n\n def validate(self, datetime_obj, display_format, serializer_field):\n if 'z' in display_format.lower():\n raise serializers.ValidationError(\n \"Duration column cannot contain timezone display format\"\n )\n\n\nclass DateDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DateFormatValidator()])\n\n\nclass TimestampWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithoutTimeZoneFormatValidator()])\n\n\nclass TimestampWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimestampWithTimeZoneFormatValidator()])\n\n\nclass TimeWithTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithTimeZoneFormatValidator()])\n\n\nclass TimeWithoutTimezoneDisplayOptionSerializer(\n MathesarErrorMessageMixin,\n OverrideRootPartialMixin,\n serializers.Serializer\n):\n format = serializers.CharField(validators=[TimeWithoutTimeZoneFormatValidator()])\n\n\nclass DurationDisplayOptionSerializer(MathesarErrorMessageMixin, OverrideRootPartialMixin, serializers.Serializer):\n format = serializers.CharField(validators=[DurationFormatValidator()])\n\n\nclass DisplayOptionsMappingSerializer(\n MathesarErrorMessageMixin,\n ReadWritePolymorphicSerializerMappingMixin,\n serializers.Serializer\n):\n serializers_mapping = {\n MathesarTypeIdentifier.BOOLEAN.value: BooleanDisplayOptionSerializer,\n MathesarTypeIdentifier.NUMBER.value: NumberDisplayOptionSerializer,\n ('timestamp with time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithTimezoneDisplayOptionSerializer,\n ('timestamp without time zone',\n MathesarTypeIdentifier.DATETIME.value): TimestampWithoutTimezoneDisplayOptionSerializer,\n ('date', MathesarTypeIdentifier.DATETIME.value): DateDisplayOptionSerializer,\n ('time with time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithTimezoneDisplayOptionSerializer,\n ('time without time zone', MathesarTypeIdentifier.DATETIME.value): TimeWithoutTimezoneDisplayOptionSerializer,\n MathesarTypeIdentifier.DURATION.value: DurationDisplayOptionSerializer,\n }\n\n def get_mapping_field(self):\n mathesar_type = get_mathesar_type_from_db_type(self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY])\n if mathesar_type == MathesarTypeIdentifier.DATETIME.value:\n return self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY].lower(), mathesar_type\n else:\n return mathesar_type\n", "path": "mathesar/api/serializers/shared_serializers.py"}]} | 3,349 | 626 |
gh_patches_debug_31667 | rasdani/github-patches | git_diff | networkx__networkx-1317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xrange vs Python 3
Searching the networkx code for `xrange`, I see it's used in some "Shapefile"-related code and tests. Should this be updated for Python 3 compatibility, and is it not covered by the Travis CI tests?
--- END ISSUE ---
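For context, the usual Python 2/3 compatibility pattern is sketched below (illustrative only, not code taken from networkx): `xrange` was removed in Python 3, where the built-in `range` is already lazy, so code that has to run on both interpreters typically aliases one to the other.
```python
# Python 2/3 compatibility shim: alias xrange to range where it no longer exists.
try:
    xrange            # defined on Python 2
except NameError:     # Python 3: fall back to the built-in, already-lazy range
    xrange = range

for i in xrange(3):
    print(i)
```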
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/readwrite/nx_shp.py`
Content:
```
1 """
2 *********
3 Shapefile
4 *********
5
6 Generates a networkx.DiGraph from point and line shapefiles.
7
8 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
9 data format for geographic information systems software. It is developed
10 and regulated by Esri as a (mostly) open specification for data
11 interoperability among Esri and other software products."
12 See http://en.wikipedia.org/wiki/Shapefile for additional information.
13 """
14 # Copyright (C) 2004-2010 by
15 # Ben Reilly <[email protected]>
16 # Aric Hagberg <[email protected]>
17 # Dan Schult <[email protected]>
18 # Pieter Swart <[email protected]>
19 # All rights reserved.
20 # BSD license.
21 import networkx as nx
22 __author__ = """Ben Reilly ([email protected])"""
23 __all__ = ['read_shp', 'write_shp']
24
25
26 def read_shp(path):
27 """Generates a networkx.DiGraph from shapefiles. Point geometries are
28 translated into nodes, lines into edges. Coordinate tuples are used as
29 keys. Attributes are preserved, line geometries are simplified into start
30 and end coordinates. Accepts a single shapefile or directory of many
31 shapefiles.
32
33 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
34 data format for geographic information systems software [1]_."
35
36 Parameters
37 ----------
38 path : file or string
39 File, directory, or filename to read.
40
41 Returns
42 -------
43 G : NetworkX graph
44
45 Examples
46 --------
47 >>> G=nx.read_shp('test.shp') # doctest: +SKIP
48
49 References
50 ----------
51 .. [1] http://en.wikipedia.org/wiki/Shapefile
52 """
53 try:
54 from osgeo import ogr
55 except ImportError:
56 raise ImportError("read_shp requires OGR: http://www.gdal.org/")
57
58 net = nx.DiGraph()
59
60 def getfieldinfo(lyr, feature, flds):
61 f = feature
62 return [f.GetField(f.GetFieldIndex(x)) for x in flds]
63
64 def addlyr(lyr, fields):
65 for findex in range(lyr.GetFeatureCount()):
66 f = lyr.GetFeature(findex)
67 flddata = getfieldinfo(lyr, f, fields)
68 g = f.geometry()
69 attributes = dict(zip(fields, flddata))
70 attributes["ShpName"] = lyr.GetName()
71 if g.GetGeometryType() == 1: # point
72 net.add_node((g.GetPoint_2D(0)), attributes)
73 if g.GetGeometryType() == 2: # linestring
74 attributes["Wkb"] = g.ExportToWkb()
75 attributes["Wkt"] = g.ExportToWkt()
76 attributes["Json"] = g.ExportToJson()
77 last = g.GetPointCount() - 1
78 net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)
79
80 if isinstance(path, str):
81 shp = ogr.Open(path)
82 lyrcount = shp.GetLayerCount() # multiple layers indicate a directory
83 for lyrindex in range(lyrcount):
84 lyr = shp.GetLayerByIndex(lyrindex)
85 flds = [x.GetName() for x in lyr.schema]
86 addlyr(lyr, flds)
87 return net
88
89
90 def write_shp(G, outdir):
91 """Writes a networkx.DiGraph to two shapefiles, edges and nodes.
92 Nodes and edges are expected to have a Well Known Binary (Wkb) or
93 Well Known Text (Wkt) key in order to generate geometries. Also
94 acceptable are nodes with a numeric tuple key (x,y).
95
96 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
97 data format for geographic information systems software [1]_."
98
99 Parameters
100 ----------
101 outdir : directory path
102 Output directory for the two shapefiles.
103
104 Returns
105 -------
106 None
107
108 Examples
109 --------
110 nx.write_shp(digraph, '/shapefiles') # doctest +SKIP
111
112 References
113 ----------
114 .. [1] http://en.wikipedia.org/wiki/Shapefile
115 """
116 try:
117 from osgeo import ogr
118 except ImportError:
119 raise ImportError("write_shp requires OGR: http://www.gdal.org/")
120 # easier to debug in python if ogr throws exceptions
121 ogr.UseExceptions()
122
123 def netgeometry(key, data):
124 if 'Wkb' in data:
125 geom = ogr.CreateGeometryFromWkb(data['Wkb'])
126 elif 'Wkt' in data:
127 geom = ogr.CreateGeometryFromWkt(data['Wkt'])
128 elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples
129 geom = ogr.Geometry(ogr.wkbLineString)
130 _from, _to = key[0], key[1]
131 try:
132 geom.SetPoint(0, *_from)
133 geom.SetPoint(1, *_to)
134 except TypeError:
135 # assume user used tuple of int and choked ogr
136 _ffrom = [float(x) for x in _from]
137 _fto = [float(x) for x in _to]
138 geom.SetPoint(0, *_ffrom)
139 geom.SetPoint(1, *_fto)
140 else:
141 geom = ogr.Geometry(ogr.wkbPoint)
142 try:
143 geom.SetPoint(0, *key)
144 except TypeError:
145 # assume user used tuple of int and choked ogr
146 fkey = [float(x) for x in key]
147 geom.SetPoint(0, *fkey)
148
149 return geom
150
151 # Create_feature with new optional attributes arg (should be dict type)
152 def create_feature(geometry, lyr, attributes=None):
153 feature = ogr.Feature(lyr.GetLayerDefn())
154 feature.SetGeometry(g)
155 if attributes != None:
156 # Loop through attributes, assigning data to each field
157 for field, data in attributes.items():
158 feature.SetField(field, data)
159 lyr.CreateFeature(feature)
160 feature.Destroy()
161
162 drv = ogr.GetDriverByName("ESRI Shapefile")
163 shpdir = drv.CreateDataSource(outdir)
164 # delete pre-existing output first otherwise ogr chokes
165 try:
166 shpdir.DeleteLayer("nodes")
167 except:
168 pass
169 nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
170 for n in G:
171 data = G.node[n] or {}
172 g = netgeometry(n, data)
173 create_feature(g, nodes)
174 try:
175 shpdir.DeleteLayer("edges")
176 except:
177 pass
178 edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString)
179
180 # New edge attribute write support merged into edge loop
181 fields = {} # storage for field names and their data types
182 attributes = {} # storage for attribute data (indexed by field names)
183
184 # Conversion dict between python and ogr types
185 OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
186
187 # Edge loop
188 for e in G.edges(data=True):
189 data = G.get_edge_data(*e)
190 g = netgeometry(e, data)
191 # Loop through attribute data in edges
192 for key, data in e[2].items():
193 # Reject spatial data not required for attribute table
194 if (key != 'Json' and key != 'Wkt' and key != 'Wkb'
195 and key != 'ShpName'):
196 # For all edges check/add field and data type to fields dict
197 if key not in fields:
198 # Field not in previous edges so add to dict
199 if type(data) in OGRTypes:
200 fields[key] = OGRTypes[type(data)]
201 else:
202 # Data type not supported, default to string (char 80)
203 fields[key] = ogr.OFTString
204 # Create the new field
205 newfield = ogr.FieldDefn(key, fields[key])
206 edges.CreateField(newfield)
207 # Store the data from new field to dict for CreateLayer()
208 attributes[key] = data
209 else:
210 # Field already exists, add data to dict for CreateLayer()
211 attributes[key] = data
212 # Create the feature with, passing new attribute data
213 create_feature(g, edges, attributes)
214
215 nodes, edges = None, None
216
217
218 # fixture for nose tests
219 def setup_module(module):
220 from nose import SkipTest
221 try:
222 import ogr
223 except:
224 raise SkipTest("OGR not available")
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py
--- a/networkx/readwrite/nx_shp.py
+++ b/networkx/readwrite/nx_shp.py
@@ -55,16 +55,15 @@
except ImportError:
raise ImportError("read_shp requires OGR: http://www.gdal.org/")
- net = nx.DiGraph()
-
- def getfieldinfo(lyr, feature, flds):
- f = feature
- return [f.GetField(f.GetFieldIndex(x)) for x in flds]
+ if not isinstance(path, str):
+ return
- def addlyr(lyr, fields):
- for findex in range(lyr.GetFeatureCount()):
- f = lyr.GetFeature(findex)
- flddata = getfieldinfo(lyr, f, fields)
+ net = nx.DiGraph()
+ shp = ogr.Open(path)
+ for lyr in shp:
+ fields = [x.GetName() for x in lyr.schema]
+ for f in lyr:
+ flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
g = f.geometry()
attributes = dict(zip(fields, flddata))
attributes["ShpName"] = lyr.GetName()
@@ -76,14 +75,6 @@
attributes["Json"] = g.ExportToJson()
last = g.GetPointCount() - 1
net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)
-
- if isinstance(path, str):
- shp = ogr.Open(path)
- lyrcount = shp.GetLayerCount() # multiple layers indicate a directory
- for lyrindex in range(lyrcount):
- lyr = shp.GetLayerByIndex(lyrindex)
- flds = [x.GetName() for x in lyr.schema]
- addlyr(lyr, flds)
return net
@@ -168,7 +159,7 @@
pass
nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
for n in G:
- data = G.node[n] or {}
+ data = G.node[n]
g = netgeometry(n, data)
create_feature(g, nodes)
try:
| {"golden_diff": "diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py\n--- a/networkx/readwrite/nx_shp.py\n+++ b/networkx/readwrite/nx_shp.py\n@@ -55,16 +55,15 @@\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n \n- net = nx.DiGraph()\n-\n- def getfieldinfo(lyr, feature, flds):\n- f = feature\n- return [f.GetField(f.GetFieldIndex(x)) for x in flds]\n+ if not isinstance(path, str):\n+ return\n \n- def addlyr(lyr, fields):\n- for findex in range(lyr.GetFeatureCount()):\n- f = lyr.GetFeature(findex)\n- flddata = getfieldinfo(lyr, f, fields)\n+ net = nx.DiGraph()\n+ shp = ogr.Open(path)\n+ for lyr in shp:\n+ fields = [x.GetName() for x in lyr.schema]\n+ for f in lyr:\n+ flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n@@ -76,14 +75,6 @@\n attributes[\"Json\"] = g.ExportToJson()\n last = g.GetPointCount() - 1\n net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)\n-\n- if isinstance(path, str):\n- shp = ogr.Open(path)\n- lyrcount = shp.GetLayerCount() # multiple layers indicate a directory\n- for lyrindex in range(lyrcount):\n- lyr = shp.GetLayerByIndex(lyrindex)\n- flds = [x.GetName() for x in lyr.schema]\n- addlyr(lyr, flds)\n return net\n \n \n@@ -168,7 +159,7 @@\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n- data = G.node[n] or {}\n+ data = G.node[n]\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n", "issue": "xrange vs python 3\nSearching the networkx code for `xrange` I see it's used in some \"Shapefile\" related code and tests. Should this be updated for python 3 compatibility, and is it not tested in the TravisCI testing?\n\n", "before_files": [{"content": "\"\"\"\n*********\nShapefile\n*********\n\nGenerates a networkx.DiGraph from point and line shapefiles.\n\n\"The Esri Shapefile or simply a shapefile is a popular geospatial vector\ndata format for geographic information systems software. It is developed\nand regulated by Esri as a (mostly) open specification for data\ninteroperability among Esri and other software products.\"\nSee http://en.wikipedia.org/wiki/Shapefile for additional information.\n\"\"\"\n# Copyright (C) 2004-2010 by\n# Ben Reilly <[email protected]>\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\n__author__ = \"\"\"Ben Reilly ([email protected])\"\"\"\n__all__ = ['read_shp', 'write_shp']\n\n\ndef read_shp(path):\n \"\"\"Generates a networkx.DiGraph from shapefiles. Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n and end coordinates. Accepts a single shapefile or directory of many\n shapefiles.\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n path : file or string\n File, directory, or filename to read.\n\n Returns\n -------\n G : NetworkX graph\n\n Examples\n --------\n >>> G=nx.read_shp('test.shp') # doctest: +SKIP\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n\n net = nx.DiGraph()\n\n def getfieldinfo(lyr, feature, flds):\n f = feature\n return [f.GetField(f.GetFieldIndex(x)) for x in flds]\n\n def addlyr(lyr, fields):\n for findex in range(lyr.GetFeatureCount()):\n f = lyr.GetFeature(findex)\n flddata = getfieldinfo(lyr, f, fields)\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n if g.GetGeometryType() == 1: # point\n net.add_node((g.GetPoint_2D(0)), attributes)\n if g.GetGeometryType() == 2: # linestring\n attributes[\"Wkb\"] = g.ExportToWkb()\n attributes[\"Wkt\"] = g.ExportToWkt()\n attributes[\"Json\"] = g.ExportToJson()\n last = g.GetPointCount() - 1\n net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)\n\n if isinstance(path, str):\n shp = ogr.Open(path)\n lyrcount = shp.GetLayerCount() # multiple layers indicate a directory\n for lyrindex in range(lyrcount):\n lyr = shp.GetLayerByIndex(lyrindex)\n flds = [x.GetName() for x in lyr.schema]\n addlyr(lyr, flds)\n return net\n\n\ndef write_shp(G, outdir):\n \"\"\"Writes a networkx.DiGraph to two shapefiles, edges and nodes.\n Nodes and edges are expected to have a Well Known Binary (Wkb) or\n Well Known Text (Wkt) key in order to generate geometries. Also\n acceptable are nodes with a numeric tuple key (x,y).\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n outdir : directory path\n Output directory for the two shapefiles.\n\n Returns\n -------\n None\n\n Examples\n --------\n nx.write_shp(digraph, '/shapefiles') # doctest +SKIP\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"write_shp requires OGR: http://www.gdal.org/\")\n # easier to debug in python if ogr throws exceptions\n ogr.UseExceptions()\n\n def netgeometry(key, data):\n if 'Wkb' in data:\n geom = ogr.CreateGeometryFromWkb(data['Wkb'])\n elif 'Wkt' in data:\n geom = ogr.CreateGeometryFromWkt(data['Wkt'])\n elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples\n geom = ogr.Geometry(ogr.wkbLineString)\n _from, _to = key[0], key[1]\n try:\n geom.SetPoint(0, *_from)\n geom.SetPoint(1, *_to)\n except TypeError:\n # assume user used tuple of int and choked ogr\n _ffrom = [float(x) for x in _from]\n _fto = [float(x) for x in _to]\n geom.SetPoint(0, *_ffrom)\n geom.SetPoint(1, *_fto)\n else:\n geom = ogr.Geometry(ogr.wkbPoint)\n try:\n geom.SetPoint(0, *key)\n except TypeError:\n # assume user used tuple of int and choked ogr\n fkey = [float(x) for x in key]\n geom.SetPoint(0, *fkey)\n\n return geom\n\n # Create_feature with new optional attributes arg (should be dict type)\n def create_feature(geometry, lyr, attributes=None):\n feature = ogr.Feature(lyr.GetLayerDefn())\n feature.SetGeometry(g)\n if attributes != None:\n # Loop through attributes, assigning data to each field\n for field, data in attributes.items():\n feature.SetField(field, data)\n lyr.CreateFeature(feature)\n feature.Destroy()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n shpdir = drv.CreateDataSource(outdir)\n # delete pre-existing output first otherwise ogr chokes\n try:\n shpdir.DeleteLayer(\"nodes\")\n except:\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n data = G.node[n] or {}\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n shpdir.DeleteLayer(\"edges\")\n except:\n pass\n edges = shpdir.CreateLayer(\"edges\", None, ogr.wkbLineString)\n\n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n attributes = {} # storage for attribute data (indexed by field names)\n\n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n\n # Edge loop\n for e in G.edges(data=True):\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n for key, data in e[2].items():\n # Reject spatial data not required for attribute table\n if (key != 'Json' and key != 'Wkt' and key != 'Wkb'\n and key != 'ShpName'):\n # For all edges check/add field and data type to fields dict\n if key not in fields:\n # Field not in previous edges so add to dict\n if type(data) in OGRTypes:\n fields[key] = OGRTypes[type(data)]\n else:\n # Data type not supported, default to string (char 80)\n fields[key] = ogr.OFTString\n # Create the new field\n newfield = ogr.FieldDefn(key, fields[key])\n edges.CreateField(newfield)\n # Store the data from new field to dict for CreateLayer()\n attributes[key] = data\n else:\n # Field already exists, add data to dict for CreateLayer()\n attributes[key] = data\n # Create the feature with, passing new attribute data\n create_feature(g, edges, attributes)\n\n nodes, edges = None, None\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import ogr\n except:\n raise SkipTest(\"OGR not available\")\n", "path": "networkx/readwrite/nx_shp.py"}], "after_files": [{"content": "\"\"\"\n*********\nShapefile\n*********\n\nGenerates a networkx.DiGraph 
from point and line shapefiles.\n\n\"The Esri Shapefile or simply a shapefile is a popular geospatial vector\ndata format for geographic information systems software. It is developed\nand regulated by Esri as a (mostly) open specification for data\ninteroperability among Esri and other software products.\"\nSee http://en.wikipedia.org/wiki/Shapefile for additional information.\n\"\"\"\n# Copyright (C) 2004-2010 by\n# Ben Reilly <[email protected]>\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\n__author__ = \"\"\"Ben Reilly ([email protected])\"\"\"\n__all__ = ['read_shp', 'write_shp']\n\n\ndef read_shp(path):\n \"\"\"Generates a networkx.DiGraph from shapefiles. Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n and end coordinates. Accepts a single shapefile or directory of many\n shapefiles.\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n path : file or string\n File, directory, or filename to read.\n\n Returns\n -------\n G : NetworkX graph\n\n Examples\n --------\n >>> G=nx.read_shp('test.shp') # doctest: +SKIP\n\n References\n ----------\n .. [1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n\n if not isinstance(path, str):\n return\n\n net = nx.DiGraph()\n shp = ogr.Open(path)\n for lyr in shp:\n fields = [x.GetName() for x in lyr.schema]\n for f in lyr:\n flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n if g.GetGeometryType() == 1: # point\n net.add_node((g.GetPoint_2D(0)), attributes)\n if g.GetGeometryType() == 2: # linestring\n attributes[\"Wkb\"] = g.ExportToWkb()\n attributes[\"Wkt\"] = g.ExportToWkt()\n attributes[\"Json\"] = g.ExportToJson()\n last = g.GetPointCount() - 1\n net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)\n return net\n\n\ndef write_shp(G, outdir):\n \"\"\"Writes a networkx.DiGraph to two shapefiles, edges and nodes.\n Nodes and edges are expected to have a Well Known Binary (Wkb) or\n Well Known Text (Wkt) key in order to generate geometries. Also\n acceptable are nodes with a numeric tuple key (x,y).\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n outdir : directory path\n Output directory for the two shapefiles.\n\n Returns\n -------\n None\n\n Examples\n --------\n nx.write_shp(digraph, '/shapefiles') # doctest +SKIP\n\n References\n ----------\n .. 
[1] http://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"write_shp requires OGR: http://www.gdal.org/\")\n # easier to debug in python if ogr throws exceptions\n ogr.UseExceptions()\n\n def netgeometry(key, data):\n if 'Wkb' in data:\n geom = ogr.CreateGeometryFromWkb(data['Wkb'])\n elif 'Wkt' in data:\n geom = ogr.CreateGeometryFromWkt(data['Wkt'])\n elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples\n geom = ogr.Geometry(ogr.wkbLineString)\n _from, _to = key[0], key[1]\n try:\n geom.SetPoint(0, *_from)\n geom.SetPoint(1, *_to)\n except TypeError:\n # assume user used tuple of int and choked ogr\n _ffrom = [float(x) for x in _from]\n _fto = [float(x) for x in _to]\n geom.SetPoint(0, *_ffrom)\n geom.SetPoint(1, *_fto)\n else:\n geom = ogr.Geometry(ogr.wkbPoint)\n try:\n geom.SetPoint(0, *key)\n except TypeError:\n # assume user used tuple of int and choked ogr\n fkey = [float(x) for x in key]\n geom.SetPoint(0, *fkey)\n\n return geom\n\n # Create_feature with new optional attributes arg (should be dict type)\n def create_feature(geometry, lyr, attributes=None):\n feature = ogr.Feature(lyr.GetLayerDefn())\n feature.SetGeometry(g)\n if attributes != None:\n # Loop through attributes, assigning data to each field\n for field, data in attributes.items():\n feature.SetField(field, data)\n lyr.CreateFeature(feature)\n feature.Destroy()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n shpdir = drv.CreateDataSource(outdir)\n # delete pre-existing output first otherwise ogr chokes\n try:\n shpdir.DeleteLayer(\"nodes\")\n except:\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n data = G.node[n]\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n shpdir.DeleteLayer(\"edges\")\n except:\n pass\n edges = shpdir.CreateLayer(\"edges\", None, ogr.wkbLineString)\n\n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n attributes = {} # storage for attribute data (indexed by field names)\n\n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n\n # Edge loop\n for e in G.edges(data=True):\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n for key, data in e[2].items():\n # Reject spatial data not required for attribute table\n if (key != 'Json' and key != 'Wkt' and key != 'Wkb'\n and key != 'ShpName'):\n # For all edges check/add field and data type to fields dict\n if key not in fields:\n # Field not in previous edges so add to dict\n if type(data) in OGRTypes:\n fields[key] = OGRTypes[type(data)]\n else:\n # Data type not supported, default to string (char 80)\n fields[key] = ogr.OFTString\n # Create the new field\n newfield = ogr.FieldDefn(key, fields[key])\n edges.CreateField(newfield)\n # Store the data from new field to dict for CreateLayer()\n attributes[key] = data\n else:\n # Field already exists, add data to dict for CreateLayer()\n attributes[key] = data\n # Create the feature with, passing new attribute data\n create_feature(g, edges, attributes)\n\n nodes, edges = None, None\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import ogr\n except:\n raise SkipTest(\"OGR not available\")\n", "path": "networkx/readwrite/nx_shp.py"}]} | 2,800 | 525 |
gh_patches_debug_27616 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-383 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Yaml loader should be replaced by safe_loader or another more secure loader
To load yaml files from unknown sources, we should avoid using yaml's loader. A better way is to use either safe_loader or another mechanism.
--- END ISSUE ---
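A minimal sketch of the safer loading pattern the issue asks for (the file name here is illustrative):

```python
import yaml

# safe_load() only constructs plain Python objects (dicts, lists, strings,
# numbers), so an untrusted document cannot trigger arbitrary object
# construction the way load(..., Loader=yaml.Loader) can.
with open("config.yml", "r") as f:
    data = yaml.safe_load(f)
```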
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nvflare/ha/overseer/utils.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import uuid
17 from datetime import datetime, timedelta
18
19 import yaml
20
21 OVERSEER_STORE = os.environ.get("OVERSEER_STORE")
22
23 if OVERSEER_STORE == "REDIS":
24 from .redis_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp
25 elif OVERSEER_STORE == "SQL":
26 from .sql_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp
27 elif OVERSEER_STORE == "MEM":
28 from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp
29 else:
30 print("Using default STORE (MEM)")
31 from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp # noqa
32
33
34 def load_privilege():
35 privilege_file = os.environ.get("AUTHZ_FILE", "privilege.yml")
36 try:
37 privilege = yaml.load(open(privilege_file, "tr"), Loader=yaml.Loader)
38 except:
39 privilege = dict()
40 return privilege
41
42
43 def update_sp_state(project, now, heartbeat_timeout=10):
44 valid_starting = now - timedelta(seconds=heartbeat_timeout)
45 # mark all late SP as offline and not primary
46 # print(f"{now=} {valid_starting=}")
47 for sp in get_all_sp(project):
48 if datetime.fromisoformat(sp["last_heartbeat"]) < valid_starting:
49 sp["state"] = "offline"
50 sp["primary"] = False
51 else:
52 sp["state"] = "online"
53 update_sp(sp)
54
55
56 def simple_PSP_policy(incoming_sp, now):
57 """Find the primary SP (PSP).
58
59 If there is no PSP or current PSP timeout, choose one without heartbeat timeout.
60 """
61 project = incoming_sp["project"]
62 sp = get_sp_by(dict(project=project, sp_end_point=incoming_sp["sp_end_point"]))
63 if sp:
64 sp["last_heartbeat"] = now.isoformat()
65 update_sp(sp)
66 else:
67 update_sp(
68 dict(
69 project=incoming_sp["project"],
70 sp_end_point=incoming_sp["sp_end_point"],
71 last_heartbeat=now.isoformat(),
72 state="online",
73 primary=False,
74 )
75 )
76
77 psp = get_primary_sp(project)
78 if not psp:
79 psp = get_sp_by(dict(project=project, state="online"))
80 if psp:
81 print(f"{psp['sp_end_point']} online")
82 psp["primary"] = True
83 psp["service_session_id"] = str(uuid.uuid4())
84 update_sp(psp)
85
86 return psp
87
88
89 def promote_sp(sp):
90 psp = get_sp_by(sp)
91 project = sp["project"]
92 sp_end_point = sp["sp_end_point"]
93 if psp and psp["state"] == "online":
94 current_psp = get_primary_sp(project)
95 if all(current_psp[k] == v for k, v in sp.items()):
96 return True, f"Same sp_end_point, no need to promote {sp_end_point}."
97 psp["primary"] = True
98 current_psp["primary"] = False
99 psp["service_session_id"] = str(uuid.uuid4())
100 print(f"{psp['sp_end_point']} promoted")
101 print(f"{current_psp['sp_end_point']} demoted")
102 update_sp(psp)
103 update_sp(current_psp)
104 return False, psp
105 else:
106 return True, f"Unable to promote {sp_end_point}, either offline or not registered."
107
```
Path: `nvflare/lighter/study.py`
Content:
```
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import argparse
18 import json
19 import os
20 from datetime import datetime
21
22 import yaml
23
24
25 def get_input(prompt, item_list, multiple=False):
26 while True:
27 answer = input(prompt)
28 result = None
29 if multiple:
30 try:
31 if answer == "":
32 print("None of the choices is selected.")
33 result = []
34 else:
35 trimmed = set(answer.split(","))
36 result = [item_list[int(i)] for i in trimmed]
37 print(f"{result} selected after duplicate inputs removed.")
38 except BaseException:
39 print("Input contains errors (non-integer or out of index range)")
40 else:
41 try:
42 result = item_list[int(answer)]
43 except ValueError:
44 print(f"Expect integer but got {answer.__class__.__name__}")
45 except IndexError:
46 print("Number out of index range")
47 if result is not None:
48 break
49 return result
50
51
52 def get_date_input(prompt):
53 while True:
54 answer = input(prompt)
55 try:
56 result = datetime.strptime(answer, "%m/%d/%Y").date().isoformat()
57 break
58 except:
59 print(f"Expect MM/DD/YYYY but got {answer}")
60 return result
61
62
63 def main():
64 parser = argparse.ArgumentParser()
65 parser.add_argument("-p", "--project_file", type=str, default="project.yml", help="file to describe FL project")
66
67 args = parser.parse_args()
68
69 current_path = os.getcwd()
70
71 # main project file
72 project_file = args.project_file
73 project_full_path = os.path.join(current_path, project_file)
74 if not os.path.exists(project_full_path):
75 print(f"{project_full_path} not found. Running study requires that file.")
76 exit(0)
77
78 project = yaml.load(open(project_full_path, "r"), Loader=yaml.Loader)
79 api_version = project.get("api_version")
80 if api_version not in [3]:
81 raise ValueError(f"API version expected 3 but found {api_version}")
82
83 admin_list = list()
84 client_list = list()
85 for p in project.get("participants"):
86 if p.get("type") == "admin":
87 admin_list.append(p.get("name"))
88 elif p.get("type") == "client":
89 client_list.append(p.get("name"))
90
91 admin_list_string = ", ".join([f"{i}:{v}" for i, v in enumerate(admin_list)])
92 client_list_string = ", ".join([f"{i}:{v}" for i, v in enumerate(client_list)])
93
94 name = input("Please enter the name of this study: ")
95 description = input("and brief description: ")
96 contact = get_input(f"select one admin for contact {admin_list_string}: ", admin_list)
97
98 participating_admins = get_input(
99 f"select participating_admins admins (separated by ',') {admin_list_string} ", admin_list, multiple=True
100 )
101 participating_clients = get_input(
102 f"select participating clients (separated by ',') {client_list_string} ", client_list, multiple=True
103 )
104 participating_clients_string = ", ".join([f"{i}:{v}" for i, v in enumerate(participating_clients)])
105 # reviewer_dict = dict()
106 # for admin in participating_admins:
107 # reviewed_clients = get_input(
108 # f"what clients will reviewer {admin} review {participating_clients_string} ",
109 # participating_clients,
110 # multiple=True,
111 # )
112 # reviewer_dict[admin] = reviewed_clients
113 start_date = get_date_input("input start date of this study (MM/DD/YYYY): ")
114 end_date = get_date_input("input end date of this study (MM/DD/YYYY): ")
115
116 study_config = dict(
117 name=name,
118 description=description,
119 contact=contact,
120 participating_admins=participating_admins,
121 participating_clients=participating_clients,
122 # reviewers=reviewer_dict,
123 start_date=start_date,
124 end_date=end_date,
125 )
126 with open(f"{name}.json", "wt") as f:
127 f.write(json.dumps(study_config, indent=2))
128 print(f"study config file was generated at {name}.json")
129
130
131 if __name__ == "__main__":
132 main()
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nvflare/ha/overseer/utils.py b/nvflare/ha/overseer/utils.py
--- a/nvflare/ha/overseer/utils.py
+++ b/nvflare/ha/overseer/utils.py
@@ -16,7 +16,7 @@
import uuid
from datetime import datetime, timedelta
-import yaml
+from nvflare.lighter.utils import load_yaml
OVERSEER_STORE = os.environ.get("OVERSEER_STORE")
@@ -34,7 +34,7 @@
def load_privilege():
privilege_file = os.environ.get("AUTHZ_FILE", "privilege.yml")
try:
- privilege = yaml.load(open(privilege_file, "tr"), Loader=yaml.Loader)
+ privilege = load_yaml(privilege_file)
except:
privilege = dict()
return privilege
diff --git a/nvflare/lighter/study.py b/nvflare/lighter/study.py
--- a/nvflare/lighter/study.py
+++ b/nvflare/lighter/study.py
@@ -19,7 +19,7 @@
import os
from datetime import datetime
-import yaml
+from nvflare.lighter.utils import load_yaml
def get_input(prompt, item_list, multiple=False):
@@ -75,7 +75,7 @@
print(f"{project_full_path} not found. Running study requires that file.")
exit(0)
- project = yaml.load(open(project_full_path, "r"), Loader=yaml.Loader)
+ project = load_yaml(project_full_path)
api_version = project.get("api_version")
if api_version not in [3]:
raise ValueError(f"API version expected 3 but found {api_version}")
| {"golden_diff": "diff --git a/nvflare/ha/overseer/utils.py b/nvflare/ha/overseer/utils.py\n--- a/nvflare/ha/overseer/utils.py\n+++ b/nvflare/ha/overseer/utils.py\n@@ -16,7 +16,7 @@\n import uuid\n from datetime import datetime, timedelta\n \n-import yaml\n+from nvflare.lighter.utils import load_yaml\n \n OVERSEER_STORE = os.environ.get(\"OVERSEER_STORE\")\n \n@@ -34,7 +34,7 @@\n def load_privilege():\n privilege_file = os.environ.get(\"AUTHZ_FILE\", \"privilege.yml\")\n try:\n- privilege = yaml.load(open(privilege_file, \"tr\"), Loader=yaml.Loader)\n+ privilege = load_yaml(privilege_file)\n except:\n privilege = dict()\n return privilege\ndiff --git a/nvflare/lighter/study.py b/nvflare/lighter/study.py\n--- a/nvflare/lighter/study.py\n+++ b/nvflare/lighter/study.py\n@@ -19,7 +19,7 @@\n import os\n from datetime import datetime\n \n-import yaml\n+from nvflare.lighter.utils import load_yaml\n \n \n def get_input(prompt, item_list, multiple=False):\n@@ -75,7 +75,7 @@\n print(f\"{project_full_path} not found. Running study requires that file.\")\n exit(0)\n \n- project = yaml.load(open(project_full_path, \"r\"), Loader=yaml.Loader)\n+ project = load_yaml(project_full_path)\n api_version = project.get(\"api_version\")\n if api_version not in [3]:\n raise ValueError(f\"API version expected 3 but found {api_version}\")\n", "issue": "Yaml loader should be replaced by safe_loader or other more secure loader\nTo load yaml files from unknown source, we should avoid using yaml's loader. A better way is to use either safe_loader or other mechanism.\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\nfrom datetime import datetime, timedelta\n\nimport yaml\n\nOVERSEER_STORE = os.environ.get(\"OVERSEER_STORE\")\n\nif OVERSEER_STORE == \"REDIS\":\n from .redis_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"SQL\":\n from .sql_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"MEM\":\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelse:\n print(\"Using default STORE (MEM)\")\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp # noqa\n\n\ndef load_privilege():\n privilege_file = os.environ.get(\"AUTHZ_FILE\", \"privilege.yml\")\n try:\n privilege = yaml.load(open(privilege_file, \"tr\"), Loader=yaml.Loader)\n except:\n privilege = dict()\n return privilege\n\n\ndef update_sp_state(project, now, heartbeat_timeout=10):\n valid_starting = now - timedelta(seconds=heartbeat_timeout)\n # mark all late SP as offline and not primary\n # print(f\"{now=} {valid_starting=}\")\n for sp in get_all_sp(project):\n if datetime.fromisoformat(sp[\"last_heartbeat\"]) < valid_starting:\n sp[\"state\"] = \"offline\"\n sp[\"primary\"] = False\n else:\n sp[\"state\"] = \"online\"\n update_sp(sp)\n\n\ndef simple_PSP_policy(incoming_sp, 
now):\n \"\"\"Find the primary SP (PSP).\n\n If there is no PSP or current PSP timeout, choose one without heartbeat timeout.\n \"\"\"\n project = incoming_sp[\"project\"]\n sp = get_sp_by(dict(project=project, sp_end_point=incoming_sp[\"sp_end_point\"]))\n if sp:\n sp[\"last_heartbeat\"] = now.isoformat()\n update_sp(sp)\n else:\n update_sp(\n dict(\n project=incoming_sp[\"project\"],\n sp_end_point=incoming_sp[\"sp_end_point\"],\n last_heartbeat=now.isoformat(),\n state=\"online\",\n primary=False,\n )\n )\n\n psp = get_primary_sp(project)\n if not psp:\n psp = get_sp_by(dict(project=project, state=\"online\"))\n if psp:\n print(f\"{psp['sp_end_point']} online\")\n psp[\"primary\"] = True\n psp[\"service_session_id\"] = str(uuid.uuid4())\n update_sp(psp)\n\n return psp\n\n\ndef promote_sp(sp):\n psp = get_sp_by(sp)\n project = sp[\"project\"]\n sp_end_point = sp[\"sp_end_point\"]\n if psp and psp[\"state\"] == \"online\":\n current_psp = get_primary_sp(project)\n if all(current_psp[k] == v for k, v in sp.items()):\n return True, f\"Same sp_end_point, no need to promote {sp_end_point}.\"\n psp[\"primary\"] = True\n current_psp[\"primary\"] = False\n psp[\"service_session_id\"] = str(uuid.uuid4())\n print(f\"{psp['sp_end_point']} promoted\")\n print(f\"{current_psp['sp_end_point']} demoted\")\n update_sp(psp)\n update_sp(current_psp)\n return False, psp\n else:\n return True, f\"Unable to promote {sp_end_point}, either offline or not registered.\"\n", "path": "nvflare/ha/overseer/utils.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport argparse\nimport json\nimport os\nfrom datetime import datetime\n\nimport yaml\n\n\ndef get_input(prompt, item_list, multiple=False):\n while True:\n answer = input(prompt)\n result = None\n if multiple:\n try:\n if answer == \"\":\n print(\"None of the choices is selected.\")\n result = []\n else:\n trimmed = set(answer.split(\",\"))\n result = [item_list[int(i)] for i in trimmed]\n print(f\"{result} selected after duplicate inputs removed.\")\n except BaseException:\n print(\"Input contains errors (non-integer or out of index range)\")\n else:\n try:\n result = item_list[int(answer)]\n except ValueError:\n print(f\"Expect integer but got {answer.__class__.__name__}\")\n except IndexError:\n print(\"Number out of index range\")\n if result is not None:\n break\n return result\n\n\ndef get_date_input(prompt):\n while True:\n answer = input(prompt)\n try:\n result = datetime.strptime(answer, \"%m/%d/%Y\").date().isoformat()\n break\n except:\n print(f\"Expect MM/DD/YYYY but got {answer}\")\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--project_file\", type=str, default=\"project.yml\", help=\"file to describe FL project\")\n\n args = parser.parse_args()\n\n current_path = os.getcwd()\n\n # main project file\n project_file = args.project_file\n project_full_path = 
os.path.join(current_path, project_file)\n if not os.path.exists(project_full_path):\n print(f\"{project_full_path} not found. Running study requires that file.\")\n exit(0)\n\n project = yaml.load(open(project_full_path, \"r\"), Loader=yaml.Loader)\n api_version = project.get(\"api_version\")\n if api_version not in [3]:\n raise ValueError(f\"API version expected 3 but found {api_version}\")\n\n admin_list = list()\n client_list = list()\n for p in project.get(\"participants\"):\n if p.get(\"type\") == \"admin\":\n admin_list.append(p.get(\"name\"))\n elif p.get(\"type\") == \"client\":\n client_list.append(p.get(\"name\"))\n\n admin_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(admin_list)])\n client_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(client_list)])\n\n name = input(\"Please enter the name of this study: \")\n description = input(\"and brief description: \")\n contact = get_input(f\"select one admin for contact {admin_list_string}: \", admin_list)\n\n participating_admins = get_input(\n f\"select participating_admins admins (separated by ',') {admin_list_string} \", admin_list, multiple=True\n )\n participating_clients = get_input(\n f\"select participating clients (separated by ',') {client_list_string} \", client_list, multiple=True\n )\n participating_clients_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(participating_clients)])\n # reviewer_dict = dict()\n # for admin in participating_admins:\n # reviewed_clients = get_input(\n # f\"what clients will reviewer {admin} review {participating_clients_string} \",\n # participating_clients,\n # multiple=True,\n # )\n # reviewer_dict[admin] = reviewed_clients\n start_date = get_date_input(\"input start date of this study (MM/DD/YYYY): \")\n end_date = get_date_input(\"input end date of this study (MM/DD/YYYY): \")\n\n study_config = dict(\n name=name,\n description=description,\n contact=contact,\n participating_admins=participating_admins,\n participating_clients=participating_clients,\n # reviewers=reviewer_dict,\n start_date=start_date,\n end_date=end_date,\n )\n with open(f\"{name}.json\", \"wt\") as f:\n f.write(json.dumps(study_config, indent=2))\n print(f\"study config file was generated at {name}.json\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/lighter/study.py"}], "after_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport uuid\nfrom datetime import datetime, timedelta\n\nfrom nvflare.lighter.utils import load_yaml\n\nOVERSEER_STORE = os.environ.get(\"OVERSEER_STORE\")\n\nif OVERSEER_STORE == \"REDIS\":\n from .redis_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"SQL\":\n from .sql_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelif OVERSEER_STORE == \"MEM\":\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp\nelse:\n print(\"Using default STORE (MEM)\")\n from .mem_store import do_refresh, get_all_sp, get_primary_sp, get_sp_by, update_sp # noqa\n\n\ndef load_privilege():\n privilege_file = os.environ.get(\"AUTHZ_FILE\", \"privilege.yml\")\n try:\n privilege = load_yaml(privilege_file)\n except:\n privilege = dict()\n return privilege\n\n\ndef update_sp_state(project, now, heartbeat_timeout=10):\n valid_starting = now - timedelta(seconds=heartbeat_timeout)\n # mark all late SP as offline and not primary\n # print(f\"{now=} {valid_starting=}\")\n for sp in get_all_sp(project):\n if datetime.fromisoformat(sp[\"last_heartbeat\"]) < valid_starting:\n sp[\"state\"] = \"offline\"\n sp[\"primary\"] = False\n else:\n sp[\"state\"] = \"online\"\n update_sp(sp)\n\n\ndef simple_PSP_policy(incoming_sp, now):\n \"\"\"Find the primary SP (PSP).\n\n If there is no PSP or current PSP timeout, choose one without heartbeat timeout.\n \"\"\"\n project = incoming_sp[\"project\"]\n sp = get_sp_by(dict(project=project, sp_end_point=incoming_sp[\"sp_end_point\"]))\n if sp:\n sp[\"last_heartbeat\"] = now.isoformat()\n update_sp(sp)\n else:\n update_sp(\n dict(\n project=incoming_sp[\"project\"],\n sp_end_point=incoming_sp[\"sp_end_point\"],\n last_heartbeat=now.isoformat(),\n state=\"online\",\n primary=False,\n )\n )\n\n psp = get_primary_sp(project)\n if not psp:\n psp = get_sp_by(dict(project=project, state=\"online\"))\n if psp:\n print(f\"{psp['sp_end_point']} online\")\n psp[\"primary\"] = True\n psp[\"service_session_id\"] = str(uuid.uuid4())\n update_sp(psp)\n\n return psp\n\n\ndef promote_sp(sp):\n psp = get_sp_by(sp)\n project = sp[\"project\"]\n sp_end_point = sp[\"sp_end_point\"]\n if psp and psp[\"state\"] == \"online\":\n current_psp = get_primary_sp(project)\n if all(current_psp[k] == v for k, v in sp.items()):\n return True, f\"Same sp_end_point, no need to promote {sp_end_point}.\"\n psp[\"primary\"] = True\n current_psp[\"primary\"] = False\n psp[\"service_session_id\"] = str(uuid.uuid4())\n print(f\"{psp['sp_end_point']} promoted\")\n print(f\"{current_psp['sp_end_point']} demoted\")\n update_sp(psp)\n update_sp(current_psp)\n return False, psp\n else:\n return True, f\"Unable to promote {sp_end_point}, either offline or not registered.\"\n", "path": "nvflare/ha/overseer/utils.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport argparse\nimport json\nimport os\nfrom datetime import datetime\n\nfrom nvflare.lighter.utils import load_yaml\n\n\ndef get_input(prompt, item_list, multiple=False):\n while True:\n answer = input(prompt)\n result = None\n if multiple:\n try:\n if answer == \"\":\n print(\"None of the choices is selected.\")\n result = []\n else:\n trimmed = set(answer.split(\",\"))\n result = [item_list[int(i)] for i in trimmed]\n print(f\"{result} selected after duplicate inputs removed.\")\n except BaseException:\n print(\"Input contains errors (non-integer or out of index range)\")\n else:\n try:\n result = item_list[int(answer)]\n except ValueError:\n print(f\"Expect integer but got {answer.__class__.__name__}\")\n except IndexError:\n print(\"Number out of index range\")\n if result is not None:\n break\n return result\n\n\ndef get_date_input(prompt):\n while True:\n answer = input(prompt)\n try:\n result = datetime.strptime(answer, \"%m/%d/%Y\").date().isoformat()\n break\n except:\n print(f\"Expect MM/DD/YYYY but got {answer}\")\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--project_file\", type=str, default=\"project.yml\", help=\"file to describe FL project\")\n\n args = parser.parse_args()\n\n current_path = os.getcwd()\n\n # main project file\n project_file = args.project_file\n project_full_path = os.path.join(current_path, project_file)\n if not os.path.exists(project_full_path):\n print(f\"{project_full_path} not found. 
Running study requires that file.\")\n exit(0)\n\n project = load_yaml(project_full_path)\n api_version = project.get(\"api_version\")\n if api_version not in [3]:\n raise ValueError(f\"API version expected 3 but found {api_version}\")\n\n admin_list = list()\n client_list = list()\n for p in project.get(\"participants\"):\n if p.get(\"type\") == \"admin\":\n admin_list.append(p.get(\"name\"))\n elif p.get(\"type\") == \"client\":\n client_list.append(p.get(\"name\"))\n\n admin_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(admin_list)])\n client_list_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(client_list)])\n\n name = input(\"Please enter the name of this study: \")\n description = input(\"and brief description: \")\n contact = get_input(f\"select one admin for contact {admin_list_string}: \", admin_list)\n\n participating_admins = get_input(\n f\"select participating_admins admins (separated by ',') {admin_list_string} \", admin_list, multiple=True\n )\n participating_clients = get_input(\n f\"select participating clients (separated by ',') {client_list_string} \", client_list, multiple=True\n )\n participating_clients_string = \", \".join([f\"{i}:{v}\" for i, v in enumerate(participating_clients)])\n # reviewer_dict = dict()\n # for admin in participating_admins:\n # reviewed_clients = get_input(\n # f\"what clients will reviewer {admin} review {participating_clients_string} \",\n # participating_clients,\n # multiple=True,\n # )\n # reviewer_dict[admin] = reviewed_clients\n start_date = get_date_input(\"input start date of this study (MM/DD/YYYY): \")\n end_date = get_date_input(\"input end date of this study (MM/DD/YYYY): \")\n\n study_config = dict(\n name=name,\n description=description,\n contact=contact,\n participating_admins=participating_admins,\n participating_clients=participating_clients,\n # reviewers=reviewer_dict,\n start_date=start_date,\n end_date=end_date,\n )\n with open(f\"{name}.json\", \"wt\") as f:\n f.write(json.dumps(study_config, indent=2))\n print(f\"study config file was generated at {name}.json\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/lighter/study.py"}]} | 2,813 | 375 |
gh_patches_debug_17658 | rasdani/github-patches | git_diff | pantsbuild__pants-12060 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0].
In short, the `run` goal executes roughly like this:
```
export PEX_PATH=/path/to/requirements.pex
export PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2
./only-contains-entry-point-metadata.pex
```
If the executed code then tries to re-execute via argv[0] (the PEX file itself), sys.path scrubbing is engaged, which strips back off the sys.path additions that PEX_PATH and PEX_EXTRA_SYS_PATH triggered, since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter, or else it needs to set this option for `pants run` unconditionally.
Concrete examples of apps that re-exec via sys.argv[0] are Django manage.py apps.
See https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out.
--- END ISSUE ---
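For reference, the kind of self re-exec the issue describes looks roughly like this sketch (details vary by app; Django's manage.py tooling is one real-world case):

```python
import os
import sys

# Replace the current process with a fresh copy of argv[0]. When argv[0]
# is the PEX file itself, the new process goes through PEX bootstrapping
# again; if PEX_PATH / PEX_EXTRA_SYS_PATH were stripped from the
# environment, the re-executed process loses those sys.path additions.
os.execv(sys.argv[0], sys.argv)
```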
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/goals/run_pex_binary.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import os
5
6 from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
7 from pants.backend.python.target_types import (
8 PexBinaryDefaults,
9 ResolvedPexEntryPoint,
10 ResolvePexEntryPointRequest,
11 )
12 from pants.backend.python.util_rules.pex import Pex, PexRequest
13 from pants.backend.python.util_rules.pex_environment import PexEnvironment
14 from pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest
15 from pants.backend.python.util_rules.python_sources import (
16 PythonSourceFiles,
17 PythonSourceFilesRequest,
18 )
19 from pants.core.goals.run import RunFieldSet, RunRequest
20 from pants.engine.fs import Digest, MergeDigests
21 from pants.engine.rules import Get, MultiGet, collect_rules, rule
22 from pants.engine.target import TransitiveTargets, TransitiveTargetsRequest
23 from pants.engine.unions import UnionRule
24 from pants.util.logging import LogLevel
25
26
27 @rule(level=LogLevel.DEBUG)
28 async def create_pex_binary_run_request(
29 field_set: PexBinaryFieldSet,
30 pex_binary_defaults: PexBinaryDefaults,
31 pex_env: PexEnvironment,
32 ) -> RunRequest:
33 entry_point, transitive_targets = await MultiGet(
34 Get(
35 ResolvedPexEntryPoint,
36 ResolvePexEntryPointRequest(field_set.entry_point),
37 ),
38 Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),
39 )
40
41 # Note that we get an intermediate PexRequest here (instead of going straight to a Pex)
42 # so that we can get the interpreter constraints for use in runner_pex_request.
43 requirements_pex_request = await Get(
44 PexRequest,
45 PexFromTargetsRequest,
46 PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True),
47 )
48
49 requirements_request = Get(Pex, PexRequest, requirements_pex_request)
50
51 sources_request = Get(
52 PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)
53 )
54
55 output_filename = f"{field_set.address.target_name}.pex"
56 runner_pex_request = Get(
57 Pex,
58 PexRequest(
59 output_filename=output_filename,
60 interpreter_constraints=requirements_pex_request.interpreter_constraints,
61 additional_args=field_set.generate_additional_args(pex_binary_defaults),
62 internal_only=True,
63 # Note that the entry point file is not in the PEX itself. It's loaded by setting
64 # `PEX_EXTRA_SYS_PATH`.
65 # TODO(John Sirois): Support ConsoleScript in PexBinary targets:
66 # https://github.com/pantsbuild/pants/issues/11619
67 main=entry_point.val,
68 ),
69 )
70
71 requirements, sources, runner_pex = await MultiGet(
72 requirements_request, sources_request, runner_pex_request
73 )
74
75 merged_digest = await Get(
76 Digest,
77 MergeDigests(
78 [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest]
79 ),
80 )
81
82 def in_chroot(relpath: str) -> str:
83 return os.path.join("{chroot}", relpath)
84
85 args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python)
86
87 chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]
88 extra_env = {
89 **pex_env.environment_dict(python_configured=runner_pex.python is not None),
90 "PEX_PATH": in_chroot(requirements_pex_request.output_filename),
91 "PEX_EXTRA_SYS_PATH": ":".join(chrooted_source_roots),
92 }
93
94 return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)
95
96
97 def rules():
98 return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)]
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py
--- a/src/python/pants/backend/python/goals/run_pex_binary.py
+++ b/src/python/pants/backend/python/goals/run_pex_binary.py
@@ -58,7 +58,13 @@
PexRequest(
output_filename=output_filename,
interpreter_constraints=requirements_pex_request.interpreter_constraints,
- additional_args=field_set.generate_additional_args(pex_binary_defaults),
+ additional_args=(
+ *field_set.generate_additional_args(pex_binary_defaults),
+ # N.B.: Since we cobble together the runtime environment via PEX_PATH and
+ # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that
+ # these environment variables are not stripped.
+ "--no-strip-pex-env",
+ ),
internal_only=True,
# Note that the entry point file is not in the PEX itself. It's loaded by setting
# `PEX_EXTRA_SYS_PATH`.
| {"golden_diff": "diff --git a/src/python/pants/backend/python/goals/run_pex_binary.py b/src/python/pants/backend/python/goals/run_pex_binary.py\n--- a/src/python/pants/backend/python/goals/run_pex_binary.py\n+++ b/src/python/pants/backend/python/goals/run_pex_binary.py\n@@ -58,7 +58,13 @@\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n- additional_args=field_set.generate_additional_args(pex_binary_defaults),\n+ additional_args=(\n+ *field_set.generate_additional_args(pex_binary_defaults),\n+ # N.B.: Since we cobble together the runtime environment via PEX_PATH and\n+ # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that\n+ # these environment variables are not stripped.\n+ \"--no-strip-pex-env\",\n+ ),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n", "issue": "`./pants run ...` does not work for non-venv-mode `pex_binary` targets that re-exec sys.argv[0].\nIn short, the `run` goal executes via ~:\r\n```\r\nexport PEX_PATH=/path/to/requirements.pex\r\nexport PEX_EXTRA_SYS_PATH=/path/to/source_root_1:/path/to/cource_root2\r\n./only-contains-entry-point-metadata.pex\r\n```\r\n\r\nIf the executed code then tries to re-execute via argv[0] (the PEX file itself), then sys.path scrubbing is engaged which strips back off the PEX_PATH and PEX_EXTRA_SYS_PATH triggered sys.path additions since those two env vars are also stripped by default. Either Pants needs to expose the `--no-strip-pex-env` option as a `pex_binary` parameter or else it needs to set this option for `pants run` unconditionally.\r\n\r\nThe concrete example of apps that re-exec via sys.argv[0] are django manage.py apps.\r\n\r\nSee https://github.com/pantsbuild/pex/issues/1349 where @asherf discovered the issue and the mechanism behind it was all worked out.\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet\nfrom pants.backend.python.target_types import (\n PexBinaryDefaults,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunFieldSet, RunRequest\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\nfrom pants.engine.unions import UnionRule\nfrom pants.util.logging import LogLevel\n\n\n@rule(level=LogLevel.DEBUG)\nasync def create_pex_binary_run_request(\n field_set: PexBinaryFieldSet,\n pex_binary_defaults: PexBinaryDefaults,\n pex_env: PexEnvironment,\n) -> RunRequest:\n entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(field_set.entry_point),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),\n )\n\n # Note that we get an intermediate PexRequest here (instead of going straight to a Pex)\n # so that we can get the interpreter 
constraints for use in runner_pex_request.\n requirements_pex_request = await Get(\n PexRequest,\n PexFromTargetsRequest,\n PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True),\n )\n\n requirements_request = Get(Pex, PexRequest, requirements_pex_request)\n\n sources_request = Get(\n PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)\n )\n\n output_filename = f\"{field_set.address.target_name}.pex\"\n runner_pex_request = Get(\n Pex,\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n additional_args=field_set.generate_additional_args(pex_binary_defaults),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n # TODO(John Sirois): Support ConsoleScript in PexBinary targets:\n # https://github.com/pantsbuild/pants/issues/11619\n main=entry_point.val,\n ),\n )\n\n requirements, sources, runner_pex = await MultiGet(\n requirements_request, sources_request, runner_pex_request\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest]\n ),\n )\n\n def in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python)\n\n chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]\n extra_env = {\n **pex_env.environment_dict(python_configured=runner_pex.python is not None),\n \"PEX_PATH\": in_chroot(requirements_pex_request.output_filename),\n \"PEX_EXTRA_SYS_PATH\": \":\".join(chrooted_source_roots),\n }\n\n return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)\n\n\ndef rules():\n return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)]\n", "path": "src/python/pants/backend/python/goals/run_pex_binary.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet\nfrom pants.backend.python.target_types import (\n PexBinaryDefaults,\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest,\n)\nfrom pants.backend.python.util_rules.pex import Pex, PexRequest\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment\nfrom pants.backend.python.util_rules.pex_from_targets import PexFromTargetsRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.run import RunFieldSet, RunRequest\nfrom pants.engine.fs import Digest, MergeDigests\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import TransitiveTargets, TransitiveTargetsRequest\nfrom pants.engine.unions import UnionRule\nfrom pants.util.logging import LogLevel\n\n\n@rule(level=LogLevel.DEBUG)\nasync def create_pex_binary_run_request(\n field_set: PexBinaryFieldSet,\n pex_binary_defaults: PexBinaryDefaults,\n pex_env: PexEnvironment,\n) -> RunRequest:\n entry_point, transitive_targets = await MultiGet(\n Get(\n ResolvedPexEntryPoint,\n ResolvePexEntryPointRequest(field_set.entry_point),\n ),\n Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),\n )\n\n # Note that we get an intermediate PexRequest here (instead of going straight to a Pex)\n # so that we can 
get the interpreter constraints for use in runner_pex_request.\n requirements_pex_request = await Get(\n PexRequest,\n PexFromTargetsRequest,\n PexFromTargetsRequest.for_requirements([field_set.address], internal_only=True),\n )\n\n requirements_request = Get(Pex, PexRequest, requirements_pex_request)\n\n sources_request = Get(\n PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure, include_files=True)\n )\n\n output_filename = f\"{field_set.address.target_name}.pex\"\n runner_pex_request = Get(\n Pex,\n PexRequest(\n output_filename=output_filename,\n interpreter_constraints=requirements_pex_request.interpreter_constraints,\n additional_args=(\n *field_set.generate_additional_args(pex_binary_defaults),\n # N.B.: Since we cobble together the runtime environment via PEX_PATH and\n # PEX_EXTRA_SYS_PATH below, it's important for any app that re-executes itself that\n # these environment variables are not stripped.\n \"--no-strip-pex-env\",\n ),\n internal_only=True,\n # Note that the entry point file is not in the PEX itself. It's loaded by setting\n # `PEX_EXTRA_SYS_PATH`.\n # TODO(John Sirois): Support ConsoleScript in PexBinary targets:\n # https://github.com/pantsbuild/pants/issues/11619\n main=entry_point.val,\n ),\n )\n\n requirements, sources, runner_pex = await MultiGet(\n requirements_request, sources_request, runner_pex_request\n )\n\n merged_digest = await Get(\n Digest,\n MergeDigests(\n [requirements.digest, sources.source_files.snapshot.digest, runner_pex.digest]\n ),\n )\n\n def in_chroot(relpath: str) -> str:\n return os.path.join(\"{chroot}\", relpath)\n\n args = pex_env.create_argv(in_chroot(runner_pex.name), python=runner_pex.python)\n\n chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]\n extra_env = {\n **pex_env.environment_dict(python_configured=runner_pex.python is not None),\n \"PEX_PATH\": in_chroot(requirements_pex_request.output_filename),\n \"PEX_EXTRA_SYS_PATH\": \":\".join(chrooted_source_roots),\n }\n\n return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)\n\n\ndef rules():\n return [*collect_rules(), UnionRule(RunFieldSet, PexBinaryFieldSet)]\n", "path": "src/python/pants/backend/python/goals/run_pex_binary.py"}]} | 1,542 | 234 |
gh_patches_debug_8408 | rasdani/github-patches | git_diff | ckan__ckan-3735 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tolerate missing system_info table
If you have an old or incomplete database some commands (such as db clean) will fail because they can't find the system_info table.
```python-traceback
$ cd ckan; paster db clean -c test-core.ini; paster db init -c test-core.ini
Traceback (most recent call last):
File "/home/ubuntu/virtualenvs/venv-system/bin/paster", line 11, in <module>
sys.exit(run())
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 102, in run
invoke(command, command_name, options, args[1:])
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 141, in invoke
exit_code = runner.run(args)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py", line 236, in run
result = self.command()
File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py", line 217, in command
self._load_config(cmd!='upgrade')
File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py", line 161, in _load_config
load_environment(conf.global_conf, conf.local_conf)
File "/home/ubuntu/ckanext-scheming/ckan/ckan/config/environment.py", line 99, in load_environment
app_globals.reset()
File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py", line 172, in reset
get_config_value(key)
File "/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py", line 139, in get_config_value
value = model.get_system_info(key)
File "/home/ubuntu/ckanext-scheming/ckan/ckan/model/system_info.py", line 56, in get_system_info
obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2334, in first
ret = list(self[0:1])
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2201, in __getitem__
return list(res)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2405, in __iter__
return self._execute_and_instances(context)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 2420, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 727, in execute
return meth(self, multiparams, params)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py", line 322, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 824, in _execute_clauseelement
compiled_sql, distilled_params
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 954, in _execute_context
context)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 1116, in _handle_dbapi_exception
exc_info
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py", line 189, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py", line 947, in _execute_context
context)
File "/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py", line 435, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (ProgrammingError) column system_info.state does not exist
LINE 1: ...info_key, system_info.value AS system_info_value, system_inf...
^
'SELECT system_info.id AS system_info_id, system_info.key AS system_info_key, system_info.value AS system_info_value, system_info.state AS system_info_state, system_info.revision_id AS system_info_revision_id \nFROM system_info \nWHERE system_info.key = %(key_1)s \n LIMIT %(param_1)s' {'param_1': 1, 'key_1': 'ckan.site_description'}
```
This change treats a missing system_info table the same as no overridden configuration.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/model/system_info.py`
Content:
```
1 # encoding: utf-8
2
3 '''
4 The system_info table and SystemInfo mapped class store runtime-editable
5 configuration options.
6
7 For more details, check :doc:`maintaining/configuration`.
8 '''
9
10 from sqlalchemy import types, Column, Table
11
12 import vdm.sqlalchemy
13 import meta
14 import core
15 import domain_object
16
17 __all__ = ['system_info_revision_table', 'system_info_table', 'SystemInfo',
18 'SystemInfoRevision', 'get_system_info', 'set_system_info']
19
20 system_info_table = Table(
21 'system_info', meta.metadata,
22 Column('id', types.Integer(), primary_key=True, nullable=False),
23 Column('key', types.Unicode(100), unique=True, nullable=False),
24 Column('value', types.UnicodeText),
25 )
26
27 vdm.sqlalchemy.make_table_stateful(system_info_table)
28 system_info_revision_table = core.make_revisioned_table(system_info_table)
29
30
31 class SystemInfo(vdm.sqlalchemy.RevisionedObjectMixin,
32 vdm.sqlalchemy.StatefulObjectMixin,
33 domain_object.DomainObject):
34
35 def __init__(self, key, value):
36
37 super(SystemInfo, self).__init__()
38
39 self.key = key
40 self.value = unicode(value)
41
42
43 meta.mapper(SystemInfo, system_info_table,
44 extension=[
45 vdm.sqlalchemy.Revisioner(system_info_revision_table),
46 ])
47
48 vdm.sqlalchemy.modify_base_object_mapper(SystemInfo, core.Revision, core.State)
49 SystemInfoRevision = vdm.sqlalchemy.create_object_version(meta.mapper,
50 SystemInfo,
51 system_info_revision_table)
52
53
54 def get_system_info(key, default=None):
55 ''' get data from system_info table '''
56 obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
57 if obj:
58 return obj.value
59 else:
60 return default
61
62
63 def delete_system_info(key, default=None):
64 ''' delete data from system_info table '''
65 obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
66 if obj:
67 meta.Session.delete(obj)
68 meta.Session.commit()
69
70
71 def set_system_info(key, value):
72 ''' save data in the system_info table '''
73 obj = None
74 obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
75 if obj and obj.value == unicode(value):
76 return
77 if not obj:
78 obj = SystemInfo(key, value)
79 else:
80 obj.value = unicode(value)
81
82 from ckan.model import repo
83 rev = repo.new_revision()
84 rev.message = 'Set {0} setting in system_info table'.format(key)
85 meta.Session.add(obj)
86 meta.Session.commit()
87
88 return True
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/model/system_info.py b/ckan/model/system_info.py
--- a/ckan/model/system_info.py
+++ b/ckan/model/system_info.py
@@ -53,11 +53,14 @@
def get_system_info(key, default=None):
''' get data from system_info table '''
- obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
- if obj:
- return obj.value
- else:
- return default
+ from sqlalchemy.exc import ProgrammingError
+ try:
+ obj = meta.Session.query(SystemInfo).filter_by(key=key).first()
+ if obj:
+ return obj.value
+ except ProgrammingError:
+ meta.Session.rollback()
+ return default
def delete_system_info(key, default=None):
| {"golden_diff": "diff --git a/ckan/model/system_info.py b/ckan/model/system_info.py\n--- a/ckan/model/system_info.py\n+++ b/ckan/model/system_info.py\n@@ -53,11 +53,14 @@\n \n def get_system_info(key, default=None):\n ''' get data from system_info table '''\n- obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n- if obj:\n- return obj.value\n- else:\n- return default\n+ from sqlalchemy.exc import ProgrammingError\n+ try:\n+ obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n+ if obj:\n+ return obj.value\n+ except ProgrammingError:\n+ meta.Session.rollback()\n+ return default\n \n \n def delete_system_info(key, default=None):\n", "issue": "Tolerate missing system_info table\nIf you have an old or incomplete database some commands (such as db clean) will fail because they can't find the system_info table.\r\n\r\n```python-traceback\r\n$ cd ckan; paster db clean -c test-core.ini; paster db init -c test-core.ini\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/virtualenvs/venv-system/bin/paster\", line 11, in <module>\r\n sys.exit(run())\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 102, in run\r\n invoke(command, command_name, options, args[1:])\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 141, in invoke\r\n exit_code = runner.run(args)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/paste/script/command.py\", line 236, in run\r\n result = self.command()\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py\", line 217, in command\r\n self._load_config(cmd!='upgrade')\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/cli.py\", line 161, in _load_config\r\n load_environment(conf.global_conf, conf.local_conf)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/config/environment.py\", line 99, in load_environment\r\n app_globals.reset()\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py\", line 172, in reset\r\n get_config_value(key)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/lib/app_globals.py\", line 139, in get_config_value\r\n value = model.get_system_info(key)\r\n File \"/home/ubuntu/ckanext-scheming/ckan/ckan/model/system_info.py\", line 56, in get_system_info\r\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2334, in first\r\n ret = list(self[0:1])\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2201, in __getitem__\r\n return list(res)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2405, in __iter__\r\n return self._execute_and_instances(context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/orm/query.py\", line 2420, in _execute_and_instances\r\n result = conn.execute(querycontext.statement, self._params)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 727, in execute\r\n return meth(self, multiparams, params)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/sql/elements.py\", line 322, in _execute_on_connection\r\n return connection._execute_clauseelement(self, multiparams, params)\r\n File 
\"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 824, in _execute_clauseelement\r\n compiled_sql, distilled_params\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 954, in _execute_context\r\n context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1116, in _handle_dbapi_exception\r\n exc_info\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/util/compat.py\", line 189, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/base.py\", line 947, in _execute_context\r\n context)\r\n File \"/home/ubuntu/virtualenvs/venv-system/local/lib/python2.7/site-packages/sqlalchemy/engine/default.py\", line 435, in do_execute\r\n cursor.execute(statement, parameters)\r\nsqlalchemy.exc.ProgrammingError: (ProgrammingError) column system_info.state does not exist\r\nLINE 1: ...info_key, system_info.value AS system_info_value, system_inf...\r\n ^\r\n 'SELECT system_info.id AS system_info_id, system_info.key AS system_info_key, system_info.value AS system_info_value, system_info.state AS system_info_state, system_info.revision_id AS system_info_revision_id \\nFROM system_info \\nWHERE system_info.key = %(key_1)s \\n LIMIT %(param_1)s' {'param_1': 1, 'key_1': 'ckan.site_description'}\r\n```\r\n\r\nThis change treats a missing system_info table the same as no overridden configuration.\n", "before_files": [{"content": "# encoding: utf-8\n\n'''\nThe system_info table and SystemInfo mapped class store runtime-editable\nconfiguration options.\n\nFor more details, check :doc:`maintaining/configuration`.\n'''\n\nfrom sqlalchemy import types, Column, Table\n\nimport vdm.sqlalchemy\nimport meta\nimport core\nimport domain_object\n\n__all__ = ['system_info_revision_table', 'system_info_table', 'SystemInfo',\n 'SystemInfoRevision', 'get_system_info', 'set_system_info']\n\nsystem_info_table = Table(\n 'system_info', meta.metadata,\n Column('id', types.Integer(), primary_key=True, nullable=False),\n Column('key', types.Unicode(100), unique=True, nullable=False),\n Column('value', types.UnicodeText),\n)\n\nvdm.sqlalchemy.make_table_stateful(system_info_table)\nsystem_info_revision_table = core.make_revisioned_table(system_info_table)\n\n\nclass SystemInfo(vdm.sqlalchemy.RevisionedObjectMixin,\n vdm.sqlalchemy.StatefulObjectMixin,\n domain_object.DomainObject):\n\n def __init__(self, key, value):\n\n super(SystemInfo, self).__init__()\n\n self.key = key\n self.value = unicode(value)\n\n\nmeta.mapper(SystemInfo, system_info_table,\n extension=[\n vdm.sqlalchemy.Revisioner(system_info_revision_table),\n ])\n\nvdm.sqlalchemy.modify_base_object_mapper(SystemInfo, core.Revision, core.State)\nSystemInfoRevision = vdm.sqlalchemy.create_object_version(meta.mapper,\n SystemInfo,\n system_info_revision_table)\n\n\ndef get_system_info(key, default=None):\n ''' get data from system_info table '''\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n return obj.value\n else:\n return default\n\n\ndef delete_system_info(key, default=None):\n ''' delete data from system_info table '''\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n meta.Session.delete(obj)\n meta.Session.commit()\n\n\ndef set_system_info(key, value):\n ''' save data in the 
system_info table '''\n obj = None\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj and obj.value == unicode(value):\n return\n if not obj:\n obj = SystemInfo(key, value)\n else:\n obj.value = unicode(value)\n\n from ckan.model import repo\n rev = repo.new_revision()\n rev.message = 'Set {0} setting in system_info table'.format(key)\n meta.Session.add(obj)\n meta.Session.commit()\n\n return True\n", "path": "ckan/model/system_info.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''\nThe system_info table and SystemInfo mapped class store runtime-editable\nconfiguration options.\n\nFor more details, check :doc:`maintaining/configuration`.\n'''\n\nfrom sqlalchemy import types, Column, Table\n\nimport vdm.sqlalchemy\nimport meta\nimport core\nimport domain_object\n\n__all__ = ['system_info_revision_table', 'system_info_table', 'SystemInfo',\n 'SystemInfoRevision', 'get_system_info', 'set_system_info']\n\nsystem_info_table = Table(\n 'system_info', meta.metadata,\n Column('id', types.Integer(), primary_key=True, nullable=False),\n Column('key', types.Unicode(100), unique=True, nullable=False),\n Column('value', types.UnicodeText),\n)\n\nvdm.sqlalchemy.make_table_stateful(system_info_table)\nsystem_info_revision_table = core.make_revisioned_table(system_info_table)\n\n\nclass SystemInfo(vdm.sqlalchemy.RevisionedObjectMixin,\n vdm.sqlalchemy.StatefulObjectMixin,\n domain_object.DomainObject):\n\n def __init__(self, key, value):\n\n super(SystemInfo, self).__init__()\n\n self.key = key\n self.value = unicode(value)\n\n\nmeta.mapper(SystemInfo, system_info_table,\n extension=[\n vdm.sqlalchemy.Revisioner(system_info_revision_table),\n ])\n\nvdm.sqlalchemy.modify_base_object_mapper(SystemInfo, core.Revision, core.State)\nSystemInfoRevision = vdm.sqlalchemy.create_object_version(meta.mapper,\n SystemInfo,\n system_info_revision_table)\n\n\ndef get_system_info(key, default=None):\n ''' get data from system_info table '''\n from sqlalchemy.exc import ProgrammingError\n try:\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n return obj.value\n except ProgrammingError:\n meta.Session.rollback()\n return default\n\n\ndef delete_system_info(key, default=None):\n ''' delete data from system_info table '''\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj:\n meta.Session.delete(obj)\n meta.Session.commit()\n\n\ndef set_system_info(key, value):\n ''' save data in the system_info table '''\n obj = None\n obj = meta.Session.query(SystemInfo).filter_by(key=key).first()\n if obj and obj.value == unicode(value):\n return\n if not obj:\n obj = SystemInfo(key, value)\n else:\n obj.value = unicode(value)\n\n from ckan.model import repo\n rev = repo.new_revision()\n rev.message = 'Set {0} setting in system_info table'.format(key)\n meta.Session.add(obj)\n meta.Session.commit()\n\n return True\n", "path": "ckan/model/system_info.py"}]} | 2,196 | 178 |
gh_patches_debug_5087 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-3641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve security contact webpage
We need to improve our documentation about how a user who has found a security issue can contact us to report the vulnerability in a confidential way.
This page should be clear regarding how to report the issue, how to submit a patch (without making it public) and how to receive feedback / discuss the solution.
There is a page already but it's too poor: https://readthedocs.org/security/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/urls.py`
Content:
```
1 # pylint: disable=missing-docstring
2 from __future__ import absolute_import
3
4 from functools import reduce
5 from operator import add
6
7 from django.conf.urls import url, include
8 from django.contrib import admin
9 from django.conf import settings
10 from django.conf.urls.static import static
11 from django.views.generic.base import TemplateView
12 from tastypie.api import Api
13
14 from readthedocs.api.base import (ProjectResource, UserResource,
15 VersionResource, FileResource)
16 from readthedocs.core.urls import docs_urls, core_urls, deprecated_urls
17 from readthedocs.core.views import (HomepageView, SupportView,
18 server_error_404, server_error_500)
19 from readthedocs.search import views as search_views
20
21
22 v1_api = Api(api_name='v1')
23 v1_api.register(UserResource())
24 v1_api.register(ProjectResource())
25 v1_api.register(VersionResource())
26 v1_api.register(FileResource())
27
28 admin.autodiscover()
29
30 handler404 = server_error_404
31 handler500 = server_error_500
32
33 basic_urls = [
34 url(r'^$', HomepageView.as_view(), name='homepage'),
35 url(r'^support/', SupportView.as_view(), name='support'),
36 url(r'^security/', TemplateView.as_view(template_name='security.html')),
37 ]
38
39 rtd_urls = [
40 url(r'^bookmarks/', include('readthedocs.bookmarks.urls')),
41 url(r'^search/$', search_views.elastic_search, name='search'),
42 url(r'^dashboard/', include('readthedocs.projects.urls.private')),
43 url(r'^profiles/', include('readthedocs.profiles.urls.public')),
44 url(r'^accounts/', include('readthedocs.profiles.urls.private')),
45 url(r'^accounts/', include('allauth.urls')),
46 url(r'^notifications/', include('readthedocs.notifications.urls')),
47 url(r'^accounts/gold/', include('readthedocs.gold.urls')),
48 # For redirects
49 url(r'^builds/', include('readthedocs.builds.urls')),
50 # For testing the 404's with DEBUG on.
51 url(r'^404/$', handler404),
52 # For testing the 500's with DEBUG on.
53 url(r'^500/$', handler500),
54 ]
55
56 project_urls = [
57 url(r'^projects/', include('readthedocs.projects.urls.public')),
58 ]
59
60 api_urls = [
61 url(r'^api/', include(v1_api.urls)),
62 url(r'^api/v2/', include('readthedocs.restapi.urls')),
63 url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
64 url(r'^websupport/', include('readthedocs.comments.urls')),
65 ]
66
67 i18n_urls = [
68 url(r'^i18n/', include('django.conf.urls.i18n')),
69 ]
70
71 admin_urls = [
72 url(r'^admin/', include(admin.site.urls)),
73 ]
74
75 debug_urls = add(
76 [
77 url('style-catalog/$',
78 TemplateView.as_view(template_name='style_catalog.html')),
79 ],
80 static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
81 )
82
83 # Export URLs
84 groups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls,
85 deprecated_urls]
86
87 if settings.USE_PROMOS:
88 # Include donation URL's
89 groups.append([
90 url(r'^sustainability/', include('readthedocsext.donate.urls')),
91 ])
92
93 if 'readthedocsext.embed' in settings.INSTALLED_APPS:
94 api_urls.insert(
95 0,
96 url(r'^api/v1/embed/', include('readthedocsext.embed.urls'))
97 )
98
99 if not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG:
100 groups.insert(0, docs_urls)
101 if getattr(settings, 'ALLOW_ADMIN', True):
102 groups.append(admin_urls)
103 if getattr(settings, 'DEBUG', False):
104 groups.append(debug_urls)
105
106 urlpatterns = reduce(add, groups)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/urls.py b/readthedocs/urls.py
--- a/readthedocs/urls.py
+++ b/readthedocs/urls.py
@@ -34,6 +34,8 @@
url(r'^$', HomepageView.as_view(), name='homepage'),
url(r'^support/', SupportView.as_view(), name='support'),
url(r'^security/', TemplateView.as_view(template_name='security.html')),
+ url(r'^.well-known/security.txt',
+ TemplateView.as_view(template_name='security.txt', content_type='text/plain')),
]
rtd_urls = [
| {"golden_diff": "diff --git a/readthedocs/urls.py b/readthedocs/urls.py\n--- a/readthedocs/urls.py\n+++ b/readthedocs/urls.py\n@@ -34,6 +34,8 @@\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n+ url(r'^.well-known/security.txt',\n+ TemplateView.as_view(template_name='security.txt', content_type='text/plain')),\n ]\n \n rtd_urls = [\n", "issue": "Improve security contact webpage\nWe need to improve our documentation about a user who found a security issue could contact us to report this vulnerability in a confidential way.\r\n\r\nThis page should be clear regarding how to report the issue, how submit a patch (wihtout making it public) and what to do receive feedback / discuss about the solution.\r\n\r\nThere is a page already but it's too poor: https://readthedocs.org/security/\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic.base import TemplateView\nfrom tastypie.api import Api\n\nfrom readthedocs.api.base import (ProjectResource, UserResource,\n VersionResource, FileResource)\nfrom readthedocs.core.urls import docs_urls, core_urls, deprecated_urls\nfrom readthedocs.core.views import (HomepageView, SupportView,\n server_error_404, server_error_500)\nfrom readthedocs.search import views as search_views\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(ProjectResource())\nv1_api.register(VersionResource())\nv1_api.register(FileResource())\n\nadmin.autodiscover()\n\nhandler404 = server_error_404\nhandler500 = server_error_500\n\nbasic_urls = [\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n]\n\nrtd_urls = [\n url(r'^bookmarks/', include('readthedocs.bookmarks.urls')),\n url(r'^search/$', search_views.elastic_search, name='search'),\n url(r'^dashboard/', include('readthedocs.projects.urls.private')),\n url(r'^profiles/', include('readthedocs.profiles.urls.public')),\n url(r'^accounts/', include('readthedocs.profiles.urls.private')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^notifications/', include('readthedocs.notifications.urls')),\n url(r'^accounts/gold/', include('readthedocs.gold.urls')),\n # For redirects\n url(r'^builds/', include('readthedocs.builds.urls')),\n # For testing the 404's with DEBUG on.\n url(r'^404/$', handler404),\n # For testing the 500's with DEBUG on.\n url(r'^500/$', handler500),\n]\n\nproject_urls = [\n url(r'^projects/', include('readthedocs.projects.urls.public')),\n]\n\napi_urls = [\n url(r'^api/', include(v1_api.urls)),\n url(r'^api/v2/', include('readthedocs.restapi.urls')),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^websupport/', include('readthedocs.comments.urls')),\n]\n\ni18n_urls = [\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nadmin_urls = [\n url(r'^admin/', include(admin.site.urls)),\n]\n\ndebug_urls = add(\n [\n url('style-catalog/$',\n TemplateView.as_view(template_name='style_catalog.html')),\n ],\n static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n)\n\n# Export URLs\ngroups = [basic_urls, rtd_urls, project_urls, api_urls, core_urls, i18n_urls,\n deprecated_urls]\n\nif settings.USE_PROMOS:\n # Include donation URL's\n groups.append([\n url(r'^sustainability/', include('readthedocsext.donate.urls')),\n ])\n\nif 'readthedocsext.embed' in settings.INSTALLED_APPS:\n api_urls.insert(\n 0,\n url(r'^api/v1/embed/', include('readthedocsext.embed.urls'))\n )\n\nif not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG:\n groups.insert(0, docs_urls)\nif getattr(settings, 'ALLOW_ADMIN', True):\n groups.append(admin_urls)\nif getattr(settings, 'DEBUG', False):\n groups.append(debug_urls)\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/urls.py"}], "after_files": [{"content": "# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic.base import TemplateView\nfrom tastypie.api import Api\n\nfrom readthedocs.api.base import (ProjectResource, UserResource,\n VersionResource, FileResource)\nfrom readthedocs.core.urls import docs_urls, core_urls, deprecated_urls\nfrom readthedocs.core.views import (HomepageView, SupportView,\n server_error_404, server_error_500)\nfrom readthedocs.search import views as search_views\n\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(ProjectResource())\nv1_api.register(VersionResource())\nv1_api.register(FileResource())\n\nadmin.autodiscover()\n\nhandler404 = server_error_404\nhandler500 = server_error_500\n\nbasic_urls = [\n url(r'^$', HomepageView.as_view(), name='homepage'),\n url(r'^support/', SupportView.as_view(), name='support'),\n url(r'^security/', TemplateView.as_view(template_name='security.html')),\n url(r'^.well-known/security.txt',\n TemplateView.as_view(template_name='security.txt', content_type='text/plain')),\n]\n\nrtd_urls = [\n url(r'^bookmarks/', include('readthedocs.bookmarks.urls')),\n url(r'^search/$', search_views.elastic_search, name='search'),\n url(r'^dashboard/', include('readthedocs.projects.urls.private')),\n url(r'^profiles/', include('readthedocs.profiles.urls.public')),\n url(r'^accounts/', include('readthedocs.profiles.urls.private')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^notifications/', include('readthedocs.notifications.urls')),\n url(r'^accounts/gold/', include('readthedocs.gold.urls')),\n # For redirects\n url(r'^builds/', include('readthedocs.builds.urls')),\n # For testing the 404's with DEBUG on.\n url(r'^404/$', handler404),\n # For testing the 500's with DEBUG on.\n url(r'^500/$', handler500),\n]\n\nproject_urls = [\n url(r'^projects/', include('readthedocs.projects.urls.public')),\n]\n\napi_urls = [\n url(r'^api/', include(v1_api.urls)),\n url(r'^api/v2/', include('readthedocs.restapi.urls')),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^websupport/', include('readthedocs.comments.urls')),\n]\n\ni18n_urls = [\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nadmin_urls = [\n url(r'^admin/', include(admin.site.urls)),\n]\n\ndebug_urls = add(\n [\n url('style-catalog/$',\n TemplateView.as_view(template_name='style_catalog.html')),\n ],\n static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n)\n\n# Export URLs\ngroups = [basic_urls, rtd_urls, 
project_urls, api_urls, core_urls, i18n_urls,\n deprecated_urls]\n\nif settings.USE_PROMOS:\n # Include donation URL's\n groups.append([\n url(r'^sustainability/', include('readthedocsext.donate.urls')),\n ])\n\nif 'readthedocsext.embed' in settings.INSTALLED_APPS:\n api_urls.insert(\n 0,\n url(r'^api/v1/embed/', include('readthedocsext.embed.urls'))\n )\n\nif not getattr(settings, 'USE_SUBDOMAIN', False) or settings.DEBUG:\n groups.insert(0, docs_urls)\nif getattr(settings, 'ALLOW_ADMIN', True):\n groups.append(admin_urls)\nif getattr(settings, 'DEBUG', False):\n groups.append(debug_urls)\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/urls.py"}]} | 1,403 | 128 |
gh_patches_debug_876 | rasdani/github-patches | git_diff | microsoft__Qcodes-867 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
missing dependency `jsonschema` in requirements.txt
The latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it.
This problem came to light when running tests on a project that depends on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from distutils.version import StrictVersion
3 from importlib import import_module
4 import re
5
6 def get_version(verbose=1):
7 """ Extract version information from source code """
8
9 try:
10 with open('qcodes/version.py', 'r') as f:
11 ln = f.readline()
12 # print(ln)
13 m = re.search('.* ''(.*)''', ln)
14 version = (m.group(1)).strip('\'')
15 except Exception as E:
16 print(E)
17 version = 'none'
18 if verbose:
19 print('get_version: %s' % version)
20 return version
21
22
23 def readme():
24 with open('README.rst') as f:
25 return f.read()
26
27 extras = {
28 'MatPlot': ('matplotlib', '2.0.2'),
29 'QtPlot': ('pyqtgraph', '0.10.0'),
30 'coverage tests': ('coverage', '4.0'),
31 'Slack': ('slacker', '0.9.42')
32 }
33 extras_require = {k: '>='.join(v) for k, v in extras.items()}
34
35 setup(name='qcodes',
36 version=get_version(),
37 use_2to3=False,
38
39 maintainer='Jens H Nielsen',
40 maintainer_email='[email protected]',
41 description='Python-based data acquisition framework developed by the '
42 'Copenhagen / Delft / Sydney / Microsoft quantum computing '
43 'consortium',
44 long_description=readme(),
45 url='https://github.com/QCoDeS/Qcodes',
46 classifiers=[
47 'Development Status :: 3 - Alpha',
48 'Intended Audience :: Science/Research',
49 'Programming Language :: Python :: 3 :: Only',
50 'Programming Language :: Python :: 3.5',
51 'Programming Language :: Python :: 3.6',
52 'Topic :: Scientific/Engineering'
53 ],
54 license='MIT',
55 # if we want to install without tests:
56 # packages=find_packages(exclude=["*.tests", "tests"]),
57 packages=find_packages(),
58 package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*',
59 'monitor/dist/css/*', 'config/*.json']},
60 install_requires=[
61 'numpy>=1.10',
62 'pyvisa>=1.8',
63 'h5py>=2.6',
64 'websockets>=3.2,<3.4'
65 ],
66
67 test_suite='qcodes.tests',
68 extras_require=extras_require,
69
70 # I think the only part of qcodes that would care about zip_safe
71 # is utils.helpers.reload_code; users of a zip-installed package
72 # shouldn't be needing to do this anyway, but we should test first.
73 zip_safe=False)
74
75 version_template = '''
76 *****
77 ***** package {0} must be at least version {1}.
78 ***** Please upgrade it (pip install -U {0} or conda install {0})
79 ***** in order to use {2}
80 *****
81 '''
82
83 missing_template = '''
84 *****
85 ***** package {0} not found
86 ***** Please install it (pip install {0} or conda install {0})
87 ***** in order to use {1}
88 *****
89 '''
90
91 valueerror_template = '''
92 *****
93 ***** package {0} version not understood
94 ***** Please make sure the installed version ({1})
95 ***** is compatible with the minimum required version ({2})
96 ***** in order to use {3}
97 *****
98 '''
99
100 # now test the versions of extras
101 for extra, (module_name, min_version) in extras.items():
102 try:
103 module = import_module(module_name)
104 if StrictVersion(module.__version__) < StrictVersion(min_version):
105 print(version_template.format(module_name, min_version, extra))
106 except ImportError:
107 print(missing_template.format(module_name, extra))
108 except ValueError:
109 print(valueerror_template.format(
110 module_name, module.__version__, min_version, extra))
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,8 @@
'numpy>=1.10',
'pyvisa>=1.8',
'h5py>=2.6',
- 'websockets>=3.2,<3.4'
+ 'websockets>=3.2,<3.4',
+ 'jsonschema'
],
test_suite='qcodes.tests',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,8 @@\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n- 'websockets>=3.2,<3.4'\n+ 'websockets>=3.2,<3.4',\n+ 'jsonschema'\n ],\n \n test_suite='qcodes.tests',\n", "issue": "missing dependency`jsonschema` in requirements.txt\nThe latest pip installable version of QCoDeS does not list jsonschema as a dependency but requires it. \r\n\r\nThis problem came to light when running tests on a project that depeneds on QCoDeS. Part of my build script installs qcodes (pip install qcodes). Importing qcodes then raises an exception because jsonschema is missing. \n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ln = f.readline()\n # print(ln)\n m = re.search('.* ''(.*)''', ln)\n version = (m.group(1)).strip('\\'')\n except Exception as E:\n print(E)\n version = 'none'\n if verbose:\n print('get_version: %s' % version)\n return version\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nextras = {\n 'MatPlot': ('matplotlib', '2.0.2'),\n 'QtPlot': ('pyqtgraph', '0.10.0'),\n 'coverage tests': ('coverage', '4.0'),\n 'Slack': ('slacker', '0.9.42')\n}\nextras_require = {k: '>='.join(v) for k, v in extras.items()}\n\nsetup(name='qcodes',\n version=get_version(),\n use_2to3=False,\n\n maintainer='Jens H Nielsen',\n maintainer_email='[email protected]',\n description='Python-based data acquisition framework developed by the '\n 'Copenhagen / Delft / Sydney / Microsoft quantum computing '\n 'consortium',\n long_description=readme(),\n url='https://github.com/QCoDeS/Qcodes',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering'\n ],\n license='MIT',\n # if we want to install without tests:\n # packages=find_packages(exclude=[\"*.tests\", \"tests\"]),\n packages=find_packages(),\n package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*',\n 'monitor/dist/css/*', 'config/*.json']},\n install_requires=[\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n 'websockets>=3.2,<3.4'\n ],\n\n test_suite='qcodes.tests',\n extras_require=extras_require,\n\n # I think the only part of qcodes that would care about zip_safe\n # is utils.helpers.reload_code; users of a zip-installed package\n # shouldn't be needing to do this anyway, but we should test first.\n zip_safe=False)\n\nversion_template = '''\n*****\n***** package {0} must be at least version {1}.\n***** Please upgrade it (pip install -U {0} or conda install {0})\n***** in order to use {2}\n*****\n'''\n\nmissing_template = '''\n*****\n***** package {0} not found\n***** Please install it (pip install {0} or conda install {0})\n***** in order to use {1}\n*****\n'''\n\nvalueerror_template = '''\n*****\n***** package {0} version not understood\n***** Please make sure the installed version ({1})\n***** is compatible with the minimum required version ({2})\n***** in order to use {3}\n*****\n'''\n\n# now test the versions of extras\nfor extra, (module_name, min_version) in extras.items():\n try:\n module = import_module(module_name)\n if StrictVersion(module.__version__) < 
StrictVersion(min_version):\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n except ValueError:\n print(valueerror_template.format(\n module_name, module.__version__, min_version, extra))\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom distutils.version import StrictVersion\nfrom importlib import import_module\nimport re\n\ndef get_version(verbose=1):\n \"\"\" Extract version information from source code \"\"\"\n\n try:\n with open('qcodes/version.py', 'r') as f:\n ln = f.readline()\n # print(ln)\n m = re.search('.* ''(.*)''', ln)\n version = (m.group(1)).strip('\\'')\n except Exception as E:\n print(E)\n version = 'none'\n if verbose:\n print('get_version: %s' % version)\n return version\n\n\ndef readme():\n with open('README.rst') as f:\n return f.read()\n\nextras = {\n 'MatPlot': ('matplotlib', '2.0.2'),\n 'QtPlot': ('pyqtgraph', '0.10.0'),\n 'coverage tests': ('coverage', '4.0'),\n 'Slack': ('slacker', '0.9.42')\n}\nextras_require = {k: '>='.join(v) for k, v in extras.items()}\n\nsetup(name='qcodes',\n version=get_version(),\n use_2to3=False,\n\n maintainer='Jens H Nielsen',\n maintainer_email='[email protected]',\n description='Python-based data acquisition framework developed by the '\n 'Copenhagen / Delft / Sydney / Microsoft quantum computing '\n 'consortium',\n long_description=readme(),\n url='https://github.com/QCoDeS/Qcodes',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering'\n ],\n license='MIT',\n # if we want to install without tests:\n # packages=find_packages(exclude=[\"*.tests\", \"tests\"]),\n packages=find_packages(),\n package_data={'qcodes': ['monitor/dist/*', 'monitor/dist/js/*',\n 'monitor/dist/css/*', 'config/*.json']},\n install_requires=[\n 'numpy>=1.10',\n 'pyvisa>=1.8',\n 'h5py>=2.6',\n 'websockets>=3.2,<3.4',\n 'jsonschema'\n ],\n\n test_suite='qcodes.tests',\n extras_require=extras_require,\n\n # I think the only part of qcodes that would care about zip_safe\n # is utils.helpers.reload_code; users of a zip-installed package\n # shouldn't be needing to do this anyway, but we should test first.\n zip_safe=False)\n\nversion_template = '''\n*****\n***** package {0} must be at least version {1}.\n***** Please upgrade it (pip install -U {0} or conda install {0})\n***** in order to use {2}\n*****\n'''\n\nmissing_template = '''\n*****\n***** package {0} not found\n***** Please install it (pip install {0} or conda install {0})\n***** in order to use {1}\n*****\n'''\n\nvalueerror_template = '''\n*****\n***** package {0} version not understood\n***** Please make sure the installed version ({1})\n***** is compatible with the minimum required version ({2})\n***** in order to use {3}\n*****\n'''\n\n# now test the versions of extras\nfor extra, (module_name, min_version) in extras.items():\n try:\n module = import_module(module_name)\n if StrictVersion(module.__version__) < StrictVersion(min_version):\n print(version_template.format(module_name, min_version, extra))\n except ImportError:\n print(missing_template.format(module_name, extra))\n except ValueError:\n print(valueerror_template.format(\n module_name, module.__version__, min_version, extra))\n", "path": "setup.py"}]} | 1,403 | 105 |
gh_patches_debug_15498 | rasdani/github-patches | git_diff | obspy__obspy-3407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possible typo on the documentation page of `obspy.signal.rotate.rotate_ne_rt`
On its documentation page: https://docs.obspy.org/packages/autogen/obspy.signal.rotate.rotate_rt_ne.html
the inputs of the `obspy.signal.rotate_rt_ne()` are shown as the north and east components, which should be the radial and transverse components instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/signal/rotate.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # ------------------------------------------------------------------
4 # Filename: rotate.py
5 # Purpose: Various Seismogram Rotation Functions
6 # Author: Tobias Megies, Tom Richter, Lion Krischer
7 # Email: [email protected]
8 #
9 # Copyright (C) 2009-2013 Tobias Megies, Tom Richter, Lion Krischer
10 # --------------------------------------------------------------------
11 """
12 Various Seismogram Rotation Functions
13
14 :copyright:
15 The ObsPy Development Team ([email protected])
16 :license:
17 GNU Lesser General Public License, Version 3
18 (https://www.gnu.org/copyleft/lesser.html)
19 """
20 import warnings
21 from math import cos, sin, radians
22
23 import numpy as np
24
25
26 def rotate_ne_rt(n, e, ba):
27 """
28 Rotates horizontal components of a seismogram.
29
30 The North- and East-Component of a seismogram will be rotated in Radial
31 and Transversal Component. The angle is given as the back-azimuth, that is
32 defined as the angle measured between the vector pointing from the station
33 to the source and the vector pointing from the station to the North.
34
35 :type n: :class:`~numpy.ndarray`
36 :param n: Data of the North component of the seismogram.
37 :type e: :class:`~numpy.ndarray`
38 :param e: Data of the East component of the seismogram.
39 :type ba: float
40 :param ba: The back azimuth from station to source in degrees.
41 :return: Radial and Transversal component of seismogram.
42 """
43 if len(n) != len(e):
44 raise TypeError("North and East component have different length.")
45 if ba < 0 or ba > 360:
46 raise ValueError("Back Azimuth should be between 0 and 360 degrees.")
47 ba = radians(ba)
48 r = - e * sin(ba) - n * cos(ba)
49 t = - e * cos(ba) + n * sin(ba)
50 return r, t
51
52
53 def rotate_rt_ne(n, e, ba):
54 """
55 Rotates horizontal components of a seismogram.
56
57 Rotates from radial and transversal components to North and East
58 components.
59
60 This is the inverse transformation of the transformation described
61 in :func:`rotate_ne_rt`.
62 """
63 ba = 360.0 - ba
64 return rotate_ne_rt(n, e, ba)
65
66
67 def rotate_zne_lqt(z, n, e, ba, inc):
68 """
69 Rotates all components of a seismogram.
70
71 The components will be rotated from ZNE (Z, North, East, left-handed) to
72 LQT (e.g. ray coordinate system, right-handed). The rotation angles are
73 given as the back-azimuth and inclination.
74
75 The transformation consists of 3 steps::
76
77 1. mirroring of E-component at ZN plain: ZNE -> ZNW
78 2. negative rotation of coordinate system around Z-axis with angle ba:
79 ZNW -> ZRT
80 3. negative rotation of coordinate system around T-axis with angle inc:
81 ZRT -> LQT
82
83 :type z: :class:`~numpy.ndarray`
84 :param z: Data of the Z component of the seismogram.
85 :type n: :class:`~numpy.ndarray`
86 :param n: Data of the North component of the seismogram.
87 :type e: :class:`~numpy.ndarray`
88 :param e: Data of the East component of the seismogram.
89 :type ba: float
90 :param ba: The back azimuth from station to source in degrees.
91 :type inc: float
92 :param inc: The inclination of the ray at the station in degrees.
93 :return: L-, Q- and T-component of seismogram.
94 """
95 if len(z) != len(n) or len(z) != len(e):
96 raise TypeError("Z, North and East component have different length!?!")
97 if ba < 0 or ba > 360:
98 raise ValueError("Back Azimuth should be between 0 and 360 degrees!")
99 if inc < 0 or inc > 360:
100 raise ValueError("Inclination should be between 0 and 360 degrees!")
101 ba = radians(ba)
102 inc = radians(inc)
103 l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba) # NOQA
104 q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba) # NOQA
105 t = n * sin(ba) - e * cos(ba) # NOQA
106 return l, q, t
107
108
109 def rotate_lqt_zne(l, q, t, ba, inc): # NOQA
110 """
111 Rotates all components of a seismogram.
112
113 The components will be rotated from LQT to ZNE.
114 This is the inverse transformation of the transformation described
115 in :func:`rotate_zne_lqt`.
116 """
117 if len(l) != len(q) or len(l) != len(t):
118 raise TypeError("L, Q and T component have different length!?!")
119 if ba < 0 or ba > 360:
120 raise ValueError("Back Azimuth should be between 0 and 360 degrees!")
121 if inc < 0 or inc > 360:
122 raise ValueError("Inclination should be between 0 and 360 degrees!")
123 ba = radians(ba)
124 inc = radians(inc)
125 z = l * cos(inc) + q * sin(inc)
126 n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)
127 e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)
128 return z, n, e
129
130
131 def _dip_azimuth2zne_base_vector(dip, azimuth):
132 """
133 Helper function converting a vector described with azimuth and dip of unit
134 length to a vector in the ZNE (Vertical, North, East) base.
135
136 The definition of azimuth and dip is according to the SEED reference
137 manual.
138 """
139 dip = np.deg2rad(dip)
140 azimuth = np.deg2rad(azimuth)
141
142 return np.array([-np.sin(dip),
143 np.cos(azimuth) * np.cos(dip),
144 np.sin(azimuth) * np.cos(dip)])
145
146
147 def rotate2zne(data_1, azimuth_1, dip_1, data_2, azimuth_2, dip_2, data_3,
148 azimuth_3, dip_3, inverse=False):
149 """
150 Rotates an arbitrarily oriented three-component vector to ZNE.
151
152 Each components orientation is described with a azimuth and a dip. The
153 azimuth is defined as the degrees from North, clockwise and the dip is the
154 defined as the number of degrees, down from horizontal. Both definitions
155 are according to the SEED standard.
156
157 The three components need not be orthogonal to each other but the
158 components have to be linearly independent. The function performs a full
159 base change to orthogonal Vertical, North, and East orientations.
160
161 :param data_1: Data component 1.
162 :param azimuth_1: The azimuth of component 1.
163 :param dip_1: The dip of component 1.
164 :param data_2: Data component 2.
165 :param azimuth_2: The azimuth of component 2.
166 :param dip_2: The dip of component 2.
167 :param data_3: Data component 3.
168 :param azimuth_3: The azimuth of component 3.
169 :param dip_3: The dip of component 3.
170 :param inverse: If `True`, the data arrays will be converted from ZNE to
171 whatever coordinate system the azimuths and dips specify. In that
172 case data_1, data_2, data_3 have to be data arrays for Z, N,
173 and E and the dips and azimuths specify where to transform to.
174 :type inverse: bool
175
176 :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`,
177 :class:`numpy.ndarray`)
178 :returns: The three rotated components, oriented in Z, N, and E if
179 `inverse` is `False`. Otherwise they will be oriented as specified
180 by the dips and azimuths.
181
182 An input of ZNE yields an output of ZNE
183
184 >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 0, 0, \
185 np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE
186 (array([ 0., 1., 2.]), array([ 0., 2., 4.]), array([ 0., 3., 6.]))
187
188 An input of ZSE yields an output of ZNE
189
190 >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 180, 0, \
191 np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE
192 (array([ 0., 1., 2.]), array([ 0., -2., -4.]), array([ 0., 3., 6.]))
193
194 Mixed up components should get rotated to ZNE.
195
196 >>> rotate2zne(np.arange(3), 0, 0, np.arange(3) * 2, 90, 0, \
197 np.arange(3) * 3, 0, -90) # doctest: +NORMALIZE_WHITESPACE
198 (array([ 0., 3., 6.]), array([ 0., 1., 2.]), array([ 0., 2., 4.]))
199 """
200 if len(set(len(i_) for i_ in (data_1, data_2, data_3))) != 1:
201 msg = "All three data arrays must be of same length."
202 raise ValueError(msg)
203
204 # Define the base vectors of the old base in terms of the new base vectors.
205 base_vector_1 = _dip_azimuth2zne_base_vector(dip_1, azimuth_1)
206 base_vector_2 = _dip_azimuth2zne_base_vector(dip_2, azimuth_2)
207 base_vector_3 = _dip_azimuth2zne_base_vector(dip_3, azimuth_3)
208
209 # Base change matrix.
210 m = np.array([base_vector_1,
211 base_vector_2,
212 base_vector_3])
213
214 # Determinant gives the volume change of a unit cube going from one
215 # basis to the next. It should neither be too small nor to large. These
216 # here are arbitrary limits.
217 with warnings.catch_warnings():
218 warnings.filterwarnings('ignore',
219 '.*invalid value encountered in det.*')
220 det = np.linalg.det(m)
221 if not (1E-6 < abs(det) < 1E6):
222 raise ValueError("The given directions are not linearly independent, "
223 "at least within numerical precision. Determinant "
224 "of the base change matrix: %g" % det)
225
226 if not inverse:
227 m = np.linalg.inv(m)
228
229 z, n, e = np.dot(m, [data_1, data_2, data_3])
230
231 # Replace all negative zeros. These might confuse some further
232 # processing programs.
233 z = np.array(z).ravel()
234 z[z == -0.0] = 0
235 n = np.array(n).ravel()
236 n[n == -0.0] = 0
237 e = np.array(e).ravel()
238 e[e == -0.0] = 0
239
240 return z, n, e
241
242
243 if __name__ == '__main__':
244 import doctest
245 doctest.testmod(exclude_empty=True)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/obspy/signal/rotate.py b/obspy/signal/rotate.py
--- a/obspy/signal/rotate.py
+++ b/obspy/signal/rotate.py
@@ -50,7 +50,7 @@
return r, t
-def rotate_rt_ne(n, e, ba):
+def rotate_rt_ne(r, t, ba):
"""
Rotates horizontal components of a seismogram.
@@ -59,9 +59,15 @@
This is the inverse transformation of the transformation described
in :func:`rotate_ne_rt`.
+
+ :type r: :class:`~numpy.ndarray`
+ :param r: Data of the Radial component of the seismogram.
+ :type t: :class:`~numpy.ndarray`
+ :param t: Data of the Transverse component of the seismogram.
+ :returns: North and East component of seismogram.
"""
ba = 360.0 - ba
- return rotate_ne_rt(n, e, ba)
+ return rotate_ne_rt(r, t, ba)
def rotate_zne_lqt(z, n, e, ba, inc):
| {"golden_diff": "diff --git a/obspy/signal/rotate.py b/obspy/signal/rotate.py\n--- a/obspy/signal/rotate.py\n+++ b/obspy/signal/rotate.py\n@@ -50,7 +50,7 @@\n return r, t\n \n \n-def rotate_rt_ne(n, e, ba):\n+def rotate_rt_ne(r, t, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n \n@@ -59,9 +59,15 @@\n \n This is the inverse transformation of the transformation described\n in :func:`rotate_ne_rt`.\n+\n+ :type r: :class:`~numpy.ndarray`\n+ :param r: Data of the Radial component of the seismogram.\n+ :type t: :class:`~numpy.ndarray`\n+ :param t: Data of the Transverse component of the seismogram.\n+ :returns: North and East component of seismogram.\n \"\"\"\n ba = 360.0 - ba\n- return rotate_ne_rt(n, e, ba)\n+ return rotate_ne_rt(r, t, ba)\n \n \n def rotate_zne_lqt(z, n, e, ba, inc):\n", "issue": "Possible typo on the documentation page of `obspy.signal.rotate.rotate_ne_rt`\nOn its documentation page: https://docs.obspy.org/packages/autogen/obspy.signal.rotate.rotate_rt_ne.html\r\nthe inputs of the `obspy.signal.rotate_rt_ne()` are shown as the north and east components, which should be the radial and transverse components instead. \n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------\n# Filename: rotate.py\n# Purpose: Various Seismogram Rotation Functions\n# Author: Tobias Megies, Tom Richter, Lion Krischer\n# Email: [email protected]\n#\n# Copyright (C) 2009-2013 Tobias Megies, Tom Richter, Lion Krischer\n# --------------------------------------------------------------------\n\"\"\"\nVarious Seismogram Rotation Functions\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport warnings\nfrom math import cos, sin, radians\n\nimport numpy as np\n\n\ndef rotate_ne_rt(n, e, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n The North- and East-Component of a seismogram will be rotated in Radial\n and Transversal Component. The angle is given as the back-azimuth, that is\n defined as the angle measured between the vector pointing from the station\n to the source and the vector pointing from the station to the North.\n\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :return: Radial and Transversal component of seismogram.\n \"\"\"\n if len(n) != len(e):\n raise TypeError(\"North and East component have different length.\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees.\")\n ba = radians(ba)\n r = - e * sin(ba) - n * cos(ba)\n t = - e * cos(ba) + n * sin(ba)\n return r, t\n\n\ndef rotate_rt_ne(n, e, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n Rotates from radial and transversal components to North and East\n components.\n\n This is the inverse transformation of the transformation described\n in :func:`rotate_ne_rt`.\n \"\"\"\n ba = 360.0 - ba\n return rotate_ne_rt(n, e, ba)\n\n\ndef rotate_zne_lqt(z, n, e, ba, inc):\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from ZNE (Z, North, East, left-handed) to\n LQT (e.g. ray coordinate system, right-handed). 
The rotation angles are\n given as the back-azimuth and inclination.\n\n The transformation consists of 3 steps::\n\n 1. mirroring of E-component at ZN plain: ZNE -> ZNW\n 2. negative rotation of coordinate system around Z-axis with angle ba:\n ZNW -> ZRT\n 3. negative rotation of coordinate system around T-axis with angle inc:\n ZRT -> LQT\n\n :type z: :class:`~numpy.ndarray`\n :param z: Data of the Z component of the seismogram.\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :type inc: float\n :param inc: The inclination of the ray at the station in degrees.\n :return: L-, Q- and T-component of seismogram.\n \"\"\"\n if len(z) != len(n) or len(z) != len(e):\n raise TypeError(\"Z, North and East component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba) # NOQA\n q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba) # NOQA\n t = n * sin(ba) - e * cos(ba) # NOQA\n return l, q, t\n\n\ndef rotate_lqt_zne(l, q, t, ba, inc): # NOQA\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from LQT to ZNE.\n This is the inverse transformation of the transformation described\n in :func:`rotate_zne_lqt`.\n \"\"\"\n if len(l) != len(q) or len(l) != len(t):\n raise TypeError(\"L, Q and T component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n z = l * cos(inc) + q * sin(inc)\n n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)\n e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)\n return z, n, e\n\n\ndef _dip_azimuth2zne_base_vector(dip, azimuth):\n \"\"\"\n Helper function converting a vector described with azimuth and dip of unit\n length to a vector in the ZNE (Vertical, North, East) base.\n\n The definition of azimuth and dip is according to the SEED reference\n manual.\n \"\"\"\n dip = np.deg2rad(dip)\n azimuth = np.deg2rad(azimuth)\n\n return np.array([-np.sin(dip),\n np.cos(azimuth) * np.cos(dip),\n np.sin(azimuth) * np.cos(dip)])\n\n\ndef rotate2zne(data_1, azimuth_1, dip_1, data_2, azimuth_2, dip_2, data_3,\n azimuth_3, dip_3, inverse=False):\n \"\"\"\n Rotates an arbitrarily oriented three-component vector to ZNE.\n\n Each components orientation is described with a azimuth and a dip. The\n azimuth is defined as the degrees from North, clockwise and the dip is the\n defined as the number of degrees, down from horizontal. Both definitions\n are according to the SEED standard.\n\n The three components need not be orthogonal to each other but the\n components have to be linearly independent. 
The function performs a full\n base change to orthogonal Vertical, North, and East orientations.\n\n :param data_1: Data component 1.\n :param azimuth_1: The azimuth of component 1.\n :param dip_1: The dip of component 1.\n :param data_2: Data component 2.\n :param azimuth_2: The azimuth of component 2.\n :param dip_2: The dip of component 2.\n :param data_3: Data component 3.\n :param azimuth_3: The azimuth of component 3.\n :param dip_3: The dip of component 3.\n :param inverse: If `True`, the data arrays will be converted from ZNE to\n whatever coordinate system the azimuths and dips specify. In that\n case data_1, data_2, data_3 have to be data arrays for Z, N,\n and E and the dips and azimuths specify where to transform to.\n :type inverse: bool\n\n :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`,\n :class:`numpy.ndarray`)\n :returns: The three rotated components, oriented in Z, N, and E if\n `inverse` is `False`. Otherwise they will be oriented as specified\n by the dips and azimuths.\n\n An input of ZNE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 0, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., 2., 4.]), array([ 0., 3., 6.]))\n\n An input of ZSE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 180, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., -2., -4.]), array([ 0., 3., 6.]))\n\n Mixed up components should get rotated to ZNE.\n\n >>> rotate2zne(np.arange(3), 0, 0, np.arange(3) * 2, 90, 0, \\\n np.arange(3) * 3, 0, -90) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 3., 6.]), array([ 0., 1., 2.]), array([ 0., 2., 4.]))\n \"\"\"\n if len(set(len(i_) for i_ in (data_1, data_2, data_3))) != 1:\n msg = \"All three data arrays must be of same length.\"\n raise ValueError(msg)\n\n # Define the base vectors of the old base in terms of the new base vectors.\n base_vector_1 = _dip_azimuth2zne_base_vector(dip_1, azimuth_1)\n base_vector_2 = _dip_azimuth2zne_base_vector(dip_2, azimuth_2)\n base_vector_3 = _dip_azimuth2zne_base_vector(dip_3, azimuth_3)\n\n # Base change matrix.\n m = np.array([base_vector_1,\n base_vector_2,\n base_vector_3])\n\n # Determinant gives the volume change of a unit cube going from one\n # basis to the next. It should neither be too small nor to large. These\n # here are arbitrary limits.\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n '.*invalid value encountered in det.*')\n det = np.linalg.det(m)\n if not (1E-6 < abs(det) < 1E6):\n raise ValueError(\"The given directions are not linearly independent, \"\n \"at least within numerical precision. Determinant \"\n \"of the base change matrix: %g\" % det)\n\n if not inverse:\n m = np.linalg.inv(m)\n\n z, n, e = np.dot(m, [data_1, data_2, data_3])\n\n # Replace all negative zeros. 
These might confuse some further\n # processing programs.\n z = np.array(z).ravel()\n z[z == -0.0] = 0\n n = np.array(n).ravel()\n n[n == -0.0] = 0\n e = np.array(e).ravel()\n e[e == -0.0] = 0\n\n return z, n, e\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/signal/rotate.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------\n# Filename: rotate.py\n# Purpose: Various Seismogram Rotation Functions\n# Author: Tobias Megies, Tom Richter, Lion Krischer\n# Email: [email protected]\n#\n# Copyright (C) 2009-2013 Tobias Megies, Tom Richter, Lion Krischer\n# --------------------------------------------------------------------\n\"\"\"\nVarious Seismogram Rotation Functions\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport warnings\nfrom math import cos, sin, radians\n\nimport numpy as np\n\n\ndef rotate_ne_rt(n, e, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n The North- and East-Component of a seismogram will be rotated in Radial\n and Transversal Component. The angle is given as the back-azimuth, that is\n defined as the angle measured between the vector pointing from the station\n to the source and the vector pointing from the station to the North.\n\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :return: Radial and Transversal component of seismogram.\n \"\"\"\n if len(n) != len(e):\n raise TypeError(\"North and East component have different length.\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees.\")\n ba = radians(ba)\n r = - e * sin(ba) - n * cos(ba)\n t = - e * cos(ba) + n * sin(ba)\n return r, t\n\n\ndef rotate_rt_ne(r, t, ba):\n \"\"\"\n Rotates horizontal components of a seismogram.\n\n Rotates from radial and transversal components to North and East\n components.\n\n This is the inverse transformation of the transformation described\n in :func:`rotate_ne_rt`.\n\n :type r: :class:`~numpy.ndarray`\n :param r: Data of the Radial component of the seismogram.\n :type t: :class:`~numpy.ndarray`\n :param t: Data of the Transverse component of the seismogram.\n :returns: North and East component of seismogram.\n \"\"\"\n ba = 360.0 - ba\n return rotate_ne_rt(r, t, ba)\n\n\ndef rotate_zne_lqt(z, n, e, ba, inc):\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from ZNE (Z, North, East, left-handed) to\n LQT (e.g. ray coordinate system, right-handed). The rotation angles are\n given as the back-azimuth and inclination.\n\n The transformation consists of 3 steps::\n\n 1. mirroring of E-component at ZN plain: ZNE -> ZNW\n 2. negative rotation of coordinate system around Z-axis with angle ba:\n ZNW -> ZRT\n 3. 
negative rotation of coordinate system around T-axis with angle inc:\n ZRT -> LQT\n\n :type z: :class:`~numpy.ndarray`\n :param z: Data of the Z component of the seismogram.\n :type n: :class:`~numpy.ndarray`\n :param n: Data of the North component of the seismogram.\n :type e: :class:`~numpy.ndarray`\n :param e: Data of the East component of the seismogram.\n :type ba: float\n :param ba: The back azimuth from station to source in degrees.\n :type inc: float\n :param inc: The inclination of the ray at the station in degrees.\n :return: L-, Q- and T-component of seismogram.\n \"\"\"\n if len(z) != len(n) or len(z) != len(e):\n raise TypeError(\"Z, North and East component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n l = z * cos(inc) - n * sin(inc) * cos(ba) - e * sin(inc) * sin(ba) # NOQA\n q = z * sin(inc) + n * cos(inc) * cos(ba) + e * cos(inc) * sin(ba) # NOQA\n t = n * sin(ba) - e * cos(ba) # NOQA\n return l, q, t\n\n\ndef rotate_lqt_zne(l, q, t, ba, inc): # NOQA\n \"\"\"\n Rotates all components of a seismogram.\n\n The components will be rotated from LQT to ZNE.\n This is the inverse transformation of the transformation described\n in :func:`rotate_zne_lqt`.\n \"\"\"\n if len(l) != len(q) or len(l) != len(t):\n raise TypeError(\"L, Q and T component have different length!?!\")\n if ba < 0 or ba > 360:\n raise ValueError(\"Back Azimuth should be between 0 and 360 degrees!\")\n if inc < 0 or inc > 360:\n raise ValueError(\"Inclination should be between 0 and 360 degrees!\")\n ba = radians(ba)\n inc = radians(inc)\n z = l * cos(inc) + q * sin(inc)\n n = -l * sin(inc) * cos(ba) + q * cos(inc) * cos(ba) + t * sin(ba)\n e = -l * sin(inc) * sin(ba) + q * cos(inc) * sin(ba) - t * cos(ba)\n return z, n, e\n\n\ndef _dip_azimuth2zne_base_vector(dip, azimuth):\n \"\"\"\n Helper function converting a vector described with azimuth and dip of unit\n length to a vector in the ZNE (Vertical, North, East) base.\n\n The definition of azimuth and dip is according to the SEED reference\n manual.\n \"\"\"\n dip = np.deg2rad(dip)\n azimuth = np.deg2rad(azimuth)\n\n return np.array([-np.sin(dip),\n np.cos(azimuth) * np.cos(dip),\n np.sin(azimuth) * np.cos(dip)])\n\n\ndef rotate2zne(data_1, azimuth_1, dip_1, data_2, azimuth_2, dip_2, data_3,\n azimuth_3, dip_3, inverse=False):\n \"\"\"\n Rotates an arbitrarily oriented three-component vector to ZNE.\n\n Each components orientation is described with a azimuth and a dip. The\n azimuth is defined as the degrees from North, clockwise and the dip is the\n defined as the number of degrees, down from horizontal. Both definitions\n are according to the SEED standard.\n\n The three components need not be orthogonal to each other but the\n components have to be linearly independent. 
The function performs a full\n base change to orthogonal Vertical, North, and East orientations.\n\n :param data_1: Data component 1.\n :param azimuth_1: The azimuth of component 1.\n :param dip_1: The dip of component 1.\n :param data_2: Data component 2.\n :param azimuth_2: The azimuth of component 2.\n :param dip_2: The dip of component 2.\n :param data_3: Data component 3.\n :param azimuth_3: The azimuth of component 3.\n :param dip_3: The dip of component 3.\n :param inverse: If `True`, the data arrays will be converted from ZNE to\n whatever coordinate system the azimuths and dips specify. In that\n case data_1, data_2, data_3 have to be data arrays for Z, N,\n and E and the dips and azimuths specify where to transform to.\n :type inverse: bool\n\n :rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`,\n :class:`numpy.ndarray`)\n :returns: The three rotated components, oriented in Z, N, and E if\n `inverse` is `False`. Otherwise they will be oriented as specified\n by the dips and azimuths.\n\n An input of ZNE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 0, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., 2., 4.]), array([ 0., 3., 6.]))\n\n An input of ZSE yields an output of ZNE\n\n >>> rotate2zne(np.arange(3), 0, -90, np.arange(3) * 2, 180, 0, \\\n np.arange(3) * 3, 90, 0) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 1., 2.]), array([ 0., -2., -4.]), array([ 0., 3., 6.]))\n\n Mixed up components should get rotated to ZNE.\n\n >>> rotate2zne(np.arange(3), 0, 0, np.arange(3) * 2, 90, 0, \\\n np.arange(3) * 3, 0, -90) # doctest: +NORMALIZE_WHITESPACE\n (array([ 0., 3., 6.]), array([ 0., 1., 2.]), array([ 0., 2., 4.]))\n \"\"\"\n if len(set(len(i_) for i_ in (data_1, data_2, data_3))) != 1:\n msg = \"All three data arrays must be of same length.\"\n raise ValueError(msg)\n\n # Define the base vectors of the old base in terms of the new base vectors.\n base_vector_1 = _dip_azimuth2zne_base_vector(dip_1, azimuth_1)\n base_vector_2 = _dip_azimuth2zne_base_vector(dip_2, azimuth_2)\n base_vector_3 = _dip_azimuth2zne_base_vector(dip_3, azimuth_3)\n\n # Base change matrix.\n m = np.array([base_vector_1,\n base_vector_2,\n base_vector_3])\n\n # Determinant gives the volume change of a unit cube going from one\n # basis to the next. It should neither be too small nor to large. These\n # here are arbitrary limits.\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore',\n '.*invalid value encountered in det.*')\n det = np.linalg.det(m)\n if not (1E-6 < abs(det) < 1E6):\n raise ValueError(\"The given directions are not linearly independent, \"\n \"at least within numerical precision. Determinant \"\n \"of the base change matrix: %g\" % det)\n\n if not inverse:\n m = np.linalg.inv(m)\n\n z, n, e = np.dot(m, [data_1, data_2, data_3])\n\n # Replace all negative zeros. These might confuse some further\n # processing programs.\n z = np.array(z).ravel()\n z[z == -0.0] = 0\n n = np.array(n).ravel()\n n[n == -0.0] = 0\n e = np.array(e).ravel()\n e[e == -0.0] = 0\n\n return z, n, e\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/signal/rotate.py"}]} | 3,691 | 262 |
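For quick reference, the golden diff in this row amounts to renaming `rotate_rt_ne`'s parameters from `(n, e, ba)` to `(r, t, ba)` and documenting them as the radial and transverse inputs. A condensed sketch of the patched function (docstring shortened; it relies on `rotate_ne_rt` defined earlier in the same module) is:

```python
def rotate_rt_ne(r, t, ba):
    """Rotate radial (r) and transverse (t) components back to north/east.

    Inverse of rotate_ne_rt, realised by applying the NE->RT rotation
    with the complementary back azimuth (360 - ba).
    """
    ba = 360.0 - ba
    return rotate_ne_rt(r, t, ba)
```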
gh_patches_debug_60939 | rasdani/github-patches | git_diff | Netflix__lemur-796 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding domain fails on unselectable "sensitive"
Client side

Server side

```
May 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function
May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post
May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])
May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web
May 12 09:05:48 lemur supervisord: lemur-web 'sensitive'
May 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/common/schema.py", line 158, in decorated_function
May 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)
May 12 09:05:48 lemur supervisord: lemur-web File "/var/www/lemur/lemur/domains/views.py", line 126, in post
May 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])
May 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/domains/schemas.py`
Content:
```
1 """
2 .. module: lemur.domains.schemas
3 :platform: unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <[email protected]>
7 """
8 from marshmallow import fields
9 from lemur.common.schema import LemurInputSchema, LemurOutputSchema
10 from lemur.schemas import AssociatedCertificateSchema
11
12 # from lemur.certificates.schemas import CertificateNestedOutputSchema
13
14
15 class DomainInputSchema(LemurInputSchema):
16 id = fields.Integer()
17 name = fields.String(required=True)
18 sensitive = fields.Boolean()
19 certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
20
21
22 class DomainOutputSchema(LemurOutputSchema):
23 id = fields.Integer()
24 name = fields.String()
25 sensitive = fields.Boolean()
26 # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])
27
28
29 class DomainNestedOutputSchema(DomainOutputSchema):
30 __envelope__ = False
31
32
33 domain_input_schema = DomainInputSchema()
34 domain_output_schema = DomainOutputSchema()
35 domains_output_schema = DomainOutputSchema(many=True)
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py
--- a/lemur/domains/schemas.py
+++ b/lemur/domains/schemas.py
@@ -15,7 +15,7 @@
class DomainInputSchema(LemurInputSchema):
id = fields.Integer()
name = fields.String(required=True)
- sensitive = fields.Boolean()
+ sensitive = fields.Boolean(missing=False)
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
| {"golden_diff": "diff --git a/lemur/domains/schemas.py b/lemur/domains/schemas.py\n--- a/lemur/domains/schemas.py\n+++ b/lemur/domains/schemas.py\n@@ -15,7 +15,7 @@\n class DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n- sensitive = fields.Boolean()\n+ sensitive = fields.Boolean(missing=False)\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n", "issue": "Adding domain fails on unselectable \"sensitive\"\nClient side\r\n\r\n\r\n\r\n\r\nServer side\r\n\r\n```\r\nMay 12 09:05:48 lemur supervisord: lemur-web [2017-05-12 09:05:48,892] ERROR in schema: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web\r\nMay 12 09:05:48 lemur supervisord: lemur-web 'sensitive'\r\nMay 12 09:05:48 lemur supervisord: lemur-web Traceback (most recent call last):\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/common/schema.py\", line 158, in decorated_function\r\nMay 12 09:05:48 lemur supervisord: lemur-web resp = f(*args, **kwargs)\r\nMay 12 09:05:48 lemur supervisord: lemur-web File \"/var/www/lemur/lemur/domains/views.py\", line 126, in post\r\nMay 12 09:05:48 lemur supervisord: lemur-web return service.create(data['name'], data['sensitive'])\r\nMay 12 09:05:48 lemur supervisord: lemur-web KeyError: 'sensitive'\r\n```\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.domains.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom marshmallow import fields\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.schemas import AssociatedCertificateSchema\n\n# from lemur.certificates.schemas import CertificateNestedOutputSchema\n\n\nclass DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n sensitive = fields.Boolean()\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n\n\nclass DomainOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n name = fields.String()\n sensitive = fields.Boolean()\n # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])\n\n\nclass DomainNestedOutputSchema(DomainOutputSchema):\n __envelope__ = False\n\n\ndomain_input_schema = DomainInputSchema()\ndomain_output_schema = DomainOutputSchema()\ndomains_output_schema = DomainOutputSchema(many=True)\n", "path": "lemur/domains/schemas.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.domains.schemas\n :platform: unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom marshmallow import fields\nfrom lemur.common.schema import LemurInputSchema, LemurOutputSchema\nfrom lemur.schemas import AssociatedCertificateSchema\n\n# from lemur.certificates.schemas import CertificateNestedOutputSchema\n\n\nclass DomainInputSchema(LemurInputSchema):\n id = fields.Integer()\n name = fields.String(required=True)\n sensitive = fields.Boolean(missing=False)\n certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])\n\n\nclass DomainOutputSchema(LemurOutputSchema):\n id = fields.Integer()\n name = fields.String()\n sensitive = fields.Boolean()\n # certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])\n\n\nclass DomainNestedOutputSchema(DomainOutputSchema):\n __envelope__ = False\n\n\ndomain_input_schema = DomainInputSchema()\ndomain_output_schema = DomainOutputSchema()\ndomains_output_schema = DomainOutputSchema(many=True)\n", "path": "lemur/domains/schemas.py"}]} | 1,232 | 116 |
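The patch in this row is a single default: giving `sensitive` a `missing=False` so marshmallow fills the field in when the client omits it, and `data['sensitive']` no longer raises `KeyError` in the view. A minimal sketch of the patched input schema, using the same imports as the original file, is:

```python
from marshmallow import fields

from lemur.common.schema import LemurInputSchema
from lemur.schemas import AssociatedCertificateSchema


class DomainInputSchema(LemurInputSchema):
    id = fields.Integer()
    name = fields.String(required=True)
    # missing=False: deserialization defaults the flag instead of leaving
    # the key out, so service.create(data['name'], data['sensitive'])
    # always receives a boolean.
    sensitive = fields.Boolean(missing=False)
    certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
```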
gh_patches_debug_10372 | rasdani/github-patches | git_diff | scrapy__scrapy-4170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle it gracefully when start_url is used instead of start_urls
Over the last year I’ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`.
It may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/spiders/__init__.py`
Content:
```
1 """
2 Base class for Scrapy spiders
3
4 See documentation in docs/topics/spiders.rst
5 """
6 import logging
7 import warnings
8
9 from scrapy import signals
10 from scrapy.http import Request
11 from scrapy.utils.trackref import object_ref
12 from scrapy.utils.url import url_is_from_spider
13 from scrapy.exceptions import ScrapyDeprecationWarning
14 from scrapy.utils.deprecate import method_is_overridden
15
16
17 class Spider(object_ref):
18 """Base class for scrapy spiders. All spiders must inherit from this
19 class.
20 """
21
22 name = None
23 custom_settings = None
24
25 def __init__(self, name=None, **kwargs):
26 if name is not None:
27 self.name = name
28 elif not getattr(self, 'name', None):
29 raise ValueError("%s must have a name" % type(self).__name__)
30 self.__dict__.update(kwargs)
31 if not hasattr(self, 'start_urls'):
32 self.start_urls = []
33
34 @property
35 def logger(self):
36 logger = logging.getLogger(self.name)
37 return logging.LoggerAdapter(logger, {'spider': self})
38
39 def log(self, message, level=logging.DEBUG, **kw):
40 """Log the given message at the given log level
41
42 This helper wraps a log call to the logger within the spider, but you
43 can use it directly (e.g. Spider.logger.info('msg')) or use any other
44 Python logger too.
45 """
46 self.logger.log(level, message, **kw)
47
48 @classmethod
49 def from_crawler(cls, crawler, *args, **kwargs):
50 spider = cls(*args, **kwargs)
51 spider._set_crawler(crawler)
52 return spider
53
54 def _set_crawler(self, crawler):
55 self.crawler = crawler
56 self.settings = crawler.settings
57 crawler.signals.connect(self.close, signals.spider_closed)
58
59 def start_requests(self):
60 cls = self.__class__
61 if method_is_overridden(cls, Spider, 'make_requests_from_url'):
62 warnings.warn(
63 "Spider.make_requests_from_url method is deprecated; it "
64 "won't be called in future Scrapy releases. Please "
65 "override Spider.start_requests method instead (see %s.%s)." % (
66 cls.__module__, cls.__name__
67 ),
68 )
69 for url in self.start_urls:
70 yield self.make_requests_from_url(url)
71 else:
72 for url in self.start_urls:
73 yield Request(url, dont_filter=True)
74
75 def make_requests_from_url(self, url):
76 """ This method is deprecated. """
77 return Request(url, dont_filter=True)
78
79 def parse(self, response):
80 raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))
81
82 @classmethod
83 def update_settings(cls, settings):
84 settings.setdict(cls.custom_settings or {}, priority='spider')
85
86 @classmethod
87 def handles_request(cls, request):
88 return url_is_from_spider(request.url, cls)
89
90 @staticmethod
91 def close(spider, reason):
92 closed = getattr(spider, 'closed', None)
93 if callable(closed):
94 return closed(reason)
95
96 def __str__(self):
97 return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
98
99 __repr__ = __str__
100
101
102 # Top-level imports
103 from scrapy.spiders.crawl import CrawlSpider, Rule
104 from scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider
105 from scrapy.spiders.sitemap import SitemapSpider
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py
--- a/scrapy/spiders/__init__.py
+++ b/scrapy/spiders/__init__.py
@@ -58,6 +58,11 @@
def start_requests(self):
cls = self.__class__
+ if not self.start_urls and hasattr(self, 'start_url'):
+ raise AttributeError(
+ "Crawling could not start: 'start_urls' not found "
+ "or empty (but found 'start_url' attribute instead, "
+ "did you miss an 's'?)")
if method_is_overridden(cls, Spider, 'make_requests_from_url'):
warnings.warn(
"Spider.make_requests_from_url method is deprecated; it "
| {"golden_diff": "diff --git a/scrapy/spiders/__init__.py b/scrapy/spiders/__init__.py\n--- a/scrapy/spiders/__init__.py\n+++ b/scrapy/spiders/__init__.py\n@@ -58,6 +58,11 @@\n \n def start_requests(self):\n cls = self.__class__\n+ if not self.start_urls and hasattr(self, 'start_url'):\n+ raise AttributeError(\n+ \"Crawling could not start: 'start_urls' not found \"\n+ \"or empty (but found 'start_url' attribute instead, \"\n+ \"did you miss an 's'?)\")\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n", "issue": "Handle it gracefully when start_url is used instead of start_urls\nOver the last year I\u2019ve seen a few cases ([recent example](https://stackoverflow.com/q/58664004/939364)) of this, people missing the `s` at the end of the `start_urls`.\r\n\r\nIt may be nice to find a way to gracefully let the developer know where the issue is, why there is no crawling happening.\n", "before_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\nimport logging\nimport warnings\n\nfrom scrapy import signals\nfrom scrapy.http import Request\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.deprecate import method_is_overridden\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. All spiders must inherit from this\n class.\n \"\"\"\n\n name = None\n custom_settings = None\n\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n elif not getattr(self, 'name', None):\n raise ValueError(\"%s must have a name\" % type(self).__name__)\n self.__dict__.update(kwargs)\n if not hasattr(self, 'start_urls'):\n self.start_urls = []\n\n @property\n def logger(self):\n logger = logging.getLogger(self.name)\n return logging.LoggerAdapter(logger, {'spider': self})\n\n def log(self, message, level=logging.DEBUG, **kw):\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler):\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self):\n cls = self.__class__\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n \"won't be called in future Scrapy releases. Please \"\n \"override Spider.start_requests method instead (see %s.%s).\" % (\n cls.__module__, cls.__name__\n ),\n )\n for url in self.start_urls:\n yield self.make_requests_from_url(url)\n else:\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def make_requests_from_url(self, url):\n \"\"\" This method is deprecated. 
\"\"\"\n return Request(url, dont_filter=True)\n\n def parse(self, response):\n raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))\n\n @classmethod\n def update_settings(cls, settings):\n settings.setdict(cls.custom_settings or {}, priority='spider')\n\n @classmethod\n def handles_request(cls, request):\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider, reason):\n closed = getattr(spider, 'closed', None)\n if callable(closed):\n return closed(reason)\n\n def __str__(self):\n return \"<%s %r at 0x%0x>\" % (type(self).__name__, self.name, id(self))\n\n __repr__ = __str__\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}], "after_files": [{"content": "\"\"\"\nBase class for Scrapy spiders\n\nSee documentation in docs/topics/spiders.rst\n\"\"\"\nimport logging\nimport warnings\n\nfrom scrapy import signals\nfrom scrapy.http import Request\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.utils.url import url_is_from_spider\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.deprecate import method_is_overridden\n\n\nclass Spider(object_ref):\n \"\"\"Base class for scrapy spiders. All spiders must inherit from this\n class.\n \"\"\"\n\n name = None\n custom_settings = None\n\n def __init__(self, name=None, **kwargs):\n if name is not None:\n self.name = name\n elif not getattr(self, 'name', None):\n raise ValueError(\"%s must have a name\" % type(self).__name__)\n self.__dict__.update(kwargs)\n if not hasattr(self, 'start_urls'):\n self.start_urls = []\n\n @property\n def logger(self):\n logger = logging.getLogger(self.name)\n return logging.LoggerAdapter(logger, {'spider': self})\n\n def log(self, message, level=logging.DEBUG, **kw):\n \"\"\"Log the given message at the given log level\n\n This helper wraps a log call to the logger within the spider, but you\n can use it directly (e.g. Spider.logger.info('msg')) or use any other\n Python logger too.\n \"\"\"\n self.logger.log(level, message, **kw)\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n spider = cls(*args, **kwargs)\n spider._set_crawler(crawler)\n return spider\n\n def _set_crawler(self, crawler):\n self.crawler = crawler\n self.settings = crawler.settings\n crawler.signals.connect(self.close, signals.spider_closed)\n\n def start_requests(self):\n cls = self.__class__\n if not self.start_urls and hasattr(self, 'start_url'):\n raise AttributeError(\n \"Crawling could not start: 'start_urls' not found \"\n \"or empty (but found 'start_url' attribute instead, \"\n \"did you miss an 's'?)\")\n if method_is_overridden(cls, Spider, 'make_requests_from_url'):\n warnings.warn(\n \"Spider.make_requests_from_url method is deprecated; it \"\n \"won't be called in future Scrapy releases. Please \"\n \"override Spider.start_requests method instead (see %s.%s).\" % (\n cls.__module__, cls.__name__\n ),\n )\n for url in self.start_urls:\n yield self.make_requests_from_url(url)\n else:\n for url in self.start_urls:\n yield Request(url, dont_filter=True)\n\n def make_requests_from_url(self, url):\n \"\"\" This method is deprecated. 
\"\"\"\n return Request(url, dont_filter=True)\n\n def parse(self, response):\n raise NotImplementedError('{}.parse callback is not defined'.format(self.__class__.__name__))\n\n @classmethod\n def update_settings(cls, settings):\n settings.setdict(cls.custom_settings or {}, priority='spider')\n\n @classmethod\n def handles_request(cls, request):\n return url_is_from_spider(request.url, cls)\n\n @staticmethod\n def close(spider, reason):\n closed = getattr(spider, 'closed', None)\n if callable(closed):\n return closed(reason)\n\n def __str__(self):\n return \"<%s %r at 0x%0x>\" % (type(self).__name__, self.name, id(self))\n\n __repr__ = __str__\n\n\n# Top-level imports\nfrom scrapy.spiders.crawl import CrawlSpider, Rule\nfrom scrapy.spiders.feed import XMLFeedSpider, CSVFeedSpider\nfrom scrapy.spiders.sitemap import SitemapSpider\n", "path": "scrapy/spiders/__init__.py"}]} | 1,318 | 169 |
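The guard added by this row's golden diff can be read in isolation; a sketch of the patched `start_requests` with the unchanged remainder elided is shown below (this is a method of `Spider`, so `self` is the running spider instance):

```python
def start_requests(self):
    cls = self.__class__
    # Added check: fail with a pointed message when the spider defines
    # 'start_url' (singular) but has no usable 'start_urls'.
    if not self.start_urls and hasattr(self, 'start_url'):
        raise AttributeError(
            "Crawling could not start: 'start_urls' not found "
            "or empty (but found 'start_url' attribute instead, "
            "did you miss an 's'?)")
    # ... the make_requests_from_url deprecation shim and the normal
    # Request generation over self.start_urls continue as before.
```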
gh_patches_debug_866 | rasdani/github-patches | git_diff | streamlit__streamlit-5184 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It should be:
https://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/alert.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import cast, Optional, TYPE_CHECKING
16
17 from streamlit.errors import StreamlitAPIException
18 from streamlit.proto.Alert_pb2 import Alert as AlertProto
19 from streamlit.string_util import clean_text, is_emoji
20
21 if TYPE_CHECKING:
22 from streamlit.delta_generator import DeltaGenerator
23 from streamlit.type_util import SupportsStr
24
25
26 def validate_emoji(maybe_emoji: Optional[str]) -> str:
27 if maybe_emoji is None:
28 return ""
29 elif is_emoji(maybe_emoji):
30 return maybe_emoji
31 else:
32 raise StreamlitAPIException(
33 f'The value "{maybe_emoji}" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'
34 )
35
36
37 class AlertMixin:
38 def error(
39 self,
40 body: "SupportsStr",
41 *, # keyword-only args:
42 icon: Optional[str] = None,
43 ) -> "DeltaGenerator":
44 """Display error message.
45
46 Parameters
47 ----------
48 icon : None
49 An optional parameter, that adds an emoji to the alert.
50 The default is None.
51 This argument can only be supplied by keyword.
52 body : str
53 The error text to display.
54
55 Example
56 -------
57 >>> st.error('This is an error', icon="🚨")
58
59 """
60 alert_proto = AlertProto()
61 alert_proto.icon = validate_emoji(icon)
62 alert_proto.body = clean_text(body)
63 alert_proto.format = AlertProto.ERROR
64 return self.dg._enqueue("alert", alert_proto)
65
66 def warning(
67 self,
68 body: "SupportsStr",
69 *, # keyword-only args:
70 icon: Optional[str] = None,
71 ) -> "DeltaGenerator":
72 """Display warning message.
73
74 Parameters
75 ----------
76 icon : None
77 An optional parameter, that adds an emoji to the alert.
78 The default is None.
79 This argument can only be supplied by keyword.
80
81 body : str
82 The warning text to display.
83
84 Example
85 -------
86 >>> st.warning('This is a warning', icon="⚠️")
87
88 """
89 alert_proto = AlertProto()
90 alert_proto.body = clean_text(body)
91 alert_proto.icon = validate_emoji(icon)
92 alert_proto.format = AlertProto.WARNING
93 return self.dg._enqueue("alert", alert_proto)
94
95 def info(
96 self,
97 body: "SupportsStr",
98 *, # keyword-only args:
99 icon: Optional[str] = None,
100 ) -> "DeltaGenerator":
101 """Display an informational message.
102
103 Parameters
104 ----------
105 icon : None
106 An optional parameter, that adds an emoji to the alert.
107 The default is None.
108 This argument can only be supplied by keyword.
109
110 body : str
111 The info text to display.
112
113 Example
114 -------
115 >>> st.info('This is a purely informational message', icon="ℹ️")
116
117 """
118
119 alert_proto = AlertProto()
120 alert_proto.body = clean_text(body)
121 alert_proto.icon = validate_emoji(icon)
122 alert_proto.format = AlertProto.INFO
123 return self.dg._enqueue("alert", alert_proto)
124
125 def success(
126 self,
127 body: "SupportsStr",
128 *, # keyword-only args:
129 icon: Optional[str] = None,
130 ) -> "DeltaGenerator":
131 """Display a success message.
132
133 Parameters
134 ----------
135 icon : None
136 An optional parameter, that adds an emoji to the alert.
137 The default is None.
138 This argument can only be supplied by keyword.
139
140 body : str
141 The success text to display.
142
143 Example
144 -------
145 >>> st.success('This is a success message!', icon:"✅")
146
147 """
148 alert_proto = AlertProto()
149 alert_proto.body = clean_text(body)
150 alert_proto.icon = validate_emoji(icon)
151 alert_proto.format = AlertProto.SUCCESS
152 return self.dg._enqueue("alert", alert_proto)
153
154 @property
155 def dg(self) -> "DeltaGenerator":
156 """Get our DeltaGenerator."""
157 return cast("DeltaGenerator", self)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py
--- a/lib/streamlit/elements/alert.py
+++ b/lib/streamlit/elements/alert.py
@@ -142,7 +142,7 @@
Example
-------
- >>> st.success('This is a success message!', icon:"✅")
+ >>> st.success('This is a success message!', icon="✅")
"""
alert_proto = AlertProto()
| {"golden_diff": "diff --git a/lib/streamlit/elements/alert.py b/lib/streamlit/elements/alert.py\n--- a/lib/streamlit/elements/alert.py\n+++ b/lib/streamlit/elements/alert.py\n@@ -142,7 +142,7 @@\n \n Example\n -------\n- >>> st.success('This is a success message!', icon:\"\u2705\")\n+ >>> st.success('This is a success message!', icon=\"\u2705\")\n \n \"\"\"\n alert_proto = AlertProto()\n", "issue": "It should be :\nhttps://github.com/streamlit/streamlit/blob/535f11765817657892506d6904bbbe04908dbdf3/lib/streamlit/elements/alert.py#L145\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, Optional, TYPE_CHECKING\n\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Alert_pb2 import Alert as AlertProto\nfrom streamlit.string_util import clean_text, is_emoji\n\nif TYPE_CHECKING:\n from streamlit.delta_generator import DeltaGenerator\n from streamlit.type_util import SupportsStr\n\n\ndef validate_emoji(maybe_emoji: Optional[str]) -> str:\n if maybe_emoji is None:\n return \"\"\n elif is_emoji(maybe_emoji):\n return maybe_emoji\n else:\n raise StreamlitAPIException(\n f'The value \"{maybe_emoji}\" is not a valid emoji. Shortcodes are not allowed, please use a single character instead.'\n )\n\n\nclass AlertMixin:\n def error(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display error message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n body : str\n The error text to display.\n\n Example\n -------\n >>> st.error('This is an error', icon=\"\ud83d\udea8\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.icon = validate_emoji(icon)\n alert_proto.body = clean_text(body)\n alert_proto.format = AlertProto.ERROR\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def warning(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display warning message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The warning text to display.\n\n Example\n -------\n >>> st.warning('This is a warning', icon=\"\u26a0\ufe0f\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.WARNING\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def info(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display an informational message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body 
: str\n The info text to display.\n\n Example\n -------\n >>> st.info('This is a purely informational message', icon=\"\u2139\ufe0f\")\n\n \"\"\"\n\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.INFO\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def success(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display a success message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The success text to display.\n\n Example\n -------\n >>> st.success('This is a success message!', icon:\"\u2705\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.SUCCESS\n return self.dg._enqueue(\"alert\", alert_proto)\n\n @property\n def dg(self) -> \"DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/alert.py"}], "after_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast, Optional, TYPE_CHECKING\n\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Alert_pb2 import Alert as AlertProto\nfrom streamlit.string_util import clean_text, is_emoji\n\nif TYPE_CHECKING:\n from streamlit.delta_generator import DeltaGenerator\n from streamlit.type_util import SupportsStr\n\n\ndef validate_emoji(maybe_emoji: Optional[str]) -> str:\n if maybe_emoji is None:\n return \"\"\n elif is_emoji(maybe_emoji):\n return maybe_emoji\n else:\n raise StreamlitAPIException(\n f'The value \"{maybe_emoji}\" is not a valid emoji. 
Shortcodes are not allowed, please use a single character instead.'\n )\n\n\nclass AlertMixin:\n def error(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display error message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n body : str\n The error text to display.\n\n Example\n -------\n >>> st.error('This is an error', icon=\"\ud83d\udea8\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.icon = validate_emoji(icon)\n alert_proto.body = clean_text(body)\n alert_proto.format = AlertProto.ERROR\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def warning(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display warning message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The warning text to display.\n\n Example\n -------\n >>> st.warning('This is a warning', icon=\"\u26a0\ufe0f\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.WARNING\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def info(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display an informational message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The info text to display.\n\n Example\n -------\n >>> st.info('This is a purely informational message', icon=\"\u2139\ufe0f\")\n\n \"\"\"\n\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.INFO\n return self.dg._enqueue(\"alert\", alert_proto)\n\n def success(\n self,\n body: \"SupportsStr\",\n *, # keyword-only args:\n icon: Optional[str] = None,\n ) -> \"DeltaGenerator\":\n \"\"\"Display a success message.\n\n Parameters\n ----------\n icon : None\n An optional parameter, that adds an emoji to the alert.\n The default is None.\n This argument can only be supplied by keyword.\n\n body : str\n The success text to display.\n\n Example\n -------\n >>> st.success('This is a success message!', icon=\"\u2705\")\n\n \"\"\"\n alert_proto = AlertProto()\n alert_proto.body = clean_text(body)\n alert_proto.icon = validate_emoji(icon)\n alert_proto.format = AlertProto.SUCCESS\n return self.dg._enqueue(\"alert\", alert_proto)\n\n @property\n def dg(self) -> \"DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/alert.py"}]} | 1,710 | 102 |
gh_patches_debug_8773 | rasdani/github-patches | git_diff | google__fuzzbench-72 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Eclipser maxfilelen value
I experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago.
I found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fuzzers/eclipser/fuzzer.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Integration code for Eclipser fuzzer."""
15
16 import os
17 import subprocess
18 import time
19 from multiprocessing import Process
20
21 from fuzzers import utils
22
23
24 def build():
25 """Build fuzzer."""
26 # QEMU does not work with sanitizers, so skip -fsanitize=. See
27 # https://github.com/SoftSec-KAIST/Eclipser/issues/5
28 utils.set_no_sanitizer_compilation_flags()
29 cflags = [
30 '-O2',
31 '-fno-omit-frame-pointer',
32 ]
33 utils.append_flags('CFLAGS', cflags)
34 utils.append_flags('CXXFLAGS', cflags)
35
36 os.environ['CC'] = 'clang'
37 os.environ['CXX'] = 'clang++'
38 os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'
39
40 utils.build_benchmark()
41
42
43 def fuzz(input_corpus, output_corpus, target_binary):
44 """Run fuzzer."""
45 # Create an encoded temp corpus directory.
46 encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),
47 'temp-corpus')
48 if not os.path.exists(encoded_temp_corpus):
49 os.mkdir(encoded_temp_corpus)
50
51 print('[run_fuzzer] Running target with Eclipser')
52 command = [
53 'dotnet',
54 '/Eclipser/build/Eclipser.dll',
55 'fuzz',
56 '-p',
57 target_binary,
58 '-t',
59 '1048576', # FIXME: Find the max value allowed here.
60 '-o',
61 encoded_temp_corpus,
62 '--src',
63 'file',
64 '--initarg',
65 'foo', # Specifies how command line argument is passed, just a file.
66 '-f',
67 'foo',
68 '--maxfilelen',
69 str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).
70 ]
71 if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.
72 command += ['-i', input_corpus]
73 subprocess.Popen(command)
74
75 process = Process(target=copy_corpus_directory,
76 args=(
77 encoded_temp_corpus,
78 output_corpus,
79 ))
80 process.start()
81
82
83 def copy_corpus_directory(encoded_temp_corpus, output_corpus):
84 """Copies corpus periodically from encoded corpus directory into output
85 directory."""
86 while True:
87 # Wait for initial fuzzer initialization, and after every copy.
88 time.sleep(120)
89
90 subprocess.call([
91 'dotnet',
92 '/Eclipser/build/Eclipser.dll',
93 'decode',
94 '-i',
95 os.path.join(encoded_temp_corpus, 'testcase'),
96 '-o',
97 output_corpus,
98 ])
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py
--- a/fuzzers/eclipser/fuzzer.py
+++ b/fuzzers/eclipser/fuzzer.py
@@ -66,7 +66,9 @@
'-f',
'foo',
'--maxfilelen',
- str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).
+ # Default is too low (8 bytes), match experiment config at:
+ # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25
+ '1048576',
]
if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.
command += ['-i', input_corpus]
| {"golden_diff": "diff --git a/fuzzers/eclipser/fuzzer.py b/fuzzers/eclipser/fuzzer.py\n--- a/fuzzers/eclipser/fuzzer.py\n+++ b/fuzzers/eclipser/fuzzer.py\n@@ -66,7 +66,9 @@\n '-f',\n 'foo',\n '--maxfilelen',\n- str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n+ # Default is too low (8 bytes), match experiment config at:\n+ # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25\n+ '1048576',\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n", "issue": "Eclipser maxfilelen value\nI experienced the same problem that you had in choosing maxfilelen to evaluate Eclipser some time ago.\r\nI found that they used 1048576 in their experiments (https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/master/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25), so maybe you want to use this value to produce consistent results with the paper.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration code for Eclipser fuzzer.\"\"\"\n\nimport os\nimport subprocess\nimport time\nfrom multiprocessing import Process\n\nfrom fuzzers import utils\n\n\ndef build():\n \"\"\"Build fuzzer.\"\"\"\n # QEMU does not work with sanitizers, so skip -fsanitize=. 
See\n # https://github.com/SoftSec-KAIST/Eclipser/issues/5\n utils.set_no_sanitizer_compilation_flags()\n cflags = [\n '-O2',\n '-fno-omit-frame-pointer',\n ]\n utils.append_flags('CFLAGS', cflags)\n utils.append_flags('CXXFLAGS', cflags)\n\n os.environ['CC'] = 'clang'\n os.environ['CXX'] = 'clang++'\n os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'\n\n utils.build_benchmark()\n\n\ndef fuzz(input_corpus, output_corpus, target_binary):\n \"\"\"Run fuzzer.\"\"\"\n # Create an encoded temp corpus directory.\n encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),\n 'temp-corpus')\n if not os.path.exists(encoded_temp_corpus):\n os.mkdir(encoded_temp_corpus)\n\n print('[run_fuzzer] Running target with Eclipser')\n command = [\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'fuzz',\n '-p',\n target_binary,\n '-t',\n '1048576', # FIXME: Find the max value allowed here.\n '-o',\n encoded_temp_corpus,\n '--src',\n 'file',\n '--initarg',\n 'foo', # Specifies how command line argument is passed, just a file.\n '-f',\n 'foo',\n '--maxfilelen',\n str(10 * 1024 * 1024), # Increase since default is too low (8 bytes).\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n subprocess.Popen(command)\n\n process = Process(target=copy_corpus_directory,\n args=(\n encoded_temp_corpus,\n output_corpus,\n ))\n process.start()\n\n\ndef copy_corpus_directory(encoded_temp_corpus, output_corpus):\n \"\"\"Copies corpus periodically from encoded corpus directory into output\n directory.\"\"\"\n while True:\n # Wait for initial fuzzer initialization, and after every copy.\n time.sleep(120)\n\n subprocess.call([\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'decode',\n '-i',\n os.path.join(encoded_temp_corpus, 'testcase'),\n '-o',\n output_corpus,\n ])\n", "path": "fuzzers/eclipser/fuzzer.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration code for Eclipser fuzzer.\"\"\"\n\nimport os\nimport subprocess\nimport time\nfrom multiprocessing import Process\n\nfrom fuzzers import utils\n\n\ndef build():\n \"\"\"Build fuzzer.\"\"\"\n # QEMU does not work with sanitizers, so skip -fsanitize=. 
See\n # https://github.com/SoftSec-KAIST/Eclipser/issues/5\n utils.set_no_sanitizer_compilation_flags()\n cflags = [\n '-O2',\n '-fno-omit-frame-pointer',\n ]\n utils.append_flags('CFLAGS', cflags)\n utils.append_flags('CXXFLAGS', cflags)\n\n os.environ['CC'] = 'clang'\n os.environ['CXX'] = 'clang++'\n os.environ['FUZZER_LIB'] = '/libStandaloneFuzzTarget.a'\n\n utils.build_benchmark()\n\n\ndef fuzz(input_corpus, output_corpus, target_binary):\n \"\"\"Run fuzzer.\"\"\"\n # Create an encoded temp corpus directory.\n encoded_temp_corpus = os.path.join(os.path.dirname(input_corpus),\n 'temp-corpus')\n if not os.path.exists(encoded_temp_corpus):\n os.mkdir(encoded_temp_corpus)\n\n print('[run_fuzzer] Running target with Eclipser')\n command = [\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'fuzz',\n '-p',\n target_binary,\n '-t',\n '1048576', # FIXME: Find the max value allowed here.\n '-o',\n encoded_temp_corpus,\n '--src',\n 'file',\n '--initarg',\n 'foo', # Specifies how command line argument is passed, just a file.\n '-f',\n 'foo',\n '--maxfilelen',\n # Default is too low (8 bytes), match experiment config at:\n # https://github.com/SoftSec-KAIST/Eclipser-Artifact/blob/6aadf02eeadb0416bd4c5edeafc8627bc24ebc82/docker-scripts/experiment-scripts/package-exp/run_eclipser.sh#L25\n '1048576',\n ]\n if os.listdir(input_corpus): # Important, otherwise Eclipser crashes.\n command += ['-i', input_corpus]\n subprocess.Popen(command)\n\n process = Process(target=copy_corpus_directory,\n args=(\n encoded_temp_corpus,\n output_corpus,\n ))\n process.start()\n\n\ndef copy_corpus_directory(encoded_temp_corpus, output_corpus):\n \"\"\"Copies corpus periodically from encoded corpus directory into output\n directory.\"\"\"\n while True:\n # Wait for initial fuzzer initialization, and after every copy.\n time.sleep(120)\n\n subprocess.call([\n 'dotnet',\n '/Eclipser/build/Eclipser.dll',\n 'decode',\n '-i',\n os.path.join(encoded_temp_corpus, 'testcase'),\n '-o',\n output_corpus,\n ])\n", "path": "fuzzers/eclipser/fuzzer.py"}]} | 1,253 | 226 |
gh_patches_debug_9888 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1415 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Django Extensions, deprecate and remove ptvsd
[Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger.
Another useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use.
With a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/manage.py`
Content:
```
1 #!/usr/bin/env python
2 """Django's command-line utility for administrative tasks."""
3 import os
4 import sys
5
6
7 def main():
8 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings")
9 # start new section
10 from django.conf import settings
11
12 if settings.DEBUG:
13 if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"):
14 import ptvsd
15
16 ptvsd.enable_attach(address=("0.0.0.0", 3000))
17 print("Attached!")
18 # end new section
19
20 try:
21 from django.core.management import execute_from_command_line
22 except ImportError as exc:
23 raise ImportError(
24 "Couldn't import Django. Are you sure it's installed and "
25 "available on your PYTHONPATH environment variable? Did you "
26 "forget to activate a virtual environment?"
27 ) from exc
28 execute_from_command_line(sys.argv)
29
30
31 if __name__ == "__main__":
32 main()
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py
--- a/django/cantusdb_project/manage.py
+++ b/django/cantusdb_project/manage.py
@@ -6,17 +6,6 @@
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cantusdb.settings")
- # start new section
- from django.conf import settings
-
- if settings.DEBUG:
- if os.environ.get("RUN_MAIN") or os.environ.get("WERKZEUG_RUN_MAIN"):
- import ptvsd
-
- ptvsd.enable_attach(address=("0.0.0.0", 3000))
- print("Attached!")
- # end new section
-
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
| {"golden_diff": "diff --git a/django/cantusdb_project/manage.py b/django/cantusdb_project/manage.py\n--- a/django/cantusdb_project/manage.py\n+++ b/django/cantusdb_project/manage.py\n@@ -6,17 +6,6 @@\n \n def main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n- # start new section\n- from django.conf import settings\n-\n- if settings.DEBUG:\n- if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n- import ptvsd\n-\n- ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n- print(\"Attached!\")\n- # end new section\n-\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n", "issue": "Use Django Extensions, deprecate and remove ptvsd\n[Django Extensions](https://django-extensions.readthedocs.io/en/latest/) are a really useful set of management and development tools. One of the most useful ones (I find) is `runserver_plus`, which you can run instead of the normal `runserver` when developing. This gives you access to an in-browser debugger tool, replacing the standard Django error pages with an interactive traceback and debugger.\r\n\r\nAnother useful one is `shell_plus` which can pre-load all of your models into an interactive Python shell. If you also have iPython installed it will use that, making the Python repl much easier to use.\r\n\r\nWith a move to these tools, I think [the modifications](https://github.com/DDMAL/CantusDB/blob/develop/django/cantusdb_project/manage.py#L9-L18) to `manage.py` can be un-done, and the dependency on the ptvsd module can be removed. This module anyway [seems to be deprecated](https://github.com/microsoft/ptvsd). \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n # start new section\n from django.conf import settings\n\n if settings.DEBUG:\n if os.environ.get(\"RUN_MAIN\") or os.environ.get(\"WERKZEUG_RUN_MAIN\"):\n import ptvsd\n\n ptvsd.enable_attach(address=(\"0.0.0.0\", 3000))\n print(\"Attached!\")\n # end new section\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "django/cantusdb_project/manage.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Django's command-line utility for administrative tasks.\"\"\"\nimport os\nimport sys\n\n\ndef main():\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cantusdb.settings\")\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "django/cantusdb_project/manage.py"}]} | 758 | 188 |
gh_patches_debug_12066 | rasdani/github-patches | git_diff | getredash__redash-2071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'ApiUser' object has no attribute 'email'
### Issue Summary
API POST calls to /api/queries/###/refresh throw this error on 1.0.3+b2850
### Steps to Reproduce
1. Make a POST call to /api/queries/###/refresh
2. See error in api_error.log and 500 result
Full error with trace:
```
Exception on /api/queries/###/refresh [POST]
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1639, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1625, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login/utils.py", line 228, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/opt/redash/redash.1.0.3.b2850/redash/handlers/base.py", line 28, in dispatch_request
return super(BaseResource, self).dispatch_request(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/opt/redash/redash.1.0.3.b2850/redash/handlers/queries.py", line 266, in post
return run_query(query.data_source, parameter_values, query.query_text, query.id)
File "/opt/redash/redash.1.0.3.b2850/redash/handlers/query_results.py", line 45, in run_query
job = enqueue_query(query_text, data_source, current_user.id, metadata={"Username": current_user.email, "Query ID": query_id})
File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 343, in __getattr__
return getattr(self._get_current_object(), name)
AttributeError: 'ApiUser' object has no attribute 'email'
```
Same request worked in some previous version of Redash. Looking at the current code on master, it seems to still make the same reference to `current_user.email` so assuming this bug still exists, but haven't confirmed that yet.
### Technical details:
* Redash Version: 1.0.3+b2850
* Browser/OS: N/A
* How did you install Redash: N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/queries.py`
Content:
```
1 from itertools import chain
2
3 import sqlparse
4 from flask import jsonify, request
5 from flask_login import login_required
6 from flask_restful import abort
7 from funcy import distinct, take
8 from sqlalchemy.orm.exc import StaleDataError
9
10 from redash import models, settings
11 from redash.handlers.base import (BaseResource, get_object_or_404,
12 org_scoped_rule, paginate, routes)
13 from redash.handlers.query_results import run_query
14 from redash.permissions import (can_modify, not_view_only, require_access,
15 require_admin_or_owner,
16 require_object_modify_permission,
17 require_permission, view_only)
18 from redash.utils import collect_parameters_from_request
19
20
21 @routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
22 @login_required
23 def format_sql_query(org_slug=None):
24 """
25 Formats an SQL query using the Python ``sqlparse`` formatter.
26
27 :<json string query: The SQL text to format
28 :>json string query: Formatted SQL text
29 """
30 arguments = request.get_json(force=True)
31 query = arguments.get("query", "")
32
33 return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})
34
35
36 class QuerySearchResource(BaseResource):
37 @require_permission('view_query')
38 def get(self):
39 """
40 Search query text, titles, and descriptions.
41
42 :qparam string q: Search term
43
44 Responds with a list of :ref:`query <query-response-label>` objects.
45 """
46 term = request.args.get('q', '')
47 include_drafts = request.args.get('include_drafts') is not None
48
49 return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts)]
50
51
52 class QueryRecentResource(BaseResource):
53 @require_permission('view_query')
54 def get(self):
55 """
56 Retrieve up to 20 queries modified in the last 7 days.
57
58 Responds with a list of :ref:`query <query-response-label>` objects.
59 """
60
61 if settings.FEATURE_DUMB_RECENTS:
62 results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)
63 queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]
64 else:
65 queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
66 recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]
67
68 global_recent = []
69 if len(recent) < 10:
70 global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]
71
72 queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
73
74 return queries
75
76
77 class QueryListResource(BaseResource):
78 @require_permission('create_query')
79 def post(self):
80 """
81 Create a new query.
82
83 :<json number data_source_id: The ID of the data source this query will run on
84 :<json string query: Query text
85 :<json string name:
86 :<json string description:
87 :<json string schedule: Schedule interval, in seconds, for repeated execution of this query
88 :<json object options: Query options
89
90 .. _query-response-label:
91
92 :>json number id: Query ID
93 :>json number latest_query_data_id: ID for latest output data from this query
94 :>json string name:
95 :>json string description:
96 :>json string query: Query text
97 :>json string query_hash: Hash of query text
98 :>json string schedule: Schedule interval, in seconds, for repeated execution of this query
99 :>json string api_key: Key for public access to this query's results.
100 :>json boolean is_archived: Whether this query is displayed in indexes and search results or not.
101 :>json boolean is_draft: Whether this query is a draft or not
102 :>json string updated_at: Time of last modification, in ISO format
103 :>json string created_at: Time of creation, in ISO format
104 :>json number data_source_id: ID of the data source this query will run on
105 :>json object options: Query options
106 :>json number version: Revision version (for update conflict avoidance)
107 :>json number user_id: ID of query creator
108 :>json number last_modified_by_id: ID of user who last modified this query
109 :>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)
110 :>json number runtime: Runtime of last query execution, in seconds (may be null)
111 """
112 query_def = request.get_json(force=True)
113 data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
114 require_access(data_source.groups, self.current_user, not_view_only)
115
116 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
117 query_def.pop(field, None)
118
119 query_def['query_text'] = query_def.pop('query')
120 query_def['user'] = self.current_user
121 query_def['data_source'] = data_source
122 query_def['org'] = self.current_org
123 query_def['is_draft'] = True
124 query = models.Query.create(**query_def)
125 models.db.session.add(query)
126 models.db.session.commit()
127
128 self.record_event({
129 'action': 'create',
130 'object_id': query.id,
131 'object_type': 'query'
132 })
133
134 return query.to_dict()
135
136 @require_permission('view_query')
137 def get(self):
138 """
139 Retrieve a list of queries.
140
141 :qparam number page_size: Number of queries to return
142 :qparam number page: Page number to retrieve
143
144 Responds with an array of :ref:`query <query-response-label>` objects.
145 """
146
147 results = models.Query.all_queries(self.current_user.group_ids, self.current_user.id)
148 page = request.args.get('page', 1, type=int)
149 page_size = request.args.get('page_size', 25, type=int)
150 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
151
152
153 class MyQueriesResource(BaseResource):
154 @require_permission('view_query')
155 def get(self):
156 """
157 Retrieve a list of queries created by the current user.
158
159 :qparam number page_size: Number of queries to return
160 :qparam number page: Page number to retrieve
161
162 Responds with an array of :ref:`query <query-response-label>` objects.
163 """
164 drafts = request.args.get('drafts') is not None
165 results = models.Query.by_user(self.current_user)
166 page = request.args.get('page', 1, type=int)
167 page_size = request.args.get('page_size', 25, type=int)
168 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
169
170
171 class QueryResource(BaseResource):
172 @require_permission('edit_query')
173 def post(self, query_id):
174 """
175 Modify a query.
176
177 :param query_id: ID of query to update
178 :<json number data_source_id: The ID of the data source this query will run on
179 :<json string query: Query text
180 :<json string name:
181 :<json string description:
182 :<json string schedule: Schedule interval, in seconds, for repeated execution of this query
183 :<json object options: Query options
184
185 Responds with the updated :ref:`query <query-response-label>` object.
186 """
187 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
188 query_def = request.get_json(force=True)
189
190 require_object_modify_permission(query, self.current_user)
191
192 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
193 query_def.pop(field, None)
194
195 if 'query' in query_def:
196 query_def['query_text'] = query_def.pop('query')
197
198 query_def['last_modified_by'] = self.current_user
199 query_def['changed_by'] = self.current_user
200 # SQLAlchemy handles the case where a concurrent transaction beats us
201 # to the update. But we still have to make sure that we're not starting
202 # out behind.
203 if 'version' in query_def and query_def['version'] != query.version:
204 abort(409)
205
206 try:
207 self.update_model(query, query_def)
208 models.db.session.commit()
209 except StaleDataError:
210 abort(409)
211
212 return query.to_dict(with_visualizations=True)
213
214 @require_permission('view_query')
215 def get(self, query_id):
216 """
217 Retrieve a query.
218
219 :param query_id: ID of query to fetch
220
221 Responds with the :ref:`query <query-response-label>` contents.
222 """
223 q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
224 require_access(q.groups, self.current_user, view_only)
225
226 result = q.to_dict(with_visualizations=True)
227 result['can_edit'] = can_modify(q, self.current_user)
228 return result
229
230 # TODO: move to resource of its own? (POST /queries/{id}/archive)
231 def delete(self, query_id):
232 """
233 Archives a query.
234
235 :param query_id: ID of query to archive
236 """
237 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
238 require_admin_or_owner(query.user_id)
239 query.archive(self.current_user)
240 models.db.session.commit()
241
242
243 class QueryForkResource(BaseResource):
244 @require_permission('edit_query')
245 def post(self, query_id):
246 """
247 Creates a new query, copying the query text from an existing one.
248
249 :param query_id: ID of query to fork
250
251 Responds with created :ref:`query <query-response-label>` object.
252 """
253 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
254 require_access(query.data_source.groups, self.current_user, not_view_only)
255 forked_query = query.fork(self.current_user)
256 models.db.session.commit()
257 return forked_query.to_dict(with_visualizations=True)
258
259
260 class QueryRefreshResource(BaseResource):
261 def post(self, query_id):
262 """
263 Execute a query, updating the query object with the results.
264
265 :param query_id: ID of query to execute
266
267 Responds with query task details.
268 """
269 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
270 require_access(query.groups, self.current_user, not_view_only)
271
272 parameter_values = collect_parameters_from_request(request.args)
273
274 return run_query(query.data_source, parameter_values, query.query_text, query.id)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -266,6 +266,12 @@
Responds with query task details.
"""
+ # TODO: this should actually check for permissions, but because currently you can only
+ # get here either with a user API key or a query one, we can just check whether it's
+ # an api key (meaning this is a query API key, which only grants read access).
+ if self.current_user.is_api_user():
+ abort(403, message="Please use a user API key.")
+
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
require_access(query.groups, self.current_user, not_view_only)
| {"golden_diff": "diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py\n--- a/redash/handlers/queries.py\n+++ b/redash/handlers/queries.py\n@@ -266,6 +266,12 @@\n \n Responds with query task details.\n \"\"\"\n+ # TODO: this should actually check for permissions, but because currently you can only\n+ # get here either with a user API key or a query one, we can just check whether it's\n+ # an api key (meaning this is a query API key, which only grants read access).\n+ if self.current_user.is_api_user():\n+ abort(403, message=\"Please use a user API key.\")\n+\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n", "issue": "AttributeError: 'ApiUser' object has no attribute 'email'\n### Issue Summary\r\n\r\nAPI POST calls to /api/queries/###/refresh throw this error on 1.0.3+b2850\r\n\r\n### Steps to Reproduce\r\n\r\n1. Make a POST call to /api/queries/###/refresh\r\n2. See error in api_error.log and 500 result\r\n\r\nFull error with trace:\r\n\r\n```\r\nException on /api/queries/###/refresh [POST]\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1639, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1625, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 477, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/dist-packages/flask_login/utils.py\", line 228, in decorated_view\r\n return func(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/dist-packages/flask/views.py\", line 84, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/opt/redash/redash.1.0.3.b2850/redash/handlers/base.py\", line 28, in dispatch_request\r\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\r\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 587, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/opt/redash/redash.1.0.3.b2850/redash/handlers/queries.py\", line 266, in post\r\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\r\n File \"/opt/redash/redash.1.0.3.b2850/redash/handlers/query_results.py\", line 45, in run_query\r\n job = enqueue_query(query_text, data_source, current_user.id, metadata={\"Username\": current_user.email, \"Query ID\": query_id})\r\n File \"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py\", line 343, in __getattr__\r\n return getattr(self._get_current_object(), name)\r\nAttributeError: 'ApiUser' object has no attribute 'email'\r\n```\r\n\r\nSame request worked in some previous version of Redash. 
Looking at the current code on master, it seems to still make the same reference to `current_user.email` so assuming this bug still exists, but haven't confirmed that yet.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3+b2850\r\n* Browser/OS: N/A\r\n* How did you install Redash: N/A\r\n\n", "before_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import models, settings\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n \"\"\"\n Formats an SQL query using the Python ``sqlparse`` formatter.\n\n :<json string query: The SQL text to format\n :>json string query: Formatted SQL text\n \"\"\"\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Search query text, titles, and descriptions.\n\n :qparam string q: Search term\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n term = request.args.get('q', '')\n include_drafts = request.args.get('include_drafts') is not None\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve up to 20 queries modified in the last 7 days.\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n if settings.FEATURE_DUMB_RECENTS:\n results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)\n queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]\n else:\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n return queries\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n \"\"\"\n Create a new query.\n\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n .. 
_query-response-label:\n\n :>json number id: Query ID\n :>json number latest_query_data_id: ID for latest output data from this query\n :>json string name:\n :>json string description:\n :>json string query: Query text\n :>json string query_hash: Hash of query text\n :>json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :>json string api_key: Key for public access to this query's results.\n :>json boolean is_archived: Whether this query is displayed in indexes and search results or not.\n :>json boolean is_draft: Whether this query is a draft or not\n :>json string updated_at: Time of last modification, in ISO format\n :>json string created_at: Time of creation, in ISO format\n :>json number data_source_id: ID of the data source this query will run on\n :>json object options: Query options\n :>json number version: Revision version (for update conflict avoidance)\n :>json number user_id: ID of query creator\n :>json number last_modified_by_id: ID of user who last modified this query\n :>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)\n :>json number runtime: Runtime of last query execution, in seconds (may be null)\n \"\"\"\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n results = models.Query.all_queries(self.current_user.group_ids, self.current_user.id)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries created by the current user.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Modify a query.\n\n :param query_id: ID of query to update\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string 
query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n Responds with the updated :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n return query.to_dict(with_visualizations=True)\n\n @require_permission('view_query')\n def get(self, query_id):\n \"\"\"\n Retrieve a query.\n\n :param query_id: ID of query to fetch\n\n Responds with the :ref:`query <query-response-label>` contents.\n \"\"\"\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? (POST /queries/{id}/archive)\n def delete(self, query_id):\n \"\"\"\n Archives a query.\n\n :param query_id: ID of query to archive\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n models.db.session.commit()\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Creates a new query, copying the query text from an existing one.\n\n :param query_id: ID of query to fork\n\n Responds with created :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.data_source.groups, self.current_user, not_view_only)\n forked_query = query.fork(self.current_user)\n models.db.session.commit()\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n \"\"\"\n Execute a query, updating the query object with the results.\n\n :param query_id: ID of query to execute\n\n Responds with query task details.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}], "after_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import 
models, settings\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n \"\"\"\n Formats an SQL query using the Python ``sqlparse`` formatter.\n\n :<json string query: The SQL text to format\n :>json string query: Formatted SQL text\n \"\"\"\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Search query text, titles, and descriptions.\n\n :qparam string q: Search term\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n term = request.args.get('q', '')\n include_drafts = request.args.get('include_drafts') is not None\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve up to 20 queries modified in the last 7 days.\n\n Responds with a list of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n if settings.FEATURE_DUMB_RECENTS:\n results = models.Query.by_user(self.current_user).order_by(models.Query.updated_at.desc()).limit(10)\n queries = [q.to_dict(with_last_modified_by=False, with_user=False) for q in results]\n else:\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False, with_user=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n queries = take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n return queries\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n \"\"\"\n Create a new query.\n\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n .. 
_query-response-label:\n\n :>json number id: Query ID\n :>json number latest_query_data_id: ID for latest output data from this query\n :>json string name:\n :>json string description:\n :>json string query: Query text\n :>json string query_hash: Hash of query text\n :>json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :>json string api_key: Key for public access to this query's results.\n :>json boolean is_archived: Whether this query is displayed in indexes and search results or not.\n :>json boolean is_draft: Whether this query is a draft or not\n :>json string updated_at: Time of last modification, in ISO format\n :>json string created_at: Time of creation, in ISO format\n :>json number data_source_id: ID of the data source this query will run on\n :>json object options: Query options\n :>json number version: Revision version (for update conflict avoidance)\n :>json number user_id: ID of query creator\n :>json number last_modified_by_id: ID of user who last modified this query\n :>json string retrieved_at: Time when query results were last retrieved, in ISO format (may be null)\n :>json number runtime: Runtime of last query execution, in seconds (may be null)\n \"\"\"\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n\n results = models.Query.all_queries(self.current_user.group_ids, self.current_user.id)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n \"\"\"\n Retrieve a list of queries created by the current user.\n\n :qparam number page_size: Number of queries to return\n :qparam number page: Page number to retrieve\n\n Responds with an array of :ref:`query <query-response-label>` objects.\n \"\"\"\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Modify a query.\n\n :param query_id: ID of query to update\n :<json number data_source_id: The ID of the data source this query will run on\n :<json string 
query: Query text\n :<json string name:\n :<json string description:\n :<json string schedule: Schedule interval, in seconds, for repeated execution of this query\n :<json object options: Query options\n\n Responds with the updated :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n return query.to_dict(with_visualizations=True)\n\n @require_permission('view_query')\n def get(self, query_id):\n \"\"\"\n Retrieve a query.\n\n :param query_id: ID of query to fetch\n\n Responds with the :ref:`query <query-response-label>` contents.\n \"\"\"\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? (POST /queries/{id}/archive)\n def delete(self, query_id):\n \"\"\"\n Archives a query.\n\n :param query_id: ID of query to archive\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n models.db.session.commit()\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n \"\"\"\n Creates a new query, copying the query text from an existing one.\n\n :param query_id: ID of query to fork\n\n Responds with created :ref:`query <query-response-label>` object.\n \"\"\"\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.data_source.groups, self.current_user, not_view_only)\n forked_query = query.fork(self.current_user)\n models.db.session.commit()\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n \"\"\"\n Execute a query, updating the query object with the results.\n\n :param query_id: ID of query to execute\n\n Responds with query task details.\n \"\"\"\n # TODO: this should actually check for permissions, but because currently you can only\n # get here either with a user API key or a query one, we can just check whether it's\n # an api key (meaning this is a query API key, which only grants read access).\n if self.current_user.is_api_user():\n abort(403, message=\"Please use a user API key.\")\n\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, 
query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}]} | 4,080 | 201 |
gh_patches_debug_29332 | rasdani/github-patches | git_diff | fossasia__open-event-server-6924 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error while sending monthly invoice
```
NoResultFound: No row was found for one()
File "app/api/helpers/scheduled_jobs.py", line 256, in send_monthly_event_invoice
db.session.query(TicketFees).filter_by(currency=currency).one()
File "sqlalchemy/orm/query.py", line 3354, in one
raise orm_exc.NoResultFound("No row was found for one()")
ObjectNotFound: ({'source': ''}, 'Ticket Fee not set for None')
File "app/api/helpers/scheduled_jobs.py", line 260, in send_monthly_event_invoice
{'source': ''}, 'Ticket Fee not set for {}'.format(currency)
```
We should never be throwing unhandled exceptions in jobs where no one can handle them
```
events = [
<Event 'Yogesh Sharma'>
,
<Event '"/><img src="x" onerror="alert(document.domain)">'>
,
<Event 'testing'>
,
<Event 'Hackathon'>
,
<Event 'Jamia Hamdard Convocation'>
,
<Event 'OpenTechSummit Day Mekong'>
,
<Event 'FOSSASIA Meetup - PSLab'>
,
<Event 'Introduction to open source'>
,
<Event 'Yogesh Sharma'>
,
<Event 'OpenTechSummit India'>
]
event = <Event 'Yogesh Sharma'>
```
--- END ISSUE ---
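The traceback in the issue comes down to `Query.one()` raising `NoResultFound` inside a scheduled Celery task, where nothing upstream can catch it. As a hedged illustration of the defensive shape the reporter is asking for (a sketch only, not the project's actual patch; the helper name and logger are invented for the example), the lookup can be wrapped so a missing `TicketFees` row is logged and skipped instead of aborting the whole job:
```
import logging

from sqlalchemy.orm.exc import NoResultFound

logger = logging.getLogger(__name__)


def ticket_fee_or_none(session, ticket_fee_model, currency):
    """Return the ticket-fee row for `currency`, or None (logged) instead of raising."""
    try:
        return session.query(ticket_fee_model).filter_by(currency=currency).one()
    except NoResultFound:
        logger.error("No ticket fee configured for currency %r; skipping this event", currency)
        return None
```
A caller iterating over events would `continue` when the helper returns `None`, so one misconfigured event (here apparently one whose `payment_currency` is `None`) cannot take down the monthly invoices for every other event.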
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/scheduled_jobs.py`
Content:
```
1 import datetime
2
3 import pytz
4 from dateutil.relativedelta import relativedelta
5 from sqlalchemy.orm.exc import NoResultFound
6 from flask_rest_jsonapi.exceptions import ObjectNotFound
7 from flask import render_template
8 from flask_celeryext import RequestContextTask
9
10 from app.api.helpers.db import save_to_db
11 from app.api.helpers.files import create_save_pdf
12 from app.api.helpers.mail import (
13 send_email_after_event,
14 send_email_for_monthly_fee_payment,
15 send_followup_email_for_monthly_fee_payment,
16 )
17 from app.api.helpers.notification import (
18 send_followup_notif_monthly_fee_payment,
19 send_notif_after_event,
20 send_notif_monthly_fee_payment,
21 )
22 from app.api.helpers.query import get_user_event_roles_by_role_name
23 from app.api.helpers.storage import UPLOAD_PATHS
24 from app.api.helpers.utilities import monthdelta
25 from app.instance import celery
26 from app.models import db
27 from app.models.event import Event
28 from app.models.event_invoice import EventInvoice
29 from app.models.order import Order
30 from app.models.session import Session
31 from app.models.speaker import Speaker
32 from app.models.ticket_fee import TicketFees, get_fee
33 from app.models.ticket_holder import TicketHolder
34 from app.settings import get_settings
35
36
37 @celery.task(base=RequestContextTask, name='send.after.event.mail')
38 def send_after_event_mail():
39 from app.instance import current_app as app
40
41 with app.app_context():
42 events = Event.query.filter_by(state='published', deleted_at=None).all()
43 for event in events:
44 organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
45 speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
46 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
47 current_time = datetime.datetime.now(pytz.timezone(event.timezone))
48 time_difference = current_time - event.ends_at
49 time_difference_minutes = (time_difference.days * 24 * 60) + (
50 time_difference.seconds / 60
51 )
52 frontend_url = get_settings()['frontend_url']
53 if current_time > event.ends_at and time_difference_minutes < 1440:
54 for speaker in speakers:
55 if not speaker.is_email_overridden:
56 send_email_after_event(
57 speaker.user.email, event.name, frontend_url
58 )
59 send_notif_after_event(speaker.user, event.name)
60 for organizer in organizers:
61 send_email_after_event(organizer.user.email, event.name, frontend_url)
62 send_notif_after_event(organizer.user, event.name)
63 if owner:
64 send_email_after_event(owner.user.email, event.name, frontend_url)
65 send_notif_after_event(owner.user, event.name)
66
67
68 @celery.task(base=RequestContextTask, name='change.session.state.on.event.completion')
69 def change_session_state_on_event_completion():
70 from app.instance import current_app as app
71
72 with app.app_context():
73 sessions_to_be_changed = (
74 Session.query.join(Event)
75 .filter(Session.state == 'pending')
76 .filter(Event.ends_at < datetime.datetime.now())
77 )
78 for session in sessions_to_be_changed:
79 session.state = 'rejected'
80 save_to_db(
81 session, 'Changed {} session state to rejected'.format(session.title)
82 )
83
84
85 @celery.task(base=RequestContextTask, name='send.event.fee.notification')
86 def send_event_fee_notification():
87 from app.instance import current_app as app
88
89 with app.app_context():
90 events = Event.query.filter_by(deleted_at=None, state='published').all()
91 for event in events:
92 latest_invoice = (
93 EventInvoice.query.filter_by(event_id=event.id)
94 .order_by(EventInvoice.created_at.desc())
95 .first()
96 )
97
98 if latest_invoice:
99 orders = (
100 Order.query.filter_by(event_id=event.id)
101 .filter_by(status='completed')
102 .filter(Order.completed_at > latest_invoice.created_at)
103 .all()
104 )
105 else:
106 orders = (
107 Order.query.filter_by(event_id=event.id)
108 .filter_by(status='completed')
109 .all()
110 )
111
112 fee_total = 0
113 for order in orders:
114 for ticket in order.tickets:
115 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
116 fee = ticket.price * (
117 get_fee(event.payment_country, order.event.payment_currency)
118 / 100.0
119 )
120 fee_total += fee
121
122 if fee_total > 0:
123 owner = get_user_event_roles_by_role_name(event.id, 'owner').first()
124 new_invoice = EventInvoice(
125 amount=fee_total, event_id=event.id, user_id=owner.user.id
126 )
127
128 if event.discount_code_id and event.discount_code:
129 r = relativedelta(datetime.datetime.utcnow(), event.created_at)
130 if r <= event.discount_code.valid_till:
131 new_invoice.amount = fee_total - (
132 fee_total * (event.discount_code.value / 100.0)
133 )
134 new_invoice.discount_code_id = event.discount_code_id
135
136 save_to_db(new_invoice)
137 prev_month = monthdelta(new_invoice.created_at, 1).strftime(
138 "%b %Y"
139 ) # Displayed as Aug 2016
140 app_name = get_settings()['app_name']
141 frontend_url = get_settings()['frontend_url']
142 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)
143 send_email_for_monthly_fee_payment(
144 new_invoice.user.email,
145 event.name,
146 prev_month,
147 new_invoice.amount,
148 app_name,
149 link,
150 )
151 send_notif_monthly_fee_payment(
152 new_invoice.user,
153 event.name,
154 prev_month,
155 new_invoice.amount,
156 app_name,
157 link,
158 new_invoice.event_id,
159 )
160
161
162 @celery.task(base=RequestContextTask, name='send.event.fee.notification.followup')
163 def send_event_fee_notification_followup():
164 from app.instance import current_app as app
165
166 with app.app_context():
167 incomplete_invoices = EventInvoice.query.filter(
168 EventInvoice.status != 'paid'
169 ).all()
170 for incomplete_invoice in incomplete_invoices:
171 if incomplete_invoice.amount > 0:
172 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(
173 "%b %Y"
174 ) # Displayed as Aug 2016
175 app_name = get_settings()['app_name']
176 frontend_url = get_settings()['frontend_url']
177 link = '{}/event-invoice/{}/review'.format(
178 frontend_url, incomplete_invoice.identifier
179 )
180 send_followup_email_for_monthly_fee_payment(
181 incomplete_invoice.user.email,
182 incomplete_invoice.event.name,
183 prev_month,
184 incomplete_invoice.amount,
185 app_name,
186 link,
187 )
188 send_followup_notif_monthly_fee_payment(
189 incomplete_invoice.user,
190 incomplete_invoice.event.name,
191 prev_month,
192 incomplete_invoice.amount,
193 app_name,
194 link,
195 incomplete_invoice.event.id,
196 )
197
198
199 @celery.task(base=RequestContextTask, name='expire.pending.tickets')
200 def expire_pending_tickets():
201 from app.instance import current_app as app
202
203 with app.app_context():
204 db.session.query(Order).filter(
205 Order.status == 'pending',
206 (Order.created_at + datetime.timedelta(minutes=30))
207 <= datetime.datetime.now(),
208 ).update({'status': 'expired'})
209 db.session.commit()
210
211
212 @celery.task(base=RequestContextTask, name='delete.ticket.holders.no.order.id')
213 def delete_ticket_holders_no_order_id():
214 from app.instance import current_app as app
215
216 with app.app_context():
217 order_expiry_time = get_settings()['order_expiry_time']
218 TicketHolder.query.filter(
219 TicketHolder.order_id == None,
220 TicketHolder.deleted_at.is_(None),
221 TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time)
222 < datetime.datetime.utcnow(),
223 ).delete(synchronize_session=False)
224 db.session.commit()
225
226
227 @celery.task(base=RequestContextTask, name='event.invoices.mark.due')
228 def event_invoices_mark_due():
229 from app.instance import current_app as app
230
231 with app.app_context():
232 db.session.query(EventInvoice).filter(
233 EventInvoice.status == 'upcoming',
234 Event.id == EventInvoice.event_id,
235 Event.ends_at >= datetime.datetime.now(),
236 (
237 EventInvoice.created_at + datetime.timedelta(days=30)
238 <= datetime.datetime.now()
239 ),
240 ).update({EventInvoice.status: 'due'}, synchronize_session=False)
241
242
243 @celery.task(base=RequestContextTask, name='send.monthly.event.invoice')
244 def send_monthly_event_invoice():
245 from app.instance import current_app as app
246
247 with app.app_context():
248 events = Event.query.filter_by(deleted_at=None, state='published').all()
249 for event in events:
250 # calculate net & gross revenues
251 user = event.owner
252 admin_info = get_settings()
253 currency = event.payment_currency
254 try:
255 ticket_fee_object = (
256 db.session.query(TicketFees).filter_by(currency=currency).one()
257 )
258 except NoResultFound:
259 raise ObjectNotFound(
260 {'source': ''}, 'Ticket Fee not set for {}'.format(currency)
261 )
262 ticket_fee_percentage = ticket_fee_object.service_fee
263 ticket_fee_maximum = ticket_fee_object.maximum_fee
264 orders = Order.query.filter_by(event=event).all()
265 gross_revenue = event.calc_monthly_revenue()
266 invoice_amount = gross_revenue * (ticket_fee_percentage / 100)
267 if invoice_amount > ticket_fee_maximum:
268 invoice_amount = ticket_fee_maximum
269 net_revenue = gross_revenue - invoice_amount
270 payment_details = {
271 'tickets_sold': event.tickets_sold,
272 'gross_revenue': gross_revenue,
273 'net_revenue': net_revenue,
274 'amount_payable': invoice_amount,
275 }
276 # save invoice as pdf
277 pdf = create_save_pdf(
278 render_template(
279 'pdf/event_invoice.html',
280 orders=orders,
281 user=user,
282 admin_info=admin_info,
283 currency=currency,
284 event=event,
285 ticket_fee_object=ticket_fee_object,
286 payment_details=payment_details,
287 net_revenue=net_revenue,
288 ),
289 UPLOAD_PATHS['pdf']['event_invoice'],
290 dir_path='/static/uploads/pdf/event_invoices/',
291 identifier=event.identifier,
292 )
293 # save event_invoice info to DB
294
295 event_invoice = EventInvoice(
296 amount=invoice_amount, invoice_pdf_url=pdf, event_id=event.id
297 )
298 save_to_db(event_invoice)
299
300
301 @celery.on_after_configure.connect
302 def setup_scheduled_task(sender, **kwargs):
303 from celery.schedules import crontab
304
305 # Every day at 5:30
306 sender.add_periodic_task(crontab(hour=5, minute=30), send_after_event_mail)
307 # Every 1st day of month at 0:00
308 sender.add_periodic_task(
309 crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification
310 )
311 # Every 1st day of month at 0:00
312 sender.add_periodic_task(
313 crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification_followup
314 )
315 # Every day at 5:30
316 sender.add_periodic_task(
317 crontab(hour=5, minute=30), change_session_state_on_event_completion
318 )
319 # Every 45 minutes
320 sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets)
321 # Every 1st day of month at 0:00
322 sender.add_periodic_task(
323 crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice
324 )
325 # Every day at 5:00
326 sender.add_periodic_task(crontab(minute=0, hour=5), event_invoices_mark_due)
327 # Every 5 minutes
328 sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id)
329
```
--- END FILES ---
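For readers skimming `send_monthly_event_invoice` above, the fee computation (listing lines 265-269) is easier to follow with concrete numbers. The figures below are invented purely to illustrate the arithmetic; they are not real `TicketFees` values:
```
# Illustrative numbers only
gross_revenue = 1000.00          # event.calc_monthly_revenue()
ticket_fee_percentage = 2.5      # ticket_fee_object.service_fee
ticket_fee_maximum = 20.00       # ticket_fee_object.maximum_fee

invoice_amount = gross_revenue * (ticket_fee_percentage / 100)   # 25.0
if invoice_amount > ticket_fee_maximum:
    invoice_amount = ticket_fee_maximum                          # capped at 20.0
net_revenue = gross_revenue - invoice_amount                     # 980.0
```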
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py
--- a/app/api/helpers/scheduled_jobs.py
+++ b/app/api/helpers/scheduled_jobs.py
@@ -3,7 +3,6 @@
import pytz
from dateutil.relativedelta import relativedelta
from sqlalchemy.orm.exc import NoResultFound
-from flask_rest_jsonapi.exceptions import ObjectNotFound
from flask import render_template
from flask_celeryext import RequestContextTask
@@ -32,7 +31,9 @@
from app.models.ticket_fee import TicketFees, get_fee
from app.models.ticket_holder import TicketHolder
from app.settings import get_settings
+import logging
+logger = logging.getLogger(__name__)
@celery.task(base=RequestContextTask, name='send.after.event.mail')
def send_after_event_mail():
@@ -256,9 +257,9 @@
db.session.query(TicketFees).filter_by(currency=currency).one()
)
except NoResultFound:
- raise ObjectNotFound(
- {'source': ''}, 'Ticket Fee not set for {}'.format(currency)
- )
+ logger.error('Ticket Fee not found for event id {id}'.format(id=event.id))
+ continue
+
ticket_fee_percentage = ticket_fee_object.service_fee
ticket_fee_maximum = ticket_fee_object.maximum_fee
orders = Order.query.filter_by(event=event).all()
| {"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -3,7 +3,6 @@\n import pytz\n from dateutil.relativedelta import relativedelta\n from sqlalchemy.orm.exc import NoResultFound\n-from flask_rest_jsonapi.exceptions import ObjectNotFound\n from flask import render_template\n from flask_celeryext import RequestContextTask\n \n@@ -32,7 +31,9 @@\n from app.models.ticket_fee import TicketFees, get_fee\n from app.models.ticket_holder import TicketHolder\n from app.settings import get_settings\n+import logging\n \n+logger = logging.getLogger(__name__)\n \n @celery.task(base=RequestContextTask, name='send.after.event.mail')\n def send_after_event_mail():\n@@ -256,9 +257,9 @@\n db.session.query(TicketFees).filter_by(currency=currency).one()\n )\n except NoResultFound:\n- raise ObjectNotFound(\n- {'source': ''}, 'Ticket Fee not set for {}'.format(currency)\n- )\n+ logger.error('Ticket Fee not found for event id {id}'.format(id=event.id))\n+ continue\n+\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n", "issue": "Error while sending monthly invoice\n```\r\nNoResultFound: No row was found for one()\r\n File \"app/api/helpers/scheduled_jobs.py\", line 256, in send_monthly_event_invoice\r\n db.session.query(TicketFees).filter_by(currency=currency).one()\r\n File \"sqlalchemy/orm/query.py\", line 3354, in one\r\n raise orm_exc.NoResultFound(\"No row was found for one()\")\r\nObjectNotFound: ({'source': ''}, 'Ticket Fee not set for None')\r\n File \"app/api/helpers/scheduled_jobs.py\", line 260, in send_monthly_event_invoice\r\n {'source': ''}, 'Ticket Fee not set for {}'.format(currency)\r\n```\r\n\r\nWe should never be throwing unhandled exceptions in jobs where no one can handle them\r\n\r\n```\r\nevents = [\r\n<Event 'Yogesh Sharma'>\r\n, \r\n<Event '\"/><img src=\"x\" onerror=\"alert(document.domain)\">'>\r\n, \r\n<Event 'testing'>\r\n, \r\n<Event 'Hackathon'>\r\n, \r\n<Event 'Jamia Hamdard Convocation'>\r\n, \r\n<Event 'OpenTechSummit Day Mekong'>\r\n, \r\n<Event 'FOSSASIA Meetup - PSLab'>\r\n, \r\n<Event 'Introduction to open source'>\r\n, \r\n<Event 'Yogesh Sharma'>\r\n, \r\n<Event 'OpenTechSummit India'>\r\n]\r\n\r\nevent = <Event 'Yogesh Sharma'>\r\n```\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask import render_template\nfrom flask_celeryext import RequestContextTask\n\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.mail import (\n send_email_after_event,\n send_email_for_monthly_fee_payment,\n send_followup_email_for_monthly_fee_payment,\n)\nfrom app.api.helpers.notification import (\n send_followup_notif_monthly_fee_payment,\n send_notif_after_event,\n send_notif_monthly_fee_payment,\n)\nfrom app.api.helpers.query import get_user_event_roles_by_role_name\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.api.helpers.utilities import monthdelta\nfrom app.instance import celery\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.speaker 
import Speaker\nfrom app.models.ticket_fee import TicketFees, get_fee\nfrom app.models.ticket_holder import TicketHolder\nfrom app.settings import get_settings\n\n\[email protected](base=RequestContextTask, name='send.after.event.mail')\ndef send_after_event_mail():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + (\n time_difference.seconds / 60\n )\n frontend_url = get_settings()['frontend_url']\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(\n speaker.user.email, event.name, frontend_url\n )\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, frontend_url)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, frontend_url)\n send_notif_after_event(owner.user, event.name)\n\n\[email protected](base=RequestContextTask, name='change.session.state.on.event.completion')\ndef change_session_state_on_event_completion():\n from app.instance import current_app as app\n\n with app.app_context():\n sessions_to_be_changed = (\n Session.query.join(Event)\n .filter(Session.state == 'pending')\n .filter(Event.ends_at < datetime.datetime.now())\n )\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(\n session, 'Changed {} session state to rejected'.format(session.title)\n )\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification')\ndef send_event_fee_notification():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = (\n EventInvoice.query.filter_by(event_id=event.id)\n .order_by(EventInvoice.created_at.desc())\n .first()\n )\n\n if latest_invoice:\n orders = (\n Order.query.filter_by(event_id=event.id)\n .filter_by(status='completed')\n .filter(Order.completed_at > latest_invoice.created_at)\n .all()\n )\n else:\n orders = (\n Order.query.filter_by(event_id=event.id)\n .filter_by(status='completed')\n .all()\n )\n\n fee_total = 0\n for order in orders:\n for ticket in order.tickets:\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (\n get_fee(event.payment_country, order.event.payment_currency)\n / 100.0\n )\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id\n )\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - (\n fee_total * (event.discount_code.value / 100.0)\n )\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = 
monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\"\n ) # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(\n new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n )\n send_notif_monthly_fee_payment(\n new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id,\n )\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification.followup')\ndef send_event_fee_notification_followup():\n from app.instance import current_app as app\n\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(\n EventInvoice.status != 'paid'\n ).all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\"\n ) # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(\n frontend_url, incomplete_invoice.identifier\n )\n send_followup_email_for_monthly_fee_payment(\n incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n )\n send_followup_notif_monthly_fee_payment(\n incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id,\n )\n\n\[email protected](base=RequestContextTask, name='expire.pending.tickets')\ndef expire_pending_tickets():\n from app.instance import current_app as app\n\n with app.app_context():\n db.session.query(Order).filter(\n Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30))\n <= datetime.datetime.now(),\n ).update({'status': 'expired'})\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='delete.ticket.holders.no.order.id')\ndef delete_ticket_holders_no_order_id():\n from app.instance import current_app as app\n\n with app.app_context():\n order_expiry_time = get_settings()['order_expiry_time']\n TicketHolder.query.filter(\n TicketHolder.order_id == None,\n TicketHolder.deleted_at.is_(None),\n TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time)\n < datetime.datetime.utcnow(),\n ).delete(synchronize_session=False)\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='event.invoices.mark.due')\ndef event_invoices_mark_due():\n from app.instance import current_app as app\n\n with app.app_context():\n db.session.query(EventInvoice).filter(\n EventInvoice.status == 'upcoming',\n Event.id == EventInvoice.event_id,\n Event.ends_at >= datetime.datetime.now(),\n (\n EventInvoice.created_at + datetime.timedelta(days=30)\n <= datetime.datetime.now()\n ),\n ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n\n\[email protected](base=RequestContextTask, name='send.monthly.event.invoice')\ndef send_monthly_event_invoice():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n try:\n ticket_fee_object = (\n db.session.query(TicketFees).filter_by(currency=currency).one()\n )\n except 
NoResultFound:\n raise ObjectNotFound(\n {'source': ''}, 'Ticket Fee not set for {}'.format(currency)\n )\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n invoice_amount = gross_revenue * (ticket_fee_percentage / 100)\n if invoice_amount > ticket_fee_maximum:\n invoice_amount = ticket_fee_maximum\n net_revenue = gross_revenue - invoice_amount\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': invoice_amount,\n }\n # save invoice as pdf\n pdf = create_save_pdf(\n render_template(\n 'pdf/event_invoice.html',\n orders=orders,\n user=user,\n admin_info=admin_info,\n currency=currency,\n event=event,\n ticket_fee_object=ticket_fee_object,\n payment_details=payment_details,\n net_revenue=net_revenue,\n ),\n UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/',\n identifier=event.identifier,\n )\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(\n amount=invoice_amount, invoice_pdf_url=pdf, event_id=event.id\n )\n save_to_db(event_invoice)\n\n\[email protected]_after_configure.connect\ndef setup_scheduled_task(sender, **kwargs):\n from celery.schedules import crontab\n\n # Every day at 5:30\n sender.add_periodic_task(crontab(hour=5, minute=30), send_after_event_mail)\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification\n )\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification_followup\n )\n # Every day at 5:30\n sender.add_periodic_task(\n crontab(hour=5, minute=30), change_session_state_on_event_completion\n )\n # Every 45 minutes\n sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets)\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice\n )\n # Every day at 5:00\n sender.add_periodic_task(crontab(minute=0, hour=5), event_invoices_mark_due)\n # Every 5 minutes\n sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id)\n", "path": "app/api/helpers/scheduled_jobs.py"}], "after_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom flask import render_template\nfrom flask_celeryext import RequestContextTask\n\nfrom app.api.helpers.db import save_to_db\nfrom app.api.helpers.files import create_save_pdf\nfrom app.api.helpers.mail import (\n send_email_after_event,\n send_email_for_monthly_fee_payment,\n send_followup_email_for_monthly_fee_payment,\n)\nfrom app.api.helpers.notification import (\n send_followup_notif_monthly_fee_payment,\n send_notif_after_event,\n send_notif_monthly_fee_payment,\n)\nfrom app.api.helpers.query import get_user_event_roles_by_role_name\nfrom app.api.helpers.storage import UPLOAD_PATHS\nfrom app.api.helpers.utilities import monthdelta\nfrom app.instance import celery\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.ticket_fee import TicketFees, get_fee\nfrom app.models.ticket_holder import 
TicketHolder\nfrom app.settings import get_settings\nimport logging\n\nlogger = logging.getLogger(__name__)\n\[email protected](base=RequestContextTask, name='send.after.event.mail')\ndef send_after_event_mail():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(state='published', deleted_at=None).all()\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + (\n time_difference.seconds / 60\n )\n frontend_url = get_settings()['frontend_url']\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n if not speaker.is_email_overridden:\n send_email_after_event(\n speaker.user.email, event.name, frontend_url\n )\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, frontend_url)\n send_notif_after_event(organizer.user, event.name)\n if owner:\n send_email_after_event(owner.user.email, event.name, frontend_url)\n send_notif_after_event(owner.user, event.name)\n\n\[email protected](base=RequestContextTask, name='change.session.state.on.event.completion')\ndef change_session_state_on_event_completion():\n from app.instance import current_app as app\n\n with app.app_context():\n sessions_to_be_changed = (\n Session.query.join(Event)\n .filter(Session.state == 'pending')\n .filter(Event.ends_at < datetime.datetime.now())\n )\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(\n session, 'Changed {} session state to rejected'.format(session.title)\n )\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification')\ndef send_event_fee_notification():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n latest_invoice = (\n EventInvoice.query.filter_by(event_id=event.id)\n .order_by(EventInvoice.created_at.desc())\n .first()\n )\n\n if latest_invoice:\n orders = (\n Order.query.filter_by(event_id=event.id)\n .filter_by(status='completed')\n .filter(Order.completed_at > latest_invoice.created_at)\n .all()\n )\n else:\n orders = (\n Order.query.filter_by(event_id=event.id)\n .filter_by(status='completed')\n .all()\n )\n\n fee_total = 0\n for order in orders:\n for ticket in order.tickets:\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (\n get_fee(event.payment_country, order.event.payment_currency)\n / 100.0\n )\n fee_total += fee\n\n if fee_total > 0:\n owner = get_user_event_roles_by_role_name(event.id, 'owner').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=owner.user.id\n )\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - (\n fee_total * (event.discount_code.value / 100.0)\n )\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\"\n ) # Displayed as 
Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(\n new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n )\n send_notif_monthly_fee_payment(\n new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id,\n )\n\n\[email protected](base=RequestContextTask, name='send.event.fee.notification.followup')\ndef send_event_fee_notification_followup():\n from app.instance import current_app as app\n\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(\n EventInvoice.status != 'paid'\n ).all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\"\n ) # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/event-invoice/{}/review'.format(\n frontend_url, incomplete_invoice.identifier\n )\n send_followup_email_for_monthly_fee_payment(\n incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n )\n send_followup_notif_monthly_fee_payment(\n incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id,\n )\n\n\[email protected](base=RequestContextTask, name='expire.pending.tickets')\ndef expire_pending_tickets():\n from app.instance import current_app as app\n\n with app.app_context():\n db.session.query(Order).filter(\n Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30))\n <= datetime.datetime.now(),\n ).update({'status': 'expired'})\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='delete.ticket.holders.no.order.id')\ndef delete_ticket_holders_no_order_id():\n from app.instance import current_app as app\n\n with app.app_context():\n order_expiry_time = get_settings()['order_expiry_time']\n TicketHolder.query.filter(\n TicketHolder.order_id == None,\n TicketHolder.deleted_at.is_(None),\n TicketHolder.created_at + datetime.timedelta(minutes=order_expiry_time)\n < datetime.datetime.utcnow(),\n ).delete(synchronize_session=False)\n db.session.commit()\n\n\[email protected](base=RequestContextTask, name='event.invoices.mark.due')\ndef event_invoices_mark_due():\n from app.instance import current_app as app\n\n with app.app_context():\n db.session.query(EventInvoice).filter(\n EventInvoice.status == 'upcoming',\n Event.id == EventInvoice.event_id,\n Event.ends_at >= datetime.datetime.now(),\n (\n EventInvoice.created_at + datetime.timedelta(days=30)\n <= datetime.datetime.now()\n ),\n ).update({EventInvoice.status: 'due'}, synchronize_session=False)\n\n\[email protected](base=RequestContextTask, name='send.monthly.event.invoice')\ndef send_monthly_event_invoice():\n from app.instance import current_app as app\n\n with app.app_context():\n events = Event.query.filter_by(deleted_at=None, state='published').all()\n for event in events:\n # calculate net & gross revenues\n user = event.owner\n admin_info = get_settings()\n currency = event.payment_currency\n try:\n ticket_fee_object = (\n db.session.query(TicketFees).filter_by(currency=currency).one()\n )\n except NoResultFound:\n logger.error('Ticket Fee not found for event id 
{id}'.format(id=event.id))\n continue\n\n ticket_fee_percentage = ticket_fee_object.service_fee\n ticket_fee_maximum = ticket_fee_object.maximum_fee\n orders = Order.query.filter_by(event=event).all()\n gross_revenue = event.calc_monthly_revenue()\n invoice_amount = gross_revenue * (ticket_fee_percentage / 100)\n if invoice_amount > ticket_fee_maximum:\n invoice_amount = ticket_fee_maximum\n net_revenue = gross_revenue - invoice_amount\n payment_details = {\n 'tickets_sold': event.tickets_sold,\n 'gross_revenue': gross_revenue,\n 'net_revenue': net_revenue,\n 'amount_payable': invoice_amount,\n }\n # save invoice as pdf\n pdf = create_save_pdf(\n render_template(\n 'pdf/event_invoice.html',\n orders=orders,\n user=user,\n admin_info=admin_info,\n currency=currency,\n event=event,\n ticket_fee_object=ticket_fee_object,\n payment_details=payment_details,\n net_revenue=net_revenue,\n ),\n UPLOAD_PATHS['pdf']['event_invoice'],\n dir_path='/static/uploads/pdf/event_invoices/',\n identifier=event.identifier,\n )\n # save event_invoice info to DB\n\n event_invoice = EventInvoice(\n amount=invoice_amount, invoice_pdf_url=pdf, event_id=event.id\n )\n save_to_db(event_invoice)\n\n\[email protected]_after_configure.connect\ndef setup_scheduled_task(sender, **kwargs):\n from celery.schedules import crontab\n\n # Every day at 5:30\n sender.add_periodic_task(crontab(hour=5, minute=30), send_after_event_mail)\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification\n )\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_event_fee_notification_followup\n )\n # Every day at 5:30\n sender.add_periodic_task(\n crontab(hour=5, minute=30), change_session_state_on_event_completion\n )\n # Every 45 minutes\n sender.add_periodic_task(crontab(minute='*/45'), expire_pending_tickets)\n # Every 1st day of month at 0:00\n sender.add_periodic_task(\n crontab(minute=0, hour=0, day_of_month=1), send_monthly_event_invoice\n )\n # Every day at 5:00\n sender.add_periodic_task(crontab(minute=0, hour=5), event_invoices_mark_due)\n # Every 5 minutes\n sender.add_periodic_task(crontab(minute='*/5'), delete_ticket_holders_no_order_id)\n", "path": "app/api/helpers/scheduled_jobs.py"}]} | 4,045 | 302 |
gh_patches_debug_23871 | rasdani/github-patches | git_diff | learningequality__kolibri-6104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import is broken
### Observed behavior
trying to import doesn't work:

### Expected behavior
import should work
### User-facing consequences
cannot import
### Errors and logs
none observed
### Steps to reproduce
unsure. it happened during all imports, but then I cleared my `KOLIBRI_HOME` folder and things started working again
### Context
0.13.0 alpha 3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/utils/cache.py`
Content:
```
1 from django.core.cache import caches
2
3 from kolibri.utils.conf import OPTIONS
4
5
6 cache_options = OPTIONS["Cache"]
7
8
9 class CrossProcessCache(object):
10 def __init__(self, default_timeout=cache_options["CACHE_TIMEOUT"]):
11 self.default_timeout = default_timeout
12
13 def __contains__(self, key):
14 if key in caches["default"]:
15 return True
16 if cache_options["CACHE_BACKEND"] != "redis" and key in caches["process_cache"]:
17 return True
18 return False
19
20 def get(self, key, default=None, version=None):
21 if key in caches["default"] or cache_options["CACHE_BACKEND"] == "redis":
22 return caches["default"].get(key, default=default, version=version)
23 item = caches["process_cache"].get(key, default=None, version=None)
24 caches["default"].set(key, item, timeout=self.default_timeout, version=version)
25 return item
26
27 def set(self, key, value, timeout=None, version=None):
28 caches["default"].set(
29 key, value, timeout=timeout or self.default_timeout, version=version
30 )
31 if cache_options["CACHE_BACKEND"] != "redis":
32 caches["process_cache"].set(
33 key, value, timeout=timeout or self.default_timeout, version=version
34 )
35
```
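One subtlety worth flagging in `CrossProcessCache.get` above: when the key is missing from both caches, `caches["process_cache"].get(...)` returns `None`, and that `None` is then written into the default cache and returned, ignoring the caller's `default` argument. Below is a hedged sketch of a miss-safe lookup, written as a standalone function rather than the class method. It assumes the same `default` and `process_cache` aliases as the file above, leaves out the redis short-circuit to stay short, and uses an arbitrary timeout; it is not presented as Kolibri's actual fix:
```
from django.core.cache import caches


def cross_process_get(key, default=None, version=None, timeout=300):
    # Serve from the shared cache if the key is already there.
    if key in caches["default"]:
        return caches["default"].get(key, default=default, version=version)
    # Promote a hit from the per-process cache into the shared cache.
    if key in caches["process_cache"]:
        item = caches["process_cache"].get(key, version=version)
        caches["default"].set(key, item, timeout=timeout, version=version)
        return item
    # A genuine miss: report `default` and cache nothing.
    return default
```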
Path: `kolibri/core/content/utils/import_export_content.py`
Content:
```
1 import hashlib
2
3 from django.db.models import Sum
4 from requests.exceptions import ChunkedEncodingError
5 from requests.exceptions import ConnectionError
6 from requests.exceptions import HTTPError
7 from requests.exceptions import Timeout
8
9 from kolibri.core.content.models import ContentNode
10 from kolibri.core.content.models import LocalFile
11 from kolibri.core.content.utils.content_types_tools import (
12 renderable_contentnodes_q_filter,
13 )
14 from kolibri.core.content.utils.importability_annotation import (
15 get_channel_stats_from_disk,
16 )
17 from kolibri.core.content.utils.importability_annotation import (
18 get_channel_stats_from_peer,
19 )
20
21 try:
22 import OpenSSL
23
24 SSLERROR = OpenSSL.SSL.Error
25 except ImportError:
26 import requests
27
28 SSLERROR = requests.exceptions.SSLError
29
30 RETRY_STATUS_CODE = [502, 503, 504, 521, 522, 523, 524]
31
32
33 def get_nodes_to_transfer(
34 channel_id,
35 node_ids,
36 exclude_node_ids,
37 available,
38 renderable_only=True,
39 drive_id=None,
40 peer_id=None,
41 ):
42 nodes_to_include = ContentNode.objects.filter(channel_id=channel_id)
43
44 # if requested, filter down to only include particular topics/nodes
45 if node_ids:
46 nodes_to_include = nodes_to_include.filter(pk__in=node_ids).get_descendants(
47 include_self=True
48 )
49
50 # if requested, filter out nodes we're not able to render
51 if renderable_only:
52 nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter)
53
54 # filter down the query to remove files associated with nodes we've specifically been asked to exclude
55 if exclude_node_ids:
56 nodes_to_exclude = ContentNode.objects.filter(
57 pk__in=exclude_node_ids
58 ).get_descendants(include_self=True)
59
60 nodes_to_include = nodes_to_include.order_by().difference(
61 nodes_to_exclude.order_by()
62 )
63
64 # By default don't filter node ids by their underlying file importability
65 file_based_node_id_list = None
66 if drive_id:
67 file_based_node_id_list = get_channel_stats_from_disk(
68 channel_id, drive_id
69 ).keys()
70
71 if peer_id:
72 file_based_node_id_list = get_channel_stats_from_peer(
73 channel_id, peer_id
74 ).keys()
75 if file_based_node_id_list is not None:
76 nodes_to_include = nodes_to_include.filter(pk__in=file_based_node_id_list)
77 return nodes_to_include.filter(available=available).order_by()
78
79
80 def get_files_to_transfer(
81 channel_id,
82 node_ids,
83 exclude_node_ids,
84 available,
85 renderable_only=True,
86 drive_id=None,
87 peer_id=None,
88 ):
89
90 nodes_to_include = get_nodes_to_transfer(
91 channel_id,
92 node_ids,
93 exclude_node_ids,
94 renderable_only,
95 available,
96 drive_id=drive_id,
97 peer_id=peer_id,
98 )
99 return calculate_files_to_transfer(nodes_to_include, available)
100
101
102 def calculate_files_to_transfer(nodes_to_include, available):
103 files_to_transfer = LocalFile.objects.filter(
104 available=available, files__contentnode__in=nodes_to_include
105 )
106
107 # Make sure the files are unique, to avoid duplicating downloads
108 files_to_transfer = files_to_transfer.distinct()
109
110 # calculate the total file sizes across all files being returned in the queryset
111 total_bytes_to_transfer = (
112 files_to_transfer.aggregate(Sum("file_size"))["file_size__sum"] or 0
113 )
114
115 return files_to_transfer, total_bytes_to_transfer
116
117
118 def _get_node_ids(node_ids):
119
120 return (
121 ContentNode.objects.filter(pk__in=node_ids)
122 .get_descendants(include_self=True)
123 .values_list("id", flat=True)
124 )
125
126
127 def retry_import(e, **kwargs):
128 """
129 When an exception occurs during channel/content import, if
130 * there is an Internet connection error or timeout error,
131 or HTTPError where the error code is one of the RETRY_STATUS_CODE,
132           return True to retry the file transfer
133 * the file does not exist on the server or disk, skip the file and return False.
134 This only applies to content import not channel import.
135 * otherwise, raise the exception.
136 return value:
137 * True - needs retry.
138 * False - file is skipped. Does not need retry.
139 """
140
141 skip_404 = kwargs.pop("skip_404")
142
143 if (
144 isinstance(e, ConnectionError)
145 or isinstance(e, Timeout)
146 or isinstance(e, ChunkedEncodingError)
147 or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)
148 or (isinstance(e, SSLERROR) and "decryption failed or bad record mac" in str(e))
149 ):
150 return True
151
152 elif skip_404 and (
153 (isinstance(e, HTTPError) and e.response.status_code == 404)
154 or (isinstance(e, OSError) and e.errno == 2)
155 ):
156 return False
157
158 else:
159 raise e
160
161
162 def compare_checksums(file_name, file_id):
163 hasher = hashlib.md5()
164 with open(file_name, "rb") as f:
165 # Read chunks of 4096 bytes for memory efficiency
166 for chunk in iter(lambda: f.read(4096), b""):
167 hasher.update(chunk)
168 checksum = hasher.hexdigest()
169 return checksum == file_id
170
```
--- END FILES ---
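Note the parameter order in the two functions above: `get_nodes_to_transfer(channel_id, node_ids, exclude_node_ids, available, renderable_only=True, ...)`, while `get_files_to_transfer` forwards `renderable_only, available` positionally, so the two flags land in each other's slots. A call written with keyword arguments cannot be reordered silently; the snippet below is only a sketch of that call style for the forwarding call inside `get_files_to_transfer`, not a claim about which fix the maintainers chose:
```
nodes_to_include = get_nodes_to_transfer(
    channel_id,
    node_ids,
    exclude_node_ids,
    available,
    renderable_only=renderable_only,
    drive_id=drive_id,
    peer_id=peer_id,
)
```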
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/content/utils/import_export_content.py b/kolibri/core/content/utils/import_export_content.py
--- a/kolibri/core/content/utils/import_export_content.py
+++ b/kolibri/core/content/utils/import_export_content.py
@@ -91,8 +91,8 @@
channel_id,
node_ids,
exclude_node_ids,
- renderable_only,
available,
+ renderable_only=renderable_only,
drive_id=drive_id,
peer_id=peer_id,
)
diff --git a/kolibri/core/utils/cache.py b/kolibri/core/utils/cache.py
--- a/kolibri/core/utils/cache.py
+++ b/kolibri/core/utils/cache.py
@@ -20,9 +20,13 @@
def get(self, key, default=None, version=None):
if key in caches["default"] or cache_options["CACHE_BACKEND"] == "redis":
return caches["default"].get(key, default=default, version=version)
- item = caches["process_cache"].get(key, default=None, version=None)
- caches["default"].set(key, item, timeout=self.default_timeout, version=version)
- return item
+ if key in caches["process_cache"]:
+ item = caches["process_cache"].get(key, default=None, version=None)
+ caches["default"].set(
+ key, item, timeout=self.default_timeout, version=version
+ )
+ return item
+ return default
def set(self, key, value, timeout=None, version=None):
caches["default"].set(
| {"golden_diff": "diff --git a/kolibri/core/content/utils/import_export_content.py b/kolibri/core/content/utils/import_export_content.py\n--- a/kolibri/core/content/utils/import_export_content.py\n+++ b/kolibri/core/content/utils/import_export_content.py\n@@ -91,8 +91,8 @@\n channel_id,\n node_ids,\n exclude_node_ids,\n- renderable_only,\n available,\n+ renderable_only=renderable_only,\n drive_id=drive_id,\n peer_id=peer_id,\n )\ndiff --git a/kolibri/core/utils/cache.py b/kolibri/core/utils/cache.py\n--- a/kolibri/core/utils/cache.py\n+++ b/kolibri/core/utils/cache.py\n@@ -20,9 +20,13 @@\n def get(self, key, default=None, version=None):\n if key in caches[\"default\"] or cache_options[\"CACHE_BACKEND\"] == \"redis\":\n return caches[\"default\"].get(key, default=default, version=version)\n- item = caches[\"process_cache\"].get(key, default=None, version=None)\n- caches[\"default\"].set(key, item, timeout=self.default_timeout, version=version)\n- return item\n+ if key in caches[\"process_cache\"]:\n+ item = caches[\"process_cache\"].get(key, default=None, version=None)\n+ caches[\"default\"].set(\n+ key, item, timeout=self.default_timeout, version=version\n+ )\n+ return item\n+ return default\n \n def set(self, key, value, timeout=None, version=None):\n caches[\"default\"].set(\n", "issue": "import is broken\n### Observed behavior\r\n\r\ntrying to import doesn't work:\r\n\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nimport should work\r\n\r\n### User-facing consequences\r\n\r\ncannot import\r\n\r\n### Errors and logs\r\n\r\nnone observed\r\n\r\n### Steps to reproduce\r\n\r\nunsure. it happened during all imports, but then I cleared my `KOLIBRI_HOME` folder and things started working again\r\n\r\n### Context\r\n\r\n0.13.0 alpha 3\r\n\n", "before_files": [{"content": "from django.core.cache import caches\n\nfrom kolibri.utils.conf import OPTIONS\n\n\ncache_options = OPTIONS[\"Cache\"]\n\n\nclass CrossProcessCache(object):\n def __init__(self, default_timeout=cache_options[\"CACHE_TIMEOUT\"]):\n self.default_timeout = default_timeout\n\n def __contains__(self, key):\n if key in caches[\"default\"]:\n return True\n if cache_options[\"CACHE_BACKEND\"] != \"redis\" and key in caches[\"process_cache\"]:\n return True\n return False\n\n def get(self, key, default=None, version=None):\n if key in caches[\"default\"] or cache_options[\"CACHE_BACKEND\"] == \"redis\":\n return caches[\"default\"].get(key, default=default, version=version)\n item = caches[\"process_cache\"].get(key, default=None, version=None)\n caches[\"default\"].set(key, item, timeout=self.default_timeout, version=version)\n return item\n\n def set(self, key, value, timeout=None, version=None):\n caches[\"default\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n if cache_options[\"CACHE_BACKEND\"] != \"redis\":\n caches[\"process_cache\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n", "path": "kolibri/core/utils/cache.py"}, {"content": "import hashlib\n\nfrom django.db.models import Sum\nfrom requests.exceptions import ChunkedEncodingError\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import HTTPError\nfrom requests.exceptions import Timeout\n\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.content.models import LocalFile\nfrom kolibri.core.content.utils.content_types_tools import (\n renderable_contentnodes_q_filter,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n 
get_channel_stats_from_disk,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n get_channel_stats_from_peer,\n)\n\ntry:\n import OpenSSL\n\n SSLERROR = OpenSSL.SSL.Error\nexcept ImportError:\n import requests\n\n SSLERROR = requests.exceptions.SSLError\n\nRETRY_STATUS_CODE = [502, 503, 504, 521, 522, 523, 524]\n\n\ndef get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n nodes_to_include = ContentNode.objects.filter(channel_id=channel_id)\n\n # if requested, filter down to only include particular topics/nodes\n if node_ids:\n nodes_to_include = nodes_to_include.filter(pk__in=node_ids).get_descendants(\n include_self=True\n )\n\n # if requested, filter out nodes we're not able to render\n if renderable_only:\n nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter)\n\n # filter down the query to remove files associated with nodes we've specifically been asked to exclude\n if exclude_node_ids:\n nodes_to_exclude = ContentNode.objects.filter(\n pk__in=exclude_node_ids\n ).get_descendants(include_self=True)\n\n nodes_to_include = nodes_to_include.order_by().difference(\n nodes_to_exclude.order_by()\n )\n\n # By default don't filter node ids by their underlying file importability\n file_based_node_id_list = None\n if drive_id:\n file_based_node_id_list = get_channel_stats_from_disk(\n channel_id, drive_id\n ).keys()\n\n if peer_id:\n file_based_node_id_list = get_channel_stats_from_peer(\n channel_id, peer_id\n ).keys()\n if file_based_node_id_list is not None:\n nodes_to_include = nodes_to_include.filter(pk__in=file_based_node_id_list)\n return nodes_to_include.filter(available=available).order_by()\n\n\ndef get_files_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n\n nodes_to_include = get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n renderable_only,\n available,\n drive_id=drive_id,\n peer_id=peer_id,\n )\n return calculate_files_to_transfer(nodes_to_include, available)\n\n\ndef calculate_files_to_transfer(nodes_to_include, available):\n files_to_transfer = LocalFile.objects.filter(\n available=available, files__contentnode__in=nodes_to_include\n )\n\n # Make sure the files are unique, to avoid duplicating downloads\n files_to_transfer = files_to_transfer.distinct()\n\n # calculate the total file sizes across all files being returned in the queryset\n total_bytes_to_transfer = (\n files_to_transfer.aggregate(Sum(\"file_size\"))[\"file_size__sum\"] or 0\n )\n\n return files_to_transfer, total_bytes_to_transfer\n\n\ndef _get_node_ids(node_ids):\n\n return (\n ContentNode.objects.filter(pk__in=node_ids)\n .get_descendants(include_self=True)\n .values_list(\"id\", flat=True)\n )\n\n\ndef retry_import(e, **kwargs):\n \"\"\"\n When an exception occurs during channel/content import, if\n * there is an Internet connection error or timeout error,\n or HTTPError where the error code is one of the RETRY_STATUS_CODE,\n return return True to retry the file transfer\n * the file does not exist on the server or disk, skip the file and return False.\n This only applies to content import not channel import.\n * otherwise, raise the exception.\n return value:\n * True - needs retry.\n * False - file is skipped. 
Does not need retry.\n \"\"\"\n\n skip_404 = kwargs.pop(\"skip_404\")\n\n if (\n isinstance(e, ConnectionError)\n or isinstance(e, Timeout)\n or isinstance(e, ChunkedEncodingError)\n or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)\n or (isinstance(e, SSLERROR) and \"decryption failed or bad record mac\" in str(e))\n ):\n return True\n\n elif skip_404 and (\n (isinstance(e, HTTPError) and e.response.status_code == 404)\n or (isinstance(e, OSError) and e.errno == 2)\n ):\n return False\n\n else:\n raise e\n\n\ndef compare_checksums(file_name, file_id):\n hasher = hashlib.md5()\n with open(file_name, \"rb\") as f:\n # Read chunks of 4096 bytes for memory efficiency\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hasher.update(chunk)\n checksum = hasher.hexdigest()\n return checksum == file_id\n", "path": "kolibri/core/content/utils/import_export_content.py"}], "after_files": [{"content": "from django.core.cache import caches\n\nfrom kolibri.utils.conf import OPTIONS\n\n\ncache_options = OPTIONS[\"Cache\"]\n\n\nclass CrossProcessCache(object):\n def __init__(self, default_timeout=cache_options[\"CACHE_TIMEOUT\"]):\n self.default_timeout = default_timeout\n\n def __contains__(self, key):\n if key in caches[\"default\"]:\n return True\n if cache_options[\"CACHE_BACKEND\"] != \"redis\" and key in caches[\"process_cache\"]:\n return True\n return False\n\n def get(self, key, default=None, version=None):\n if key in caches[\"default\"] or cache_options[\"CACHE_BACKEND\"] == \"redis\":\n return caches[\"default\"].get(key, default=default, version=version)\n if key in caches[\"process_cache\"]:\n item = caches[\"process_cache\"].get(key, default=None, version=None)\n caches[\"default\"].set(\n key, item, timeout=self.default_timeout, version=version\n )\n return item\n return default\n\n def set(self, key, value, timeout=None, version=None):\n caches[\"default\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n if cache_options[\"CACHE_BACKEND\"] != \"redis\":\n caches[\"process_cache\"].set(\n key, value, timeout=timeout or self.default_timeout, version=version\n )\n", "path": "kolibri/core/utils/cache.py"}, {"content": "import hashlib\n\nfrom django.db.models import Sum\nfrom requests.exceptions import ChunkedEncodingError\nfrom requests.exceptions import ConnectionError\nfrom requests.exceptions import HTTPError\nfrom requests.exceptions import Timeout\n\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.content.models import LocalFile\nfrom kolibri.core.content.utils.content_types_tools import (\n renderable_contentnodes_q_filter,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n get_channel_stats_from_disk,\n)\nfrom kolibri.core.content.utils.importability_annotation import (\n get_channel_stats_from_peer,\n)\n\ntry:\n import OpenSSL\n\n SSLERROR = OpenSSL.SSL.Error\nexcept ImportError:\n import requests\n\n SSLERROR = requests.exceptions.SSLError\n\nRETRY_STATUS_CODE = [502, 503, 504, 521, 522, 523, 524]\n\n\ndef get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n nodes_to_include = ContentNode.objects.filter(channel_id=channel_id)\n\n # if requested, filter down to only include particular topics/nodes\n if node_ids:\n nodes_to_include = nodes_to_include.filter(pk__in=node_ids).get_descendants(\n include_self=True\n )\n\n # if requested, filter out nodes we're not able to render\n if 
renderable_only:\n nodes_to_include = nodes_to_include.filter(renderable_contentnodes_q_filter)\n\n # filter down the query to remove files associated with nodes we've specifically been asked to exclude\n if exclude_node_ids:\n nodes_to_exclude = ContentNode.objects.filter(\n pk__in=exclude_node_ids\n ).get_descendants(include_self=True)\n\n nodes_to_include = nodes_to_include.order_by().difference(\n nodes_to_exclude.order_by()\n )\n\n # By default don't filter node ids by their underlying file importability\n file_based_node_id_list = None\n if drive_id:\n file_based_node_id_list = get_channel_stats_from_disk(\n channel_id, drive_id\n ).keys()\n\n if peer_id:\n file_based_node_id_list = get_channel_stats_from_peer(\n channel_id, peer_id\n ).keys()\n if file_based_node_id_list is not None:\n nodes_to_include = nodes_to_include.filter(pk__in=file_based_node_id_list)\n return nodes_to_include.filter(available=available).order_by()\n\n\ndef get_files_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=True,\n drive_id=None,\n peer_id=None,\n):\n\n nodes_to_include = get_nodes_to_transfer(\n channel_id,\n node_ids,\n exclude_node_ids,\n available,\n renderable_only=renderable_only,\n drive_id=drive_id,\n peer_id=peer_id,\n )\n return calculate_files_to_transfer(nodes_to_include, available)\n\n\ndef calculate_files_to_transfer(nodes_to_include, available):\n files_to_transfer = LocalFile.objects.filter(\n available=available, files__contentnode__in=nodes_to_include\n )\n\n # Make sure the files are unique, to avoid duplicating downloads\n files_to_transfer = files_to_transfer.distinct()\n\n # calculate the total file sizes across all files being returned in the queryset\n total_bytes_to_transfer = (\n files_to_transfer.aggregate(Sum(\"file_size\"))[\"file_size__sum\"] or 0\n )\n\n return files_to_transfer, total_bytes_to_transfer\n\n\ndef _get_node_ids(node_ids):\n\n return (\n ContentNode.objects.filter(pk__in=node_ids)\n .get_descendants(include_self=True)\n .values_list(\"id\", flat=True)\n )\n\n\ndef retry_import(e, **kwargs):\n \"\"\"\n When an exception occurs during channel/content import, if\n * there is an Internet connection error or timeout error,\n or HTTPError where the error code is one of the RETRY_STATUS_CODE,\n return return True to retry the file transfer\n * the file does not exist on the server or disk, skip the file and return False.\n This only applies to content import not channel import.\n * otherwise, raise the exception.\n return value:\n * True - needs retry.\n * False - file is skipped. Does not need retry.\n \"\"\"\n\n skip_404 = kwargs.pop(\"skip_404\")\n\n if (\n isinstance(e, ConnectionError)\n or isinstance(e, Timeout)\n or isinstance(e, ChunkedEncodingError)\n or (isinstance(e, HTTPError) and e.response.status_code in RETRY_STATUS_CODE)\n or (isinstance(e, SSLERROR) and \"decryption failed or bad record mac\" in str(e))\n ):\n return True\n\n elif skip_404 and (\n (isinstance(e, HTTPError) and e.response.status_code == 404)\n or (isinstance(e, OSError) and e.errno == 2)\n ):\n return False\n\n else:\n raise e\n\n\ndef compare_checksums(file_name, file_id):\n hasher = hashlib.md5()\n with open(file_name, \"rb\") as f:\n # Read chunks of 4096 bytes for memory efficiency\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hasher.update(chunk)\n checksum = hasher.hexdigest()\n return checksum == file_id\n", "path": "kolibri/core/content/utils/import_export_content.py"}]} | 2,354 | 342 |
gh_patches_debug_24173 | rasdani/github-patches | git_diff | getnikola__nikola-2000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for cssminifier.com and javascript-minifier.com
They offer a nice service to minify CSS/JS and it's easy to use.
--- END ISSUE ---
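Both services expose a plain form-encoded POST endpoint, which is what makes them "easy to use"; a minimal sketch of the call pattern is below. The endpoints and the `input` form field mirror the fix shown later in this entry, and should be treated as assumptions if adapted elsewhere.
```python
# Sketch of the raw minifier APIs: POST the text, get the minified result back.
import requests

minified_css = requests.post("http://cssminifier.com/raw", data={"input": "a { color: #ff0000; }"}).text
minified_js = requests.post("http://javascript-minifier.com/raw", data={"input": "var answer = 40 + 2;"}).text
print(minified_css)
print(minified_js)
```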
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/filters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2015 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Utility functions to help run filters on files."""
28
29 from .utils import req_missing
30 from functools import wraps
31 import os
32 import io
33 import shutil
34 import subprocess
35 import tempfile
36 import shlex
37
38 try:
39 import typogrify.filters as typo
40 except ImportError:
41 typo = None # NOQA
42
43
44 def apply_to_binary_file(f):
45 """Apply a filter to a binary file.
46
47 Take a function f that transforms a data argument, and returns
48 a function that takes a filename and applies f to the contents,
49 in place. Reads files in binary mode.
50 """
51 @wraps(f)
52 def f_in_file(fname):
53 with open(fname, 'rb') as inf:
54 data = inf.read()
55 data = f(data)
56 with open(fname, 'wb+') as outf:
57 outf.write(data)
58
59 return f_in_file
60
61
62 def apply_to_text_file(f):
63 """Apply a filter to a text file.
64
65 Take a function f that transforms a data argument, and returns
66 a function that takes a filename and applies f to the contents,
67 in place. Reads files in UTF-8.
68 """
69 @wraps(f)
70 def f_in_file(fname):
71 with io.open(fname, 'r', encoding='utf-8') as inf:
72 data = inf.read()
73 data = f(data)
74 with io.open(fname, 'w+', encoding='utf-8') as outf:
75 outf.write(data)
76
77 return f_in_file
78
79
80 def list_replace(the_list, find, replacement):
81 """Replace all occurrences of ``find`` with ``replacement`` in ``the_list``."""
82 for i, v in enumerate(the_list):
83 if v == find:
84 the_list[i] = replacement
85
86
87 def runinplace(command, infile):
88 """Run a command in-place on a file.
89
90 command is a string of the form: "commandname %1 %2" and
91 it will be execed with infile as %1 and a temporary file
92 as %2. Then, that temporary file will be moved over %1.
93
94 Example usage:
95
96 runinplace("yui-compressor %1 -o %2", "myfile.css")
97
98 That will replace myfile.css with a minified version.
99
100 You can also supply command as a list.
101 """
102 if not isinstance(command, list):
103 command = shlex.split(command)
104
105 tmpdir = None
106
107 if "%2" in command:
108 tmpdir = tempfile.mkdtemp(prefix="nikola")
109 tmpfname = os.path.join(tmpdir, os.path.basename(infile))
110
111 try:
112 list_replace(command, "%1", infile)
113 if tmpdir:
114 list_replace(command, "%2", tmpfname)
115
116 subprocess.check_call(command)
117
118 if tmpdir:
119 shutil.move(tmpfname, infile)
120 finally:
121 if tmpdir:
122 shutil.rmtree(tmpdir)
123
124
125 def yui_compressor(infile):
126 """Run YUI Compressor on a file."""
127 yuicompressor = False
128 try:
129 subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
130 yuicompressor = 'yui-compressor'
131 except Exception:
132 pass
133 if not yuicompressor:
134 try:
135 subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
136 yuicompressor = 'yuicompressor'
137 except:
138 raise Exception("yui-compressor is not installed.")
139 return False
140
141 return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)
142
143
144 def closure_compiler(infile):
145 """Run closure-compiler on a file."""
146 return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)
147
148
149 def optipng(infile):
150 """Run optipng on a file."""
151 return runinplace(r"optipng -preserve -o2 -quiet %1", infile)
152
153
154 def jpegoptim(infile):
155 """Run jpegoptim on a file."""
156 return runinplace(r"jpegoptim -p --strip-all -q %1", infile)
157
158
159 def html_tidy_withconfig(infile):
160 """Run HTML Tidy with tidy5.conf as config file."""
161 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1")
162
163
164 def html_tidy_nowrap(infile):
165 """Run HTML Tidy without line wrapping."""
166 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1")
167
168
169 def html_tidy_wrap(infile):
170 """Run HTML Tidy with line wrapping."""
171 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1")
172
173
174 def html_tidy_wrap_attr(infile):
175 """Run HTML tidy with line wrapping and attribute indentation."""
176 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1")
177
178
179 def html_tidy_mini(infile):
180 """Run HTML tidy with minimal settings."""
181 return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1")
182
183
184 def _html_tidy_runner(infile, options):
185 """Run HTML Tidy."""
186 # Warnings (returncode 1) are not critical, and *everything* is a warning.
187 try:
188 status = runinplace(r"tidy5 " + options, infile)
189 except subprocess.CalledProcessError as err:
190 status = 0 if err.returncode == 1 else err.returncode
191 return status
192
193
194 @apply_to_text_file
195 def html5lib_minify(data):
196 """Minify with html5lib."""
197 import html5lib
198 import html5lib.serializer
199 data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),
200 tree='lxml',
201 quote_attr_values=False,
202 omit_optional_tags=True,
203 minimize_boolean_attributes=True,
204 strip_whitespace=True,
205 alphabetical_attributes=True,
206 escape_lt_in_attrs=True)
207 return data
208
209
210 @apply_to_text_file
211 def html5lib_xmllike(data):
212 """Transform document to an XML-like form with html5lib."""
213 import html5lib
214 import html5lib.serializer
215 data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),
216 tree='lxml',
217 quote_attr_values=True,
218 omit_optional_tags=False,
219 strip_whitespace=False,
220 alphabetical_attributes=True,
221 escape_lt_in_attrs=True)
222 return data
223
224
225 @apply_to_text_file
226 def minify_lines(data):
227 """Do nothing -- deprecated filter."""
228 return data
229
230
231 @apply_to_text_file
232 def typogrify(data):
233 """Prettify text with typogrify."""
234 if typo is None:
235 req_missing(['typogrify'], 'use the typogrify filter')
236
237 data = typo.amp(data)
238 data = typo.widont(data)
239 data = typo.smartypants(data)
240 # Disabled because of typogrify bug where it breaks <title>
241 # data = typo.caps(data)
242 data = typo.initial_quotes(data)
243 return data
244
245
246 @apply_to_text_file
247 def typogrify_sans_widont(data):
248 """Prettify text with typogrify, skipping the widont filter."""
249 # typogrify with widont disabled because it caused broken headline
250 # wrapping, see issue #1465
251 if typo is None:
252 req_missing(['typogrify'], 'use the typogrify_sans_widont filter')
253
254 data = typo.amp(data)
255 data = typo.smartypants(data)
256 # Disabled because of typogrify bug where it breaks <title>
257 # data = typo.caps(data)
258 data = typo.initial_quotes(data)
259 return data
260
261
262 @apply_to_text_file
263 def php_template_injection(data):
264 """Insert PHP code into Nikola templates."""
265 import re
266 template = re.search('<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data)
267 if template:
268 source = template.group(1)
269 with io.open(source, "r", encoding="utf-8") as in_file:
270 phpdata = in_file.read()
271 _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
272 phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
273 phpdata = re.sub(template.group(0), phpdata, data)
274 return phpdata
275 else:
276 return data
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/filters.py b/nikola/filters.py
--- a/nikola/filters.py
+++ b/nikola/filters.py
@@ -26,7 +26,6 @@
"""Utility functions to help run filters on files."""
-from .utils import req_missing
from functools import wraps
import os
import io
@@ -39,6 +38,9 @@
import typogrify.filters as typo
except ImportError:
typo = None # NOQA
+import requests
+
+from .utils import req_missing, LOGGER
def apply_to_binary_file(f):
@@ -274,3 +276,29 @@
return phpdata
else:
return data
+
+
+@apply_to_text_file
+def cssminify(data):
+ """Minify CSS using http://cssminifier.com/."""
+ try:
+ url = 'http://cssminifier.com/raw'
+ _data = {'input': data}
+ response = requests.post(url, data=_data)
+ return response.text
+ except Exception as exc:
+ LOGGER.error("can't use cssminifier.com: {}", exc)
+ return data
+
+
+@apply_to_text_file
+def jsminify(data):
+ """Minify JS using http://javascript-minifier.com/."""
+ try:
+ url = 'http://javascript-minifier.com/raw'
+ _data = {'input': data}
+ response = requests.post(url, data=_data)
+ return response.text
+ except Exception as exc:
+ LOGGER.error("can't use javascript-minifier.com: {}", exc)
+ return data
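To put the new filters to work, a site would opt in through its conf.py. A minimal sketch, assuming Nikola's usual FILTERS mapping and the `filters.<name>` dotted names used in the sample configuration (both assumptions, not part of the diff above):
```python
# conf.py sketch: run the remote minifiers over built CSS/JS output.
# Keys are file extensions; values are lists of filter names Nikola resolves at build time.
FILTERS = {
    ".css": ["filters.cssminify"],
    ".js": ["filters.jsminify"],
}
```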
| {"golden_diff": "diff --git a/nikola/filters.py b/nikola/filters.py\n--- a/nikola/filters.py\n+++ b/nikola/filters.py\n@@ -26,7 +26,6 @@\n \n \"\"\"Utility functions to help run filters on files.\"\"\"\n \n-from .utils import req_missing\n from functools import wraps\n import os\n import io\n@@ -39,6 +38,9 @@\n import typogrify.filters as typo\n except ImportError:\n typo = None # NOQA\n+import requests\n+\n+from .utils import req_missing, LOGGER\n \n \n def apply_to_binary_file(f):\n@@ -274,3 +276,29 @@\n return phpdata\n else:\n return data\n+\n+\n+@apply_to_text_file\n+def cssminify(data):\n+ \"\"\"Minify CSS using http://cssminifier.com/.\"\"\"\n+ try:\n+ url = 'http://cssminifier.com/raw'\n+ _data = {'input': data}\n+ response = requests.post(url, data=_data)\n+ return response.text\n+ except Exception as exc:\n+ LOGGER.error(\"can't use cssminifier.com: {}\", exc)\n+ return data\n+\n+\n+@apply_to_text_file\n+def jsminify(data):\n+ \"\"\"Minify JS using http://javascript-minifier.com/.\"\"\"\n+ try:\n+ url = 'http://javascript-minifier.com/raw'\n+ _data = {'input': data}\n+ response = requests.post(url, data=_data)\n+ return response.text\n+ except Exception as exc:\n+ LOGGER.error(\"can't use javascript-minifier.com: {}\", exc)\n+ return data\n", "issue": "Add support for cssminifier.com and javascript-minifier.com\nThey offer a nice service to minify CSS/JS and it's easy to use.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport io\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Apply a filter to a binary file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. 
Reads files in binary mode.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Apply a filter to a text file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with io.open(fname, 'r', encoding='utf-8') as inf:\n data = inf.read()\n data = f(data)\n with io.open(fname, 'w+', encoding='utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"\"\"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``.\"\"\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n \"\"\"Run YUI Compressor on a file.\"\"\"\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef closure_compiler(infile):\n \"\"\"Run closure-compiler on a file.\"\"\"\n return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)\n\n\ndef optipng(infile):\n \"\"\"Run optipng on a file.\"\"\"\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n \"\"\"Run jpegoptim on a file.\"\"\"\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\ndef html_tidy_withconfig(infile):\n \"\"\"Run HTML Tidy with tidy5.conf as config file.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1\")\n\n\ndef html_tidy_nowrap(infile):\n \"\"\"Run HTML Tidy without line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap(infile):\n \"\"\"Run HTML Tidy with line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent 
--indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap_attr(infile):\n \"\"\"Run HTML tidy with line wrapping and attribute indentation.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_mini(infile):\n \"\"\"Run HTML tidy with minimal settings.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1\")\n\n\ndef _html_tidy_runner(infile, options):\n \"\"\"Run HTML Tidy.\"\"\"\n # Warnings (returncode 1) are not critical, and *everything* is a warning.\n try:\n status = runinplace(r\"tidy5 \" + options, infile)\n except subprocess.CalledProcessError as err:\n status = 0 if err.returncode == 1 else err.returncode\n return status\n\n\n@apply_to_text_file\ndef html5lib_minify(data):\n \"\"\"Minify with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=False,\n omit_optional_tags=True,\n minimize_boolean_attributes=True,\n strip_whitespace=True,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef html5lib_xmllike(data):\n \"\"\"Transform document to an XML-like form with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=True,\n omit_optional_tags=False,\n strip_whitespace=False,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef minify_lines(data):\n \"\"\"Do nothing -- deprecated filter.\"\"\"\n return data\n\n\n@apply_to_text_file\ndef typogrify(data):\n \"\"\"Prettify text with typogrify.\"\"\"\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef typogrify_sans_widont(data):\n \"\"\"Prettify text with typogrify, skipping the widont filter.\"\"\"\n # typogrify with widont disabled because it caused broken headline\n # wrapping, see issue #1465\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify_sans_widont filter')\n\n data = typo.amp(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef php_template_injection(data):\n \"\"\"Insert PHP code into Nikola templates.\"\"\"\n import re\n template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data)\n if template:\n source = template.group(1)\n with io.open(source, \"r\", encoding=\"utf-8\") as in_file:\n phpdata = in_file.read()\n _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\\n' * 2) + '|' + (\"\\r\\n\" * 2) + ')'\n phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]\n phpdata = re.sub(template.group(0), phpdata, data)\n return phpdata\n else:\n return 
data\n", "path": "nikola/filters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help run filters on files.\"\"\"\n\nfrom functools import wraps\nimport os\nimport io\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\nimport requests\n\nfrom .utils import req_missing, LOGGER\n\n\ndef apply_to_binary_file(f):\n \"\"\"Apply a filter to a binary file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in binary mode.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Apply a filter to a text file.\n\n Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\n \"\"\"\n @wraps(f)\n def f_in_file(fname):\n with io.open(fname, 'r', encoding='utf-8') as inf:\n data = inf.read()\n data = f(data)\n with io.open(fname, 'w+', encoding='utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"\"\"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``.\"\"\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. 
Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n \"\"\"Run YUI Compressor on a file.\"\"\"\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef closure_compiler(infile):\n \"\"\"Run closure-compiler on a file.\"\"\"\n return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)\n\n\ndef optipng(infile):\n \"\"\"Run optipng on a file.\"\"\"\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n \"\"\"Run jpegoptim on a file.\"\"\"\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\ndef html_tidy_withconfig(infile):\n \"\"\"Run HTML Tidy with tidy5.conf as config file.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1\")\n\n\ndef html_tidy_nowrap(infile):\n \"\"\"Run HTML Tidy without line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap(infile):\n \"\"\"Run HTML Tidy with line wrapping.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_wrap_attr(infile):\n \"\"\"Run HTML tidy with line wrapping and attribute indentation.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1\")\n\n\ndef html_tidy_mini(infile):\n \"\"\"Run HTML tidy with minimal settings.\"\"\"\n return _html_tidy_runner(infile, r\"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1\")\n\n\ndef _html_tidy_runner(infile, options):\n \"\"\"Run HTML Tidy.\"\"\"\n # Warnings (returncode 1) are not critical, and *everything* is a warning.\n try:\n status = runinplace(r\"tidy5 \" + options, infile)\n except subprocess.CalledProcessError as err:\n status = 0 if err.returncode == 1 else err.returncode\n return status\n\n\n@apply_to_text_file\ndef 
html5lib_minify(data):\n \"\"\"Minify with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=False,\n omit_optional_tags=True,\n minimize_boolean_attributes=True,\n strip_whitespace=True,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef html5lib_xmllike(data):\n \"\"\"Transform document to an XML-like form with html5lib.\"\"\"\n import html5lib\n import html5lib.serializer\n data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),\n tree='lxml',\n quote_attr_values=True,\n omit_optional_tags=False,\n strip_whitespace=False,\n alphabetical_attributes=True,\n escape_lt_in_attrs=True)\n return data\n\n\n@apply_to_text_file\ndef minify_lines(data):\n \"\"\"Do nothing -- deprecated filter.\"\"\"\n return data\n\n\n@apply_to_text_file\ndef typogrify(data):\n \"\"\"Prettify text with typogrify.\"\"\"\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef typogrify_sans_widont(data):\n \"\"\"Prettify text with typogrify, skipping the widont filter.\"\"\"\n # typogrify with widont disabled because it caused broken headline\n # wrapping, see issue #1465\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify_sans_widont filter')\n\n data = typo.amp(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef php_template_injection(data):\n \"\"\"Insert PHP code into Nikola templates.\"\"\"\n import re\n template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data)\n if template:\n source = template.group(1)\n with io.open(source, \"r\", encoding=\"utf-8\") as in_file:\n phpdata = in_file.read()\n _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\\n' * 2) + '|' + (\"\\r\\n\" * 2) + ')'\n phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]\n phpdata = re.sub(template.group(0), phpdata, data)\n return phpdata\n else:\n return data\n\n\n@apply_to_text_file\ndef cssminify(data):\n \"\"\"Minify CSS using http://cssminifier.com/.\"\"\"\n try:\n url = 'http://cssminifier.com/raw'\n _data = {'input': data}\n response = requests.post(url, data=_data)\n return response.text\n except Exception as exc:\n LOGGER.error(\"can't use cssminifier.com: {}\", exc)\n return data\n\n\n@apply_to_text_file\ndef jsminify(data):\n \"\"\"Minify JS using http://javascript-minifier.com/.\"\"\"\n try:\n url = 'http://javascript-minifier.com/raw'\n _data = {'input': data}\n response = requests.post(url, data=_data)\n return response.text\n except Exception as exc:\n LOGGER.error(\"can't use javascript-minifier.com: {}\", exc)\n return data\n", "path": "nikola/filters.py"}]} | 3,330 | 369 |
gh_patches_debug_24404 | rasdani/github-patches | git_diff | mdn__kuma-6974 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
T - Add 301 redirect to Subscriptions Management page
`/payments/recurring/management` is moving to `/payments/management`, therefore we need to add a 301 redirect in kuma/payments/urls.py.
See https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/payments/urls.py`
Content:
```
1 from django.urls import path
2
3 from . import views
4
5 lang_urlpatterns = [
6 path("terms/", views.payment_terms, name="payment_terms"),
7 path("thank-you/", views.thank_you, name="thank_you"),
8 path("management/", views.payment_management, name="payment_management",),
9 path("", views.index, name="payments_index"),
10 ]
11
```
Path: `kuma/payments/views.py`
Content:
```
1 import logging
2
3 from django.shortcuts import render
4 from django.views.decorators.cache import never_cache
5 from waffle.decorators import waffle_flag
6
7 from kuma.users.models import User
8
9
10 log = logging.getLogger("kuma.payments.views")
11
12
13 @never_cache
14 def index(request):
15 highest_subscriber_number = User.get_highest_subscriber_number()
16 # TODO: This is never unit tested because our tests never test SSR rendering.
17 # See https://github.com/mdn/kuma/issues/6797
18 context = {"next_subscriber_number": highest_subscriber_number + 1}
19 return render(request, "payments/index.html", context)
20
21
22 @waffle_flag("subscription")
23 @never_cache
24 def thank_you(request):
25 return render(request, "payments/thank-you.html")
26
27
28 @waffle_flag("subscription")
29 @never_cache
30 def payment_terms(request):
31 return render(request, "payments/terms.html")
32
33
34 @waffle_flag("subscription")
35 @never_cache
36 def payment_management(request):
37 return render(request, "payments/management.html")
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py
--- a/kuma/payments/urls.py
+++ b/kuma/payments/urls.py
@@ -1,10 +1,17 @@
from django.urls import path
+from django.views.generic import RedirectView
from . import views
lang_urlpatterns = [
path("terms/", views.payment_terms, name="payment_terms"),
path("thank-you/", views.thank_you, name="thank_you"),
- path("management/", views.payment_management, name="payment_management",),
+ path(
+ # This is the old URL we had for a while
+ "recurring/management/",
+ RedirectView.as_view(pattern_name="payment_management", permanent=True),
+ name="recurring_payment_management",
+ ),
+ path("management/", views.payment_management, name="payment_management"),
path("", views.index, name="payments_index"),
]
diff --git a/kuma/payments/views.py b/kuma/payments/views.py
--- a/kuma/payments/views.py
+++ b/kuma/payments/views.py
@@ -13,8 +13,6 @@
@never_cache
def index(request):
highest_subscriber_number = User.get_highest_subscriber_number()
- # TODO: This is never unit tested because our tests never test SSR rendering.
- # See https://github.com/mdn/kuma/issues/6797
context = {"next_subscriber_number": highest_subscriber_number + 1}
return render(request, "payments/index.html", context)
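A quick way to sanity-check the redirect is a small test against the old URL; this sketch assumes pytest-django's `client` fixture and kuma's locale-prefixed URLs, and is not part of the patch:
```python
# Sketch: the retired recurring-management URL should answer with a permanent redirect.
import pytest


@pytest.mark.django_db
def test_old_management_url_redirects(client):
    response = client.get("/en-US/payments/recurring/management/")
    assert response.status_code == 301  # RedirectView(permanent=True) issues a 301
    assert response["Location"].endswith("/payments/management/")
```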
| {"golden_diff": "diff --git a/kuma/payments/urls.py b/kuma/payments/urls.py\n--- a/kuma/payments/urls.py\n+++ b/kuma/payments/urls.py\n@@ -1,10 +1,17 @@\n from django.urls import path\n+from django.views.generic import RedirectView\n \n from . import views\n \n lang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n- path(\"management/\", views.payment_management, name=\"payment_management\",),\n+ path(\n+ # This is the old URL we had for a while\n+ \"recurring/management/\",\n+ RedirectView.as_view(pattern_name=\"payment_management\", permanent=True),\n+ name=\"recurring_payment_management\",\n+ ),\n+ path(\"management/\", views.payment_management, name=\"payment_management\"),\n path(\"\", views.index, name=\"payments_index\"),\n ]\ndiff --git a/kuma/payments/views.py b/kuma/payments/views.py\n--- a/kuma/payments/views.py\n+++ b/kuma/payments/views.py\n@@ -13,8 +13,6 @@\n @never_cache\n def index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n- # TODO: This is never unit tested because our tests never test SSR rendering.\n- # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n", "issue": "T - Add 301 redirect to Subscriptions Management page \n`/payments/recurring/management` is moving to `/payments/management`, therefore we need to add 301 redirect in kuma/payments/urls.py\r\n\r\nSee https://github.com/mdn/kuma/issues/6703#issuecomment-614246571 for context \n", "before_files": [{"content": "from django.urls import path\n\nfrom . import views\n\nlang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n path(\"management/\", views.payment_management, name=\"payment_management\",),\n path(\"\", views.index, name=\"payments_index\"),\n]\n", "path": "kuma/payments/urls.py"}, {"content": "import logging\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import never_cache\nfrom waffle.decorators import waffle_flag\n\nfrom kuma.users.models import User\n\n\nlog = logging.getLogger(\"kuma.payments.views\")\n\n\n@never_cache\ndef index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n # TODO: This is never unit tested because our tests never test SSR rendering.\n # See https://github.com/mdn/kuma/issues/6797\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef thank_you(request):\n return render(request, \"payments/thank-you.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_terms(request):\n return render(request, \"payments/terms.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_management(request):\n return render(request, \"payments/management.html\")\n", "path": "kuma/payments/views.py"}], "after_files": [{"content": "from django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom . 
import views\n\nlang_urlpatterns = [\n path(\"terms/\", views.payment_terms, name=\"payment_terms\"),\n path(\"thank-you/\", views.thank_you, name=\"thank_you\"),\n path(\n # This is the old URL we had for a while\n \"recurring/management/\",\n RedirectView.as_view(pattern_name=\"payment_management\", permanent=True),\n name=\"recurring_payment_management\",\n ),\n path(\"management/\", views.payment_management, name=\"payment_management\"),\n path(\"\", views.index, name=\"payments_index\"),\n]\n", "path": "kuma/payments/urls.py"}, {"content": "import logging\n\nfrom django.shortcuts import render\nfrom django.views.decorators.cache import never_cache\nfrom waffle.decorators import waffle_flag\n\nfrom kuma.users.models import User\n\n\nlog = logging.getLogger(\"kuma.payments.views\")\n\n\n@never_cache\ndef index(request):\n highest_subscriber_number = User.get_highest_subscriber_number()\n context = {\"next_subscriber_number\": highest_subscriber_number + 1}\n return render(request, \"payments/index.html\", context)\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef thank_you(request):\n return render(request, \"payments/thank-you.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_terms(request):\n return render(request, \"payments/terms.html\")\n\n\n@waffle_flag(\"subscription\")\n@never_cache\ndef payment_management(request):\n return render(request, \"payments/management.html\")\n", "path": "kuma/payments/views.py"}]} | 738 | 339 |
gh_patches_debug_31279 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-1897 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bundle Easyblock does not put patches into root config
At https://github.com/easybuilders/easybuild-easyblocks/blob/b99cc5a4dfb98cafbbd4a8827ea9bfb444724e27/easybuild/easyblocks/generic/bundle.py#L156 the patch checksums are added to the root config, but the patches themselves are not, which makes e.g. the unit tests fail; see https://github.com/easybuilders/easybuild-easyconfigs/pull/9546
Should the patches be added? From the logic in check_checksums it seems so: yes
--- END ISSUE ---
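To make the mismatch concrete, consider an illustrative component spec of the kind the Bundle easyblock parses (names and checksums are invented; the structure follows the `components` tuples handled in bundle.py below). Each component's checksum list covers both its source and its patch, so copying only the checksums into the root config while dropping the patch entry leaves the aggregated lists out of step:
```python
# Hypothetical easyconfig fragment for one bundle component (illustration only).
components = [
    ('libfoo', '1.2.3', {
        'sources': ['libfoo-1.2.3.tar.gz'],
        'patches': ['libfoo-1.2.3_fix-install-path.patch'],
        'checksums': [
            '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef',  # source tarball
            'fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210',  # patch file
        ],
    }),
]
```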
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/generic/bundle.py`
Content:
```
1 ##
2 # Copyright 2009-2019 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for installing a bundle of modules, implemented as a generic easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Pieter De Baets (Ghent University)
32 @author: Jens Timmerman (Ghent University)
33 """
34 import copy
35 import os
36
37 import easybuild.tools.environment as env
38 from easybuild.framework.easyblock import EasyBlock
39 from easybuild.framework.easyconfig import CUSTOM
40 from easybuild.framework.easyconfig.easyconfig import get_easyblock_class
41 from easybuild.tools.build_log import EasyBuildError, print_msg
42 from easybuild.tools.modules import get_software_root, get_software_version
43 from easybuild.tools.py2vs3 import string_type
44
45
46 class Bundle(EasyBlock):
47 """
48 Bundle of modules: only generate module files, nothing to build/install
49 """
50
51 @staticmethod
52 def extra_options(extra_vars=None):
53 """Easyconfig parameters specific to bundles."""
54 if extra_vars is None:
55 extra_vars = {}
56 extra_vars.update({
57 'altroot': [None, "Software name of dependency to use to define $EBROOT for this bundle", CUSTOM],
58 'altversion': [None, "Software name of dependency to use to define $EBVERSION for this bundle", CUSTOM],
59 'default_component_specs': [{}, "Default specs to use for every component", CUSTOM],
60 'components': [(), "List of components to install: tuples w/ name, version and easyblock to use", CUSTOM],
61 'default_easyblock': [None, "Default easyblock to use for components", CUSTOM],
62 })
63 return EasyBlock.extra_options(extra_vars)
64
65 def __init__(self, *args, **kwargs):
66 """Initialize easyblock."""
67 super(Bundle, self).__init__(*args, **kwargs)
68 self.altroot = None
69 self.altversion = None
70
71 # list of EasyConfig instances for components
72 self.comp_cfgs = []
73
74 # list of sources for bundle itself *must* be empty
75 if self.cfg['sources']:
76 raise EasyBuildError("List of sources for bundle itself must be empty, found %s", self.cfg['sources'])
77
78 # disable templating to avoid premature resolving of template values
79 self.cfg.enable_templating = False
80
81 # list of checksums for patches (must be included after checksums for sources)
82 checksums_patches = []
83
84 for comp in self.cfg['components']:
85 comp_name, comp_version, comp_specs = comp[0], comp[1], {}
86 if len(comp) == 3:
87 comp_specs = comp[2]
88
89 comp_cfg = self.cfg.copy()
90
91 easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock']
92 if easyblock is None:
93 raise EasyBuildError("No easyblock specified for component %s v%s", comp_cfg['name'],
94 comp_cfg['version'])
95 elif easyblock == 'Bundle':
96 raise EasyBuildError("The Bundle easyblock can not be used to install components in a bundle")
97
98 comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name'])
99
100 # make sure that extra easyconfig parameters are known, so they can be set
101 extra_opts = comp_cfg.easyblock.extra_options()
102 comp_cfg.extend_params(copy.deepcopy(extra_opts))
103
104 comp_cfg['name'] = comp_name
105 comp_cfg['version'] = comp_version
106 comp_cfg.generate_template_values()
107
108 # do not inherit easyblock to use from parent (since that would result in an infinite loop in install_step)
109 comp_cfg['easyblock'] = None
110
111 # reset list of sources/source_urls/checksums
112 comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []
113
114 for key in self.cfg['default_component_specs']:
115 comp_cfg[key] = self.cfg['default_component_specs'][key]
116
117 for key in comp_specs:
118 comp_cfg[key] = comp_specs[key]
119
120 # enable resolving of templates for component-specific EasyConfig instance
121 comp_cfg.enable_templating = True
122
123 # 'sources' is strictly required
124 if comp_cfg['sources']:
125 # If per-component source URLs are provided, attach them directly to the relevant sources
126 if comp_cfg['source_urls']:
127 for source in comp_cfg['sources']:
128 if isinstance(source, string_type):
129 self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}])
130 elif isinstance(source, dict):
131 # Update source_urls in the 'source' dict to use the one for the components
132 # (if it doesn't already exist)
133 if 'source_urls' not in source:
134 source['source_urls'] = comp_cfg['source_urls']
135 self.cfg.update('sources', [source])
136 else:
137 raise EasyBuildError("Source %s for component %s is neither a string nor a dict, cannot "
138 "process it.", source, comp_cfg['name'])
139 else:
140 # add component sources to list of sources
141 self.cfg.update('sources', comp_cfg['sources'])
142 else:
143 raise EasyBuildError("No sources specification for component %s v%s", comp_name, comp_version)
144
145 if comp_cfg['checksums']:
146 src_cnt = len(comp_cfg['sources'])
147
148 # add per-component checksums for sources to list of checksums
149 self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt])
150
151 # add per-component checksums for patches to list of checksums for patches
152 checksums_patches.extend(comp_cfg['checksums'][src_cnt:])
153
154 self.comp_cfgs.append(comp_cfg)
155
156 self.cfg.update('checksums', checksums_patches)
157
158 self.cfg.enable_templating = True
159
160 def check_checksums(self):
161 """
162 Check whether a SHA256 checksum is available for all sources & patches (incl. extensions).
163
164 :return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.)
165 """
166 checksum_issues = super(Bundle, self).check_checksums()
167
168 for comp in self.comp_cfgs:
169 checksum_issues.extend(self.check_checksums_for(comp, sub="of component %s" % comp['name']))
170
171 return checksum_issues
172
173 def configure_step(self):
174 """Collect altroot/altversion info."""
175 # pick up altroot/altversion, if they are defined
176 self.altroot = None
177 if self.cfg['altroot']:
178 self.altroot = get_software_root(self.cfg['altroot'])
179 self.altversion = None
180 if self.cfg['altversion']:
181 self.altversion = get_software_version(self.cfg['altversion'])
182
183 def build_step(self):
184 """Do nothing."""
185 pass
186
187 def install_step(self):
188 """Install components, if specified."""
189 comp_cnt = len(self.cfg['components'])
190 for idx, cfg in enumerate(self.comp_cfgs):
191
192 print_msg("installing bundle component %s v%s (%d/%d)..." % (cfg['name'], cfg['version'], idx+1, comp_cnt))
193 self.log.info("Installing component %s v%s using easyblock %s", cfg['name'], cfg['version'], cfg.easyblock)
194
195 comp = cfg.easyblock(cfg)
196
197 # correct build/install dirs
198 comp.builddir = self.builddir
199 comp.install_subdir, comp.installdir = self.install_subdir, self.installdir
200
201 # make sure we can build in parallel
202 comp.set_parallel()
203
204 # figure out correct start directory
205 comp.guess_start_dir()
206
207 # need to run fetch_patches to ensure per-component patches are applied
208 comp.fetch_patches()
209
210 comp.src = []
211
212 # find match entries in self.src for this component
213 for source in comp.cfg['sources']:
214 if isinstance(source, string_type):
215 comp_src_fn = source
216 elif isinstance(source, dict):
217 if 'filename' in source:
218 comp_src_fn = source['filename']
219 else:
220 raise EasyBuildError("Encountered source file specified as dict without 'filename': %s", source)
221 else:
222 raise EasyBuildError("Specification of unknown type for source file: %s", source)
223
224 found = False
225 for src in self.src:
226 if src['name'] == comp_src_fn:
227 self.log.info("Found spec for source %s for component %s: %s", comp_src_fn, comp.name, src)
228 comp.src.append(src)
229 found = True
230 break
231 if not found:
232 raise EasyBuildError("Failed to find spec for source %s for component %s", comp_src_fn, comp.name)
233
234 # location of first unpacked source is used to determine where to apply patch(es)
235 comp.src[-1]['finalpath'] = comp.cfg['start_dir']
236
237 # run relevant steps
238 for step_name in ['patch', 'configure', 'build', 'install']:
239 if step_name in cfg['skipsteps']:
240 comp.log.info("Skipping '%s' step for component %s v%s", step_name, cfg['name'], cfg['version'])
241 else:
242 comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])
243
244 # update environment to ensure stuff provided by former components can be picked up by latter components
245 # once the installation is finalised, this is handled by the generated module
246 reqs = comp.make_module_req_guess()
247 for envvar in reqs:
248 curr_val = os.getenv(envvar, '')
249 curr_paths = curr_val.split(os.pathsep)
250 for subdir in reqs[envvar]:
251 path = os.path.join(self.installdir, subdir)
252 if path not in curr_paths:
253 if curr_val:
254 new_val = '%s:%s' % (path, curr_val)
255 else:
256 new_val = path
257 env.setvar(envvar, new_val)
258
259 def make_module_extra(self, *args, **kwargs):
260 """Set extra stuff in module file, e.g. $EBROOT*, $EBVERSION*, etc."""
261 if 'altroot' not in kwargs:
262 kwargs['altroot'] = self.altroot
263 if 'altversion' not in kwargs:
264 kwargs['altversion'] = self.altversion
265 return super(Bundle, self).make_module_extra(*args, **kwargs)
266
267 def sanity_check_step(self, *args, **kwargs):
268 """
269 Nothing is being installed, so just being able to load the (fake) module is sufficient
270 """
271 if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']:
272 super(Bundle, self).sanity_check_step(*args, **kwargs)
273 else:
274 self.log.info("Testing loading of module '%s' by means of sanity check" % self.full_mod_name)
275 fake_mod_data = self.load_fake_module(purge=True)
276 self.log.debug("Cleaning up after testing loading of module")
277 self.clean_up_fake_module(fake_mod_data)
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/easybuild/easyblocks/generic/bundle.py b/easybuild/easyblocks/generic/bundle.py
--- a/easybuild/easyblocks/generic/bundle.py
+++ b/easybuild/easyblocks/generic/bundle.py
@@ -74,6 +74,8 @@
# list of sources for bundle itself *must* be empty
if self.cfg['sources']:
raise EasyBuildError("List of sources for bundle itself must be empty, found %s", self.cfg['sources'])
+ if self.cfg['patches']:
+ raise EasyBuildError("List of patches for bundle itself must be empty, found %s", self.cfg['patches'])
# disable templating to avoid premature resolving of template values
self.cfg.enable_templating = False
@@ -109,7 +111,7 @@
comp_cfg['easyblock'] = None
# reset list of sources/source_urls/checksums
- comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []
+ comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = comp_cfg['patches'] = []
for key in self.cfg['default_component_specs']:
comp_cfg[key] = self.cfg['default_component_specs'][key]
@@ -151,6 +153,9 @@
# add per-component checksums for patches to list of checksums for patches
checksums_patches.extend(comp_cfg['checksums'][src_cnt:])
+ if comp_cfg['patches']:
+ self.cfg.update('patches', comp_cfg['patches'])
+
self.comp_cfgs.append(comp_cfg)
self.cfg.update('checksums', checksums_patches)
| {"golden_diff": "diff --git a/easybuild/easyblocks/generic/bundle.py b/easybuild/easyblocks/generic/bundle.py\n--- a/easybuild/easyblocks/generic/bundle.py\n+++ b/easybuild/easyblocks/generic/bundle.py\n@@ -74,6 +74,8 @@\n # list of sources for bundle itself *must* be empty\n if self.cfg['sources']:\n raise EasyBuildError(\"List of sources for bundle itself must be empty, found %s\", self.cfg['sources'])\n+ if self.cfg['patches']:\n+ raise EasyBuildError(\"List of patches for bundle itself must be empty, found %s\", self.cfg['patches'])\n \n # disable templating to avoid premature resolving of template values\n self.cfg.enable_templating = False\n@@ -109,7 +111,7 @@\n comp_cfg['easyblock'] = None\n \n # reset list of sources/source_urls/checksums\n- comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []\n+ comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = comp_cfg['patches'] = []\n \n for key in self.cfg['default_component_specs']:\n comp_cfg[key] = self.cfg['default_component_specs'][key]\n@@ -151,6 +153,9 @@\n # add per-component checksums for patches to list of checksums for patches\n checksums_patches.extend(comp_cfg['checksums'][src_cnt:])\n \n+ if comp_cfg['patches']:\n+ self.cfg.update('patches', comp_cfg['patches'])\n+\n self.comp_cfgs.append(comp_cfg)\n \n self.cfg.update('checksums', checksums_patches)\n", "issue": "Bundle Easyblock does not put patches into root config\nAt https://github.com/easybuilders/easybuild-easyblocks/blob/b99cc5a4dfb98cafbbd4a8827ea9bfb444724e27/easybuild/easyblocks/generic/bundle.py#L156 the patches checksums are added to the root, but the patches are not which makes e.g. the unit tests fail, see https://github.com/easybuilders/easybuild-easyconfigs/pull/9546\r\n\r\nShould the patches be added? From the logic in the check_checksums it seems: yes\n", "before_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing a bundle of modules, implemented as a generic easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n\"\"\"\nimport copy\nimport os\n\nimport easybuild.tools.environment as env\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.easyconfig.easyconfig import get_easyblock_class\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.modules import get_software_root, get_software_version\nfrom easybuild.tools.py2vs3 import string_type\n\n\nclass Bundle(EasyBlock):\n \"\"\"\n Bundle of modules: only generate module files, nothing to build/install\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Easyconfig parameters specific to bundles.\"\"\"\n if extra_vars is None:\n extra_vars = {}\n extra_vars.update({\n 'altroot': [None, \"Software name of dependency to use to define $EBROOT for this bundle\", CUSTOM],\n 'altversion': [None, \"Software name of dependency to use to define $EBVERSION for this bundle\", CUSTOM],\n 'default_component_specs': [{}, \"Default specs to use for every component\", CUSTOM],\n 'components': [(), \"List of components to install: tuples w/ name, version and easyblock to use\", CUSTOM],\n 'default_easyblock': [None, \"Default easyblock to use for components\", CUSTOM],\n })\n return EasyBlock.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize easyblock.\"\"\"\n super(Bundle, self).__init__(*args, **kwargs)\n self.altroot = None\n self.altversion = None\n\n # list of EasyConfig instances for components\n self.comp_cfgs = []\n\n # list of sources for bundle itself *must* be empty\n if self.cfg['sources']:\n raise EasyBuildError(\"List of sources for bundle itself must be empty, found %s\", self.cfg['sources'])\n\n # disable templating to avoid premature resolving of template values\n self.cfg.enable_templating = False\n\n # list of checksums for patches (must be included after checksums for sources)\n checksums_patches = []\n\n for comp in self.cfg['components']:\n comp_name, comp_version, comp_specs = comp[0], comp[1], {}\n if len(comp) == 3:\n comp_specs = comp[2]\n\n comp_cfg = self.cfg.copy()\n\n easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock']\n if easyblock is None:\n raise EasyBuildError(\"No easyblock specified for component %s v%s\", comp_cfg['name'],\n comp_cfg['version'])\n elif easyblock == 'Bundle':\n raise EasyBuildError(\"The Bundle easyblock can not be used to install components in a bundle\")\n\n comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name'])\n\n # make sure that extra easyconfig parameters are known, so they can be set\n extra_opts = comp_cfg.easyblock.extra_options()\n comp_cfg.extend_params(copy.deepcopy(extra_opts))\n\n comp_cfg['name'] = comp_name\n comp_cfg['version'] = comp_version\n comp_cfg.generate_template_values()\n\n # do not inherit easyblock to use from parent (since that would result in an infinite loop in install_step)\n comp_cfg['easyblock'] = None\n\n # reset list of sources/source_urls/checksums\n comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []\n\n for key in self.cfg['default_component_specs']:\n comp_cfg[key] = 
self.cfg['default_component_specs'][key]\n\n for key in comp_specs:\n comp_cfg[key] = comp_specs[key]\n\n # enable resolving of templates for component-specific EasyConfig instance\n comp_cfg.enable_templating = True\n\n # 'sources' is strictly required\n if comp_cfg['sources']:\n # If per-component source URLs are provided, attach them directly to the relevant sources\n if comp_cfg['source_urls']:\n for source in comp_cfg['sources']:\n if isinstance(source, string_type):\n self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}])\n elif isinstance(source, dict):\n # Update source_urls in the 'source' dict to use the one for the components\n # (if it doesn't already exist)\n if 'source_urls' not in source:\n source['source_urls'] = comp_cfg['source_urls']\n self.cfg.update('sources', [source])\n else:\n raise EasyBuildError(\"Source %s for component %s is neither a string nor a dict, cannot \"\n \"process it.\", source, comp_cfg['name'])\n else:\n # add component sources to list of sources\n self.cfg.update('sources', comp_cfg['sources'])\n else:\n raise EasyBuildError(\"No sources specification for component %s v%s\", comp_name, comp_version)\n\n if comp_cfg['checksums']:\n src_cnt = len(comp_cfg['sources'])\n\n # add per-component checksums for sources to list of checksums\n self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt])\n\n # add per-component checksums for patches to list of checksums for patches\n checksums_patches.extend(comp_cfg['checksums'][src_cnt:])\n\n self.comp_cfgs.append(comp_cfg)\n\n self.cfg.update('checksums', checksums_patches)\n\n self.cfg.enable_templating = True\n\n def check_checksums(self):\n \"\"\"\n Check whether a SHA256 checksum is available for all sources & patches (incl. extensions).\n\n :return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.)\n \"\"\"\n checksum_issues = super(Bundle, self).check_checksums()\n\n for comp in self.comp_cfgs:\n checksum_issues.extend(self.check_checksums_for(comp, sub=\"of component %s\" % comp['name']))\n\n return checksum_issues\n\n def configure_step(self):\n \"\"\"Collect altroot/altversion info.\"\"\"\n # pick up altroot/altversion, if they are defined\n self.altroot = None\n if self.cfg['altroot']:\n self.altroot = get_software_root(self.cfg['altroot'])\n self.altversion = None\n if self.cfg['altversion']:\n self.altversion = get_software_version(self.cfg['altversion'])\n\n def build_step(self):\n \"\"\"Do nothing.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install components, if specified.\"\"\"\n comp_cnt = len(self.cfg['components'])\n for idx, cfg in enumerate(self.comp_cfgs):\n\n print_msg(\"installing bundle component %s v%s (%d/%d)...\" % (cfg['name'], cfg['version'], idx+1, comp_cnt))\n self.log.info(\"Installing component %s v%s using easyblock %s\", cfg['name'], cfg['version'], cfg.easyblock)\n\n comp = cfg.easyblock(cfg)\n\n # correct build/install dirs\n comp.builddir = self.builddir\n comp.install_subdir, comp.installdir = self.install_subdir, self.installdir\n\n # make sure we can build in parallel\n comp.set_parallel()\n\n # figure out correct start directory\n comp.guess_start_dir()\n\n # need to run fetch_patches to ensure per-component patches are applied\n comp.fetch_patches()\n\n comp.src = []\n\n # find match entries in self.src for this component\n for source in comp.cfg['sources']:\n if isinstance(source, string_type):\n comp_src_fn = source\n elif isinstance(source, dict):\n if 'filename' in source:\n 
comp_src_fn = source['filename']\n else:\n raise EasyBuildError(\"Encountered source file specified as dict without 'filename': %s\", source)\n else:\n raise EasyBuildError(\"Specification of unknown type for source file: %s\", source)\n\n found = False\n for src in self.src:\n if src['name'] == comp_src_fn:\n self.log.info(\"Found spec for source %s for component %s: %s\", comp_src_fn, comp.name, src)\n comp.src.append(src)\n found = True\n break\n if not found:\n raise EasyBuildError(\"Failed to find spec for source %s for component %s\", comp_src_fn, comp.name)\n\n # location of first unpacked source is used to determine where to apply patch(es)\n comp.src[-1]['finalpath'] = comp.cfg['start_dir']\n\n # run relevant steps\n for step_name in ['patch', 'configure', 'build', 'install']:\n if step_name in cfg['skipsteps']:\n comp.log.info(\"Skipping '%s' step for component %s v%s\", step_name, cfg['name'], cfg['version'])\n else:\n comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])\n\n # update environment to ensure stuff provided by former components can be picked up by latter components\n # once the installation is finalised, this is handled by the generated module\n reqs = comp.make_module_req_guess()\n for envvar in reqs:\n curr_val = os.getenv(envvar, '')\n curr_paths = curr_val.split(os.pathsep)\n for subdir in reqs[envvar]:\n path = os.path.join(self.installdir, subdir)\n if path not in curr_paths:\n if curr_val:\n new_val = '%s:%s' % (path, curr_val)\n else:\n new_val = path\n env.setvar(envvar, new_val)\n\n def make_module_extra(self, *args, **kwargs):\n \"\"\"Set extra stuff in module file, e.g. $EBROOT*, $EBVERSION*, etc.\"\"\"\n if 'altroot' not in kwargs:\n kwargs['altroot'] = self.altroot\n if 'altversion' not in kwargs:\n kwargs['altversion'] = self.altversion\n return super(Bundle, self).make_module_extra(*args, **kwargs)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"\n Nothing is being installed, so just being able to load the (fake) module is sufficient\n \"\"\"\n if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']:\n super(Bundle, self).sanity_check_step(*args, **kwargs)\n else:\n self.log.info(\"Testing loading of module '%s' by means of sanity check\" % self.full_mod_name)\n fake_mod_data = self.load_fake_module(purge=True)\n self.log.debug(\"Cleaning up after testing loading of module\")\n self.clean_up_fake_module(fake_mod_data)\n", "path": "easybuild/easyblocks/generic/bundle.py"}], "after_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for installing a bundle of modules, implemented as a generic easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n\"\"\"\nimport copy\nimport os\n\nimport easybuild.tools.environment as env\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.easyconfig.easyconfig import get_easyblock_class\nfrom easybuild.tools.build_log import EasyBuildError, print_msg\nfrom easybuild.tools.modules import get_software_root, get_software_version\nfrom easybuild.tools.py2vs3 import string_type\n\n\nclass Bundle(EasyBlock):\n \"\"\"\n Bundle of modules: only generate module files, nothing to build/install\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Easyconfig parameters specific to bundles.\"\"\"\n if extra_vars is None:\n extra_vars = {}\n extra_vars.update({\n 'altroot': [None, \"Software name of dependency to use to define $EBROOT for this bundle\", CUSTOM],\n 'altversion': [None, \"Software name of dependency to use to define $EBVERSION for this bundle\", CUSTOM],\n 'default_component_specs': [{}, \"Default specs to use for every component\", CUSTOM],\n 'components': [(), \"List of components to install: tuples w/ name, version and easyblock to use\", CUSTOM],\n 'default_easyblock': [None, \"Default easyblock to use for components\", CUSTOM],\n })\n return EasyBlock.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize easyblock.\"\"\"\n super(Bundle, self).__init__(*args, **kwargs)\n self.altroot = None\n self.altversion = None\n\n # list of EasyConfig instances for components\n self.comp_cfgs = []\n\n # list of sources for bundle itself *must* be empty\n if self.cfg['sources']:\n raise EasyBuildError(\"List of sources for bundle itself must be empty, found %s\", self.cfg['sources'])\n if self.cfg['patches']:\n raise EasyBuildError(\"List of patches for bundle itself must be empty, found %s\", self.cfg['patches'])\n\n # disable templating to avoid premature resolving of template values\n self.cfg.enable_templating = False\n\n # list of checksums for patches (must be included after checksums for sources)\n checksums_patches = []\n\n for comp in self.cfg['components']:\n comp_name, comp_version, comp_specs = comp[0], comp[1], {}\n if len(comp) == 3:\n comp_specs = comp[2]\n\n comp_cfg = self.cfg.copy()\n\n easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock']\n if easyblock is None:\n raise EasyBuildError(\"No easyblock specified for component %s v%s\", comp_cfg['name'],\n comp_cfg['version'])\n elif easyblock == 'Bundle':\n raise EasyBuildError(\"The Bundle easyblock can not be used to install components in a bundle\")\n\n comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name'])\n\n # make sure that extra easyconfig parameters are known, so they can be set\n extra_opts = comp_cfg.easyblock.extra_options()\n comp_cfg.extend_params(copy.deepcopy(extra_opts))\n\n comp_cfg['name'] = comp_name\n comp_cfg['version'] = comp_version\n comp_cfg.generate_template_values()\n\n # do not inherit easyblock to use from parent (since that would 
result in an infinite loop in install_step)\n comp_cfg['easyblock'] = None\n\n # reset list of sources/source_urls/checksums\n comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = comp_cfg['patches'] = []\n\n for key in self.cfg['default_component_specs']:\n comp_cfg[key] = self.cfg['default_component_specs'][key]\n\n for key in comp_specs:\n comp_cfg[key] = comp_specs[key]\n\n # enable resolving of templates for component-specific EasyConfig instance\n comp_cfg.enable_templating = True\n\n # 'sources' is strictly required\n if comp_cfg['sources']:\n # If per-component source URLs are provided, attach them directly to the relevant sources\n if comp_cfg['source_urls']:\n for source in comp_cfg['sources']:\n if isinstance(source, string_type):\n self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}])\n elif isinstance(source, dict):\n # Update source_urls in the 'source' dict to use the one for the components\n # (if it doesn't already exist)\n if 'source_urls' not in source:\n source['source_urls'] = comp_cfg['source_urls']\n self.cfg.update('sources', [source])\n else:\n raise EasyBuildError(\"Source %s for component %s is neither a string nor a dict, cannot \"\n \"process it.\", source, comp_cfg['name'])\n else:\n # add component sources to list of sources\n self.cfg.update('sources', comp_cfg['sources'])\n else:\n raise EasyBuildError(\"No sources specification for component %s v%s\", comp_name, comp_version)\n\n if comp_cfg['checksums']:\n src_cnt = len(comp_cfg['sources'])\n\n # add per-component checksums for sources to list of checksums\n self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt])\n\n # add per-component checksums for patches to list of checksums for patches\n checksums_patches.extend(comp_cfg['checksums'][src_cnt:])\n\n if comp_cfg['patches']:\n self.cfg.update('patches', comp_cfg['patches'])\n\n self.comp_cfgs.append(comp_cfg)\n\n self.cfg.update('checksums', checksums_patches)\n\n self.cfg.enable_templating = True\n\n def check_checksums(self):\n \"\"\"\n Check whether a SHA256 checksum is available for all sources & patches (incl. 
extensions).\n\n :return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.)\n \"\"\"\n checksum_issues = super(Bundle, self).check_checksums()\n\n for comp in self.comp_cfgs:\n checksum_issues.extend(self.check_checksums_for(comp, sub=\"of component %s\" % comp['name']))\n\n return checksum_issues\n\n def configure_step(self):\n \"\"\"Collect altroot/altversion info.\"\"\"\n # pick up altroot/altversion, if they are defined\n self.altroot = None\n if self.cfg['altroot']:\n self.altroot = get_software_root(self.cfg['altroot'])\n self.altversion = None\n if self.cfg['altversion']:\n self.altversion = get_software_version(self.cfg['altversion'])\n\n def build_step(self):\n \"\"\"Do nothing.\"\"\"\n pass\n\n def install_step(self):\n \"\"\"Install components, if specified.\"\"\"\n comp_cnt = len(self.cfg['components'])\n for idx, cfg in enumerate(self.comp_cfgs):\n\n print_msg(\"installing bundle component %s v%s (%d/%d)...\" % (cfg['name'], cfg['version'], idx+1, comp_cnt))\n self.log.info(\"Installing component %s v%s using easyblock %s\", cfg['name'], cfg['version'], cfg.easyblock)\n\n comp = cfg.easyblock(cfg)\n\n # correct build/install dirs\n comp.builddir = self.builddir\n comp.install_subdir, comp.installdir = self.install_subdir, self.installdir\n\n # make sure we can build in parallel\n comp.set_parallel()\n\n # figure out correct start directory\n comp.guess_start_dir()\n\n # need to run fetch_patches to ensure per-component patches are applied\n comp.fetch_patches()\n\n comp.src = []\n\n # find match entries in self.src for this component\n for source in comp.cfg['sources']:\n if isinstance(source, string_type):\n comp_src_fn = source\n elif isinstance(source, dict):\n if 'filename' in source:\n comp_src_fn = source['filename']\n else:\n raise EasyBuildError(\"Encountered source file specified as dict without 'filename': %s\", source)\n else:\n raise EasyBuildError(\"Specification of unknown type for source file: %s\", source)\n\n found = False\n for src in self.src:\n if src['name'] == comp_src_fn:\n self.log.info(\"Found spec for source %s for component %s: %s\", comp_src_fn, comp.name, src)\n comp.src.append(src)\n found = True\n break\n if not found:\n raise EasyBuildError(\"Failed to find spec for source %s for component %s\", comp_src_fn, comp.name)\n\n # location of first unpacked source is used to determine where to apply patch(es)\n comp.src[-1]['finalpath'] = comp.cfg['start_dir']\n\n # run relevant steps\n for step_name in ['patch', 'configure', 'build', 'install']:\n if step_name in cfg['skipsteps']:\n comp.log.info(\"Skipping '%s' step for component %s v%s\", step_name, cfg['name'], cfg['version'])\n else:\n comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])\n\n # update environment to ensure stuff provided by former components can be picked up by latter components\n # once the installation is finalised, this is handled by the generated module\n reqs = comp.make_module_req_guess()\n for envvar in reqs:\n curr_val = os.getenv(envvar, '')\n curr_paths = curr_val.split(os.pathsep)\n for subdir in reqs[envvar]:\n path = os.path.join(self.installdir, subdir)\n if path not in curr_paths:\n if curr_val:\n new_val = '%s:%s' % (path, curr_val)\n else:\n new_val = path\n env.setvar(envvar, new_val)\n\n def make_module_extra(self, *args, **kwargs):\n \"\"\"Set extra stuff in module file, e.g. 
$EBROOT*, $EBVERSION*, etc.\"\"\"\n if 'altroot' not in kwargs:\n kwargs['altroot'] = self.altroot\n if 'altversion' not in kwargs:\n kwargs['altversion'] = self.altversion\n return super(Bundle, self).make_module_extra(*args, **kwargs)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"\n Nothing is being installed, so just being able to load the (fake) module is sufficient\n \"\"\"\n if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']:\n super(Bundle, self).sanity_check_step(*args, **kwargs)\n else:\n self.log.info(\"Testing loading of module '%s' by means of sanity check\" % self.full_mod_name)\n fake_mod_data = self.load_fake_module(purge=True)\n self.log.debug(\"Cleaning up after testing loading of module\")\n self.clean_up_fake_module(fake_mod_data)\n", "path": "easybuild/easyblocks/generic/bundle.py"}]} | 3,801 | 377 |
gh_patches_debug_455 | rasdani/github-patches | git_diff | openfun__marsha-2411 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛(backend) licence not saved during creation of a video resource
## Bug Report
**Problematic Behavior**
When we create a video and set a licence, the licence is not saved.
[error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/backend/marsha/core/forms.py`
Content:
```
1 """Marsha forms module."""
2 from django.core.exceptions import ValidationError
3 from django.forms import CharField, ModelForm
4
5 from . import models
6 from .defaults import INITIALIZED
7
8
9 class DocumentForm(ModelForm):
10 """Form to create or update documents."""
11
12 class Meta:
13 """Meta for DocumentForm."""
14
15 model = models.Document
16 fields = ["description", "is_public", "lti_id", "playlist", "title"]
17
18
19 class VideoForm(ModelForm):
20 """Form to create or update videos."""
21
22 upload_state = CharField(
23 max_length=20,
24 required=False,
25 )
26
27 class Meta:
28 """Meta for VideoForm."""
29
30 model = models.Video
31 fields = [
32 "description",
33 "is_public",
34 "lti_id",
35 "playlist",
36 "title",
37 "upload_state",
38 ]
39
40 def clean_upload_state(self):
41 """Check upload_state valid value."""
42 upload_state = self.cleaned_data["upload_state"]
43
44 if upload_state and upload_state != INITIALIZED:
45 raise ValidationError(f"{INITIALIZED} is the only accepted value")
46
47 return upload_state
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py
--- a/src/backend/marsha/core/forms.py
+++ b/src/backend/marsha/core/forms.py
@@ -35,6 +35,7 @@
"playlist",
"title",
"upload_state",
+ "license",
]
def clean_upload_state(self):
| {"golden_diff": "diff --git a/src/backend/marsha/core/forms.py b/src/backend/marsha/core/forms.py\n--- a/src/backend/marsha/core/forms.py\n+++ b/src/backend/marsha/core/forms.py\n@@ -35,6 +35,7 @@\n \"playlist\",\n \"title\",\n \"upload_state\",\n+ \"license\",\n ]\n \n def clean_upload_state(self):\n", "issue": "\ud83d\udc1b(backend) licence not saved during creation video resource\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nWhen we create a video and set a licence, the licence is not saved.\r\n\r\n[error-licence-2023-09-12 143121.webm](https://github.com/openfun/marsha/assets/25994652/60514ad8-07cd-4390-97c9-21eb3525ecc6)\r\n\r\n\n", "before_files": [{"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n", "path": "src/backend/marsha/core/forms.py"}], "after_files": [{"content": "\"\"\"Marsha forms module.\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.forms import CharField, ModelForm\n\nfrom . import models\nfrom .defaults import INITIALIZED\n\n\nclass DocumentForm(ModelForm):\n \"\"\"Form to create or update documents.\"\"\"\n\n class Meta:\n \"\"\"Meta for DocumentForm.\"\"\"\n\n model = models.Document\n fields = [\"description\", \"is_public\", \"lti_id\", \"playlist\", \"title\"]\n\n\nclass VideoForm(ModelForm):\n \"\"\"Form to create or update videos.\"\"\"\n\n upload_state = CharField(\n max_length=20,\n required=False,\n )\n\n class Meta:\n \"\"\"Meta for VideoForm.\"\"\"\n\n model = models.Video\n fields = [\n \"description\",\n \"is_public\",\n \"lti_id\",\n \"playlist\",\n \"title\",\n \"upload_state\",\n \"license\",\n ]\n\n def clean_upload_state(self):\n \"\"\"Check upload_state valid value.\"\"\"\n upload_state = self.cleaned_data[\"upload_state\"]\n\n if upload_state and upload_state != INITIALIZED:\n raise ValidationError(f\"{INITIALIZED} is the only accepted value\")\n\n return upload_state\n", "path": "src/backend/marsha/core/forms.py"}]} | 708 | 84 |
gh_patches_debug_22905 | rasdani/github-patches | git_diff | streamlink__streamlink-1511 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kanal 7 does not show
## **Checklist**
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
## **Description**
I can't see anything at kanal7.com. I have tested it with these links, but I get a black screen.
## **Reproduction steps / Explicit stream URLs to test**
#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8
#DESCRIPTION KANAL 7
#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle
#DESCRIPTION KANAL 7
#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1
#DESCRIPTION KANAL 7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/kanal7.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http
6 from streamlink.plugin.api import useragents
7 from streamlink.plugin.api import validate
8 from streamlink.stream import HLSStream
9
10
11 class Kanal7(Plugin):
12 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle")
13 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"')
14 stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''')
15
16 @classmethod
17 def can_handle_url(cls, url):
18 return cls.url_re.match(url) is not None
19
20 def find_iframe(self, url):
21 res = http.get(url)
22 # find iframe url
23 iframe = self.iframe_re.search(res.text)
24 iframe_url = iframe and iframe.group(1)
25 if iframe_url:
26 self.logger.debug("Found iframe: {}", iframe_url)
27 return iframe_url
28
29 def _get_streams(self):
30 iframe1 = self.find_iframe(self.url)
31 if iframe1:
32 iframe2 = self.find_iframe(iframe1)
33 if iframe2:
34 ires = http.get(iframe2)
35 stream_m = self.stream_re.search(ires.text)
36 stream_url = stream_m and stream_m.group(1)
37 if stream_url:
38 yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2})
39 else:
40 self.logger.error("Could not find second iframe, has the page layout changed?")
41 else:
42 self.logger.error("Could not find iframe, has the page layout changed?")
43
44
45 __plugin__ = Kanal7
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py
--- a/src/streamlink/plugins/kanal7.py
+++ b/src/streamlink/plugins/kanal7.py
@@ -6,12 +6,13 @@
from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
+from streamlink.utils import update_scheme
class Kanal7(Plugin):
url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle")
- iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"')
- stream_re = re.compile(r'''tp_file\s+=\s+['"](http[^"]*?)['"]''')
+ iframe_re = re.compile(r'iframe .*?src="((?:http:)?//[^"]*?)"')
+ stream_re = re.compile(r'''video-source\s*=\s*['"](http[^"']*?)['"]''')
@classmethod
def can_handle_url(cls, url):
@@ -23,6 +24,7 @@
iframe = self.iframe_re.search(res.text)
iframe_url = iframe and iframe.group(1)
if iframe_url:
+ iframe_url = update_scheme(self.url, iframe_url)
self.logger.debug("Found iframe: {}", iframe_url)
return iframe_url
| {"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -6,12 +6,13 @@\n from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n+from streamlink.utils import update_scheme\n \n \n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n- iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n+ iframe_re = re.compile(r'iframe .*?src=\"((?:http:)?//[^\"]*?)\"')\n+ stream_re = re.compile(r'''video-source\\s*=\\s*['\"](http[^\"']*?)['\"]''')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -23,6 +24,7 @@\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n+ iframe_url = update_scheme(self.url, iframe_url)\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n", "issue": "Kanal 7 does not show\n## **Checklist**\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n## **Description**\r\n\r\n i cant see anything at kanal 7.com . i have test it with this links but i became black screen \r\n\r\n## **Reproduction steps / Explicit stream URLs to test**\r\n\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//new.10gbps.tv%3a443/live/kanal7LiveDesktop/index.m3u8\r\n#DESCRIPTION KANAL 7 \r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.kanal7.com/canli-izle\r\n#DESCRIPTION KANAL 7\r\n#SERVICE 5002:0:1:1DE6:C544:7E:460000:0:0:0:http%3a//127.0.0.1%3a8088/http%3a//www.izle7.com/canli-yayin-frame?air=1\r\n#DESCRIPTION KANAL 7\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'''tp_file\\s+=\\s+['\"](http[^\"]*?)['\"]''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import 
http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import update_scheme\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"((?:http:)?//[^\"]*?)\"')\n stream_re = re.compile(r'''video-source\\s*=\\s*['\"](http[^\"']*?)['\"]''')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n iframe_url = update_scheme(self.url, iframe_url)\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]} | 1,087 | 312 |
gh_patches_debug_15114 | rasdani/github-patches | git_diff | nextcloud__appstore-246 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Email change form
Among the account pages there should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a corresponding django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`.
@BernhardPosselt Do you agree with the above?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nextcloudappstore/core/user/views.py`
Content:
```
1 from allauth.account.views import PasswordChangeView
2 from django.contrib import messages
3 from django.contrib.auth.mixins import LoginRequiredMixin
4 from django.contrib.auth.models import User
5 from django.core.urlresolvers import reverse_lazy
6 from django.shortcuts import redirect, render
7 from django.views.generic import TemplateView
8 from django.views.generic import UpdateView
9
10 from nextcloudappstore.core.user.forms import DeleteAccountForm
11
12
13 class ChangeLanguageView(LoginRequiredMixin, TemplateView):
14 template_name = 'user/set-language.html'
15
16 def get_context_data(self, **kwargs):
17 context = super().get_context_data(**kwargs)
18 context['acc_page'] = 'account-change-language'
19 return context
20
21
22 class DeleteAccountView(LoginRequiredMixin, TemplateView):
23 template_name = 'user/delete-account.html'
24
25 def get_context_data(self, **kwargs):
26 context = super().get_context_data(**kwargs)
27 context['form'] = DeleteAccountForm()
28 context['acc_page'] = 'delete-account'
29 return context
30
31 def post(self, request, *args, **kwargs):
32 form = DeleteAccountForm(request.POST, user=request.user)
33 if form.is_valid():
34 request.user.delete()
35 return redirect(reverse_lazy('home'))
36 else:
37 return render(request, self.template_name, {'form': form})
38
39
40 class AccountView(LoginRequiredMixin, UpdateView):
41 """Display and allow changing of the user's name."""
42
43 template_name = 'user/account.html'
44 template_name_suffix = ''
45 model = User
46 fields = ['first_name', 'last_name']
47 success_url = reverse_lazy('user:account')
48
49 def get_context_data(self, **kwargs):
50 context = super().get_context_data(**kwargs)
51 context['acc_page'] = 'account'
52 return context
53
54 def form_valid(self, form):
55 messages.success(self.request, 'Name saved.')
56 return super().form_valid(form)
57
58 def get_object(self, queryset=None):
59 return self.request.user
60
61
62 class PasswordView(LoginRequiredMixin, PasswordChangeView):
63 """Allow the user to change their password."""
64
65 template_name = 'user/password.html'
66 success_url = reverse_lazy('user:account-password')
67
68 def get_context_data(self, **kwargs):
69 context = super().get_context_data(**kwargs)
70 context['acc_page'] = 'password'
71 return context
72
73
74 class APITokenView(LoginRequiredMixin, TemplateView):
75 """Display the user's API token, and allow it to be regenerated."""
76
77 template_name = 'user/api-token.html'
78
79 def get_context_data(self, **kwargs):
80 context = super().get_context_data(**kwargs)
81 context['acc_page'] = 'api-token'
82 return context
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py
--- a/nextcloudappstore/core/user/views.py
+++ b/nextcloudappstore/core/user/views.py
@@ -43,7 +43,7 @@
template_name = 'user/account.html'
template_name_suffix = ''
model = User
- fields = ['first_name', 'last_name']
+ fields = ['first_name', 'last_name', 'email']
success_url = reverse_lazy('user:account')
def get_context_data(self, **kwargs):
@@ -52,7 +52,7 @@
return context
def form_valid(self, form):
- messages.success(self.request, 'Name saved.')
+ messages.success(self.request, 'Account details saved.')
return super().form_valid(form)
def get_object(self, queryset=None):
| {"golden_diff": "diff --git a/nextcloudappstore/core/user/views.py b/nextcloudappstore/core/user/views.py\n--- a/nextcloudappstore/core/user/views.py\n+++ b/nextcloudappstore/core/user/views.py\n@@ -43,7 +43,7 @@\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n- fields = ['first_name', 'last_name']\n+ fields = ['first_name', 'last_name', 'email']\n success_url = reverse_lazy('user:account')\n \n def get_context_data(self, **kwargs):\n@@ -52,7 +52,7 @@\n return context\n \n def form_valid(self, form):\n- messages.success(self.request, 'Name saved.')\n+ messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n \n def get_object(self, queryset=None):\n", "issue": "Email change form\nAmong the account pages should be a page from which a user can change their email address. The email field on the `User` model needs to be updated and a correspondig django-allauth `EmailAddress` object should be associated with the user. django-allauth supports multiple email addresses per user, but we only want one. New email addresses should be verified with [this](http://django-allauth.readthedocs.io/en/latest/views.html#e-mail-verification). The page should be located at `/account/email/`.\n\n@BernhardPosselt Do you agree with the above?\n\n", "before_files": [{"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.user.forms import DeleteAccountForm\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n messages.success(self.request, 'Name saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 
'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}], "after_files": [{"content": "from allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.user.forms import DeleteAccountForm\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n model = User\n fields = ['first_name', 'last_name', 'email']\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/core/user/views.py"}]} | 1,116 | 194 |
gh_patches_debug_21933 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-31 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Figure out how timezones should work on roll page
Times are in UTC. Ben and Adam will be using this from the US. Can we auto-detect the user's timezone? Is this something Django can do for us?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `will_of_the_prophets/urls.py`
Content:
```
1 """will_of_the_prophets URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/2.0/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 from django.contrib import admin
17 from django.urls import include, path
18
19
20 from will_of_the_prophets import views
21
22 urlpatterns = [
23 path('admin/', admin.site.urls),
24 path('accounts/', include('django.contrib.auth.urls')),
25 path('roll/', views.RollView.as_view(), name='roll'),
26 path('', views.public_board, name='public_board'),
27 ]
28
```
Path: `will_of_the_prophets/settings/__init__.py`
Content:
```
1 """
2 Django settings for will_of_the_prophets project.
3
4 Generated by 'django-admin startproject' using Django 2.0.4.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/2.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/2.0/ref/settings/
11 """
12
13 import os
14
15 import django_heroku
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
20
21 # Quick-start development settings - unsuitable for production
22 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
23
24 # SECURITY WARNING: keep the secret key used in production secret!
25 SECRET_KEY = os.environ.get(
26 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = os.environ.get('DEBUG', False)
30
31 ALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']
32
33
34 # Application definition
35
36 INSTALLED_APPS = [
37 'raven.contrib.django.raven_compat',
38 'django.contrib.admin',
39 'django.contrib.auth',
40 'django.contrib.contenttypes',
41 'django.contrib.sessions',
42 'django.contrib.messages',
43 'django.contrib.staticfiles',
44 'sass_processor',
45 'widget_tweaks',
46 'bootstrap',
47 'will_of_the_prophets',
48 ]
49
50 MIDDLEWARE = [
51 'django.middleware.security.SecurityMiddleware',
52 'django.contrib.sessions.middleware.SessionMiddleware',
53 'django.middleware.common.CommonMiddleware',
54 'django.middleware.csrf.CsrfViewMiddleware',
55 'django.contrib.auth.middleware.AuthenticationMiddleware',
56 'django.contrib.messages.middleware.MessageMiddleware',
57 'django.middleware.clickjacking.XFrameOptionsMiddleware',
58 ]
59
60 ROOT_URLCONF = 'will_of_the_prophets.urls'
61
62 TEMPLATES = [
63 {
64 'BACKEND': 'django.template.backends.django.DjangoTemplates',
65 'DIRS': [],
66 'APP_DIRS': True,
67 'OPTIONS': {
68 'context_processors': [
69 'django.template.context_processors.debug',
70 'django.template.context_processors.request',
71 'django.contrib.auth.context_processors.auth',
72 'django.contrib.messages.context_processors.messages',
73 ],
74 },
75 },
76 ]
77
78 WSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'
79
80
81 # Database
82 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
83
84 DATABASES = {
85 'default': {
86 'ENGINE': 'django.db.backends.sqlite3',
87 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
88 }
89 }
90
91
92 # Password validation
93 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
94
95 AUTH_PASSWORD_VALIDATORS = [
96 {
97 'NAME': ('django.contrib.auth.password_validation'
98 '.UserAttributeSimilarityValidator'),
99 },
100 {
101 'NAME': ('django.contrib.auth.password_validation'
102 '.MinimumLengthValidator'),
103 },
104 {
105 'NAME': ('django.contrib.auth.password_validation'
106 '.CommonPasswordValidator'),
107 },
108 {
109 'NAME': ('django.contrib.auth.password_validation'
110 '.NumericPasswordValidator'),
111 },
112 ]
113
114
115 # Internationalization
116 # https://docs.djangoproject.com/en/2.0/topics/i18n/
117
118 LANGUAGE_CODE = 'en-us'
119
120 TIME_ZONE = 'UTC'
121
122 USE_I18N = True
123
124 USE_L10N = True
125
126 USE_TZ = True
127
128
129 # Static files (CSS, JavaScript, Images)
130 # https://docs.djangoproject.com/en/2.0/howto/static-files/
131
132 STATIC_URL = '/static/'
133
134 STATICFILES_FINDERS = [
135 'django.contrib.staticfiles.finders.FileSystemFinder',
136 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
137 # https://github.com/jrief/django-sass-processor
138 'sass_processor.finders.CssFinder',
139 ]
140
141
142 # django-sass-processor
143 # https://github.com/jrief/django-sass-processor
144 SASS_OUTPUT_STYLE = 'compressed'
145
146
147 # Configure Django App for Heroku.
148 django_heroku.settings(locals())
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py
--- a/will_of_the_prophets/settings/__init__.py
+++ b/will_of_the_prophets/settings/__init__.py
@@ -43,6 +43,7 @@
'django.contrib.staticfiles',
'sass_processor',
'widget_tweaks',
+ 'tz_detect',
'bootstrap',
'will_of_the_prophets',
]
@@ -144,5 +145,14 @@
SASS_OUTPUT_STYLE = 'compressed'
+# django-tz-detect
+# https://github.com/adamcharnock/django-tz-detect
+MIDDLEWARE += [
+ 'tz_detect.middleware.TimezoneMiddleware',
+]
+
+TZ_DETECT_COUNTRIES = ('US', 'CN', 'IN', 'JP', 'BR', 'RU', 'DE', 'FR', 'GB')
+
+
# Configure Django App for Heroku.
django_heroku.settings(locals())
diff --git a/will_of_the_prophets/urls.py b/will_of_the_prophets/urls.py
--- a/will_of_the_prophets/urls.py
+++ b/will_of_the_prophets/urls.py
@@ -23,5 +23,6 @@
path('admin/', admin.site.urls),
path('accounts/', include('django.contrib.auth.urls')),
path('roll/', views.RollView.as_view(), name='roll'),
+ path('tz_detect/', include('tz_detect.urls')),
path('', views.public_board, name='public_board'),
]
| {"golden_diff": "diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py\n--- a/will_of_the_prophets/settings/__init__.py\n+++ b/will_of_the_prophets/settings/__init__.py\n@@ -43,6 +43,7 @@\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n+ 'tz_detect',\n 'bootstrap',\n 'will_of_the_prophets',\n ]\n@@ -144,5 +145,14 @@\n SASS_OUTPUT_STYLE = 'compressed'\n \n \n+# django-tz-detect\n+# https://github.com/adamcharnock/django-tz-detect\n+MIDDLEWARE += [\n+ 'tz_detect.middleware.TimezoneMiddleware',\n+]\n+\n+TZ_DETECT_COUNTRIES = ('US', 'CN', 'IN', 'JP', 'BR', 'RU', 'DE', 'FR', 'GB')\n+\n+\n # Configure Django App for Heroku.\n django_heroku.settings(locals())\ndiff --git a/will_of_the_prophets/urls.py b/will_of_the_prophets/urls.py\n--- a/will_of_the_prophets/urls.py\n+++ b/will_of_the_prophets/urls.py\n@@ -23,5 +23,6 @@\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('roll/', views.RollView.as_view(), name='roll'),\n+ path('tz_detect/', include('tz_detect.urls')),\n path('', views.public_board, name='public_board'),\n ]\n", "issue": "Figure out how timezones should work on roll page\nTimes are in UTC. Ben and Adam will be using this from the US. Can we auto-detect the user's timezone? Is this something Django can do for us?\n", "before_files": [{"content": "\"\"\"will_of_the_prophets URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\n\nfrom will_of_the_prophets import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('roll/', views.RollView.as_view(), name='roll'),\n path('', views.public_board, name='public_board'),\n]\n", "path": "will_of_the_prophets/urls.py"}, {"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}], "after_files": [{"content": "\"\"\"will_of_the_prophets URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\n\nfrom will_of_the_prophets import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('django.contrib.auth.urls')),\n path('roll/', views.RollView.as_view(), name='roll'),\n path('tz_detect/', include('tz_detect.urls')),\n path('', views.public_board, name='public_board'),\n]\n", "path": "will_of_the_prophets/urls.py"}, {"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n 'tz_detect',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 
'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# django-tz-detect\n# https://github.com/adamcharnock/django-tz-detect\nMIDDLEWARE += [\n 'tz_detect.middleware.TimezoneMiddleware',\n]\n\nTZ_DETECT_COUNTRIES = ('US', 'CN', 'IN', 'JP', 'BR', 'RU', 'DE', 'FR', 'GB')\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}]} | 1,884 | 355 |
gh_patches_debug_34816 | rasdani/github-patches | git_diff | chainer__chainer-2195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Partial deserialization of links
We sometimes want to deserialize a link only partially, i.e. we want to load a smaller set of parameters to a larger link. It happens when we do some surgery to neural nets (e.g. fine-tuning), in which case the set of parameters in the source network does not match to the set of parameters in the target network.
One possible design is to introduce a `strict` option to the deserializer, and if that flag is False, any mismatch of the sets of parameters is ignored. I think it is safer to turn on this flag by default (I mean, we should not change the default behavior).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/serializers/npz.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import serializer
5
6
7 class DictionarySerializer(serializer.Serializer):
8
9 """Serializer for dictionary.
10
11 This is the standard serializer in Chainer. The hierarchy of objects are
12 simply mapped to a flat dictionary with keys representing the paths to
13 objects in the hierarchy.
14
15 .. note::
16 Despite of its name, this serializer DOES NOT serialize the
17 object into external files. It just build a flat dictionary of arrays
18 that can be fed into :func:`numpy.savez` and
19 :func:`numpy.savez_compressed`. If you want to use this serializer
20 directly, you have to manually send a resulting dictionary to one of
21 these functions.
22
23 Args:
24 target (dict): The dictionary that this serializer saves the objects
25 to. If target is None, then a new dictionary is created.
26 path (str): The base path in the hierarchy that this serializer
27 indicates.
28
29 Attributes:
30 target (dict): The target dictionary. Once the serialization completes,
31 this dictionary can be fed into :func:`numpy.savez` or
32 :func:`numpy.savez_compressed` to serialize it in the NPZ format.
33
34 """
35
36 def __init__(self, target=None, path=''):
37 self.target = {} if target is None else target
38 self.path = path
39
40 def __getitem__(self, key):
41 key = key.strip('/')
42 return DictionarySerializer(self.target, self.path + key + '/')
43
44 def __call__(self, key, value):
45 key = key.lstrip('/')
46 ret = value
47 if isinstance(value, cuda.ndarray):
48 value = value.get()
49 arr = numpy.asarray(value)
50 self.target[self.path + key] = arr
51 return ret
52
53
54 def save_npz(filename, obj, compression=True):
55 """Saves an object to the file in NPZ format.
56
57 This is a short-cut function to save only one object into an NPZ file.
58
59 Args:
60 filename (str): Target file name.
61 obj: Object to be serialized. It must support serialization protocol.
62 compression (bool): If ``True``, compression in the resulting zip file
63 is enabled.
64
65 """
66 s = DictionarySerializer()
67 s.save(obj)
68 with open(filename, 'wb') as f:
69 if compression:
70 numpy.savez_compressed(f, **s.target)
71 else:
72 numpy.savez(f, **s.target)
73
74
75 class NpzDeserializer(serializer.Deserializer):
76
77 """Deserializer for NPZ format.
78
79 This is the standard deserializer in Chainer. This deserializer can be used
80 to read an object serialized by :func:`save_npz`.
81
82 Args:
83 npz: `npz` file object.
84 path: The base path that the deserialization starts from.
85
86 """
87
88 def __init__(self, npz, path=''):
89 self.npz = npz
90 self.path = path
91
92 def __getitem__(self, key):
93 key = key.strip('/')
94 return NpzDeserializer(self.npz, self.path + key + '/')
95
96 def __call__(self, key, value):
97 key = key.lstrip('/')
98 dataset = self.npz[self.path + key]
99 if value is None:
100 return dataset
101 elif isinstance(value, numpy.ndarray):
102 numpy.copyto(value, dataset)
103 elif isinstance(value, cuda.ndarray):
104 value.set(numpy.asarray(dataset))
105 else:
106 value = type(value)(numpy.asarray(dataset))
107 return value
108
109
110 def load_npz(filename, obj):
111 """Loads an object from the file in NPZ format.
112
113 This is a short-cut function to load from an `.npz` file that contains only
114 one object.
115
116 Args:
117 filename (str): Name of the file to be loaded.
118 obj: Object to be deserialized. It must support serialization protocol.
119
120 """
121 with numpy.load(filename) as f:
122 d = NpzDeserializer(f)
123 d.load(obj)
124
```
Path: `chainer/serializers/hdf5.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import serializer
5
6
7 try:
8 import h5py
9 _available = True
10 except ImportError:
11 _available = False
12
13
14 def _check_available():
15 if not _available:
16 msg = '''h5py is not installed on your environment.
17 Please install h5py to activate hdf5 serializers.
18
19 $ pip install h5py'''
20 raise RuntimeError(msg)
21
22
23 class HDF5Serializer(serializer.Serializer):
24
25 """Serializer for HDF5 format.
26
27 This is the standard serializer in Chainer. The chain hierarchy is simply
28 mapped to HDF5 hierarchical groups.
29
30 Args:
31 group (h5py.Group): The group that this serializer represents.
32 compression (int): Gzip compression level.
33
34 """
35
36 def __init__(self, group, compression=4):
37 _check_available()
38
39 self.group = group
40 self.compression = compression
41
42 def __getitem__(self, key):
43 name = self.group.name + '/' + key
44 return HDF5Serializer(self.group.require_group(name), self.compression)
45
46 def __call__(self, key, value):
47 ret = value
48 if isinstance(value, cuda.ndarray):
49 value = cuda.to_cpu(value)
50 arr = numpy.asarray(value)
51 compression = None if arr.size <= 1 else self.compression
52 self.group.create_dataset(key, data=arr, compression=compression)
53 return ret
54
55
56 def save_hdf5(filename, obj, compression=4):
57 """Saves an object to the file in HDF5 format.
58
59 This is a short-cut function to save only one object into an HDF5 file. If
60 you want to save multiple objects to one HDF5 file, use
61 :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group`
62 objects.
63
64 Args:
65 filename (str): Target file name.
66 obj: Object to be serialized. It must support serialization protocol.
67 compression (int): Gzip compression level.
68
69 """
70 _check_available()
71 with h5py.File(filename, 'w') as f:
72 s = HDF5Serializer(f, compression=compression)
73 s.save(obj)
74
75
76 class HDF5Deserializer(serializer.Deserializer):
77
78 """Deserializer for HDF5 format.
79
80 This is the standard deserializer in Chainer. This deserializer can be used
81 to read an object serialized by :class:`HDF5Serializer`.
82
83 Args:
84 group (h5py.Group): The group that the deserialization starts from.
85
86 """
87
88 def __init__(self, group):
89 _check_available()
90 self.group = group
91
92 def __getitem__(self, key):
93 name = self.group.name + '/' + key
94 return HDF5Deserializer(self.group.require_group(name))
95
96 def __call__(self, key, value):
97 dataset = self.group[key]
98 if value is None:
99 return numpy.asarray(dataset)
100 elif isinstance(value, numpy.ndarray):
101 dataset.read_direct(value)
102 elif isinstance(value, cuda.ndarray):
103 value.set(numpy.asarray(dataset))
104 else:
105 value = type(value)(numpy.asarray(dataset))
106 return value
107
108
109 def load_hdf5(filename, obj):
110 """Loads an object from the file in HDF5 format.
111
112 This is a short-cut function to load from an HDF5 file that contains only
113 one object. If you want to load multiple objects from one HDF5 file, use
114 :class:`HDF5Deserializer` directly by passing appropriate
115 :class:`h5py.Group` objects.
116
117 Args:
118 filename (str): Name of the file to be loaded.
119 obj: Object to be deserialized. It must support serialization protocol.
120
121 """
122 _check_available()
123 with h5py.File(filename, 'r') as f:
124 d = HDF5Deserializer(f)
125 d.load(obj)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/serializers/hdf5.py b/chainer/serializers/hdf5.py
--- a/chainer/serializers/hdf5.py
+++ b/chainer/serializers/hdf5.py
@@ -82,18 +82,26 @@
Args:
group (h5py.Group): The group that the deserialization starts from.
+ strict (bool): If ``True``, the deserializer raises an error when an
+ expected value is not found in the given HDF5 file. Otherwise,
+ it ignores the value and skip deserialization.
"""
- def __init__(self, group):
+ def __init__(self, group, strict=True):
_check_available()
self.group = group
+ self.strict = strict
def __getitem__(self, key):
name = self.group.name + '/' + key
return HDF5Deserializer(self.group.require_group(name))
def __call__(self, key, value):
+ if not self.strict and key not in self.group:
+ return value
+
+ self.group.keys
dataset = self.group[key]
if value is None:
return numpy.asarray(dataset)
diff --git a/chainer/serializers/npz.py b/chainer/serializers/npz.py
--- a/chainer/serializers/npz.py
+++ b/chainer/serializers/npz.py
@@ -82,20 +82,27 @@
Args:
npz: `npz` file object.
path: The base path that the deserialization starts from.
+ strict (bool): If ``True``, the deserializer raises an error when an
+ expected value is not found in the given NPZ file. Otherwise,
+ it ignores the value and skip deserialization.
"""
- def __init__(self, npz, path=''):
+ def __init__(self, npz, path='', strict=True):
self.npz = npz
self.path = path
+ self.strict = strict
def __getitem__(self, key):
key = key.strip('/')
return NpzDeserializer(self.npz, self.path + key + '/')
def __call__(self, key, value):
- key = key.lstrip('/')
- dataset = self.npz[self.path + key]
+ key = self.path + key.lstrip('/')
+ if not self.strict and key not in self.npz:
+ return value
+
+ dataset = self.npz[key]
if value is None:
return dataset
elif isinstance(value, numpy.ndarray):
| {"golden_diff": "diff --git a/chainer/serializers/hdf5.py b/chainer/serializers/hdf5.py\n--- a/chainer/serializers/hdf5.py\n+++ b/chainer/serializers/hdf5.py\n@@ -82,18 +82,26 @@\n \n Args:\n group (h5py.Group): The group that the deserialization starts from.\n+ strict (bool): If ``True``, the deserializer raises an error when an\n+ expected value is not found in the given HDF5 file. Otherwise,\n+ it ignores the value and skip deserialization.\n \n \"\"\"\n \n- def __init__(self, group):\n+ def __init__(self, group, strict=True):\n _check_available()\n self.group = group\n+ self.strict = strict\n \n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Deserializer(self.group.require_group(name))\n \n def __call__(self, key, value):\n+ if not self.strict and key not in self.group:\n+ return value\n+\n+ self.group.keys\n dataset = self.group[key]\n if value is None:\n return numpy.asarray(dataset)\ndiff --git a/chainer/serializers/npz.py b/chainer/serializers/npz.py\n--- a/chainer/serializers/npz.py\n+++ b/chainer/serializers/npz.py\n@@ -82,20 +82,27 @@\n Args:\n npz: `npz` file object.\n path: The base path that the deserialization starts from.\n+ strict (bool): If ``True``, the deserializer raises an error when an\n+ expected value is not found in the given NPZ file. Otherwise,\n+ it ignores the value and skip deserialization.\n \n \"\"\"\n \n- def __init__(self, npz, path=''):\n+ def __init__(self, npz, path='', strict=True):\n self.npz = npz\n self.path = path\n+ self.strict = strict\n \n def __getitem__(self, key):\n key = key.strip('/')\n return NpzDeserializer(self.npz, self.path + key + '/')\n \n def __call__(self, key, value):\n- key = key.lstrip('/')\n- dataset = self.npz[self.path + key]\n+ key = self.path + key.lstrip('/')\n+ if not self.strict and key not in self.npz:\n+ return value\n+\n+ dataset = self.npz[key]\n if value is None:\n return dataset\n elif isinstance(value, numpy.ndarray):\n", "issue": "Partial deserialization of links\nWe sometimes want to deserialize a link only partially, i.e. we want to load a smaller set of parameters to a larger link. It happens when we do some surgery to neural nets (e.g. fine-tuning), in which case the set of parameters in the source network does not match to the set of parameters in the target network.\r\n\r\nOne possible design is to introduce a `strict` option to the deserializer, and if that flag is False, any mismatch of the sets of parameters is ignored. I think it is safer to turn on this flag by default (I mean, we should not change the default behavior).\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\nclass DictionarySerializer(serializer.Serializer):\n\n \"\"\"Serializer for dictionary.\n\n This is the standard serializer in Chainer. The hierarchy of objects are\n simply mapped to a flat dictionary with keys representing the paths to\n objects in the hierarchy.\n\n .. note::\n Despite of its name, this serializer DOES NOT serialize the\n object into external files. It just build a flat dictionary of arrays\n that can be fed into :func:`numpy.savez` and\n :func:`numpy.savez_compressed`. If you want to use this serializer\n directly, you have to manually send a resulting dictionary to one of\n these functions.\n\n Args:\n target (dict): The dictionary that this serializer saves the objects\n to. 
If target is None, then a new dictionary is created.\n path (str): The base path in the hierarchy that this serializer\n indicates.\n\n Attributes:\n target (dict): The target dictionary. Once the serialization completes,\n this dictionary can be fed into :func:`numpy.savez` or\n :func:`numpy.savez_compressed` to serialize it in the NPZ format.\n\n \"\"\"\n\n def __init__(self, target=None, path=''):\n self.target = {} if target is None else target\n self.path = path\n\n def __getitem__(self, key):\n key = key.strip('/')\n return DictionarySerializer(self.target, self.path + key + '/')\n\n def __call__(self, key, value):\n key = key.lstrip('/')\n ret = value\n if isinstance(value, cuda.ndarray):\n value = value.get()\n arr = numpy.asarray(value)\n self.target[self.path + key] = arr\n return ret\n\n\ndef save_npz(filename, obj, compression=True):\n \"\"\"Saves an object to the file in NPZ format.\n\n This is a short-cut function to save only one object into an NPZ file.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (bool): If ``True``, compression in the resulting zip file\n is enabled.\n\n \"\"\"\n s = DictionarySerializer()\n s.save(obj)\n with open(filename, 'wb') as f:\n if compression:\n numpy.savez_compressed(f, **s.target)\n else:\n numpy.savez(f, **s.target)\n\n\nclass NpzDeserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for NPZ format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :func:`save_npz`.\n\n Args:\n npz: `npz` file object.\n path: The base path that the deserialization starts from.\n\n \"\"\"\n\n def __init__(self, npz, path=''):\n self.npz = npz\n self.path = path\n\n def __getitem__(self, key):\n key = key.strip('/')\n return NpzDeserializer(self.npz, self.path + key + '/')\n\n def __call__(self, key, value):\n key = key.lstrip('/')\n dataset = self.npz[self.path + key]\n if value is None:\n return dataset\n elif isinstance(value, numpy.ndarray):\n numpy.copyto(value, dataset)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_npz(filename, obj):\n \"\"\"Loads an object from the file in NPZ format.\n\n This is a short-cut function to load from an `.npz` file that contains only\n one object.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. It must support serialization protocol.\n\n \"\"\"\n with numpy.load(filename) as f:\n d = NpzDeserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/npz.py"}, {"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\ntry:\n import h5py\n _available = True\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n msg = '''h5py is not installed on your environment.\nPlease install h5py to activate hdf5 serializers.\n\n $ pip install h5py'''\n raise RuntimeError(msg)\n\n\nclass HDF5Serializer(serializer.Serializer):\n\n \"\"\"Serializer for HDF5 format.\n\n This is the standard serializer in Chainer. 
The chain hierarchy is simply\n mapped to HDF5 hierarchical groups.\n\n Args:\n group (h5py.Group): The group that this serializer represents.\n compression (int): Gzip compression level.\n\n \"\"\"\n\n def __init__(self, group, compression=4):\n _check_available()\n\n self.group = group\n self.compression = compression\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Serializer(self.group.require_group(name), self.compression)\n\n def __call__(self, key, value):\n ret = value\n if isinstance(value, cuda.ndarray):\n value = cuda.to_cpu(value)\n arr = numpy.asarray(value)\n compression = None if arr.size <= 1 else self.compression\n self.group.create_dataset(key, data=arr, compression=compression)\n return ret\n\n\ndef save_hdf5(filename, obj, compression=4):\n \"\"\"Saves an object to the file in HDF5 format.\n\n This is a short-cut function to save only one object into an HDF5 file. If\n you want to save multiple objects to one HDF5 file, use\n :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group`\n objects.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (int): Gzip compression level.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'w') as f:\n s = HDF5Serializer(f, compression=compression)\n s.save(obj)\n\n\nclass HDF5Deserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for HDF5 format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :class:`HDF5Serializer`.\n\n Args:\n group (h5py.Group): The group that the deserialization starts from.\n\n \"\"\"\n\n def __init__(self, group):\n _check_available()\n self.group = group\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Deserializer(self.group.require_group(name))\n\n def __call__(self, key, value):\n dataset = self.group[key]\n if value is None:\n return numpy.asarray(dataset)\n elif isinstance(value, numpy.ndarray):\n dataset.read_direct(value)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_hdf5(filename, obj):\n \"\"\"Loads an object from the file in HDF5 format.\n\n This is a short-cut function to load from an HDF5 file that contains only\n one object. If you want to load multiple objects from one HDF5 file, use\n :class:`HDF5Deserializer` directly by passing appropriate\n :class:`h5py.Group` objects.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. It must support serialization protocol.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'r') as f:\n d = HDF5Deserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/hdf5.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\nclass DictionarySerializer(serializer.Serializer):\n\n \"\"\"Serializer for dictionary.\n\n This is the standard serializer in Chainer. The hierarchy of objects are\n simply mapped to a flat dictionary with keys representing the paths to\n objects in the hierarchy.\n\n .. note::\n Despite of its name, this serializer DOES NOT serialize the\n object into external files. It just build a flat dictionary of arrays\n that can be fed into :func:`numpy.savez` and\n :func:`numpy.savez_compressed`. 
If you want to use this serializer\n directly, you have to manually send a resulting dictionary to one of\n these functions.\n\n Args:\n target (dict): The dictionary that this serializer saves the objects\n to. If target is None, then a new dictionary is created.\n path (str): The base path in the hierarchy that this serializer\n indicates.\n\n Attributes:\n target (dict): The target dictionary. Once the serialization completes,\n this dictionary can be fed into :func:`numpy.savez` or\n :func:`numpy.savez_compressed` to serialize it in the NPZ format.\n\n \"\"\"\n\n def __init__(self, target=None, path=''):\n self.target = {} if target is None else target\n self.path = path\n\n def __getitem__(self, key):\n key = key.strip('/')\n return DictionarySerializer(self.target, self.path + key + '/')\n\n def __call__(self, key, value):\n key = key.lstrip('/')\n ret = value\n if isinstance(value, cuda.ndarray):\n value = value.get()\n arr = numpy.asarray(value)\n self.target[self.path + key] = arr\n return ret\n\n\ndef save_npz(filename, obj, compression=True):\n \"\"\"Saves an object to the file in NPZ format.\n\n This is a short-cut function to save only one object into an NPZ file.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (bool): If ``True``, compression in the resulting zip file\n is enabled.\n\n \"\"\"\n s = DictionarySerializer()\n s.save(obj)\n with open(filename, 'wb') as f:\n if compression:\n numpy.savez_compressed(f, **s.target)\n else:\n numpy.savez(f, **s.target)\n\n\nclass NpzDeserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for NPZ format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :func:`save_npz`.\n\n Args:\n npz: `npz` file object.\n path: The base path that the deserialization starts from.\n strict (bool): If ``True``, the deserializer raises an error when an\n expected value is not found in the given NPZ file. Otherwise,\n it ignores the value and skip deserialization.\n\n \"\"\"\n\n def __init__(self, npz, path='', strict=True):\n self.npz = npz\n self.path = path\n self.strict = strict\n\n def __getitem__(self, key):\n key = key.strip('/')\n return NpzDeserializer(self.npz, self.path + key + '/')\n\n def __call__(self, key, value):\n key = self.path + key.lstrip('/')\n if not self.strict and key not in self.npz:\n return value\n\n dataset = self.npz[key]\n if value is None:\n return dataset\n elif isinstance(value, numpy.ndarray):\n numpy.copyto(value, dataset)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_npz(filename, obj):\n \"\"\"Loads an object from the file in NPZ format.\n\n This is a short-cut function to load from an `.npz` file that contains only\n one object.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. 
It must support serialization protocol.\n\n \"\"\"\n with numpy.load(filename) as f:\n d = NpzDeserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/npz.py"}, {"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import serializer\n\n\ntry:\n import h5py\n _available = True\nexcept ImportError:\n _available = False\n\n\ndef _check_available():\n if not _available:\n msg = '''h5py is not installed on your environment.\nPlease install h5py to activate hdf5 serializers.\n\n $ pip install h5py'''\n raise RuntimeError(msg)\n\n\nclass HDF5Serializer(serializer.Serializer):\n\n \"\"\"Serializer for HDF5 format.\n\n This is the standard serializer in Chainer. The chain hierarchy is simply\n mapped to HDF5 hierarchical groups.\n\n Args:\n group (h5py.Group): The group that this serializer represents.\n compression (int): Gzip compression level.\n\n \"\"\"\n\n def __init__(self, group, compression=4):\n _check_available()\n\n self.group = group\n self.compression = compression\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Serializer(self.group.require_group(name), self.compression)\n\n def __call__(self, key, value):\n ret = value\n if isinstance(value, cuda.ndarray):\n value = cuda.to_cpu(value)\n arr = numpy.asarray(value)\n compression = None if arr.size <= 1 else self.compression\n self.group.create_dataset(key, data=arr, compression=compression)\n return ret\n\n\ndef save_hdf5(filename, obj, compression=4):\n \"\"\"Saves an object to the file in HDF5 format.\n\n This is a short-cut function to save only one object into an HDF5 file. If\n you want to save multiple objects to one HDF5 file, use\n :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group`\n objects.\n\n Args:\n filename (str): Target file name.\n obj: Object to be serialized. It must support serialization protocol.\n compression (int): Gzip compression level.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'w') as f:\n s = HDF5Serializer(f, compression=compression)\n s.save(obj)\n\n\nclass HDF5Deserializer(serializer.Deserializer):\n\n \"\"\"Deserializer for HDF5 format.\n\n This is the standard deserializer in Chainer. This deserializer can be used\n to read an object serialized by :class:`HDF5Serializer`.\n\n Args:\n group (h5py.Group): The group that the deserialization starts from.\n strict (bool): If ``True``, the deserializer raises an error when an\n expected value is not found in the given HDF5 file. Otherwise,\n it ignores the value and skip deserialization.\n\n \"\"\"\n\n def __init__(self, group, strict=True):\n _check_available()\n self.group = group\n self.strict = strict\n\n def __getitem__(self, key):\n name = self.group.name + '/' + key\n return HDF5Deserializer(self.group.require_group(name))\n\n def __call__(self, key, value):\n if not self.strict and key not in self.group:\n return value\n\n self.group.keys\n dataset = self.group[key]\n if value is None:\n return numpy.asarray(dataset)\n elif isinstance(value, numpy.ndarray):\n dataset.read_direct(value)\n elif isinstance(value, cuda.ndarray):\n value.set(numpy.asarray(dataset))\n else:\n value = type(value)(numpy.asarray(dataset))\n return value\n\n\ndef load_hdf5(filename, obj):\n \"\"\"Loads an object from the file in HDF5 format.\n\n This is a short-cut function to load from an HDF5 file that contains only\n one object. 
If you want to load multiple objects from one HDF5 file, use\n :class:`HDF5Deserializer` directly by passing appropriate\n :class:`h5py.Group` objects.\n\n Args:\n filename (str): Name of the file to be loaded.\n obj: Object to be deserialized. It must support serialization protocol.\n\n \"\"\"\n _check_available()\n with h5py.File(filename, 'r') as f:\n d = HDF5Deserializer(f)\n d.load(obj)\n", "path": "chainer/serializers/hdf5.py"}]} | 2,647 | 570 |
gh_patches_debug_53934 | rasdani/github-patches | git_diff | bokeh__bokeh-3570 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ImageURL example in reference guide is broken
The example doesn't render an image, just a gridded, empty plot.
http://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/glyphs/image_url.py`
Content:
```
1
2 import numpy as np
3
4 from bokeh.util.browser import view
5 from bokeh.document import Document
6 from bokeh.embed import file_html
7 from bokeh.models.glyphs import ImageURL
8 from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid
9 from bokeh.resources import INLINE
10
11 url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png"
12 N = 5
13
14 source = ColumnDataSource(dict(
15 url = [url]*N,
16 x1 = np.linspace( 0, 150, N),
17 y1 = np.linspace( 0, 150, N),
18 w1 = np.linspace( 10, 50, N),
19 h1 = np.linspace( 10, 50, N),
20 x2 = np.linspace(-50, 150, N),
21 y2 = np.linspace( 0, 200, N),
22 ))
23
24 xdr = Range1d(start=-100, end=200)
25 ydr = Range1d(start=-100, end=200)
26
27 plot = Plot(title="ImageURL", x_range=xdr, y_range=ydr)
28
29 image1 = ImageURL(url="url", x="x1", y="y1", w="w1", h="h1", anchor="center", global_alpha=0.2)
30 plot.add_glyph(source, image1)
31
32 image2 = ImageURL(url="url", x="x2", y="y2", w=20, h=20, anchor="top_left")
33 plot.add_glyph(source, image2)
34
35 image3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor="bottom_right")
36 plot.add_glyph(source, image3)
37
38 xaxis = LinearAxis()
39 plot.add_layout(xaxis, 'below')
40
41 yaxis = LinearAxis()
42 plot.add_layout(yaxis,'left')
43
44 plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
45 plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
46
47 doc = Document( )
48 doc.add_root(plot)
49
50 if __name__ == "__main__":
51 filename = "image_url.html"
52 with open(filename, "w") as f:
53 f.write(file_html(doc, INLINE, "Image URL Example"))
54 print("Wrote %s" % filename)
55 view(filename)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py
--- a/examples/glyphs/image_url.py
+++ b/examples/glyphs/image_url.py
@@ -8,7 +8,7 @@
from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid
from bokeh.resources import INLINE
-url = "http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png"
+url = "http://bokeh.pydata.org/en/latest/_static/images/logo.png"
N = 5
source = ColumnDataSource(dict(
| {"golden_diff": "diff --git a/examples/glyphs/image_url.py b/examples/glyphs/image_url.py\n--- a/examples/glyphs/image_url.py\n+++ b/examples/glyphs/image_url.py\n@@ -8,7 +8,7 @@\n from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\n from bokeh.resources import INLINE\n \n-url = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\n+url = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\n N = 5\n \n source = ColumnDataSource(dict(\n", "issue": "ImageURL example in reference guide is broken\nThe example doesn't render an image, just a gridded, empty plot.\n\nhttp://bokeh.pydata.org/en/latest/docs/reference/models/glyphs.html#bokeh.models.glyphs.ImageURL\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/bokeh-transparent.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(title=\"ImageURL\", x_range=xdr, y_range=ydr)\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n", "path": "examples/glyphs/image_url.py"}], "after_files": [{"content": "\nimport numpy as np\n\nfrom bokeh.util.browser import view\nfrom bokeh.document import Document\nfrom bokeh.embed import file_html\nfrom bokeh.models.glyphs import ImageURL\nfrom bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid\nfrom bokeh.resources import INLINE\n\nurl = \"http://bokeh.pydata.org/en/latest/_static/images/logo.png\"\nN = 5\n\nsource = ColumnDataSource(dict(\n url = [url]*N,\n x1 = np.linspace( 0, 150, N),\n y1 = np.linspace( 0, 150, N),\n w1 = np.linspace( 10, 50, N),\n h1 = np.linspace( 10, 50, N),\n x2 = np.linspace(-50, 150, N),\n y2 = np.linspace( 0, 200, N),\n))\n\nxdr = Range1d(start=-100, end=200)\nydr = Range1d(start=-100, end=200)\n\nplot = Plot(title=\"ImageURL\", x_range=xdr, y_range=ydr)\n\nimage1 = ImageURL(url=\"url\", x=\"x1\", y=\"y1\", w=\"w1\", h=\"h1\", anchor=\"center\", global_alpha=0.2)\nplot.add_glyph(source, image1)\n\nimage2 = ImageURL(url=\"url\", x=\"x2\", y=\"y2\", w=20, h=20, anchor=\"top_left\")\nplot.add_glyph(source, image2)\n\nimage3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor=\"bottom_right\")\nplot.add_glyph(source, 
image3)\n\nxaxis = LinearAxis()\nplot.add_layout(xaxis, 'below')\n\nyaxis = LinearAxis()\nplot.add_layout(yaxis,'left')\n\nplot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))\nplot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))\n\ndoc = Document( )\ndoc.add_root(plot)\n\nif __name__ == \"__main__\":\n filename = \"image_url.html\"\n with open(filename, \"w\") as f:\n f.write(file_html(doc, INLINE, \"Image URL Example\"))\n print(\"Wrote %s\" % filename)\n view(filename)\n", "path": "examples/glyphs/image_url.py"}]} | 952 | 126 |
gh_patches_debug_5589 | rasdani/github-patches | git_diff | getsentry__sentry-17282 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unclear instructions for GitLab integration setup
> Step 1: Create a Sentry App in GitLab
> To configure GitLab with Sentry, you will need to create a Sentry app in your GitLab instance.
> You'll also need to be a maintainer or owner in GitLab. Projects owned by users are not supported.
> Navigate to the **User Settings** section of your GitLab instance.
> In the sidebar, select 'Applications'.
Did the author mean `https://gitlaburl/admin`? If so, this is called `Admin Area`:

Also, providing a direct URL won't hurt. Why not something like this:
> Navigate to the Admin Area of your GitLab instance, then select "Applications" in the sidebar. (Or navigate directly to /admin/applications/new)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/integrations/gitlab/integration.py`
Content:
```
1 from __future__ import absolute_import
2
3 import six
4 from six.moves.urllib.parse import urlparse
5 from django.utils.translation import ugettext_lazy as _
6 from django import forms
7
8 from sentry.web.helpers import render_to_response
9 from sentry.identity.pipeline import IdentityProviderPipeline
10 from sentry.identity.gitlab import get_user_info, get_oauth_data
11 from sentry.identity.gitlab.provider import GitlabIdentityProvider
12 from sentry.integrations import (
13 FeatureDescription,
14 IntegrationInstallation,
15 IntegrationFeatures,
16 IntegrationProvider,
17 IntegrationMetadata,
18 )
19 from sentry.integrations.exceptions import ApiError, IntegrationError
20 from sentry.integrations.repositories import RepositoryMixin
21 from sentry.pipeline import NestedPipelineView, PipelineView
22 from sentry.utils.http import absolute_uri
23 from sentry.utils.hashlib import sha1_text
24
25 from .client import GitLabApiClient, GitLabSetupClient
26 from .issues import GitlabIssueBasic
27 from .repository import GitlabRepositoryProvider
28
29 DESCRIPTION = """
30 Connect your Sentry organization to an organization in your GitLab instance or gitlab.com, enabling the following features:
31 """
32
33 FEATURES = [
34 FeatureDescription(
35 """
36 Track commits and releases (learn more
37 [here](https://docs.sentry.io/learn/releases/))
38 """,
39 IntegrationFeatures.COMMITS,
40 ),
41 FeatureDescription(
42 """
43 Resolve Sentry issues via GitLab commits and merge requests by
44 including `Fixes PROJ-ID` in the message
45 """,
46 IntegrationFeatures.COMMITS,
47 ),
48 FeatureDescription(
49 """
50 Create GitLab issues from Sentry
51 """,
52 IntegrationFeatures.ISSUE_BASIC,
53 ),
54 FeatureDescription(
55 """
56 Link Sentry issues to existing GitLab issues
57 """,
58 IntegrationFeatures.ISSUE_BASIC,
59 ),
60 ]
61
62 metadata = IntegrationMetadata(
63 description=DESCRIPTION.strip(),
64 features=FEATURES,
65 author="The Sentry Team",
66 noun=_("Installation"),
67 issue_url="https://github.com/getsentry/sentry/issues/",
68 source_url="https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/gitlab",
69 aspects={},
70 )
71
72
73 class GitlabIntegration(IntegrationInstallation, GitlabIssueBasic, RepositoryMixin):
74 repo_search = True
75
76 def __init__(self, *args, **kwargs):
77 super(GitlabIntegration, self).__init__(*args, **kwargs)
78 self.default_identity = None
79
80 def get_group_id(self):
81 return self.model.metadata["group_id"]
82
83 def get_client(self):
84 if self.default_identity is None:
85 self.default_identity = self.get_default_identity()
86
87 return GitLabApiClient(self)
88
89 def get_repositories(self, query=None):
90 # Note: gitlab projects are the same things as repos everywhere else
91 group = self.get_group_id()
92 resp = self.get_client().search_group_projects(group, query)
93 return [{"identifier": repo["id"], "name": repo["name_with_namespace"]} for repo in resp]
94
95 def search_projects(self, query):
96 client = self.get_client()
97 group_id = self.get_group_id()
98 return client.search_group_projects(group_id, query)
99
100 def search_issues(self, project_id, query, iids):
101 client = self.get_client()
102 return client.search_project_issues(project_id, query, iids)
103
104 def error_message_from_json(self, data):
105 """
106 Extract error messages from gitlab API errors.
107 Generic errors come in the `error` key while validation errors
108 are generally in `message`.
109
110 See https://docs.gitlab.com/ee/api/#data-validation-and-error-reporting
111 """
112 if "message" in data:
113 return data["message"]
114 if "error" in data:
115 return data["error"]
116
117
118 class InstallationForm(forms.Form):
119 url = forms.CharField(
120 label=_("GitLab URL"),
121 help_text=_(
122 "The base URL for your GitLab instance, including the host and protocol. "
123 "Do not include group path."
124 "<br>"
125 "If using gitlab.com, enter https://gitlab.com/"
126 ),
127 widget=forms.TextInput(attrs={"placeholder": "https://gitlab.example.com"}),
128 )
129 group = forms.CharField(
130 label=_("GitLab Group Path"),
131 help_text=_(
132 "This can be found in the URL of your group's GitLab page. "
133 "<br>"
134 "For example, if your group can be found at "
135 "https://gitlab.com/my-group/my-subgroup, enter `my-group/my-subgroup`."
136 ),
137 widget=forms.TextInput(attrs={"placeholder": _("my-group/my-subgroup")}),
138 )
139 include_subgroups = forms.BooleanField(
140 label=_("Include Subgroups"),
141 help_text=_("Include projects in subgroups of the GitLab group."),
142 widget=forms.CheckboxInput(),
143 required=False,
144 initial=False,
145 )
146 verify_ssl = forms.BooleanField(
147 label=_("Verify SSL"),
148 help_text=_(
149 "By default, we verify SSL certificates "
150 "when delivering payloads to your GitLab instance, "
151 "and request GitLab to verify SSL when it delivers "
152 "webhooks to Sentry."
153 ),
154 widget=forms.CheckboxInput(),
155 required=False,
156 initial=True,
157 )
158 client_id = forms.CharField(
159 label=_("GitLab Application ID"),
160 widget=forms.TextInput(
161 attrs={
162 "placeholder": _("5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737")
163 }
164 ),
165 )
166 client_secret = forms.CharField(
167 label=_("GitLab Application Secret"),
168 widget=forms.TextInput(attrs={"placeholder": _("XXXXXXXXXXXXXXXXXXXXXXXXXXX")}),
169 )
170
171 def clean_url(self):
172 """Strip off trailing / as they cause invalid URLs downstream"""
173 return self.cleaned_data["url"].rstrip("/")
174
175
176 class InstallationConfigView(PipelineView):
177 def dispatch(self, request, pipeline):
178 if request.method == "POST":
179 form = InstallationForm(request.POST)
180 if form.is_valid():
181 form_data = form.cleaned_data
182
183 pipeline.bind_state("installation_data", form_data)
184
185 pipeline.bind_state(
186 "oauth_config_information",
187 {
188 "access_token_url": u"{}/oauth/token".format(form_data.get("url")),
189 "authorize_url": u"{}/oauth/authorize".format(form_data.get("url")),
190 "client_id": form_data.get("client_id"),
191 "client_secret": form_data.get("client_secret"),
192 "verify_ssl": form_data.get("verify_ssl"),
193 },
194 )
195 pipeline.get_logger().info(
196 "gitlab.setup.installation-config-view.success",
197 extra={
198 "base_url": form_data.get("url"),
199 "client_id": form_data.get("client_id"),
200 "verify_ssl": form_data.get("verify_ssl"),
201 },
202 )
203 return pipeline.next_step()
204 else:
205 form = InstallationForm()
206
207 return render_to_response(
208 template="sentry/integrations/gitlab-config.html",
209 context={"form": form},
210 request=request,
211 )
212
213
214 class InstallationGuideView(PipelineView):
215 def dispatch(self, request, pipeline):
216 if "completed_installation_guide" in request.GET:
217 return pipeline.next_step()
218 return render_to_response(
219 template="sentry/integrations/gitlab-config.html",
220 context={
221 "next_url": "%s%s"
222 % (absolute_uri("extensions/gitlab/setup/"), "?completed_installation_guide"),
223 "setup_values": [
224 {"label": "Name", "value": "Sentry"},
225 {"label": "Redirect URI", "value": absolute_uri("/extensions/gitlab/setup/")},
226 {"label": "Scopes", "value": "api"},
227 ],
228 },
229 request=request,
230 )
231
232
233 class GitlabIntegrationProvider(IntegrationProvider):
234 key = "gitlab"
235 name = "GitLab"
236 metadata = metadata
237 integration_cls = GitlabIntegration
238
239 needs_default_identity = True
240
241 features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.COMMITS])
242
243 setup_dialog_config = {"width": 1030, "height": 1000}
244
245 def _make_identity_pipeline_view(self):
246 """
247 Make the nested identity provider view. It is important that this view is
248 not constructed until we reach this step and the
249 ``oauth_config_information`` is available in the pipeline state. This
250 method should be late bound into the pipeline vies.
251 """
252 identity_pipeline_config = dict(
253 oauth_scopes=sorted(GitlabIdentityProvider.oauth_scopes),
254 redirect_url=absolute_uri("/extensions/gitlab/setup/"),
255 **self.pipeline.fetch_state("oauth_config_information")
256 )
257
258 return NestedPipelineView(
259 bind_key="identity",
260 provider_key="gitlab",
261 pipeline_cls=IdentityProviderPipeline,
262 config=identity_pipeline_config,
263 )
264
265 def get_group_info(self, access_token, installation_data):
266 client = GitLabSetupClient(
267 installation_data["url"], access_token, installation_data["verify_ssl"]
268 )
269 try:
270 resp = client.get_group(installation_data["group"])
271 return resp.json
272 except ApiError as e:
273 self.get_logger().info(
274 "gitlab.installation.get-group-info-failure",
275 extra={
276 "base_url": installation_data["url"],
277 "verify_ssl": installation_data["verify_ssl"],
278 "group": installation_data["group"],
279 "include_subgroups": installation_data["include_subgroups"],
280 "error_message": six.text_type(e),
281 "error_status": e.code,
282 },
283 )
284 raise IntegrationError("The requested GitLab group could not be found.")
285
286 def get_pipeline_views(self):
287 return [
288 InstallationGuideView(),
289 InstallationConfigView(),
290 lambda: self._make_identity_pipeline_view(),
291 ]
292
293 def build_integration(self, state):
294 data = state["identity"]["data"]
295 oauth_data = get_oauth_data(data)
296 user = get_user_info(data["access_token"], state["installation_data"])
297 group = self.get_group_info(data["access_token"], state["installation_data"])
298 include_subgroups = state["installation_data"]["include_subgroups"]
299 scopes = sorted(GitlabIdentityProvider.oauth_scopes)
300 base_url = state["installation_data"]["url"]
301
302 hostname = urlparse(base_url).netloc
303 verify_ssl = state["installation_data"]["verify_ssl"]
304
305 # Generate a hash to prevent stray hooks from being accepted
306 # use a consistent hash so that reinstalls/shared integrations don't
307 # rotate secrets.
308 secret = sha1_text("".join([hostname, state["installation_data"]["client_id"]]))
309
310 integration = {
311 "name": group["full_name"],
312 # Splice the gitlab host and project together to
313 # act as unique link between a gitlab instance, group + sentry.
314 # This value is embedded then in the webook token that we
315 # give to gitlab to allow us to find the integration a hook came
316 # from.
317 "external_id": u"{}:{}".format(hostname, group["id"]),
318 "metadata": {
319 "icon": group["avatar_url"],
320 "instance": hostname,
321 "domain_name": u"{}/{}".format(hostname, group["full_path"]),
322 "scopes": scopes,
323 "verify_ssl": verify_ssl,
324 "base_url": base_url,
325 "webhook_secret": secret.hexdigest(),
326 "group_id": group["id"],
327 "include_subgroups": include_subgroups,
328 },
329 "user_identity": {
330 "type": "gitlab",
331 "external_id": u"{}:{}".format(hostname, user["id"]),
332 "scopes": scopes,
333 "data": oauth_data,
334 },
335 }
336 return integration
337
338 def setup(self):
339 from sentry.plugins.base import bindings
340
341 bindings.add(
342 "integration-repository.provider", GitlabRepositoryProvider, id="integrations:gitlab"
343 )
344
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/integrations/gitlab/integration.py b/src/sentry/integrations/gitlab/integration.py
--- a/src/sentry/integrations/gitlab/integration.py
+++ b/src/sentry/integrations/gitlab/integration.py
@@ -175,6 +175,10 @@
class InstallationConfigView(PipelineView):
def dispatch(self, request, pipeline):
+ if "goback" in request.GET:
+ pipeline.state.step_index = 0
+ return pipeline.current_step()
+
if request.method == "POST":
form = InstallationForm(request.POST)
if form.is_valid():
| {"golden_diff": "diff --git a/src/sentry/integrations/gitlab/integration.py b/src/sentry/integrations/gitlab/integration.py\n--- a/src/sentry/integrations/gitlab/integration.py\n+++ b/src/sentry/integrations/gitlab/integration.py\n@@ -175,6 +175,10 @@\n \n class InstallationConfigView(PipelineView):\n def dispatch(self, request, pipeline):\n+ if \"goback\" in request.GET:\n+ pipeline.state.step_index = 0\n+ return pipeline.current_step()\n+\n if request.method == \"POST\":\n form = InstallationForm(request.POST)\n if form.is_valid():\n", "issue": "Unclear instructions for GitLab integration setup\n> Step 1: Create a Sentry App in GitLab\r\n\r\n> To configure GitLab with Sentry, you will need to create a Sentry app in your GitLab instance.\r\n\r\n> You'll also need to be a maintainer or owner in GitLab. Projects owned by users are not supported.\r\n\r\n> Navigate to the **User Settings** section of your GitLab instance.\r\n\r\n> In the sidebar, select 'Applications'.\r\n\r\nDid the author mean `https://gitlaburl/admin`? If so, this is called `Admin Area`:\r\n\r\n\r\n\r\nAlso, providing a direct URL won't hurt. Why not something like this:\r\n\r\n> Navigate to the Admin Area of your GitLab instance, then select \"Applications\" in the sidebar. (Or navigate directly to /admin/applications/new)\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\nfrom six.moves.urllib.parse import urlparse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\n\nfrom sentry.web.helpers import render_to_response\nfrom sentry.identity.pipeline import IdentityProviderPipeline\nfrom sentry.identity.gitlab import get_user_info, get_oauth_data\nfrom sentry.identity.gitlab.provider import GitlabIdentityProvider\nfrom sentry.integrations import (\n FeatureDescription,\n IntegrationInstallation,\n IntegrationFeatures,\n IntegrationProvider,\n IntegrationMetadata,\n)\nfrom sentry.integrations.exceptions import ApiError, IntegrationError\nfrom sentry.integrations.repositories import RepositoryMixin\nfrom sentry.pipeline import NestedPipelineView, PipelineView\nfrom sentry.utils.http import absolute_uri\nfrom sentry.utils.hashlib import sha1_text\n\nfrom .client import GitLabApiClient, GitLabSetupClient\nfrom .issues import GitlabIssueBasic\nfrom .repository import GitlabRepositoryProvider\n\nDESCRIPTION = \"\"\"\nConnect your Sentry organization to an organization in your GitLab instance or gitlab.com, enabling the following features:\n\"\"\"\n\nFEATURES = [\n FeatureDescription(\n \"\"\"\n Track commits and releases (learn more\n [here](https://docs.sentry.io/learn/releases/))\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n FeatureDescription(\n \"\"\"\n Resolve Sentry issues via GitLab commits and merge requests by\n including `Fixes PROJ-ID` in the message\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n FeatureDescription(\n \"\"\"\n Create GitLab issues from Sentry\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n FeatureDescription(\n \"\"\"\n Link Sentry issues to existing GitLab issues\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n]\n\nmetadata = IntegrationMetadata(\n description=DESCRIPTION.strip(),\n features=FEATURES,\n author=\"The Sentry Team\",\n noun=_(\"Installation\"),\n issue_url=\"https://github.com/getsentry/sentry/issues/\",\n source_url=\"https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/gitlab\",\n aspects={},\n)\n\n\nclass GitlabIntegration(IntegrationInstallation, GitlabIssueBasic, RepositoryMixin):\n 
repo_search = True\n\n def __init__(self, *args, **kwargs):\n super(GitlabIntegration, self).__init__(*args, **kwargs)\n self.default_identity = None\n\n def get_group_id(self):\n return self.model.metadata[\"group_id\"]\n\n def get_client(self):\n if self.default_identity is None:\n self.default_identity = self.get_default_identity()\n\n return GitLabApiClient(self)\n\n def get_repositories(self, query=None):\n # Note: gitlab projects are the same things as repos everywhere else\n group = self.get_group_id()\n resp = self.get_client().search_group_projects(group, query)\n return [{\"identifier\": repo[\"id\"], \"name\": repo[\"name_with_namespace\"]} for repo in resp]\n\n def search_projects(self, query):\n client = self.get_client()\n group_id = self.get_group_id()\n return client.search_group_projects(group_id, query)\n\n def search_issues(self, project_id, query, iids):\n client = self.get_client()\n return client.search_project_issues(project_id, query, iids)\n\n def error_message_from_json(self, data):\n \"\"\"\n Extract error messages from gitlab API errors.\n Generic errors come in the `error` key while validation errors\n are generally in `message`.\n\n See https://docs.gitlab.com/ee/api/#data-validation-and-error-reporting\n \"\"\"\n if \"message\" in data:\n return data[\"message\"]\n if \"error\" in data:\n return data[\"error\"]\n\n\nclass InstallationForm(forms.Form):\n url = forms.CharField(\n label=_(\"GitLab URL\"),\n help_text=_(\n \"The base URL for your GitLab instance, including the host and protocol. \"\n \"Do not include group path.\"\n \"<br>\"\n \"If using gitlab.com, enter https://gitlab.com/\"\n ),\n widget=forms.TextInput(attrs={\"placeholder\": \"https://gitlab.example.com\"}),\n )\n group = forms.CharField(\n label=_(\"GitLab Group Path\"),\n help_text=_(\n \"This can be found in the URL of your group's GitLab page. 
\"\n \"<br>\"\n \"For example, if your group can be found at \"\n \"https://gitlab.com/my-group/my-subgroup, enter `my-group/my-subgroup`.\"\n ),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"my-group/my-subgroup\")}),\n )\n include_subgroups = forms.BooleanField(\n label=_(\"Include Subgroups\"),\n help_text=_(\"Include projects in subgroups of the GitLab group.\"),\n widget=forms.CheckboxInput(),\n required=False,\n initial=False,\n )\n verify_ssl = forms.BooleanField(\n label=_(\"Verify SSL\"),\n help_text=_(\n \"By default, we verify SSL certificates \"\n \"when delivering payloads to your GitLab instance, \"\n \"and request GitLab to verify SSL when it delivers \"\n \"webhooks to Sentry.\"\n ),\n widget=forms.CheckboxInput(),\n required=False,\n initial=True,\n )\n client_id = forms.CharField(\n label=_(\"GitLab Application ID\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": _(\"5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737\")\n }\n ),\n )\n client_secret = forms.CharField(\n label=_(\"GitLab Application Secret\"),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"XXXXXXXXXXXXXXXXXXXXXXXXXXX\")}),\n )\n\n def clean_url(self):\n \"\"\"Strip off trailing / as they cause invalid URLs downstream\"\"\"\n return self.cleaned_data[\"url\"].rstrip(\"/\")\n\n\nclass InstallationConfigView(PipelineView):\n def dispatch(self, request, pipeline):\n if request.method == \"POST\":\n form = InstallationForm(request.POST)\n if form.is_valid():\n form_data = form.cleaned_data\n\n pipeline.bind_state(\"installation_data\", form_data)\n\n pipeline.bind_state(\n \"oauth_config_information\",\n {\n \"access_token_url\": u\"{}/oauth/token\".format(form_data.get(\"url\")),\n \"authorize_url\": u\"{}/oauth/authorize\".format(form_data.get(\"url\")),\n \"client_id\": form_data.get(\"client_id\"),\n \"client_secret\": form_data.get(\"client_secret\"),\n \"verify_ssl\": form_data.get(\"verify_ssl\"),\n },\n )\n pipeline.get_logger().info(\n \"gitlab.setup.installation-config-view.success\",\n extra={\n \"base_url\": form_data.get(\"url\"),\n \"client_id\": form_data.get(\"client_id\"),\n \"verify_ssl\": form_data.get(\"verify_ssl\"),\n },\n )\n return pipeline.next_step()\n else:\n form = InstallationForm()\n\n return render_to_response(\n template=\"sentry/integrations/gitlab-config.html\",\n context={\"form\": form},\n request=request,\n )\n\n\nclass InstallationGuideView(PipelineView):\n def dispatch(self, request, pipeline):\n if \"completed_installation_guide\" in request.GET:\n return pipeline.next_step()\n return render_to_response(\n template=\"sentry/integrations/gitlab-config.html\",\n context={\n \"next_url\": \"%s%s\"\n % (absolute_uri(\"extensions/gitlab/setup/\"), \"?completed_installation_guide\"),\n \"setup_values\": [\n {\"label\": \"Name\", \"value\": \"Sentry\"},\n {\"label\": \"Redirect URI\", \"value\": absolute_uri(\"/extensions/gitlab/setup/\")},\n {\"label\": \"Scopes\", \"value\": \"api\"},\n ],\n },\n request=request,\n )\n\n\nclass GitlabIntegrationProvider(IntegrationProvider):\n key = \"gitlab\"\n name = \"GitLab\"\n metadata = metadata\n integration_cls = GitlabIntegration\n\n needs_default_identity = True\n\n features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.COMMITS])\n\n setup_dialog_config = {\"width\": 1030, \"height\": 1000}\n\n def _make_identity_pipeline_view(self):\n \"\"\"\n Make the nested identity provider view. 
It is important that this view is\n not constructed until we reach this step and the\n ``oauth_config_information`` is available in the pipeline state. This\n method should be late bound into the pipeline vies.\n \"\"\"\n identity_pipeline_config = dict(\n oauth_scopes=sorted(GitlabIdentityProvider.oauth_scopes),\n redirect_url=absolute_uri(\"/extensions/gitlab/setup/\"),\n **self.pipeline.fetch_state(\"oauth_config_information\")\n )\n\n return NestedPipelineView(\n bind_key=\"identity\",\n provider_key=\"gitlab\",\n pipeline_cls=IdentityProviderPipeline,\n config=identity_pipeline_config,\n )\n\n def get_group_info(self, access_token, installation_data):\n client = GitLabSetupClient(\n installation_data[\"url\"], access_token, installation_data[\"verify_ssl\"]\n )\n try:\n resp = client.get_group(installation_data[\"group\"])\n return resp.json\n except ApiError as e:\n self.get_logger().info(\n \"gitlab.installation.get-group-info-failure\",\n extra={\n \"base_url\": installation_data[\"url\"],\n \"verify_ssl\": installation_data[\"verify_ssl\"],\n \"group\": installation_data[\"group\"],\n \"include_subgroups\": installation_data[\"include_subgroups\"],\n \"error_message\": six.text_type(e),\n \"error_status\": e.code,\n },\n )\n raise IntegrationError(\"The requested GitLab group could not be found.\")\n\n def get_pipeline_views(self):\n return [\n InstallationGuideView(),\n InstallationConfigView(),\n lambda: self._make_identity_pipeline_view(),\n ]\n\n def build_integration(self, state):\n data = state[\"identity\"][\"data\"]\n oauth_data = get_oauth_data(data)\n user = get_user_info(data[\"access_token\"], state[\"installation_data\"])\n group = self.get_group_info(data[\"access_token\"], state[\"installation_data\"])\n include_subgroups = state[\"installation_data\"][\"include_subgroups\"]\n scopes = sorted(GitlabIdentityProvider.oauth_scopes)\n base_url = state[\"installation_data\"][\"url\"]\n\n hostname = urlparse(base_url).netloc\n verify_ssl = state[\"installation_data\"][\"verify_ssl\"]\n\n # Generate a hash to prevent stray hooks from being accepted\n # use a consistent hash so that reinstalls/shared integrations don't\n # rotate secrets.\n secret = sha1_text(\"\".join([hostname, state[\"installation_data\"][\"client_id\"]]))\n\n integration = {\n \"name\": group[\"full_name\"],\n # Splice the gitlab host and project together to\n # act as unique link between a gitlab instance, group + sentry.\n # This value is embedded then in the webook token that we\n # give to gitlab to allow us to find the integration a hook came\n # from.\n \"external_id\": u\"{}:{}\".format(hostname, group[\"id\"]),\n \"metadata\": {\n \"icon\": group[\"avatar_url\"],\n \"instance\": hostname,\n \"domain_name\": u\"{}/{}\".format(hostname, group[\"full_path\"]),\n \"scopes\": scopes,\n \"verify_ssl\": verify_ssl,\n \"base_url\": base_url,\n \"webhook_secret\": secret.hexdigest(),\n \"group_id\": group[\"id\"],\n \"include_subgroups\": include_subgroups,\n },\n \"user_identity\": {\n \"type\": \"gitlab\",\n \"external_id\": u\"{}:{}\".format(hostname, user[\"id\"]),\n \"scopes\": scopes,\n \"data\": oauth_data,\n },\n }\n return integration\n\n def setup(self):\n from sentry.plugins.base import bindings\n\n bindings.add(\n \"integration-repository.provider\", GitlabRepositoryProvider, id=\"integrations:gitlab\"\n )\n", "path": "src/sentry/integrations/gitlab/integration.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport six\nfrom six.moves.urllib.parse import 
urlparse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\n\nfrom sentry.web.helpers import render_to_response\nfrom sentry.identity.pipeline import IdentityProviderPipeline\nfrom sentry.identity.gitlab import get_user_info, get_oauth_data\nfrom sentry.identity.gitlab.provider import GitlabIdentityProvider\nfrom sentry.integrations import (\n FeatureDescription,\n IntegrationInstallation,\n IntegrationFeatures,\n IntegrationProvider,\n IntegrationMetadata,\n)\nfrom sentry.integrations.exceptions import ApiError, IntegrationError\nfrom sentry.integrations.repositories import RepositoryMixin\nfrom sentry.pipeline import NestedPipelineView, PipelineView\nfrom sentry.utils.http import absolute_uri\nfrom sentry.utils.hashlib import sha1_text\n\nfrom .client import GitLabApiClient, GitLabSetupClient\nfrom .issues import GitlabIssueBasic\nfrom .repository import GitlabRepositoryProvider\n\nDESCRIPTION = \"\"\"\nConnect your Sentry organization to an organization in your GitLab instance or gitlab.com, enabling the following features:\n\"\"\"\n\nFEATURES = [\n FeatureDescription(\n \"\"\"\n Track commits and releases (learn more\n [here](https://docs.sentry.io/learn/releases/))\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n FeatureDescription(\n \"\"\"\n Resolve Sentry issues via GitLab commits and merge requests by\n including `Fixes PROJ-ID` in the message\n \"\"\",\n IntegrationFeatures.COMMITS,\n ),\n FeatureDescription(\n \"\"\"\n Create GitLab issues from Sentry\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n FeatureDescription(\n \"\"\"\n Link Sentry issues to existing GitLab issues\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n]\n\nmetadata = IntegrationMetadata(\n description=DESCRIPTION.strip(),\n features=FEATURES,\n author=\"The Sentry Team\",\n noun=_(\"Installation\"),\n issue_url=\"https://github.com/getsentry/sentry/issues/\",\n source_url=\"https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/gitlab\",\n aspects={},\n)\n\n\nclass GitlabIntegration(IntegrationInstallation, GitlabIssueBasic, RepositoryMixin):\n repo_search = True\n\n def __init__(self, *args, **kwargs):\n super(GitlabIntegration, self).__init__(*args, **kwargs)\n self.default_identity = None\n\n def get_group_id(self):\n return self.model.metadata[\"group_id\"]\n\n def get_client(self):\n if self.default_identity is None:\n self.default_identity = self.get_default_identity()\n\n return GitLabApiClient(self)\n\n def get_repositories(self, query=None):\n # Note: gitlab projects are the same things as repos everywhere else\n group = self.get_group_id()\n resp = self.get_client().search_group_projects(group, query)\n return [{\"identifier\": repo[\"id\"], \"name\": repo[\"name_with_namespace\"]} for repo in resp]\n\n def search_projects(self, query):\n client = self.get_client()\n group_id = self.get_group_id()\n return client.search_group_projects(group_id, query)\n\n def search_issues(self, project_id, query, iids):\n client = self.get_client()\n return client.search_project_issues(project_id, query, iids)\n\n def error_message_from_json(self, data):\n \"\"\"\n Extract error messages from gitlab API errors.\n Generic errors come in the `error` key while validation errors\n are generally in `message`.\n\n See https://docs.gitlab.com/ee/api/#data-validation-and-error-reporting\n \"\"\"\n if \"message\" in data:\n return data[\"message\"]\n if \"error\" in data:\n return data[\"error\"]\n\n\nclass InstallationForm(forms.Form):\n url = forms.CharField(\n 
label=_(\"GitLab URL\"),\n help_text=_(\n \"The base URL for your GitLab instance, including the host and protocol. \"\n \"Do not include group path.\"\n \"<br>\"\n \"If using gitlab.com, enter https://gitlab.com/\"\n ),\n widget=forms.TextInput(attrs={\"placeholder\": \"https://gitlab.example.com\"}),\n )\n group = forms.CharField(\n label=_(\"GitLab Group Path\"),\n help_text=_(\n \"This can be found in the URL of your group's GitLab page. \"\n \"<br>\"\n \"For example, if your group can be found at \"\n \"https://gitlab.com/my-group/my-subgroup, enter `my-group/my-subgroup`.\"\n ),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"my-group/my-subgroup\")}),\n )\n include_subgroups = forms.BooleanField(\n label=_(\"Include Subgroups\"),\n help_text=_(\"Include projects in subgroups of the GitLab group.\"),\n widget=forms.CheckboxInput(),\n required=False,\n initial=False,\n )\n verify_ssl = forms.BooleanField(\n label=_(\"Verify SSL\"),\n help_text=_(\n \"By default, we verify SSL certificates \"\n \"when delivering payloads to your GitLab instance, \"\n \"and request GitLab to verify SSL when it delivers \"\n \"webhooks to Sentry.\"\n ),\n widget=forms.CheckboxInput(),\n required=False,\n initial=True,\n )\n client_id = forms.CharField(\n label=_(\"GitLab Application ID\"),\n widget=forms.TextInput(\n attrs={\n \"placeholder\": _(\"5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737\")\n }\n ),\n )\n client_secret = forms.CharField(\n label=_(\"GitLab Application Secret\"),\n widget=forms.TextInput(attrs={\"placeholder\": _(\"XXXXXXXXXXXXXXXXXXXXXXXXXXX\")}),\n )\n\n def clean_url(self):\n \"\"\"Strip off trailing / as they cause invalid URLs downstream\"\"\"\n return self.cleaned_data[\"url\"].rstrip(\"/\")\n\n\nclass InstallationConfigView(PipelineView):\n def dispatch(self, request, pipeline):\n if \"goback\" in request.GET:\n pipeline.state.step_index = 0\n return pipeline.current_step()\n\n if request.method == \"POST\":\n form = InstallationForm(request.POST)\n if form.is_valid():\n form_data = form.cleaned_data\n\n pipeline.bind_state(\"installation_data\", form_data)\n\n pipeline.bind_state(\n \"oauth_config_information\",\n {\n \"access_token_url\": u\"{}/oauth/token\".format(form_data.get(\"url\")),\n \"authorize_url\": u\"{}/oauth/authorize\".format(form_data.get(\"url\")),\n \"client_id\": form_data.get(\"client_id\"),\n \"client_secret\": form_data.get(\"client_secret\"),\n \"verify_ssl\": form_data.get(\"verify_ssl\"),\n },\n )\n pipeline.get_logger().info(\n \"gitlab.setup.installation-config-view.success\",\n extra={\n \"base_url\": form_data.get(\"url\"),\n \"client_id\": form_data.get(\"client_id\"),\n \"verify_ssl\": form_data.get(\"verify_ssl\"),\n },\n )\n return pipeline.next_step()\n else:\n form = InstallationForm()\n\n return render_to_response(\n template=\"sentry/integrations/gitlab-config.html\",\n context={\"form\": form},\n request=request,\n )\n\n\nclass InstallationGuideView(PipelineView):\n def dispatch(self, request, pipeline):\n if \"completed_installation_guide\" in request.GET:\n return pipeline.next_step()\n return render_to_response(\n template=\"sentry/integrations/gitlab-config.html\",\n context={\n \"next_url\": \"%s%s\"\n % (absolute_uri(\"extensions/gitlab/setup/\"), \"?completed_installation_guide\"),\n \"setup_values\": [\n {\"label\": \"Name\", \"value\": \"Sentry\"},\n {\"label\": \"Redirect URI\", \"value\": absolute_uri(\"/extensions/gitlab/setup/\")},\n {\"label\": \"Scopes\", \"value\": \"api\"},\n ],\n },\n 
request=request,\n )\n\n\nclass GitlabIntegrationProvider(IntegrationProvider):\n key = \"gitlab\"\n name = \"GitLab\"\n metadata = metadata\n integration_cls = GitlabIntegration\n\n needs_default_identity = True\n\n features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.COMMITS])\n\n setup_dialog_config = {\"width\": 1030, \"height\": 1000}\n\n def _make_identity_pipeline_view(self):\n \"\"\"\n Make the nested identity provider view. It is important that this view is\n not constructed until we reach this step and the\n ``oauth_config_information`` is available in the pipeline state. This\n method should be late bound into the pipeline vies.\n \"\"\"\n identity_pipeline_config = dict(\n oauth_scopes=sorted(GitlabIdentityProvider.oauth_scopes),\n redirect_url=absolute_uri(\"/extensions/gitlab/setup/\"),\n **self.pipeline.fetch_state(\"oauth_config_information\")\n )\n\n return NestedPipelineView(\n bind_key=\"identity\",\n provider_key=\"gitlab\",\n pipeline_cls=IdentityProviderPipeline,\n config=identity_pipeline_config,\n )\n\n def get_group_info(self, access_token, installation_data):\n client = GitLabSetupClient(\n installation_data[\"url\"], access_token, installation_data[\"verify_ssl\"]\n )\n try:\n resp = client.get_group(installation_data[\"group\"])\n return resp.json\n except ApiError as e:\n self.get_logger().info(\n \"gitlab.installation.get-group-info-failure\",\n extra={\n \"base_url\": installation_data[\"url\"],\n \"verify_ssl\": installation_data[\"verify_ssl\"],\n \"group\": installation_data[\"group\"],\n \"include_subgroups\": installation_data[\"include_subgroups\"],\n \"error_message\": six.text_type(e),\n \"error_status\": e.code,\n },\n )\n raise IntegrationError(\"The requested GitLab group could not be found.\")\n\n def get_pipeline_views(self):\n return [\n InstallationGuideView(),\n InstallationConfigView(),\n lambda: self._make_identity_pipeline_view(),\n ]\n\n def build_integration(self, state):\n data = state[\"identity\"][\"data\"]\n oauth_data = get_oauth_data(data)\n user = get_user_info(data[\"access_token\"], state[\"installation_data\"])\n group = self.get_group_info(data[\"access_token\"], state[\"installation_data\"])\n include_subgroups = state[\"installation_data\"][\"include_subgroups\"]\n scopes = sorted(GitlabIdentityProvider.oauth_scopes)\n base_url = state[\"installation_data\"][\"url\"]\n\n hostname = urlparse(base_url).netloc\n verify_ssl = state[\"installation_data\"][\"verify_ssl\"]\n\n # Generate a hash to prevent stray hooks from being accepted\n # use a consistent hash so that reinstalls/shared integrations don't\n # rotate secrets.\n secret = sha1_text(\"\".join([hostname, state[\"installation_data\"][\"client_id\"]]))\n\n integration = {\n \"name\": group[\"full_name\"],\n # Splice the gitlab host and project together to\n # act as unique link between a gitlab instance, group + sentry.\n # This value is embedded then in the webook token that we\n # give to gitlab to allow us to find the integration a hook came\n # from.\n \"external_id\": u\"{}:{}\".format(hostname, group[\"id\"]),\n \"metadata\": {\n \"icon\": group[\"avatar_url\"],\n \"instance\": hostname,\n \"domain_name\": u\"{}/{}\".format(hostname, group[\"full_path\"]),\n \"scopes\": scopes,\n \"verify_ssl\": verify_ssl,\n \"base_url\": base_url,\n \"webhook_secret\": secret.hexdigest(),\n \"group_id\": group[\"id\"],\n \"include_subgroups\": include_subgroups,\n },\n \"user_identity\": {\n \"type\": \"gitlab\",\n \"external_id\": u\"{}:{}\".format(hostname, 
user[\"id\"]),\n \"scopes\": scopes,\n \"data\": oauth_data,\n },\n }\n return integration\n\n def setup(self):\n from sentry.plugins.base import bindings\n\n bindings.add(\n \"integration-repository.provider\", GitlabRepositoryProvider, id=\"integrations:gitlab\"\n )\n", "path": "src/sentry/integrations/gitlab/integration.py"}]} | 3,997 | 141 |
gh_patches_debug_41745 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1051 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify elasticdl.layers.Embedding arguments and constructor according to design doc
According to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding):
```
__init__(
output_dim,
embeddings_initializer='uniform',
mask_zero=False,
input_length=None,
combiner=None,
)
```
--- END ISSUE ---
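Editor's note: a short usage sketch of the requested signature, added for illustration only and not part of the issue. The argument name follows the patch later in this record (`embedding_initializer`, unlike the issue's `embeddings_initializer`), and only construction and shape inference are exercised, since an actual lookup needs a running ElasticDL worker.
```python
from elasticdl.python.elasticdl.layers.embedding import Embedding

layer = Embedding(
    output_dim=16,                    # renamed from `embedding_dim`
    embedding_initializer="uniform",
    mask_zero=False,                  # new flag; masking itself remains a TODO
    input_length=10,                  # new; lets Flatten/Dense infer shapes downstream
    combiner=None,                    # new; reduction op such as "mean"/"sum"/"sqrtn"
)
print(layer.compute_output_shape((None, 10)))  # expected: (None, 10, 16)
```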
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/layers/embedding.py`
Content:
```
1 import tensorflow as tf
2 from tensorflow.python.keras.utils import tf_utils
3
4
5 class Embedding(tf.keras.layers.Layer):
6 """
7 Input: indexes for the embedding entries
8 shape is (batch_size, input_length)
9 Output: Corresponding embedding vectors of the input indexes
10 shape is (batch_size, input_length, embedding_dim)
11 Arguments:
12 embedding_dim: the dimension of the embedding vector
13 embedding_initializer: Initializer for embedding table
14 """
15
16 def __init__(self, embedding_dim, embedding_initializer="uniform"):
17 super(Embedding, self).__init__()
18 self.embedding_dim = embedding_dim
19 self.embedding_initializer = embedding_initializer
20 self.tape = None
21 self.worker = None
22 self.bet_ids_pair = []
23
24 @tf_utils.shape_type_conversion
25 def compute_output_shape(self, input_shape):
26 return input_shape + (self.embedding_dim,)
27
28 @property
29 def name(self):
30 return self._name
31
32 @staticmethod
33 def get_key(name_list):
34 return "-".join(map(str, name_list))
35
36 def lookup_embedding(self, unique_ids):
37 batch_embedding = self.worker.embedding_lookup(
38 unique_ids, self._name, self.embedding_initializer
39 )
40 return batch_embedding
41
42 def call(self, input):
43 ids = tf.convert_to_tensor(input, name="embedding_ids")
44 flat_ids = tf.reshape(ids, [-1])
45 unique_ids, idx = tf.unique(flat_ids)
46 batch_embedding_tensor = tf.py_function(
47 self.lookup_embedding, inp=[unique_ids], Tout=tf.float32
48 )
49 if self.tape:
50 # tape.watch works with eager mode only
51 if not tf.executing_eagerly():
52 raise RuntimeError("tape.watch only works with eager mode")
53 self.tape.watch(batch_embedding_tensor)
54 self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))
55 outputs = tf.gather(batch_embedding_tensor, idx)
56 outputs = tf.reshape(
57 outputs, ids.get_shape().concatenate(self.embedding_dim)
58 )
59 return outputs
60
61 def reset(self):
62 self.bet_ids_pair = []
63 self.tape = None
64
65 def set_tape(self, tape):
66 self.tape = tape
67
68 def set_worker(self, worker):
69 self.worker = worker
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py
--- a/elasticdl/python/elasticdl/layers/embedding.py
+++ b/elasticdl/python/elasticdl/layers/embedding.py
@@ -6,24 +6,77 @@
"""
Input: indexes for the embedding entries
shape is (batch_size, input_length)
- Output: Corresponding embedding vectors of the input indexes
- shape is (batch_size, input_length, embedding_dim)
+ Output:
+ corresponding (combined) embeddings with a shape of
+ (batch_size, input_length, output_dim) if combiner is None
+ (batch_size, output_dim) if combiner is not None
Arguments:
- embedding_dim: the dimension of the embedding vector
+ output_dim: the dimension of the embedding vector
embedding_initializer: Initializer for embedding table
+ mask_zero: Whether or not the input value 0 is a special "padding"
+ value that should be masked out.
+ input_length: Length of input sequences, when it is constant.
+ This argument is required if you are going to connect
+ `Flatten` then `Dense` layers upstream
+ (without it, the shape of the dense outputs cannot be computed).
+ combiner: A string specifying the reduction op or None if not used.
+ "mean", "sqrtn" and "sum" are supported for the reduction op.
+ TODO: support mask_zero
+ TODO: support combiner
+ TODO: support sparse input
"""
- def __init__(self, embedding_dim, embedding_initializer="uniform"):
- super(Embedding, self).__init__()
- self.embedding_dim = embedding_dim
+ def __init__(
+ self,
+ output_dim,
+ embedding_initializer="uniform",
+ mask_zero=False,
+ input_length=None,
+ combiner=None,
+ **kwargs
+ ):
+ if "input_shape" not in kwargs and input_length:
+ kwargs["input_shape"] = (input_length,)
+ super(Embedding, self).__init__(**kwargs)
+
+ self.output_dim = output_dim
self.embedding_initializer = embedding_initializer
+ self.mask_zero = mask_zero
+ self.input_length = input_length
+ self.combiner = combiner
self.tape = None
self.worker = None
self.bet_ids_pair = []
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
- return input_shape + (self.embedding_dim,)
+ # this function is taken from
+ # tf.keras.layers.Embedding.compute_output_shape
+ # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156
+ if self.input_length is None:
+ return input_shape + (self.output_dim,)
+ else:
+ if isinstance(self.input_length, (list, tuple)):
+ in_lens = list(self.input_length)
+ else:
+ in_lens = [self.input_length]
+ if len(in_lens) != len(input_shape) - 1:
+ raise ValueError(
+ '"input_length" is %s, '
+ "but received input has shape %s"
+ % (str(self.input_length), str(input_shape))
+ )
+ else:
+ for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
+ if s1 is not None and s2 is not None and s1 != s2:
+ raise ValueError(
+ '"input_length" is %s, '
+ "but received input has shape %s"
+ % (str(self.input_length), str(input_shape))
+ )
+ elif s1 is None:
+ in_lens[i] = s2
+ return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)
@property
def name(self):
@@ -54,7 +107,7 @@
self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))
outputs = tf.gather(batch_embedding_tensor, idx)
outputs = tf.reshape(
- outputs, ids.get_shape().concatenate(self.embedding_dim)
+ outputs, ids.get_shape().concatenate(self.output_dim)
)
return outputs
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/layers/embedding.py b/elasticdl/python/elasticdl/layers/embedding.py\n--- a/elasticdl/python/elasticdl/layers/embedding.py\n+++ b/elasticdl/python/elasticdl/layers/embedding.py\n@@ -6,24 +6,77 @@\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n- Output: Corresponding embedding vectors of the input indexes\n- shape is (batch_size, input_length, embedding_dim)\n+ Output:\n+ corresponding (combined) embeddings with a shape of\n+ (batch_size, input_length, output_dim) if combiner is None\n+ (batch_size, output_dim) if combiner is not None\n Arguments:\n- embedding_dim: the dimension of the embedding vector\n+ output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n+ mask_zero: Whether or not the input value 0 is a special \"padding\"\n+ value that should be masked out.\n+ input_length: Length of input sequences, when it is constant.\n+ This argument is required if you are going to connect\n+ `Flatten` then `Dense` layers upstream\n+ (without it, the shape of the dense outputs cannot be computed).\n+ combiner: A string specifying the reduction op or None if not used.\n+ \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n+ TODO: support mask_zero\n+ TODO: support combiner\n+ TODO: support sparse input\n \"\"\"\n \n- def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n- super(Embedding, self).__init__()\n- self.embedding_dim = embedding_dim\n+ def __init__(\n+ self,\n+ output_dim,\n+ embedding_initializer=\"uniform\",\n+ mask_zero=False,\n+ input_length=None,\n+ combiner=None,\n+ **kwargs\n+ ):\n+ if \"input_shape\" not in kwargs and input_length:\n+ kwargs[\"input_shape\"] = (input_length,)\n+ super(Embedding, self).__init__(**kwargs)\n+\n+ self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n+ self.mask_zero = mask_zero\n+ self.input_length = input_length\n+ self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n \n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n- return input_shape + (self.embedding_dim,)\n+ # this function is taken from\n+ # tf.keras.layers.Embedding.compute_output_shape\n+ # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n+ if self.input_length is None:\n+ return input_shape + (self.output_dim,)\n+ else:\n+ if isinstance(self.input_length, (list, tuple)):\n+ in_lens = list(self.input_length)\n+ else:\n+ in_lens = [self.input_length]\n+ if len(in_lens) != len(input_shape) - 1:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ else:\n+ for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n+ if s1 is not None and s2 is not None and s1 != s2:\n+ raise ValueError(\n+ '\"input_length\" is %s, '\n+ \"but received input has shape %s\"\n+ % (str(self.input_length), str(input_shape))\n+ )\n+ elif s1 is None:\n+ in_lens[i] = s2\n+ return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n \n @property\n def name(self):\n@@ -54,7 +107,7 @@\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n- outputs, ids.get_shape().concatenate(self.embedding_dim)\n+ outputs, ids.get_shape().concatenate(self.output_dim)\n )\n return 
outputs\n", "issue": "Modify elasticdl.layers.Embedding arguments and constructor according to design doc\nAccording to [design doc](https://github.com/wangkuiyi/elasticdl/blob/develop/elasticdl/doc/distributed_embedding_layer_design.md#elasticdllayersembedding):\r\n\r\n```\r\n__init__(\r\n output_dim,\r\n embeddings_initializer='uniform',\r\n mask_zero=False,\r\n input_length=None,\r\n combiner=None,\r\n)\r\n```\n", "before_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n Output: Corresponding embedding vectors of the input indexes\n shape is (batch_size, input_length, embedding_dim)\n Arguments:\n embedding_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n \"\"\"\n\n def __init__(self, embedding_dim, embedding_initializer=\"uniform\"):\n super(Embedding, self).__init__()\n self.embedding_dim = embedding_dim\n self.embedding_initializer = embedding_initializer\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape + (self.embedding_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.embedding_lookup(\n unique_ids, self._name, self.embedding_initializer\n )\n return batch_embedding\n\n def call(self, input):\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.embedding_dim)\n )\n return outputs\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}], "after_files": [{"content": "import tensorflow as tf\nfrom tensorflow.python.keras.utils import tf_utils\n\n\nclass Embedding(tf.keras.layers.Layer):\n \"\"\"\n Input: indexes for the embedding entries\n shape is (batch_size, input_length)\n Output:\n corresponding (combined) embeddings with a shape of\n (batch_size, input_length, output_dim) if combiner is None\n (batch_size, output_dim) if combiner is not None\n Arguments:\n output_dim: the dimension of the embedding vector\n embedding_initializer: Initializer for embedding table\n mask_zero: Whether or not the input value 0 is a special \"padding\"\n value that should be masked out.\n input_length: Length of input sequences, when it is constant.\n This argument is required if you are going to connect\n `Flatten` then `Dense` layers upstream\n (without it, the shape of the dense outputs cannot be computed).\n combiner: A string specifying the reduction op or None if not used.\n \"mean\", \"sqrtn\" and \"sum\" are supported for the reduction op.\n 
TODO: support mask_zero\n TODO: support combiner\n TODO: support sparse input\n \"\"\"\n\n def __init__(\n self,\n output_dim,\n embedding_initializer=\"uniform\",\n mask_zero=False,\n input_length=None,\n combiner=None,\n **kwargs\n ):\n if \"input_shape\" not in kwargs and input_length:\n kwargs[\"input_shape\"] = (input_length,)\n super(Embedding, self).__init__(**kwargs)\n\n self.output_dim = output_dim\n self.embedding_initializer = embedding_initializer\n self.mask_zero = mask_zero\n self.input_length = input_length\n self.combiner = combiner\n self.tape = None\n self.worker = None\n self.bet_ids_pair = []\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n # this function is taken from\n # tf.keras.layers.Embedding.compute_output_shape\n # https://github.com/tensorflow/tensorflow/blob/3f3c728bf80e0fd6653744318cbbfe1454c6ddca/tensorflow/python/keras/layers/embeddings.py#L156\n if self.input_length is None:\n return input_shape + (self.output_dim,)\n else:\n if isinstance(self.input_length, (list, tuple)):\n in_lens = list(self.input_length)\n else:\n in_lens = [self.input_length]\n if len(in_lens) != len(input_shape) - 1:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n else:\n for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):\n if s1 is not None and s2 is not None and s1 != s2:\n raise ValueError(\n '\"input_length\" is %s, '\n \"but received input has shape %s\"\n % (str(self.input_length), str(input_shape))\n )\n elif s1 is None:\n in_lens[i] = s2\n return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)\n\n @property\n def name(self):\n return self._name\n\n @staticmethod\n def get_key(name_list):\n return \"-\".join(map(str, name_list))\n\n def lookup_embedding(self, unique_ids):\n batch_embedding = self.worker.embedding_lookup(\n unique_ids, self._name, self.embedding_initializer\n )\n return batch_embedding\n\n def call(self, input):\n ids = tf.convert_to_tensor(input, name=\"embedding_ids\")\n flat_ids = tf.reshape(ids, [-1])\n unique_ids, idx = tf.unique(flat_ids)\n batch_embedding_tensor = tf.py_function(\n self.lookup_embedding, inp=[unique_ids], Tout=tf.float32\n )\n if self.tape:\n # tape.watch works with eager mode only\n if not tf.executing_eagerly():\n raise RuntimeError(\"tape.watch only works with eager mode\")\n self.tape.watch(batch_embedding_tensor)\n self.bet_ids_pair.append((batch_embedding_tensor, unique_ids))\n outputs = tf.gather(batch_embedding_tensor, idx)\n outputs = tf.reshape(\n outputs, ids.get_shape().concatenate(self.output_dim)\n )\n return outputs\n\n def reset(self):\n self.bet_ids_pair = []\n self.tape = None\n\n def set_tape(self, tape):\n self.tape = tape\n\n def set_worker(self, worker):\n self.worker = worker\n", "path": "elasticdl/python/elasticdl/layers/embedding.py"}]} | 972 | 1,008 |
gh_patches_debug_28888 | rasdani/github-patches | git_diff | chainer__chainer-1376 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GPU implementation of transpose_sequence is too slow
As @jnory mentioned in #1174, `transpose_sequence` is too slow. We need to make a single kernel to make a transposed matrix.
--- END ISSUE ---
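Editor's note: the sketch below is only an illustration of the optimization being asked for, not the actual patch. Using the docstring example from the file shown next, it flattens the sequences, precomputes output-row offsets, and fills the transposed layout with one vectorized scatter; on the GPU the same index arithmetic would be expressed as a single elementwise kernel instead of a Python double loop.
```python
import numpy as np

xs = [np.array([0, 1, 2, 3]), np.array([4, 5]), np.array([6])]          # sorted by length
lengths = [sum(len(x) > i for x in xs) for i in range(len(xs[0]))]      # output row sizes: [3, 2, 1, 1]

flat = np.concatenate(xs)                                   # one contiguous buffer
off_out = np.concatenate([[0], np.cumsum(lengths)])         # start offset of each output row
rows = np.repeat(np.arange(len(xs)), [len(x) for x in xs])  # which input sequence each element came from
cols = np.concatenate([np.arange(len(x)) for x in xs])      # its position inside that sequence

out = np.empty_like(flat)
out[off_out[cols] + rows] = flat                            # the "single kernel" step
print(np.split(out, off_out[1:-1]))   # [array([0, 4, 6]), array([1, 5]), array([2]), array([3])]
```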
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/transpose_sequence.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 def _transpose(xs, length):
9 xp = cuda.get_array_module(*xs)
10 lengths = numpy.zeros(length, dtype='i')
11 for i, x in enumerate(xs):
12 lengths[0:len(x)] = i + 1
13 dtype = xs[0].dtype
14 unit = xs[0].shape[1:]
15 outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
16
17 for i, x in enumerate(xs):
18 for p, xi in enumerate(x):
19 outs[p][i] = xi
20
21 return outs
22
23
24 class TransposeSequence(function.Function):
25
26 """Function that transposes a list of Variables."""
27
28 def check_type_forward(self, xs_type):
29 for p, n in zip(xs_type, xs_type[1:]):
30 type_check.expect(
31 p.shape[0] >= n.shape[0],
32 p.shape[1:] == n.shape[1:],
33 )
34
35 def forward(self, xs):
36 if len(xs) == 0:
37 return ()
38 return _transpose(xs, len(xs[0]))
39
40 def backward(self, xs, gs):
41 return _transpose(gs, len(xs))
42
43
44 def transpose_sequence(xs):
45 """Transpose a list of Variables.
46
47 This function transposes a list of :class:`~chainer.Variable` s and returns
48 a list of :class:`Variable` s.
49 For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function
50 returns ``[(0, 4, 6), (1, 5), (2), (3)]``.
51 Note that a given list needs to be sorted by each length of
52 :class:`~chainer.Variable`.
53
54 Args:
55 xs (list of ~chainer.Variable): Variables to transpose.
56
57 Returns:
58 tuple or Variable: Transposed list.
59 """
60 ys = TransposeSequence()(*xs)
61 if not isinstance(ys, tuple):
62 ys = (ys,)
63 return ys
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py
--- a/chainer/functions/array/transpose_sequence.py
+++ b/chainer/functions/array/transpose_sequence.py
@@ -6,17 +6,55 @@
def _transpose(xs, length):
- xp = cuda.get_array_module(*xs)
- lengths = numpy.zeros(length, dtype='i')
- for i, x in enumerate(xs):
- lengths[0:len(x)] = i + 1
- dtype = xs[0].dtype
- unit = xs[0].shape[1:]
- outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
+ if length == 0:
+ return ()
+ xp = cuda.get_array_module(*xs)
+ lengths = numpy.empty(length, dtype='i')
+ end = length
for i, x in enumerate(xs):
- for p, xi in enumerate(x):
- outs[p][i] = xi
+ lengths[len(x):end] = i
+ end = len(x)
+ lengths[0:end] = len(xs)
+
+ if xp is numpy:
+ dtype = xs[0].dtype
+ unit = xs[0].shape[1:]
+
+ outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])
+ for i, x in enumerate(xs):
+ for p, xi in enumerate(x):
+ outs[p][i] = xi
+
+ else:
+ offsets1 = numpy.empty(len(xs) + 1, dtype='i')
+ offsets1[0] = 0
+ numpy.cumsum([len(x) for x in xs], out=offsets1[1:])
+
+ offsets2 = numpy.empty(length + 1, dtype='i')
+ offsets2[0] = 0
+ numpy.cumsum(lengths, dtype='i', out=offsets2[1:])
+
+ x = xp.concatenate(xs, axis=0)
+ o = xp.empty_like(x)
+ unit = xs[0].size // len(xs[0])
+ size = length * len(xs) * unit
+ cuda.elementwise(
+ 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',
+ 'raw T hs',
+ '''
+ int ind = i / unit;
+ int off = i - ind * unit;
+ int y = ind / len;
+ int x = ind - y * len;
+ if (off2[x] + y < off2[x + 1]) {
+ hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];
+ }
+ ''',
+ 'transpose_sequence'
+ )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,
+ size=size)
+ outs = tuple(xp.split(o, offsets2[1:-1]))
return outs
| {"golden_diff": "diff --git a/chainer/functions/array/transpose_sequence.py b/chainer/functions/array/transpose_sequence.py\n--- a/chainer/functions/array/transpose_sequence.py\n+++ b/chainer/functions/array/transpose_sequence.py\n@@ -6,17 +6,55 @@\n \n \n def _transpose(xs, length):\n- xp = cuda.get_array_module(*xs)\n- lengths = numpy.zeros(length, dtype='i')\n- for i, x in enumerate(xs):\n- lengths[0:len(x)] = i + 1\n- dtype = xs[0].dtype\n- unit = xs[0].shape[1:]\n- outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ if length == 0:\n+ return ()\n \n+ xp = cuda.get_array_module(*xs)\n+ lengths = numpy.empty(length, dtype='i')\n+ end = length\n for i, x in enumerate(xs):\n- for p, xi in enumerate(x):\n- outs[p][i] = xi\n+ lengths[len(x):end] = i\n+ end = len(x)\n+ lengths[0:end] = len(xs)\n+\n+ if xp is numpy:\n+ dtype = xs[0].dtype\n+ unit = xs[0].shape[1:]\n+\n+ outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n+ for i, x in enumerate(xs):\n+ for p, xi in enumerate(x):\n+ outs[p][i] = xi\n+\n+ else:\n+ offsets1 = numpy.empty(len(xs) + 1, dtype='i')\n+ offsets1[0] = 0\n+ numpy.cumsum([len(x) for x in xs], out=offsets1[1:])\n+\n+ offsets2 = numpy.empty(length + 1, dtype='i')\n+ offsets2[0] = 0\n+ numpy.cumsum(lengths, dtype='i', out=offsets2[1:])\n+\n+ x = xp.concatenate(xs, axis=0)\n+ o = xp.empty_like(x)\n+ unit = xs[0].size // len(xs[0])\n+ size = length * len(xs) * unit\n+ cuda.elementwise(\n+ 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',\n+ 'raw T hs',\n+ '''\n+ int ind = i / unit;\n+ int off = i - ind * unit;\n+ int y = ind / len;\n+ int x = ind - y * len;\n+ if (off2[x] + y < off2[x + 1]) {\n+ hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];\n+ }\n+ ''',\n+ 'transpose_sequence'\n+ )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,\n+ size=size)\n+ outs = tuple(xp.split(o, offsets2[1:-1]))\n \n return outs\n", "issue": "GPU implementation of transpose_sequence is too slow\nAs @jnory mentioned in #1174, `transpose_sequence` is too slow. 
We need to make a single kernel to make a transposed matrix.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _transpose(xs, length):\n xp = cuda.get_array_module(*xs)\n lengths = numpy.zeros(length, dtype='i')\n for i, x in enumerate(xs):\n lengths[0:len(x)] = i + 1\n dtype = xs[0].dtype\n unit = xs[0].shape[1:]\n outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n\n for i, x in enumerate(xs):\n for p, xi in enumerate(x):\n outs[p][i] = xi\n\n return outs\n\n\nclass TransposeSequence(function.Function):\n\n \"\"\"Function that transposes a list of Variables.\"\"\"\n\n def check_type_forward(self, xs_type):\n for p, n in zip(xs_type, xs_type[1:]):\n type_check.expect(\n p.shape[0] >= n.shape[0],\n p.shape[1:] == n.shape[1:],\n )\n\n def forward(self, xs):\n if len(xs) == 0:\n return ()\n return _transpose(xs, len(xs[0]))\n\n def backward(self, xs, gs):\n return _transpose(gs, len(xs))\n\n\ndef transpose_sequence(xs):\n \"\"\"Transpose a list of Variables.\n\n This function transposes a list of :class:`~chainer.Variable` s and returns\n a list of :class:`Variable` s.\n For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function\n returns ``[(0, 4, 6), (1, 5), (2), (3)]``.\n Note that a given list needs to be sorted by each length of\n :class:`~chainer.Variable`.\n\n Args:\n xs (list of ~chainer.Variable): Variables to transpose.\n\n Returns:\n tuple or Variable: Transposed list.\n \"\"\"\n ys = TransposeSequence()(*xs)\n if not isinstance(ys, tuple):\n ys = (ys,)\n return ys\n", "path": "chainer/functions/array/transpose_sequence.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _transpose(xs, length):\n if length == 0:\n return ()\n\n xp = cuda.get_array_module(*xs)\n lengths = numpy.empty(length, dtype='i')\n end = length\n for i, x in enumerate(xs):\n lengths[len(x):end] = i\n end = len(x)\n lengths[0:end] = len(xs)\n\n if xp is numpy:\n dtype = xs[0].dtype\n unit = xs[0].shape[1:]\n\n outs = tuple([xp.empty((l,) + unit, dtype=dtype) for l in lengths])\n for i, x in enumerate(xs):\n for p, xi in enumerate(x):\n outs[p][i] = xi\n\n else:\n offsets1 = numpy.empty(len(xs) + 1, dtype='i')\n offsets1[0] = 0\n numpy.cumsum([len(x) for x in xs], out=offsets1[1:])\n\n offsets2 = numpy.empty(length + 1, dtype='i')\n offsets2[0] = 0\n numpy.cumsum(lengths, dtype='i', out=offsets2[1:])\n\n x = xp.concatenate(xs, axis=0)\n o = xp.empty_like(x)\n unit = xs[0].size // len(xs[0])\n size = length * len(xs) * unit\n cuda.elementwise(\n 'int32 len, int32 unit, raw int32 off1, raw int32 off2, raw T vs',\n 'raw T hs',\n '''\n int ind = i / unit;\n int off = i - ind * unit;\n int y = ind / len;\n int x = ind - y * len;\n if (off2[x] + y < off2[x + 1]) {\n hs[(off2[x] + y) * unit + off] = vs[(off1[y] + x) * unit + off];\n }\n ''',\n 'transpose_sequence'\n )(length, unit, cuda.to_gpu(offsets1), cuda.to_gpu(offsets2), x, o,\n size=size)\n outs = tuple(xp.split(o, offsets2[1:-1]))\n\n return outs\n\n\nclass TransposeSequence(function.Function):\n\n \"\"\"Function that transposes a list of Variables.\"\"\"\n\n def check_type_forward(self, xs_type):\n for p, n in zip(xs_type, xs_type[1:]):\n type_check.expect(\n p.shape[0] >= n.shape[0],\n p.shape[1:] == n.shape[1:],\n )\n\n def forward(self, xs):\n if len(xs) == 0:\n return ()\n return _transpose(xs, len(xs[0]))\n\n def 
backward(self, xs, gs):\n return _transpose(gs, len(xs))\n\n\ndef transpose_sequence(xs):\n \"\"\"Transpose a list of Variables.\n\n This function transposes a list of :class:`~chainer.Variable` s and returns\n a list of :class:`Variable` s.\n For exampe a user gives ``[(0, 1, 2, 3), (4, 5), (6)]``, the function\n returns ``[(0, 4, 6), (1, 5), (2), (3)]``.\n Note that a given list needs to be sorted by each length of\n :class:`~chainer.Variable`.\n\n Args:\n xs (list of ~chainer.Variable): Variables to transpose.\n\n Returns:\n tuple or Variable: Transposed list.\n \"\"\"\n ys = TransposeSequence()(*xs)\n if not isinstance(ys, tuple):\n ys = (ys,)\n return ys\n", "path": "chainer/functions/array/transpose_sequence.py"}]} | 895 | 695 |
gh_patches_debug_35027 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-4564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] Differentiate Between Unlimited and "Purchased" Incubator use.
Blue incubators are difficult to get so I use it only for 10km eggs. Is there any configuration to prevent bot from using Blue Incubator?
--- END ISSUE ---
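Editor's note: the snippet below is a hypothetical sketch, not an existing PokemonGo-Bot option; the `infinite_only` flag, the helper name, and the hard-coded item id are assumptions made for illustration. It only shows where a config switch like the one requested could filter out limited-use ("purchased") incubators before they are applied.
```python
UNLIMITED_INCUBATOR_ITEM_ID = 901  # assumed value from the game's item enum; adjust if it differs

def pick_incubators(ready_incubators, infinite_only=False):
    """Return only the incubators the task may apply.

    `ready_incubators` is a list of dicts as built in _check_inventory(),
    assumed here to also carry the incubator's `item_id`.
    """
    if not infinite_only:
        return ready_incubators
    return [inc for inc in ready_incubators
            if inc.get("item_id") == UNLIMITED_INCUBATOR_ITEM_ID]

# e.g. read `infinite_only` in _process_config() and loop over
# pick_incubators(self.ready_incubators, self.infinite_only) in _apply_incubators().
```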
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pokemongo_bot/cell_workers/incubate_eggs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from pokemongo_bot.human_behaviour import sleep
4 from pokemongo_bot.base_task import BaseTask
5
6
7 class IncubateEggs(BaseTask):
8 SUPPORTED_TASK_API_VERSION = 1
9
10 last_km_walked = 0
11
12 def initialize(self):
13 self.next_update = None
14 self.ready_incubators = []
15 self.used_incubators = []
16 self.eggs = []
17 self.km_walked = 0
18 self.hatching_animation_delay = 4.20
19 self.max_iv = 45.0
20
21 self._process_config()
22
23 def _process_config(self):
24 self.longer_eggs_first = self.config.get("longer_eggs_first", True)
25 self.min_interval = self.config.get('min_interval', 120)
26
27 def work(self):
28 try:
29 self._check_inventory()
30 except:
31 return
32
33 if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:
34 self.used_incubators.sort(key=lambda x: x.get("km"))
35 km_left = self.used_incubators[0]['km']-self.km_walked
36 if km_left <= 0:
37 self._hatch_eggs()
38 else:
39 self.bot.metrics.next_hatching_km(km_left)
40
41 if self._should_print():
42 self._print_eggs()
43 self._compute_next_update()
44
45 IncubateEggs.last_km_walked = self.km_walked
46
47 sorting = self.longer_eggs_first
48 self.eggs.sort(key=lambda x: x.get("km"), reverse=sorting)
49
50 if self.ready_incubators:
51 self._apply_incubators()
52
53 def _apply_incubators(self):
54 for incubator in self.ready_incubators:
55 if incubator.get('used', False):
56 continue
57 for egg in self.eggs:
58 if egg["used"] or egg["km"] == -1:
59 continue
60 self.emit_event(
61 'incubate_try',
62 level='debug',
63 formatted="Attempting to apply incubator {incubator_id} to egg {egg_id}",
64 data={
65 'incubator_id': incubator['id'],
66 'egg_id': egg['id']
67 }
68 )
69 ret = self.bot.api.use_item_egg_incubator(
70 item_id=incubator["id"],
71 pokemon_id=egg["id"]
72 )
73 if ret:
74 code = ret.get("responses", {}).get("USE_ITEM_EGG_INCUBATOR", {}).get("result", 0)
75 if code == 1:
76 self.emit_event(
77 'incubate',
78 formatted='Incubating a {distance_in_km} egg.',
79 data={
80 'distance_in_km': str(egg['km'])
81 }
82 )
83 egg["used"] = True
84 incubator["used"] = True
85 break
86 elif code == 5 or code == 7:
87 self.emit_event(
88 'incubator_already_used',
89 level='debug',
90 formatted='Incubator in use.',
91 )
92 incubator["used"] = True
93 break
94 elif code == 6:
95 self.emit_event(
96 'egg_already_incubating',
97 level='debug',
98 formatted='Egg already incubating',
99 )
100 egg["used"] = True
101
102 def _check_inventory(self, lookup_ids=[]):
103 inv = {}
104 response_dict = self.bot.api.get_inventory()
105 matched_pokemon = []
106 temp_eggs = []
107 temp_used_incubators = []
108 temp_ready_incubators = []
109 inv = reduce(
110 dict.__getitem__,
111 ["responses", "GET_INVENTORY", "inventory_delta", "inventory_items"],
112 response_dict
113 )
114 for inv_data in inv:
115 inv_data = inv_data.get("inventory_item_data", {})
116 if "egg_incubators" in inv_data:
117 temp_used_incubators = []
118 temp_ready_incubators = []
119 incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
120 if isinstance(incubators, basestring): # checking for old response
121 incubators = [incubators]
122 for incubator in incubators:
123 if 'pokemon_id' in incubator:
124 start_km = incubator.get('start_km_walked', 9001)
125 km_walked = incubator.get('target_km_walked', 9001)
126 temp_used_incubators.append({
127 "id": incubator.get('id', -1),
128 "km": km_walked,
129 "km_needed": (km_walked - start_km)
130 })
131 else:
132 temp_ready_incubators.append({
133 "id": incubator.get('id', -1)
134 })
135 continue
136 if "pokemon_data" in inv_data:
137 pokemon = inv_data.get("pokemon_data", {})
138 if pokemon.get("is_egg", False) and "egg_incubator_id" not in pokemon:
139 temp_eggs.append({
140 "id": pokemon.get("id", -1),
141 "km": pokemon.get("egg_km_walked_target", -1),
142 "used": False
143 })
144 elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:
145 pokemon.update({
146 "iv": [
147 pokemon.get('individual_attack', 0),
148 pokemon.get('individual_defense', 0),
149 pokemon.get('individual_stamina', 0)
150 ]})
151 matched_pokemon.append(pokemon)
152 continue
153 if "player_stats" in inv_data:
154 self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
155 if temp_used_incubators:
156 self.used_incubators = temp_used_incubators
157 if temp_ready_incubators:
158 self.ready_incubators = temp_ready_incubators
159 if temp_eggs:
160 self.eggs = temp_eggs
161 return matched_pokemon
162
163 def _hatch_eggs(self):
164 response_dict = self.bot.api.get_hatched_eggs()
165 log_color = 'green'
166 try:
167 result = reduce(dict.__getitem__, ["responses", "GET_HATCHED_EGGS"], response_dict)
168 except KeyError:
169 return
170 pokemon_ids = []
171 if 'pokemon_id' in result:
172 pokemon_ids = [id for id in result['pokemon_id']]
173 stardust = result.get('stardust_awarded', "error")
174 candy = result.get('candy_awarded', "error")
175 xp = result.get('experience_awarded', "error")
176 sleep(self.hatching_animation_delay)
177 self.bot.latest_inventory = None
178 try:
179 pokemon_data = self._check_inventory(pokemon_ids)
180 for pokemon in pokemon_data:
181 # pokemon ids seem to be offset by one
182 if pokemon['pokemon_id']!=-1:
183 pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']
184 else:
185 pokemon['name'] = "error"
186 except:
187 pokemon_data = [{"name":"error","cp":"error","iv":"error"}]
188 if not pokemon_ids or pokemon_data[0]['name'] == "error":
189 self.emit_event(
190 'egg_hatched',
191 data={
192 'pokemon': 'error',
193 'cp': 'error',
194 'iv': 'error',
195 'exp': 'error',
196 'stardust': 'error',
197 'candy': 'error',
198 }
199 )
200 return
201 for i in range(len(pokemon_data)):
202 msg = "Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies."
203 self.bot.metrics.hatched_eggs(1)
204 self.emit_event(
205 'egg_hatched',
206 formatted=msg,
207 data={
208 'pokemon': pokemon_data[i]['name'],
209 'cp': pokemon_data[i]['cp'],
210 'iv': "{} {}".format(
211 "/".join(map(str, pokemon_data[i]['iv'])),
212 round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)
213 ),
214 'exp': xp[i],
215 'stardust': stardust[i],
216 'candy': candy[i],
217 }
218 )
219
220 def _print_eggs(self):
221 if not self.used_incubators:
222 return
223
224 self.used_incubators.sort(key=lambda x: x.get("km"))
225
226 eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]
227
228 self.emit_event(
229 'next_egg_incubates',
230 formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',
231 data={
232 'eggs_left': len(self.eggs),
233 'eggs_inc': len(self.used_incubators),
234 'eggs': ', '.join(eggs)
235 }
236 )
237
238 def _should_print(self):
239 """
240 Returns a value indicating whether the eggs should be displayed.
241 :return: True if the stats should be displayed; otherwise, False.
242 :rtype: bool
243 """
244 return self.next_update is None or datetime.now() >= self.next_update
245
246 def _compute_next_update(self):
247 """
248 Computes the next update datetime based on the minimum update interval.
249 :return: Nothing.
250 :rtype: None
251 """
252 self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py
--- a/pokemongo_bot/cell_workers/incubate_eggs.py
+++ b/pokemongo_bot/cell_workers/incubate_eggs.py
@@ -23,7 +23,10 @@
def _process_config(self):
self.longer_eggs_first = self.config.get("longer_eggs_first", True)
self.min_interval = self.config.get('min_interval', 120)
-
+
+ self.breakable_incubator = self.config.get("breakable", [])
+ self.infinite_incubator = self.config.get("infinite", [])
+
def work(self):
try:
self._check_inventory()
@@ -57,6 +60,19 @@
for egg in self.eggs:
if egg["used"] or egg["km"] == -1:
continue
+
+ if self.breakable_incubator:
+ # test if the incubator is of type breakable
+ if incubator.get('uses_remaining') is not None:
+ if egg["km"] not in self.breakable_incubator:
+ continue
+
+ if self.infinite_incubator:
+ # test if the incubator is of type infinite
+ if incubator.get('uses_remaining') is None:
+ if egg["km"] not in self.infinite_incubator:
+ continue
+
self.emit_event(
'incubate_try',
level='debug',
@@ -119,7 +135,7 @@
incubators = inv_data.get("egg_incubators", {}).get("egg_incubator",[])
if isinstance(incubators, basestring): # checking for old response
incubators = [incubators]
- for incubator in incubators:
+ for incubator in incubators:
if 'pokemon_id' in incubator:
start_km = incubator.get('start_km_walked', 9001)
km_walked = incubator.get('target_km_walked', 9001)
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/incubate_eggs.py b/pokemongo_bot/cell_workers/incubate_eggs.py\n--- a/pokemongo_bot/cell_workers/incubate_eggs.py\n+++ b/pokemongo_bot/cell_workers/incubate_eggs.py\n@@ -23,7 +23,10 @@\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n-\n+ \n+ self.breakable_incubator = self.config.get(\"breakable\", [])\n+ self.infinite_incubator = self.config.get(\"infinite\", [])\n+ \n def work(self):\n try:\n self._check_inventory()\n@@ -57,6 +60,19 @@\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n+ \n+ if self.breakable_incubator:\n+ # test if the incubator is of type breakable\n+ if incubator.get('uses_remaining') is not None:\n+ if egg[\"km\"] not in self.breakable_incubator:\n+ continue\n+ \n+ if self.infinite_incubator:\n+ # test if the incubator is of type infinite\n+ if incubator.get('uses_remaining') is None:\n+ if egg[\"km\"] not in self.infinite_incubator:\n+ continue\n+ \n self.emit_event(\n 'incubate_try',\n level='debug',\n@@ -119,7 +135,7 @@\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n- for incubator in incubators:\n+ for incubator in incubators: \n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n", "issue": "[Feature Request] Differentiate Between Unlimited and \"Purchased\" Incubator use.\nBlue incubators are difficult to get so I use it only for 10km eggs. Is there any configuration to prevent bot from using Blue Incubator?\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n\n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if self._should_print():\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n sorting = self.longer_eggs_first\n self.eggs.sort(key=lambda x: x.get(\"km\"), reverse=sorting)\n\n if self.ready_incubators:\n self._apply_incubators()\n\n def _apply_incubators(self):\n for incubator in self.ready_incubators:\n if incubator.get('used', False):\n continue\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n 
item_id=incubator[\"id\"],\n pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n inv = {}\n response_dict = self.bot.api.get_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_incubators = []\n inv = reduce(\n dict.__getitem__,\n [\"responses\", \"GET_INVENTORY\", \"inventory_delta\", \"inventory_items\"],\n response_dict\n )\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n temp_used_incubators = []\n temp_ready_incubators = []\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators:\n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n temp_ready_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_incubators:\n self.ready_incubators = temp_ready_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], response_dict)\n except KeyError:\n return\n pokemon_ids = []\n if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n self.bot.latest_inventory = None\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = 
[{\"name\":\"error\",\"cp\":\"error\",\"iv\":\"error\"}]\n if not pokemon_ids or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n \n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n \n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom pokemongo_bot.human_behaviour import sleep\nfrom pokemongo_bot.base_task import BaseTask\n\n\nclass IncubateEggs(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n last_km_walked = 0\n\n def initialize(self):\n self.next_update = None\n self.ready_incubators = []\n self.used_incubators = []\n self.eggs = []\n self.km_walked = 0\n self.hatching_animation_delay = 4.20\n self.max_iv = 45.0\n\n self._process_config()\n\n def _process_config(self):\n self.longer_eggs_first = self.config.get(\"longer_eggs_first\", True)\n self.min_interval = self.config.get('min_interval', 120)\n \n self.breakable_incubator = self.config.get(\"breakable\", [])\n self.infinite_incubator = self.config.get(\"infinite\", [])\n \n def work(self):\n try:\n self._check_inventory()\n except:\n return\n\n if self.used_incubators and IncubateEggs.last_km_walked != self.km_walked:\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n km_left = self.used_incubators[0]['km']-self.km_walked\n if km_left <= 0:\n self._hatch_eggs()\n else:\n self.bot.metrics.next_hatching_km(km_left)\n\n if self._should_print():\n self._print_eggs()\n self._compute_next_update()\n\n IncubateEggs.last_km_walked = self.km_walked\n\n sorting = self.longer_eggs_first\n self.eggs.sort(key=lambda x: x.get(\"km\"), reverse=sorting)\n\n if self.ready_incubators:\n self._apply_incubators()\n\n def _apply_incubators(self):\n for incubator in self.ready_incubators:\n if incubator.get('used', False):\n continue\n for egg in self.eggs:\n if egg[\"used\"] or egg[\"km\"] == -1:\n continue\n \n if self.breakable_incubator:\n # test if the 
incubator is of type breakable\n if incubator.get('uses_remaining') is not None:\n if egg[\"km\"] not in self.breakable_incubator:\n continue\n \n if self.infinite_incubator:\n # test if the incubator is of type infinite\n if incubator.get('uses_remaining') is None:\n if egg[\"km\"] not in self.infinite_incubator:\n continue\n \n self.emit_event(\n 'incubate_try',\n level='debug',\n formatted=\"Attempting to apply incubator {incubator_id} to egg {egg_id}\",\n data={\n 'incubator_id': incubator['id'],\n 'egg_id': egg['id']\n }\n )\n ret = self.bot.api.use_item_egg_incubator(\n item_id=incubator[\"id\"],\n pokemon_id=egg[\"id\"]\n )\n if ret:\n code = ret.get(\"responses\", {}).get(\"USE_ITEM_EGG_INCUBATOR\", {}).get(\"result\", 0)\n if code == 1:\n self.emit_event(\n 'incubate',\n formatted='Incubating a {distance_in_km} egg.',\n data={\n 'distance_in_km': str(egg['km'])\n }\n )\n egg[\"used\"] = True\n incubator[\"used\"] = True\n break\n elif code == 5 or code == 7:\n self.emit_event(\n 'incubator_already_used',\n level='debug',\n formatted='Incubator in use.',\n )\n incubator[\"used\"] = True\n break\n elif code == 6:\n self.emit_event(\n 'egg_already_incubating',\n level='debug',\n formatted='Egg already incubating',\n )\n egg[\"used\"] = True\n\n def _check_inventory(self, lookup_ids=[]):\n inv = {}\n response_dict = self.bot.api.get_inventory()\n matched_pokemon = []\n temp_eggs = []\n temp_used_incubators = []\n temp_ready_incubators = []\n inv = reduce(\n dict.__getitem__,\n [\"responses\", \"GET_INVENTORY\", \"inventory_delta\", \"inventory_items\"],\n response_dict\n )\n for inv_data in inv:\n inv_data = inv_data.get(\"inventory_item_data\", {})\n if \"egg_incubators\" in inv_data:\n temp_used_incubators = []\n temp_ready_incubators = []\n incubators = inv_data.get(\"egg_incubators\", {}).get(\"egg_incubator\",[])\n if isinstance(incubators, basestring): # checking for old response\n incubators = [incubators]\n for incubator in incubators: \n if 'pokemon_id' in incubator:\n start_km = incubator.get('start_km_walked', 9001)\n km_walked = incubator.get('target_km_walked', 9001)\n temp_used_incubators.append({\n \"id\": incubator.get('id', -1),\n \"km\": km_walked,\n \"km_needed\": (km_walked - start_km)\n })\n else:\n temp_ready_incubators.append({\n \"id\": incubator.get('id', -1)\n })\n continue\n if \"pokemon_data\" in inv_data:\n pokemon = inv_data.get(\"pokemon_data\", {})\n if pokemon.get(\"is_egg\", False) and \"egg_incubator_id\" not in pokemon:\n temp_eggs.append({\n \"id\": pokemon.get(\"id\", -1),\n \"km\": pokemon.get(\"egg_km_walked_target\", -1),\n \"used\": False\n })\n elif 'is_egg' not in pokemon and pokemon['id'] in lookup_ids:\n pokemon.update({\n \"iv\": [\n pokemon.get('individual_attack', 0),\n pokemon.get('individual_defense', 0),\n pokemon.get('individual_stamina', 0)\n ]})\n matched_pokemon.append(pokemon)\n continue\n if \"player_stats\" in inv_data:\n self.km_walked = inv_data.get(\"player_stats\", {}).get(\"km_walked\", 0)\n if temp_used_incubators:\n self.used_incubators = temp_used_incubators\n if temp_ready_incubators:\n self.ready_incubators = temp_ready_incubators\n if temp_eggs:\n self.eggs = temp_eggs\n return matched_pokemon\n\n def _hatch_eggs(self):\n response_dict = self.bot.api.get_hatched_eggs()\n log_color = 'green'\n try:\n result = reduce(dict.__getitem__, [\"responses\", \"GET_HATCHED_EGGS\"], response_dict)\n except KeyError:\n return\n pokemon_ids = []\n if 'pokemon_id' in result:\n pokemon_ids = [id for id in result['pokemon_id']]\n 
stardust = result.get('stardust_awarded', \"error\")\n candy = result.get('candy_awarded', \"error\")\n xp = result.get('experience_awarded', \"error\")\n sleep(self.hatching_animation_delay)\n self.bot.latest_inventory = None\n try:\n pokemon_data = self._check_inventory(pokemon_ids)\n for pokemon in pokemon_data:\n # pokemon ids seem to be offset by one\n if pokemon['pokemon_id']!=-1:\n pokemon['name'] = self.bot.pokemon_list[(pokemon.get('pokemon_id')-1)]['Name']\n else:\n pokemon['name'] = \"error\"\n except:\n pokemon_data = [{\"name\":\"error\",\"cp\":\"error\",\"iv\":\"error\"}]\n if not pokemon_ids or pokemon_data[0]['name'] == \"error\":\n self.emit_event(\n 'egg_hatched',\n data={\n 'pokemon': 'error',\n 'cp': 'error',\n 'iv': 'error',\n 'exp': 'error',\n 'stardust': 'error',\n 'candy': 'error',\n }\n )\n return\n for i in range(len(pokemon_data)):\n msg = \"Egg hatched with a {pokemon} (CP {cp} - IV {iv}), {exp} exp, {stardust} stardust and {candy} candies.\"\n self.bot.metrics.hatched_eggs(1)\n self.emit_event(\n 'egg_hatched',\n formatted=msg,\n data={\n 'pokemon': pokemon_data[i]['name'],\n 'cp': pokemon_data[i]['cp'],\n 'iv': \"{} {}\".format(\n \"/\".join(map(str, pokemon_data[i]['iv'])),\n round(sum(pokemon_data[i]['iv'])/self.max_iv, 2)\n ),\n 'exp': xp[i],\n 'stardust': stardust[i],\n 'candy': candy[i],\n }\n )\n\n def _print_eggs(self):\n if not self.used_incubators:\n return\n\n self.used_incubators.sort(key=lambda x: x.get(\"km\"))\n \n eggs = ['{:.2f}/{} km'.format(e['km_needed']-e['km']+self.km_walked, e['km_needed']) for e in self.used_incubators]\n\n self.emit_event(\n 'next_egg_incubates',\n formatted='Eggs incubating: [{eggs}] (Eggs left: {eggs_left}, Incubating: {eggs_inc})',\n data={\n 'eggs_left': len(self.eggs),\n 'eggs_inc': len(self.used_incubators),\n 'eggs': ', '.join(eggs)\n }\n )\n \n def _should_print(self):\n \"\"\"\n Returns a value indicating whether the eggs should be displayed.\n :return: True if the stats should be displayed; otherwise, False.\n :rtype: bool\n \"\"\"\n return self.next_update is None or datetime.now() >= self.next_update\n\n def _compute_next_update(self):\n \"\"\"\n Computes the next update datetime based on the minimum update interval.\n :return: Nothing.\n :rtype: None\n \"\"\"\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "path": "pokemongo_bot/cell_workers/incubate_eggs.py"}]} | 3,083 | 487 |
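The patch for this IncubateEggs record adds two options, `breakable` and `infinite`, each a list of egg distances (km) that the corresponding incubator type is allowed to take; a limited-use ("breakable") incubator is recognised by a non-null `uses_remaining` field. The sketch below shows one way the options could be set to reserve blue incubators for 10 km eggs, together with a standalone restatement of the added check. The config layout, helper function and sample values are illustrative assumptions, not the bot's actual loader.

```python
# Illustrative task entry, assuming the bot's usual {"type", "config"} layout.
task_config = {
    "type": "IncubateEggs",
    "config": {
        "longer_eggs_first": True,
        "min_interval": 120,
        "breakable": [10],        # spend limited-use (blue) incubators on 10 km eggs only
        "infinite": [2, 5, 10],   # the unlimited incubator may take any egg
    },
}

def incubator_accepts(incubator, egg_km, cfg):
    """Standalone restatement of the check added in _apply_incubators:
    an empty list means 'no restriction' for that incubator type."""
    if incubator.get("uses_remaining") is not None:   # limited-use ("breakable")
        allowed = cfg.get("breakable", [])
    else:                                             # unlimited ("infinite")
        allowed = cfg.get("infinite", [])
    return not allowed or egg_km in allowed

if __name__ == "__main__":
    cfg = task_config["config"]
    blue = {"id": "incubator-1", "uses_remaining": 3}
    unlimited = {"id": "incubator-0"}
    print(incubator_accepts(blue, 5, cfg))       # False: blue is reserved for 10 km eggs
    print(incubator_accepts(blue, 10, cfg))      # True
    print(incubator_accepts(unlimited, 2, cfg))  # True
```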
gh_patches_debug_29272 | rasdani/github-patches | git_diff | acl-org__acl-anthology-1868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correction to Anthology ID 2021.paclic-1
In PACLIC 2021 proceedings (https://aclanthology.org/volumes/2021.paclic-1/), edit the indexed name for "Joseph Marvin R. Imperial" of paper 26 (https://aclanthology.org/2021.paclic-1.26/) to "Joseph Marvin Imperial" (no R.) to combine previously indexed papers with one author below:
https://aclanthology.org/people/j/joseph-marvin-imperial/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/retract_paper.py`
Content:
```
1 #! /usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2020 Matt Post <[email protected]>
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """
19 Takes an Anthology ID, downloads the PDF, and produces a revision PDF
20 with a "RETRACTED" watermark, as well as a note at the top pointing
21 to the paper page. Also revises the XML.
22 """
23
24 import argparse
25 import os
26 import shutil
27 import subprocess
28 import sys
29 import tempfile
30
31 from string import Template
32
33 from anthology.utils import (
34 retrieve_url,
35 deconstruct_anthology_id,
36 make_simple_element,
37 get_xml_file,
38 indent,
39 )
40 from anthology.data import CANONICAL_URL_TEMPLATE, PDF_LOCATION_TEMPLATE
41 from add_revision import add_revision
42
43 from datetime import datetime
44
45 import lxml.etree as ET
46
47 template = Template(
48 r"""\documentclass{article}
49 \usepackage[printwatermark]{xwatermark}
50 \usepackage{xcolor}
51 \usepackage{graphicx}
52 \usepackage{pdfpages}
53 \usepackage{hyperref}
54 \hypersetup{plainpages=false,
55 pdfpagemode=none,
56 colorlinks=true,
57 unicode=true
58 }
59
60 % "allpages" didn't work
61 \newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}
62
63 % set A4
64 \setlength{\paperwidth}{21cm}
65 \setlength{\paperheight}{29.7cm}
66
67 \special{papersize=21cm,29.7cm}
68 \pdfpageheight\paperheight
69 \pdfpagewidth\paperwidth
70 \pagestyle{plain}
71
72 \begin{document}
73
74 \AddToShipoutPicture{%
75 \setlength{\unitlength}{1mm}
76 % center box at (x, y) millimeters from bottom-left corner
77 \put(105,290){\makebox(0,0){This paper was retracted. For more information, see \url{$url}.}}
78 }
79
80 \includepdf[pages=-]{$file}
81
82 \end{document}"""
83 )
84
85
86 def add_watermark(anth_id, workdir="."):
87 """
88 Downloads an Anthology paper and adds a RETRACTED watermark.
89 """
90 page = CANONICAL_URL_TEMPLATE.format(anth_id)
91 url = PDF_LOCATION_TEMPLATE.format(anth_id)
92 orig_pdf = os.path.join(workdir, "tmp.pdf")
93
94 retrieve_url(url, orig_pdf)
95
96 tex_file = os.path.join(workdir, f"{anth_id}.tex")
97 print("TEX_FILE", tex_file)
98 with open(tex_file, "w") as f:
99 print(template.substitute(file=orig_pdf, url=page), file=f)
100
101 command = f"pdflatex {tex_file}"
102 try:
103 subprocess.call(
104 command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60
105 )
106 except TimeoutExpired:
107 print(
108 "pdflatex didn't finish within 60 seconds. Do you have the CTAN watermark package installed?",
109 file=sys.stderr,
110 )
111 sys.exit(1)
112
113 new_pdf = f"{tex_file}".replace(".tex", ".pdf")
114
115 return new_pdf
116
117
118 def main(args):
119 """
120 Downloads an Anthology paper and adds a RETRACTED watermark, then updates the XML
121 with an appropriate <revision> and <retracted> tag.
122 """
123
124 with tempfile.TemporaryDirectory() as tempdir:
125
126 new_pdf = add_watermark(args.anthology_id, workdir=tempdir)
127
128 add_revision(
129 args.anthology_id,
130 new_pdf,
131 explanation="Retracted.",
132 change_type="revision",
133 dry_run=False,
134 )
135
136 xml_file = get_xml_file(args.anthology_id)
137 collection_id, volume_id, paper_id = deconstruct_anthology_id(args.anthology_id)
138 tree = ET.parse(xml_file)
139 if paper_id == "0":
140 paper = tree.getroot().find(f"./volume[@id='{volume_id}']/frontmatter")
141 else:
142 paper = tree.getroot().find(
143 f"./volume[@id='{volume_id}']/paper[@id='{paper_id}']"
144 )
145
146 if paper is None:
147 print(f"Couldn't find paper {args.anthology_id}!", file=sys.stderr)
148 sys.exit(2)
149
150 print("Modifying the XML", file=sys.stderr)
151 now = datetime.now()
152 date = f"{now.year}-{now.month:02d}-{now.day:02d}"
153 retracted_node = make_simple_element(
154 "retracted", args.explanation, attrib={"date": date}, parent=paper
155 )
156 indent(tree.getroot())
157 tree.write(xml_file, encoding="UTF-8", xml_declaration=True)
158
159
160 if __name__ == "__main__":
161 parser = argparse.ArgumentParser()
162 parser.add_argument("anthology_id")
163 parser.add_argument("explanation", help="Brief description of the changes.")
164 args = parser.parse_args()
165
166 main(args)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/retract_paper.py b/bin/retract_paper.py
--- a/bin/retract_paper.py
+++ b/bin/retract_paper.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python3
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Matt Post <[email protected]>
@@ -46,20 +46,17 @@
template = Template(
r"""\documentclass{article}
-\usepackage[printwatermark]{xwatermark}
+\usepackage[text=RETRACTED,scale=3,color=red]{draftwatermark}
\usepackage{xcolor}
\usepackage{graphicx}
\usepackage{pdfpages}
\usepackage{hyperref}
\hypersetup{plainpages=false,
- pdfpagemode=none,
+ pdfpagemode=UseNone,
colorlinks=true,
unicode=true
}
-% "allpages" didn't work
-\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}
-
% set A4
\setlength{\paperwidth}{21cm}
\setlength{\paperheight}{29.7cm}
@@ -101,11 +98,11 @@
command = f"pdflatex {tex_file}"
try:
subprocess.call(
- command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60
+ command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=30
)
- except TimeoutExpired:
+ except subprocess.TimeoutExpired:
print(
- "pdflatex didn't finish within 60 seconds. Do you have the CTAN watermark package installed?",
+ "pdflatex didn't finish within 30 seconds. Do you have the CTAN watermark package installed?",
file=sys.stderr,
)
sys.exit(1)
| {"golden_diff": "diff --git a/bin/retract_paper.py b/bin/retract_paper.py\n--- a/bin/retract_paper.py\n+++ b/bin/retract_paper.py\n@@ -1,4 +1,4 @@\n-#! /usr/bin/env python3\n+#!/usr/bin/env python3\n # -*- coding: utf-8 -*-\n #\n # Copyright 2020 Matt Post <[email protected]>\n@@ -46,20 +46,17 @@\n \n template = Template(\n r\"\"\"\\documentclass{article}\n-\\usepackage[printwatermark]{xwatermark}\n+\\usepackage[text=RETRACTED,scale=3,color=red]{draftwatermark}\n \\usepackage{xcolor}\n \\usepackage{graphicx}\n \\usepackage{pdfpages}\n \\usepackage{hyperref}\n \\hypersetup{plainpages=false,\n- pdfpagemode=none,\n+ pdfpagemode=UseNone,\n colorlinks=true,\n unicode=true\n }\n \n-% \"allpages\" didn't work\n-\\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}\n-\n % set A4\n \\setlength{\\paperwidth}{21cm}\n \\setlength{\\paperheight}{29.7cm}\n@@ -101,11 +98,11 @@\n command = f\"pdflatex {tex_file}\"\n try:\n subprocess.call(\n- command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60\n+ command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=30\n )\n- except TimeoutExpired:\n+ except subprocess.TimeoutExpired:\n print(\n- \"pdflatex didn't finish within 60 seconds. Do you have the CTAN watermark package installed?\",\n+ \"pdflatex didn't finish within 30 seconds. Do you have the CTAN watermark package installed?\",\n file=sys.stderr,\n )\n sys.exit(1)\n", "issue": "Correction to Anthology ID 2021.paclic-1\nIn PACLIC 2021 proceedings (https://aclanthology.org/volumes/2021.paclic-1/), edit the indexed name for \"Joseph Marvin R. Imperial\" of paper 26 (https://aclanthology.org/2021.paclic-1.26/) to \"Joseph Marvin Imperial\" (no R.) to combine previously indexed papers with one author below:\r\n\r\nhttps://aclanthology.org/people/j/joseph-marvin-imperial/\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Matt Post <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTakes an Anthology ID, downloads the PDF, and produces a revision PDF\nwith a \"RETRACTED\" watermark, as well as a note at the top pointing\nto the paper page. 
Also revises the XML.\n\"\"\"\n\nimport argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom string import Template\n\nfrom anthology.utils import (\n retrieve_url,\n deconstruct_anthology_id,\n make_simple_element,\n get_xml_file,\n indent,\n)\nfrom anthology.data import CANONICAL_URL_TEMPLATE, PDF_LOCATION_TEMPLATE\nfrom add_revision import add_revision\n\nfrom datetime import datetime\n\nimport lxml.etree as ET\n\ntemplate = Template(\n r\"\"\"\\documentclass{article}\n\\usepackage[printwatermark]{xwatermark}\n\\usepackage{xcolor}\n\\usepackage{graphicx}\n\\usepackage{pdfpages}\n\\usepackage{hyperref}\n\\hypersetup{plainpages=false,\n pdfpagemode=none,\n colorlinks=true,\n unicode=true\n}\n\n% \"allpages\" didn't work\n\\newwatermark[pages=1-1000,color=red!80,angle=45,scale=3,xpos=-6,ypos=0]{RETRACTED}\n\n% set A4\n\\setlength{\\paperwidth}{21cm}\n\\setlength{\\paperheight}{29.7cm}\n\n\\special{papersize=21cm,29.7cm}\n\\pdfpageheight\\paperheight\n\\pdfpagewidth\\paperwidth\n\\pagestyle{plain}\n\n\\begin{document}\n\n\\AddToShipoutPicture{%\n \\setlength{\\unitlength}{1mm}\n % center box at (x, y) millimeters from bottom-left corner\n \\put(105,290){\\makebox(0,0){This paper was retracted. For more information, see \\url{$url}.}}\n}\n\n\\includepdf[pages=-]{$file}\n\n\\end{document}\"\"\"\n)\n\n\ndef add_watermark(anth_id, workdir=\".\"):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark.\n \"\"\"\n page = CANONICAL_URL_TEMPLATE.format(anth_id)\n url = PDF_LOCATION_TEMPLATE.format(anth_id)\n orig_pdf = os.path.join(workdir, \"tmp.pdf\")\n\n retrieve_url(url, orig_pdf)\n\n tex_file = os.path.join(workdir, f\"{anth_id}.tex\")\n print(\"TEX_FILE\", tex_file)\n with open(tex_file, \"w\") as f:\n print(template.substitute(file=orig_pdf, url=page), file=f)\n\n command = f\"pdflatex {tex_file}\"\n try:\n subprocess.call(\n command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=60\n )\n except TimeoutExpired:\n print(\n \"pdflatex didn't finish within 60 seconds. 
Do you have the CTAN watermark package installed?\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n new_pdf = f\"{tex_file}\".replace(\".tex\", \".pdf\")\n\n return new_pdf\n\n\ndef main(args):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark, then updates the XML\n with an appropriate <revision> and <retracted> tag.\n \"\"\"\n\n with tempfile.TemporaryDirectory() as tempdir:\n\n new_pdf = add_watermark(args.anthology_id, workdir=tempdir)\n\n add_revision(\n args.anthology_id,\n new_pdf,\n explanation=\"Retracted.\",\n change_type=\"revision\",\n dry_run=False,\n )\n\n xml_file = get_xml_file(args.anthology_id)\n collection_id, volume_id, paper_id = deconstruct_anthology_id(args.anthology_id)\n tree = ET.parse(xml_file)\n if paper_id == \"0\":\n paper = tree.getroot().find(f\"./volume[@id='{volume_id}']/frontmatter\")\n else:\n paper = tree.getroot().find(\n f\"./volume[@id='{volume_id}']/paper[@id='{paper_id}']\"\n )\n\n if paper is None:\n print(f\"Couldn't find paper {args.anthology_id}!\", file=sys.stderr)\n sys.exit(2)\n\n print(\"Modifying the XML\", file=sys.stderr)\n now = datetime.now()\n date = f\"{now.year}-{now.month:02d}-{now.day:02d}\"\n retracted_node = make_simple_element(\n \"retracted\", args.explanation, attrib={\"date\": date}, parent=paper\n )\n indent(tree.getroot())\n tree.write(xml_file, encoding=\"UTF-8\", xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"anthology_id\")\n parser.add_argument(\"explanation\", help=\"Brief description of the changes.\")\n args = parser.parse_args()\n\n main(args)\n", "path": "bin/retract_paper.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Matt Post <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTakes an Anthology ID, downloads the PDF, and produces a revision PDF\nwith a \"RETRACTED\" watermark, as well as a note at the top pointing\nto the paper page. 
Also revises the XML.\n\"\"\"\n\nimport argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom string import Template\n\nfrom anthology.utils import (\n retrieve_url,\n deconstruct_anthology_id,\n make_simple_element,\n get_xml_file,\n indent,\n)\nfrom anthology.data import CANONICAL_URL_TEMPLATE, PDF_LOCATION_TEMPLATE\nfrom add_revision import add_revision\n\nfrom datetime import datetime\n\nimport lxml.etree as ET\n\ntemplate = Template(\n r\"\"\"\\documentclass{article}\n\\usepackage[text=RETRACTED,scale=3,color=red]{draftwatermark}\n\\usepackage{xcolor}\n\\usepackage{graphicx}\n\\usepackage{pdfpages}\n\\usepackage{hyperref}\n\\hypersetup{plainpages=false,\n pdfpagemode=UseNone,\n colorlinks=true,\n unicode=true\n}\n\n% set A4\n\\setlength{\\paperwidth}{21cm}\n\\setlength{\\paperheight}{29.7cm}\n\n\\special{papersize=21cm,29.7cm}\n\\pdfpageheight\\paperheight\n\\pdfpagewidth\\paperwidth\n\\pagestyle{plain}\n\n\\begin{document}\n\n\\AddToShipoutPicture{%\n \\setlength{\\unitlength}{1mm}\n % center box at (x, y) millimeters from bottom-left corner\n \\put(105,290){\\makebox(0,0){This paper was retracted. For more information, see \\url{$url}.}}\n}\n\n\\includepdf[pages=-]{$file}\n\n\\end{document}\"\"\"\n)\n\n\ndef add_watermark(anth_id, workdir=\".\"):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark.\n \"\"\"\n page = CANONICAL_URL_TEMPLATE.format(anth_id)\n url = PDF_LOCATION_TEMPLATE.format(anth_id)\n orig_pdf = os.path.join(workdir, \"tmp.pdf\")\n\n retrieve_url(url, orig_pdf)\n\n tex_file = os.path.join(workdir, f\"{anth_id}.tex\")\n print(\"TEX_FILE\", tex_file)\n with open(tex_file, \"w\") as f:\n print(template.substitute(file=orig_pdf, url=page), file=f)\n\n command = f\"pdflatex {tex_file}\"\n try:\n subprocess.call(\n command, shell=True, cwd=workdir, stdout=subprocess.DEVNULL, timeout=30\n )\n except subprocess.TimeoutExpired:\n print(\n \"pdflatex didn't finish within 30 seconds. 
Do you have the CTAN watermark package installed?\",\n file=sys.stderr,\n )\n sys.exit(1)\n\n new_pdf = f\"{tex_file}\".replace(\".tex\", \".pdf\")\n\n return new_pdf\n\n\ndef main(args):\n \"\"\"\n Downloads an Anthology paper and adds a RETRACTED watermark, then updates the XML\n with an appropriate <revision> and <retracted> tag.\n \"\"\"\n\n with tempfile.TemporaryDirectory() as tempdir:\n\n new_pdf = add_watermark(args.anthology_id, workdir=tempdir)\n\n add_revision(\n args.anthology_id,\n new_pdf,\n explanation=\"Retracted.\",\n change_type=\"revision\",\n dry_run=False,\n )\n\n xml_file = get_xml_file(args.anthology_id)\n collection_id, volume_id, paper_id = deconstruct_anthology_id(args.anthology_id)\n tree = ET.parse(xml_file)\n if paper_id == \"0\":\n paper = tree.getroot().find(f\"./volume[@id='{volume_id}']/frontmatter\")\n else:\n paper = tree.getroot().find(\n f\"./volume[@id='{volume_id}']/paper[@id='{paper_id}']\"\n )\n\n if paper is None:\n print(f\"Couldn't find paper {args.anthology_id}!\", file=sys.stderr)\n sys.exit(2)\n\n print(\"Modifying the XML\", file=sys.stderr)\n now = datetime.now()\n date = f\"{now.year}-{now.month:02d}-{now.day:02d}\"\n retracted_node = make_simple_element(\n \"retracted\", args.explanation, attrib={\"date\": date}, parent=paper\n )\n indent(tree.getroot())\n tree.write(xml_file, encoding=\"UTF-8\", xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"anthology_id\")\n parser.add_argument(\"explanation\", help=\"Brief description of the changes.\")\n args = parser.parse_args()\n\n main(args)\n", "path": "bin/retract_paper.py"}]} | 2,015 | 452 |
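Besides switching the watermark package from `xwatermark` to `draftwatermark` and changing `pdfpagemode` to `UseNone`, the patch in this record fixes a runtime bug: the timeout exception must be caught as `subprocess.TimeoutExpired` (the bare `TimeoutExpired` in the original would raise `NameError`), and the timeout drops to 30 seconds. A self-contained sketch of the corrected call pattern follows; the helper name is illustrative.

```python
import subprocess
import sys

def run_pdflatex(tex_file, workdir=".", timeout=30):
    """Run pdflatex the way the patched script does and report a timeout cleanly."""
    try:
        subprocess.call(
            f"pdflatex {tex_file}",
            shell=True,
            cwd=workdir,
            stdout=subprocess.DEVNULL,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:   # fully qualified, unlike the original bare name
        print(f"pdflatex didn't finish within {timeout} seconds.", file=sys.stderr)
        return False
    return True
```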
gh_patches_debug_622 | rasdani/github-patches | git_diff | pex-tool__pex-1859 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.100
On the docket:
+ [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.99"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.99"
+__version__ = "2.1.100"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.99\"\n+__version__ = \"2.1.100\"\n", "issue": "Release 2.1.100\nOn the docket:\r\n+ [x] Using --target-system linux --target-system mac can still lead to failed attempts to lock Windows requirements. #1856\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.99\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.100\"\n", "path": "pex/version.py"}]} | 352 | 97 |
gh_patches_debug_16027 | rasdani/github-patches | git_diff | SigmaHQ__sigma-1278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update sigma2attack to the latest navigator version
The data generated for the ATT&CK Navigator is not up to date.

specs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/sigma/sigma2attack.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import argparse
4 import glob
5 import json
6 import os
7 import sys
8
9 import yaml
10
11 def main():
12 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
13 parser.add_argument("--rules-directory", "-d", dest="rules_dir", default="rules", help="Directory to read rules from")
14 parser.add_argument("--out-file", "-o", dest="out_file", default="heatmap.json", help="File to write the JSON layer to")
15 parser.add_argument("--no-comment", dest="no_comment", action="store_true", help="Don't store rule names in comments")
16 args = parser.parse_args()
17
18 rule_files = glob.glob(os.path.join(args.rules_dir, "**/*.yml"), recursive=True)
19 techniques_to_rules = {}
20 curr_max_technique_count = 0
21 num_rules_used = 0
22 for rule_file in rule_files:
23 try:
24 rule = yaml.safe_load(open(rule_file).read())
25 except yaml.YAMLError:
26 sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n")
27 continue
28 if "tags" not in rule:
29 sys.stderr.write("Ignoring rule " + rule_file + " (no tags)\n")
30 continue
31 tags = rule["tags"]
32 for tag in tags:
33 if tag.lower().startswith("attack.t"):
34 technique_id = tag[len("attack."):].upper()
35 num_rules_used += 1
36 if technique_id not in techniques_to_rules:
37 techniques_to_rules[technique_id] = []
38 techniques_to_rules[technique_id].append(os.path.basename(rule_file))
39 curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))
40
41
42 scores = []
43 for technique in techniques_to_rules:
44 entry = {
45 "techniqueID": technique,
46 "score": len(techniques_to_rules[technique]),
47 }
48 if not args.no_comment:
49 entry["comment"] = "\n".join(techniques_to_rules[technique])
50
51 scores.append(entry)
52
53 output = {
54 "domain": "mitre-enterprise",
55 "name": "Sigma rules heatmap",
56 "gradient": {
57 "colors": [
58 "#ffffff",
59 "#ff6666"
60 ],
61 "maxValue": curr_max_technique_count,
62 "minValue": 0
63 },
64 "version": "2.2",
65 "techniques": scores,
66 }
67
68 with open(args.out_file, "w") as f:
69 f.write(json.dumps(output))
70 print("[*] Layer file written in " + args.out_file + " (" + str(num_rules_used) + " rules)")
71
72 if __name__ == "__main__":
73 main()
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py
--- a/tools/sigma/sigma2attack.py
+++ b/tools/sigma/sigma2attack.py
@@ -21,7 +21,7 @@
num_rules_used = 0
for rule_file in rule_files:
try:
- rule = yaml.safe_load(open(rule_file).read())
+ rule = yaml.safe_load(open(rule_file, encoding="utf-8").read())
except yaml.YAMLError:
sys.stderr.write("Ignoring rule " + rule_file + " (parsing failed)\n")
continue
@@ -61,7 +61,10 @@
"maxValue": curr_max_technique_count,
"minValue": 0
},
- "version": "2.2",
+ "versions": {
+ "navigator": "4.0",
+ "layer": "4.0"
+ },
"techniques": scores,
}
| {"golden_diff": "diff --git a/tools/sigma/sigma2attack.py b/tools/sigma/sigma2attack.py\n--- a/tools/sigma/sigma2attack.py\n+++ b/tools/sigma/sigma2attack.py\n@@ -21,7 +21,7 @@\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n- rule = yaml.safe_load(open(rule_file).read())\n+ rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n@@ -61,7 +61,10 @@\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n- \"version\": \"2.2\", \n+ \"versions\": {\n+ \"navigator\": \"4.0\",\n+ \"layer\": \"4.0\"\n+ },\n \"techniques\": scores,\n }\n", "issue": "Update sigma2attack to the latest navigator version\nthe data generated for navigator is not up to date.\r\n\r\n\r\n\r\nspecs on [att&ck navigator github](https://github.com/mitre-attack/attack-navigator/blob/master/layers/LAYERFORMATv4.md)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file).read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"version\": \"2.2\", \n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport glob\nimport json\nimport os\nimport sys\n\nimport yaml\n\ndef main():\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n 
parser.add_argument(\"--rules-directory\", \"-d\", dest=\"rules_dir\", default=\"rules\", help=\"Directory to read rules from\")\n parser.add_argument(\"--out-file\", \"-o\", dest=\"out_file\", default=\"heatmap.json\", help=\"File to write the JSON layer to\")\n parser.add_argument(\"--no-comment\", dest=\"no_comment\", action=\"store_true\", help=\"Don't store rule names in comments\")\n args = parser.parse_args()\n\n rule_files = glob.glob(os.path.join(args.rules_dir, \"**/*.yml\"), recursive=True)\n techniques_to_rules = {}\n curr_max_technique_count = 0\n num_rules_used = 0\n for rule_file in rule_files:\n try:\n rule = yaml.safe_load(open(rule_file, encoding=\"utf-8\").read())\n except yaml.YAMLError:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (parsing failed)\\n\")\n continue\n if \"tags\" not in rule:\n sys.stderr.write(\"Ignoring rule \" + rule_file + \" (no tags)\\n\")\n continue\n tags = rule[\"tags\"]\n for tag in tags:\n if tag.lower().startswith(\"attack.t\"):\n technique_id = tag[len(\"attack.\"):].upper()\n num_rules_used += 1\n if technique_id not in techniques_to_rules:\n techniques_to_rules[technique_id] = []\n techniques_to_rules[technique_id].append(os.path.basename(rule_file))\n curr_max_technique_count = max(curr_max_technique_count, len(techniques_to_rules[technique_id]))\n\n\n scores = []\n for technique in techniques_to_rules:\n entry = {\n \"techniqueID\": technique, \n \"score\": len(techniques_to_rules[technique]), \n }\n if not args.no_comment:\n entry[\"comment\"] = \"\\n\".join(techniques_to_rules[technique])\n\n scores.append(entry)\n\n output = {\n \"domain\": \"mitre-enterprise\",\n \"name\": \"Sigma rules heatmap\",\n \"gradient\": {\n \"colors\": [\n \"#ffffff\",\n \"#ff6666\"\n ],\n \"maxValue\": curr_max_technique_count,\n \"minValue\": 0\n },\n \"versions\": {\n \"navigator\": \"4.0\",\n \"layer\": \"4.0\"\n },\n \"techniques\": scores,\n }\n\n with open(args.out_file, \"w\") as f:\n f.write(json.dumps(output))\n print(\"[*] Layer file written in \" + args.out_file + \" (\" + str(num_rules_used) + \" rules)\")\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/sigma/sigma2attack.py"}]} | 1,140 | 221 |
gh_patches_debug_17072 | rasdani/github-patches | git_diff | Pycord-Development__pycord-1453 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[ext.bridge] Ephemeral kwarg causing errors in defer() and respond()
### Summary
Using the ephemeral= keyword argument causes errors when it is passed into the prefix-command version of the bridge command.
### Reproduction Steps
Using any `ephemeral` keyword arguments
### Minimal Reproducible Code
```python
@bridge_command(name="commands", description="This page lists all commands")
async def _commands(self, context: ApplicationContext) -> None:
await context.defer(ephemeral=True)
await context.respond("Commands List", ephemeral=True)
```
### Expected Results
The ephemeral keyword argument should instead be ignored. This could be as simple as:
```py
if kwargs.get("ephemeral") is not None:
del kwargs["ephemeral"]
```
This is in fact exactly what I added in both `discord/message.py` line 1625 and `discord/ext/bridge/context.py` line 153 to get it to work temporarily.
### Actual Results
```py
Ignoring exception in command commands:
Traceback (most recent call last):
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py", line 181, in wrapped
ret = await coro(*args, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py", line 38, in _commands
await context.defer(ephemeral=True)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 101, in defer
return await self._defer(*args, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 153, in _defer
return await self._get_super("trigger_typing")(*args, **kwargs)
TypeError: Messageable.trigger_typing() got an unexpected keyword argument 'ephemeral'
```
and
```py
Ignoring exception in command commands:
Traceback (most recent call last):
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py", line 181, in wrapped
ret = await coro(*args, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py", line 40, in _commands
await context.respond(
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 81, in respond
return await self._respond(*args, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py", line 147, in _respond
message = await self._get_super("reply")(*args, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/context.py", line 399, in reply
return await self.message.reply(content, **kwargs)
File "/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/message.py", line 1625, in reply
return await self.channel.send(content, reference=self, **kwargs)
TypeError: Messageable.send() got an unexpected keyword argument 'ephemeral'
```
### Intents
discord.Intents.all
### System Information
- Python v3.10.5-final
- py-cord v2.0.0-candidate
- py-cord pkg_resources: v2.0.0rc1
- aiohttp v3.8.1
- system info: Linux 5.15.48-1-MANJARO #1 SMP PREEMPT Thu Jun 16 12:33:56 UTC 2022
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
This can also apply to other slash-command-specific kwargs, which I can't specifically think of off the top of my head.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/ext/bridge/context.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25 from abc import ABC, abstractmethod
26 from typing import TYPE_CHECKING, Any, Optional, Union
27
28 from discord.commands import ApplicationContext
29 from discord.interactions import Interaction, InteractionMessage
30 from discord.message import Message
31 from discord.webhook import WebhookMessage
32
33 from ..commands import Context
34
35 __all__ = ("BridgeContext", "BridgeExtContext", "BridgeApplicationContext")
36
37
38 class BridgeContext(ABC):
39 """
40 The base context class for compatibility commands. This class is an :class:`ABC` (abstract base class), which is
41 subclassed by :class:`BridgeExtContext` and :class:`BridgeApplicationContext`. The methods in this class are meant
42 to give parity between the two contexts, while still allowing for all of their functionality.
43
44 When this is passed to a command, it will either be passed as :class:`BridgeExtContext`, or
45 :class:`BridgeApplicationContext`. Since they are two separate classes, it is quite simple to use :meth:`isinstance`
46 to make different functionality for each context. For example, if you want to respond to a command with the command
47 type that it was invoked with, you can do the following:
48
49 .. code-block:: python3
50
51 @bot.bridge_command()
52 async def example(ctx: BridgeContext):
53 if isinstance(ctx, BridgeExtContext):
54 command_type = "Traditional (prefix-based) command"
55 elif isinstance(ctx, BridgeApplicationContext):
56 command_type = "Application command"
57 await ctx.send(f"This command was invoked with a(n) {command_type}.")
58
59 .. versionadded:: 2.0
60 """
61
62 @abstractmethod
63 async def _respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:
64 ...
65
66 @abstractmethod
67 async def _defer(self, *args, **kwargs) -> None:
68 ...
69
70 @abstractmethod
71 async def _edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:
72 ...
73
74 async def respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:
75 """|coro|
76
77 Responds to the command with the respective response type to the current context. In :class:`BridgeExtContext`,
78 this will be :meth:`~.ExtContext.reply` while in :class:`BridgeApplicationContext`, this will be
79 :meth:`~.ApplicationContext.respond`.
80 """
81 return await self._respond(*args, **kwargs)
82
83 async def reply(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:
84 """|coro|
85
86 Alias for :meth:`~.BridgeContext.respond`.
87 """
88 return await self.respond(*args, **kwargs)
89
90 async def defer(self, *args, **kwargs) -> None:
91 """|coro|
92
93 Defers the command with the respective approach to the current context. In :class:`BridgeExtContext`, this will
94 be :meth:`~.ExtContext.trigger_typing` while in :class:`BridgeApplicationContext`, this will be
95 :meth:`~.ApplicationContext.defer`.
96
97 .. note::
98 There is no ``trigger_typing`` alias for this method. ``trigger_typing`` will always provide the same
99 functionality across contexts.
100 """
101 return await self._defer(*args, **kwargs)
102
103 async def edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:
104 """|coro|
105
106 Edits the original response message with the respective approach to the current context. In
107 :class:`BridgeExtContext`, this will have a custom approach where :meth:`.respond` caches the message to be
108 edited here. In :class:`BridgeApplicationContext`, this will be :meth:`~.ApplicationContext.edit`.
109 """
110 return await self._edit(*args, **kwargs)
111
112 def _get_super(self, attr: str) -> Optional[Any]:
113 return getattr(super(), attr)
114
115
116 class BridgeApplicationContext(BridgeContext, ApplicationContext):
117 """
118 The application context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and
119 :class:`ApplicationContext`. This class is meant to be used with :class:`BridgeCommand`.
120
121 .. versionadded:: 2.0
122 """
123
124 async def _respond(self, *args, **kwargs) -> Union[Interaction, WebhookMessage]:
125 return await self._get_super("respond")(*args, **kwargs)
126
127 async def _defer(self, *args, **kwargs) -> None:
128 return await self._get_super("defer")(*args, **kwargs)
129
130 async def _edit(self, *args, **kwargs) -> InteractionMessage:
131 return await self._get_super("edit")(*args, **kwargs)
132
133
134 class BridgeExtContext(BridgeContext, Context):
135 """
136 The ext.commands context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and
137 :class:`Context`. This class is meant to be used with :class:`BridgeCommand`.
138
139 .. versionadded:: 2.0
140 """
141
142 def __init__(self, *args, **kwargs):
143 super().__init__(*args, **kwargs)
144 self._original_response_message: Optional[Message] = None
145
146 async def _respond(self, *args, **kwargs) -> Message:
147 message = await self._get_super("reply")(*args, **kwargs)
148 if self._original_response_message is None:
149 self._original_response_message = message
150 return message
151
152 async def _defer(self, *args, **kwargs) -> None:
153 return await self._get_super("trigger_typing")(*args, **kwargs)
154
155 async def _edit(self, *args, **kwargs) -> Message:
156 return await self._original_response_message.edit(*args, **kwargs)
157
158 async def delete(self, *, delay: Optional[float] = None, reason: Optional[str] = None) -> None:
159 """|coro|
160
161 Deletes the original response message, if it exists.
162
163 Parameters
164 -----------
165 delay: Optional[:class:`float`]
166 If provided, the number of seconds to wait before deleting the message.
167 reason: Optional[:class:`str`]
168 The reason for deleting the message. Shows up on the audit log.
169 """
170 if self._original_response_message:
171 await self._original_response_message.delete(delay=delay, reason=reason)
172
173
174 if TYPE_CHECKING:
175 # This is a workaround for mypy not being able to resolve the type of BridgeCommand.
176 class BridgeContext(ApplicationContext, Context):
177 ...
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/ext/bridge/context.py b/discord/ext/bridge/context.py
--- a/discord/ext/bridge/context.py
+++ b/discord/ext/bridge/context.py
@@ -144,12 +144,14 @@
self._original_response_message: Optional[Message] = None
async def _respond(self, *args, **kwargs) -> Message:
+ kwargs.pop("ephemeral", None)
message = await self._get_super("reply")(*args, **kwargs)
if self._original_response_message is None:
self._original_response_message = message
return message
async def _defer(self, *args, **kwargs) -> None:
+ kwargs.pop("ephemeral", None)
return await self._get_super("trigger_typing")(*args, **kwargs)
async def _edit(self, *args, **kwargs) -> Message:
| {"golden_diff": "diff --git a/discord/ext/bridge/context.py b/discord/ext/bridge/context.py\n--- a/discord/ext/bridge/context.py\n+++ b/discord/ext/bridge/context.py\n@@ -144,12 +144,14 @@\n self._original_response_message: Optional[Message] = None\n \n async def _respond(self, *args, **kwargs) -> Message:\n+ kwargs.pop(\"ephemeral\", None)\n message = await self._get_super(\"reply\")(*args, **kwargs)\n if self._original_response_message is None:\n self._original_response_message = message\n return message\n \n async def _defer(self, *args, **kwargs) -> None:\n+ kwargs.pop(\"ephemeral\", None)\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\n \n async def _edit(self, *args, **kwargs) -> Message:\n", "issue": "[ext.bridge] Ephemeral kwarg causing errors in defer() and respond()\n### Summary\r\n\r\nUsing the ephemeral= keyword argument causes errors when it is passed into the prefix-command version of the bridge command.\r\n\r\n### Reproduction Steps\r\n\r\nUsing any `ephemeral` keyword arguments\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\n @bridge_command(name=\"commands\", description=\"This page lists all commands\")\r\n async def _commands(self, context: ApplicationContext) -> None:\r\n await context.defer(ephemeral=True)\r\n await context.respond(\"Commands List\", ephemeral=True)\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nThe ephemeral keyword argument should instead be ignored. This could be as simple as a:\r\n```py\r\nif kwargs.get(\"ephemeral\") is not None:\r\n del kwargs[\"ephemeral\"]\r\n```\r\nThis is in fact exactly what I added in both `discord/message.py` line 1625 and `discord/ext/bridge/context.py` line 153 to get it to work temporarily.\r\n\r\n### Actual Results\r\n\r\n```py\r\nIgnoring exception in command commands:\r\nTraceback (most recent call last):\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py\", line 181, in wrapped\r\n ret = await coro(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py\", line 38, in _commands\r\n await context.defer(ephemeral=True)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 101, in defer\r\n return await self._defer(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 153, in _defer\r\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\r\nTypeError: Messageable.trigger_typing() got an unexpected keyword argument 'ephemeral'\r\n```\r\nand\r\n```py\r\nIgnoring exception in command commands:\r\nTraceback (most recent call last):\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/core.py\", line 181, in wrapped\r\n ret = await coro(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/slash/help.py\", line 40, in _commands\r\n await context.respond(\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 81, in respond\r\n return await self._respond(*args, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/bridge/context.py\", line 147, in _respond\r\n message = await self._get_super(\"reply\")(*args, **kwargs)\r\n File 
\"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/ext/commands/context.py\", line 399, in reply\r\n return await self.message.reply(content, **kwargs)\r\n File \"/home/krishnan/Coding/Hobby/ChessCord/ChessCordv2.0/venv/lib/python3.10/site-packages/discord/message.py\", line 1625, in reply\r\n return await self.channel.send(content, reference=self, **kwargs)\r\nTypeError: Messageable.send() got an unexpected keyword argument 'ephemeral'\r\n```\r\n\r\n### Intents\r\n\r\ndiscord.Intents.all\r\n\r\n### System Information\r\n\r\n- Python v3.10.5-final\r\n- py-cord v2.0.0-candidate\r\n - py-cord pkg_resources: v2.0.0rc1\r\n- aiohttp v3.8.1\r\n- system info: Linux 5.15.48-1-MANJARO #1 SMP PREEMPT Thu Jun 16 12:33:56 UTC 2022\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\nThis can also apply to other slash-command-specific kwargs, which I can't specifically think of off the top of my head.\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Any, Optional, Union\n\nfrom discord.commands import ApplicationContext\nfrom discord.interactions import Interaction, InteractionMessage\nfrom discord.message import Message\nfrom discord.webhook import WebhookMessage\n\nfrom ..commands import Context\n\n__all__ = (\"BridgeContext\", \"BridgeExtContext\", \"BridgeApplicationContext\")\n\n\nclass BridgeContext(ABC):\n \"\"\"\n The base context class for compatibility commands. This class is an :class:`ABC` (abstract base class), which is\n subclassed by :class:`BridgeExtContext` and :class:`BridgeApplicationContext`. The methods in this class are meant\n to give parity between the two contexts, while still allowing for all of their functionality.\n\n When this is passed to a command, it will either be passed as :class:`BridgeExtContext`, or\n :class:`BridgeApplicationContext`. Since they are two separate classes, it is quite simple to use :meth:`isinstance`\n to make different functionality for each context. For example, if you want to respond to a command with the command\n type that it was invoked with, you can do the following:\n\n .. 
code-block:: python3\n\n @bot.bridge_command()\n async def example(ctx: BridgeContext):\n if isinstance(ctx, BridgeExtContext):\n command_type = \"Traditional (prefix-based) command\"\n elif isinstance(ctx, BridgeApplicationContext):\n command_type = \"Application command\"\n await ctx.send(f\"This command was invoked with a(n) {command_type}.\")\n\n .. versionadded:: 2.0\n \"\"\"\n\n @abstractmethod\n async def _respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n ...\n\n @abstractmethod\n async def _defer(self, *args, **kwargs) -> None:\n ...\n\n @abstractmethod\n async def _edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n ...\n\n async def respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Responds to the command with the respective response type to the current context. In :class:`BridgeExtContext`,\n this will be :meth:`~.ExtContext.reply` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.respond`.\n \"\"\"\n return await self._respond(*args, **kwargs)\n\n async def reply(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Alias for :meth:`~.BridgeContext.respond`.\n \"\"\"\n return await self.respond(*args, **kwargs)\n\n async def defer(self, *args, **kwargs) -> None:\n \"\"\"|coro|\n\n Defers the command with the respective approach to the current context. In :class:`BridgeExtContext`, this will\n be :meth:`~.ExtContext.trigger_typing` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.defer`.\n\n .. note::\n There is no ``trigger_typing`` alias for this method. ``trigger_typing`` will always provide the same\n functionality across contexts.\n \"\"\"\n return await self._defer(*args, **kwargs)\n\n async def edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n \"\"\"|coro|\n\n Edits the original response message with the respective approach to the current context. In\n :class:`BridgeExtContext`, this will have a custom approach where :meth:`.respond` caches the message to be\n edited here. In :class:`BridgeApplicationContext`, this will be :meth:`~.ApplicationContext.edit`.\n \"\"\"\n return await self._edit(*args, **kwargs)\n\n def _get_super(self, attr: str) -> Optional[Any]:\n return getattr(super(), attr)\n\n\nclass BridgeApplicationContext(BridgeContext, ApplicationContext):\n \"\"\"\n The application context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and\n :class:`ApplicationContext`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. versionadded:: 2.0\n \"\"\"\n\n async def _respond(self, *args, **kwargs) -> Union[Interaction, WebhookMessage]:\n return await self._get_super(\"respond\")(*args, **kwargs)\n\n async def _defer(self, *args, **kwargs) -> None:\n return await self._get_super(\"defer\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> InteractionMessage:\n return await self._get_super(\"edit\")(*args, **kwargs)\n\n\nclass BridgeExtContext(BridgeContext, Context):\n \"\"\"\n The ext.commands context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and\n :class:`Context`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. 
versionadded:: 2.0\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._original_response_message: Optional[Message] = None\n\n async def _respond(self, *args, **kwargs) -> Message:\n message = await self._get_super(\"reply\")(*args, **kwargs)\n if self._original_response_message is None:\n self._original_response_message = message\n return message\n\n async def _defer(self, *args, **kwargs) -> None:\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> Message:\n return await self._original_response_message.edit(*args, **kwargs)\n\n async def delete(self, *, delay: Optional[float] = None, reason: Optional[str] = None) -> None:\n \"\"\"|coro|\n\n Deletes the original response message, if it exists.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait before deleting the message.\n reason: Optional[:class:`str`]\n The reason for deleting the message. Shows up on the audit log.\n \"\"\"\n if self._original_response_message:\n await self._original_response_message.delete(delay=delay, reason=reason)\n\n\nif TYPE_CHECKING:\n # This is a workaround for mypy not being able to resolve the type of BridgeCommand.\n class BridgeContext(ApplicationContext, Context):\n ...\n", "path": "discord/ext/bridge/context.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Any, Optional, Union\n\nfrom discord.commands import ApplicationContext\nfrom discord.interactions import Interaction, InteractionMessage\nfrom discord.message import Message\nfrom discord.webhook import WebhookMessage\n\nfrom ..commands import Context\n\n__all__ = (\"BridgeContext\", \"BridgeExtContext\", \"BridgeApplicationContext\")\n\n\nclass BridgeContext(ABC):\n \"\"\"\n The base context class for compatibility commands. This class is an :class:`ABC` (abstract base class), which is\n subclassed by :class:`BridgeExtContext` and :class:`BridgeApplicationContext`. The methods in this class are meant\n to give parity between the two contexts, while still allowing for all of their functionality.\n\n When this is passed to a command, it will either be passed as :class:`BridgeExtContext`, or\n :class:`BridgeApplicationContext`. 
Since they are two separate classes, it is quite simple to use :meth:`isinstance`\n to make different functionality for each context. For example, if you want to respond to a command with the command\n type that it was invoked with, you can do the following:\n\n .. code-block:: python3\n\n @bot.bridge_command()\n async def example(ctx: BridgeContext):\n if isinstance(ctx, BridgeExtContext):\n command_type = \"Traditional (prefix-based) command\"\n elif isinstance(ctx, BridgeApplicationContext):\n command_type = \"Application command\"\n await ctx.send(f\"This command was invoked with a(n) {command_type}.\")\n\n .. versionadded:: 2.0\n \"\"\"\n\n @abstractmethod\n async def _respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n ...\n\n @abstractmethod\n async def _defer(self, *args, **kwargs) -> None:\n ...\n\n @abstractmethod\n async def _edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n ...\n\n async def respond(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Responds to the command with the respective response type to the current context. In :class:`BridgeExtContext`,\n this will be :meth:`~.ExtContext.reply` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.respond`.\n \"\"\"\n return await self._respond(*args, **kwargs)\n\n async def reply(self, *args, **kwargs) -> Union[Union[Interaction, WebhookMessage], Message]:\n \"\"\"|coro|\n\n Alias for :meth:`~.BridgeContext.respond`.\n \"\"\"\n return await self.respond(*args, **kwargs)\n\n async def defer(self, *args, **kwargs) -> None:\n \"\"\"|coro|\n\n Defers the command with the respective approach to the current context. In :class:`BridgeExtContext`, this will\n be :meth:`~.ExtContext.trigger_typing` while in :class:`BridgeApplicationContext`, this will be\n :meth:`~.ApplicationContext.defer`.\n\n .. note::\n There is no ``trigger_typing`` alias for this method. ``trigger_typing`` will always provide the same\n functionality across contexts.\n \"\"\"\n return await self._defer(*args, **kwargs)\n\n async def edit(self, *args, **kwargs) -> Union[InteractionMessage, Message]:\n \"\"\"|coro|\n\n Edits the original response message with the respective approach to the current context. In\n :class:`BridgeExtContext`, this will have a custom approach where :meth:`.respond` caches the message to be\n edited here. In :class:`BridgeApplicationContext`, this will be :meth:`~.ApplicationContext.edit`.\n \"\"\"\n return await self._edit(*args, **kwargs)\n\n def _get_super(self, attr: str) -> Optional[Any]:\n return getattr(super(), attr)\n\n\nclass BridgeApplicationContext(BridgeContext, ApplicationContext):\n \"\"\"\n The application context class for compatibility commands. This class is a subclass of :class:`BridgeContext` and\n :class:`ApplicationContext`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. versionadded:: 2.0\n \"\"\"\n\n async def _respond(self, *args, **kwargs) -> Union[Interaction, WebhookMessage]:\n return await self._get_super(\"respond\")(*args, **kwargs)\n\n async def _defer(self, *args, **kwargs) -> None:\n return await self._get_super(\"defer\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> InteractionMessage:\n return await self._get_super(\"edit\")(*args, **kwargs)\n\n\nclass BridgeExtContext(BridgeContext, Context):\n \"\"\"\n The ext.commands context class for compatibility commands. 
This class is a subclass of :class:`BridgeContext` and\n :class:`Context`. This class is meant to be used with :class:`BridgeCommand`.\n\n .. versionadded:: 2.0\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._original_response_message: Optional[Message] = None\n\n async def _respond(self, *args, **kwargs) -> Message:\n kwargs.pop(\"ephemeral\", None)\n message = await self._get_super(\"reply\")(*args, **kwargs)\n if self._original_response_message is None:\n self._original_response_message = message\n return message\n\n async def _defer(self, *args, **kwargs) -> None:\n kwargs.pop(\"ephemeral\", None)\n return await self._get_super(\"trigger_typing\")(*args, **kwargs)\n\n async def _edit(self, *args, **kwargs) -> Message:\n return await self._original_response_message.edit(*args, **kwargs)\n\n async def delete(self, *, delay: Optional[float] = None, reason: Optional[str] = None) -> None:\n \"\"\"|coro|\n\n Deletes the original response message, if it exists.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait before deleting the message.\n reason: Optional[:class:`str`]\n The reason for deleting the message. Shows up on the audit log.\n \"\"\"\n if self._original_response_message:\n await self._original_response_message.delete(delay=delay, reason=reason)\n\n\nif TYPE_CHECKING:\n # This is a workaround for mypy not being able to resolve the type of BridgeCommand.\n class BridgeContext(ApplicationContext, Context):\n ...\n", "path": "discord/ext/bridge/context.py"}]} | 3,439 | 199 |
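The fix this record settles on is slightly simpler than the `if ... del` workaround proposed in the issue: the bridge's ext-command context just pops slash-only keyword arguments before delegating to `reply()` / `trigger_typing()`. A standalone sketch of that pattern, with a hypothetical `respond_compat` helper standing in for the real bridge context methods, looks like this:

```python
from typing import Any

async def respond_compat(ctx: Any, *args: Any, **kwargs: Any) -> Any:
    """Hypothetical helper: route a response to either context type safely."""
    if hasattr(ctx, "respond"):
        # Application (slash) context understands ephemeral natively.
        return await ctx.respond(*args, **kwargs)
    # Prefix context: Messageable.send()/reply() reject the ephemeral kwarg
    # (see the TypeError in the traceback above), so silently drop it,
    # mirroring kwargs.pop("ephemeral", None) in the golden diff.
    kwargs.pop("ephemeral", None)
    return await ctx.reply(*args, **kwargs)
```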
gh_patches_debug_31559 | rasdani/github-patches | git_diff | uccser__cs-unplugged-195 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid filename for blank resources
When downloading a resource that is blank, the filename is `Sorting Network (0 to -1).pdf`.
The filename should be something like `Sorting Network (blank).pdf`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/resources/views/sorting_network.py`
Content:
```
1 from PIL import Image, ImageDraw, ImageFont
2 from random import sample
3
4
5 def resource_image(get_request, resource):
6 """Creates a image for Sorting Network resource.
7
8 Returns:
9 A Pillow image object.
10 """
11 image_path = 'static/img/resource-sorting-network-colour.png'
12 image = Image.open(image_path)
13 draw = ImageDraw.Draw(image)
14
15 (range_min, range_max, font_size) = number_range(get_request)
16
17 font_path = 'static/fonts/PatrickHand-Regular.ttf'
18
19 # Add numbers to text if needed
20 if get_request['prefilled_values'] != 'blank':
21 font = ImageFont.truetype(font_path, font_size)
22 numbers = sample(range(range_min, range_max), 6)
23 base_coord_x = 70
24 base_coord_y = 2560
25 coord_x_increment = 204
26 for number in numbers:
27 text = str(number)
28 text_width, text_height = draw.textsize(text, font=font)
29 coord_x = base_coord_x - (text_width / 2)
30 coord_y = base_coord_y - (text_height / 2)
31 draw.text(
32 (coord_x, coord_y),
33 text,
34 font=font,
35 fill='#000'
36 )
37 base_coord_x += coord_x_increment
38
39 return image
40
41
42 def subtitle(get_request, resource):
43 """Returns the subtitle string of the resource.
44
45 Used after the resource name in the filename, and
46 also on the resource image.
47 """
48 SUBTITLE_TEMPLATE = '{} to {}'
49 range_min, range_max, font_size = number_range(get_request)
50 text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)
51 return text
52
53
54 def number_range(get_request):
55 """Returns a tuple of (range_min, range_max, font_size)
56 for the requested resource.
57 """
58 prefilled_values = get_request['prefilled_values']
59 range_min = 0
60 range_max = 0
61 font_size = 150
62 if prefilled_values == 'easy':
63 range_min = 1
64 range_max = 10
65 elif prefilled_values == 'medium':
66 range_min = 10
67 range_max = 100
68 font_size = 120
69 elif prefilled_values == 'hard':
70 range_min = 100
71 range_max = 1000
72 font_size = 90
73 return (range_min, range_max, font_size)
74
```
Path: `csunplugged/resources/views/treasure_hunt.py`
Content:
```
1 from PIL import Image, ImageDraw, ImageFont
2 from random import sample
3
4
5 def resource_image(get_request, resource):
6 """Creates a image for Treasure Hunt resource.
7
8 Returns:
9 A Pillow image object.
10 """
11 image_path = 'static/img/resource-treasure-hunt.png'
12 font_path = 'static/fonts/PatrickHand-Regular.ttf'
13 image = Image.open(image_path)
14 draw = ImageDraw.Draw(image)
15
16 (range_min, range_max, font_size) = number_range(get_request)
17
18 # Add numbers to image if required
19 if get_request['prefilled_values'] != 'blank':
20 font = ImageFont.truetype(font_path, font_size)
21
22 total_numbers = 26
23 numbers = sample(range(range_min, range_max), total_numbers)
24 if get_request['number_order'] == 'sorted':
25 numbers.sort()
26
27 starting_coord_y = 494
28 base_coord_y = starting_coord_y
29 coord_y_increment = 286
30 base_coords_x = [257, 692]
31 for i in range(0, total_numbers):
32 text = str(numbers[i])
33 text_width, text_height = draw.textsize(text, font=font)
34
35 coord_x = base_coords_x[i % 2] - (text_width / 2)
36 coord_y = base_coord_y - (text_height / 2)
37 if i % 2 == 1:
38 coord_y -= 10
39 base_coord_y += coord_y_increment
40 draw.text(
41 (coord_x, coord_y),
42 text,
43 font=font,
44 fill='#000'
45 )
46
47 # Add number order and range text
48 text = subtitle(get_request, resource)
49 font = ImageFont.truetype(font_path, 110)
50 text_width, text_height = draw.textsize(text, font=font)
51 coord_x = 1472 - (text_width / 2)
52 coord_y = 35 - (text_height / 2)
53 draw.text(
54 (coord_x, coord_y),
55 text,
56 font=font,
57 fill='#000'
58 )
59
60 return image
61
62
63 def subtitle(get_request, resource):
64 """Returns the subtitle string of the resource.
65
66 Used after the resource name in the filename, and
67 also on the resource image.
68 """
69 SUBTITLE_TEMPLATE = '{} - {} to {}'
70 number_order_text = get_request['number_order'].title()
71 range_min, range_max, font_size = number_range(get_request)
72 text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)
73 return text
74
75
76 def number_range(get_request):
77 """Returns a tuple of (range_min, range_max, font_size)
78 for the requested resource.
79 """
80 prefilled_values = get_request['prefilled_values']
81 range_min = 0
82 if prefilled_values == 'easy':
83 range_max = 100
84 font_size = 97
85 elif prefilled_values == 'medium':
86 range_max = 1000
87 font_size = 80
88 elif prefilled_values == 'hard':
89 range_max = 10000
90 font_size = 70
91 return (range_min, range_max, font_size)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/resources/views/sorting_network.py b/csunplugged/resources/views/sorting_network.py
--- a/csunplugged/resources/views/sorting_network.py
+++ b/csunplugged/resources/views/sorting_network.py
@@ -45,9 +45,12 @@
Used after the resource name in the filename, and
also on the resource image.
"""
- SUBTITLE_TEMPLATE = '{} to {}'
- range_min, range_max, font_size = number_range(get_request)
- text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)
+ if get_request['prefilled_values'] == 'blank':
+ text = 'blank'
+ else:
+ SUBTITLE_TEMPLATE = '{} to {}'
+ range_min, range_max, font_size = number_range(get_request)
+ text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)
return text
diff --git a/csunplugged/resources/views/treasure_hunt.py b/csunplugged/resources/views/treasure_hunt.py
--- a/csunplugged/resources/views/treasure_hunt.py
+++ b/csunplugged/resources/views/treasure_hunt.py
@@ -13,10 +13,9 @@
image = Image.open(image_path)
draw = ImageDraw.Draw(image)
- (range_min, range_max, font_size) = number_range(get_request)
-
# Add numbers to image if required
if get_request['prefilled_values'] != 'blank':
+ (range_min, range_max, font_size) = number_range(get_request)
font = ImageFont.truetype(font_path, font_size)
total_numbers = 26
@@ -66,10 +65,13 @@
Used after the resource name in the filename, and
also on the resource image.
"""
- SUBTITLE_TEMPLATE = '{} - {} to {}'
- number_order_text = get_request['number_order'].title()
- range_min, range_max, font_size = number_range(get_request)
- text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)
+ if get_request['prefilled_values'] == 'blank':
+ text = 'blank'
+ else:
+ SUBTITLE_TEMPLATE = '{} - {} to {}'
+ number_order_text = get_request['number_order'].title()
+ range_min, range_max, font_size = number_range(get_request)
+ text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)
return text
| {"golden_diff": "diff --git a/csunplugged/resources/views/sorting_network.py b/csunplugged/resources/views/sorting_network.py\n--- a/csunplugged/resources/views/sorting_network.py\n+++ b/csunplugged/resources/views/sorting_network.py\n@@ -45,9 +45,12 @@\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n- SUBTITLE_TEMPLATE = '{} to {}'\n- range_min, range_max, font_size = number_range(get_request)\n- text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n+ if get_request['prefilled_values'] == 'blank':\n+ text = 'blank'\n+ else:\n+ SUBTITLE_TEMPLATE = '{} to {}'\n+ range_min, range_max, font_size = number_range(get_request)\n+ text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n return text\n \n \ndiff --git a/csunplugged/resources/views/treasure_hunt.py b/csunplugged/resources/views/treasure_hunt.py\n--- a/csunplugged/resources/views/treasure_hunt.py\n+++ b/csunplugged/resources/views/treasure_hunt.py\n@@ -13,10 +13,9 @@\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n \n- (range_min, range_max, font_size) = number_range(get_request)\n-\n # Add numbers to image if required\n if get_request['prefilled_values'] != 'blank':\n+ (range_min, range_max, font_size) = number_range(get_request)\n font = ImageFont.truetype(font_path, font_size)\n \n total_numbers = 26\n@@ -66,10 +65,13 @@\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n- SUBTITLE_TEMPLATE = '{} - {} to {}'\n- number_order_text = get_request['number_order'].title()\n- range_min, range_max, font_size = number_range(get_request)\n- text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n+ if get_request['prefilled_values'] == 'blank':\n+ text = 'blank'\n+ else:\n+ SUBTITLE_TEMPLATE = '{} - {} to {}'\n+ number_order_text = get_request['number_order'].title()\n+ range_min, range_max, font_size = number_range(get_request)\n+ text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n return text\n", "issue": "Invalid filename for blank resources\nWhen downloading a resource that is blank, the filename is `Sorting Network (0 to -1).pdf`.\r\n\r\nThe filename should be something like `Sorting Network (blank).pdf`.\r\n\n", "before_files": [{"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Sorting Network resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 'static/img/resource-sorting-network-colour.png'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n (range_min, range_max, font_size) = number_range(get_request)\n\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n\n # Add numbers to text if needed\n if get_request['prefilled_values'] != 'blank':\n font = ImageFont.truetype(font_path, font_size)\n numbers = sample(range(range_min, range_max), 6)\n base_coord_x = 70\n base_coord_y = 2560\n coord_x_increment = 204\n for number in numbers:\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n base_coord_x += coord_x_increment\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n SUBTITLE_TEMPLATE = '{} to 
{}'\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n range_max = 0\n font_size = 150\n if prefilled_values == 'easy':\n range_min = 1\n range_max = 10\n elif prefilled_values == 'medium':\n range_min = 10\n range_max = 100\n font_size = 120\n elif prefilled_values == 'hard':\n range_min = 100\n range_max = 1000\n font_size = 90\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/sorting_network.py"}, {"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Treasure Hunt resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 'static/img/resource-treasure-hunt.png'\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n (range_min, range_max, font_size) = number_range(get_request)\n\n # Add numbers to image if required\n if get_request['prefilled_values'] != 'blank':\n font = ImageFont.truetype(font_path, font_size)\n\n total_numbers = 26\n numbers = sample(range(range_min, range_max), total_numbers)\n if get_request['number_order'] == 'sorted':\n numbers.sort()\n\n starting_coord_y = 494\n base_coord_y = starting_coord_y\n coord_y_increment = 286\n base_coords_x = [257, 692]\n for i in range(0, total_numbers):\n text = str(numbers[i])\n text_width, text_height = draw.textsize(text, font=font)\n\n coord_x = base_coords_x[i % 2] - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n if i % 2 == 1:\n coord_y -= 10\n base_coord_y += coord_y_increment\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n # Add number order and range text\n text = subtitle(get_request, resource)\n font = ImageFont.truetype(font_path, 110)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = 1472 - (text_width / 2)\n coord_y = 35 - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n SUBTITLE_TEMPLATE = '{} - {} to {}'\n number_order_text = get_request['number_order'].title()\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n if prefilled_values == 'easy':\n range_max = 100\n font_size = 97\n elif prefilled_values == 'medium':\n range_max = 1000\n font_size = 80\n elif prefilled_values == 'hard':\n range_max = 10000\n font_size = 70\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/treasure_hunt.py"}], "after_files": [{"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Sorting Network resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 
'static/img/resource-sorting-network-colour.png'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n (range_min, range_max, font_size) = number_range(get_request)\n\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n\n # Add numbers to text if needed\n if get_request['prefilled_values'] != 'blank':\n font = ImageFont.truetype(font_path, font_size)\n numbers = sample(range(range_min, range_max), 6)\n base_coord_x = 70\n base_coord_y = 2560\n coord_x_increment = 204\n for number in numbers:\n text = str(number)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = base_coord_x - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n base_coord_x += coord_x_increment\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, and\n also on the resource image.\n \"\"\"\n if get_request['prefilled_values'] == 'blank':\n text = 'blank'\n else:\n SUBTITLE_TEMPLATE = '{} to {}'\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n range_max = 0\n font_size = 150\n if prefilled_values == 'easy':\n range_min = 1\n range_max = 10\n elif prefilled_values == 'medium':\n range_min = 10\n range_max = 100\n font_size = 120\n elif prefilled_values == 'hard':\n range_min = 100\n range_max = 1000\n font_size = 90\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/sorting_network.py"}, {"content": "from PIL import Image, ImageDraw, ImageFont\nfrom random import sample\n\n\ndef resource_image(get_request, resource):\n \"\"\"Creates a image for Treasure Hunt resource.\n\n Returns:\n A Pillow image object.\n \"\"\"\n image_path = 'static/img/resource-treasure-hunt.png'\n font_path = 'static/fonts/PatrickHand-Regular.ttf'\n image = Image.open(image_path)\n draw = ImageDraw.Draw(image)\n\n # Add numbers to image if required\n if get_request['prefilled_values'] != 'blank':\n (range_min, range_max, font_size) = number_range(get_request)\n font = ImageFont.truetype(font_path, font_size)\n\n total_numbers = 26\n numbers = sample(range(range_min, range_max), total_numbers)\n if get_request['number_order'] == 'sorted':\n numbers.sort()\n\n starting_coord_y = 494\n base_coord_y = starting_coord_y\n coord_y_increment = 286\n base_coords_x = [257, 692]\n for i in range(0, total_numbers):\n text = str(numbers[i])\n text_width, text_height = draw.textsize(text, font=font)\n\n coord_x = base_coords_x[i % 2] - (text_width / 2)\n coord_y = base_coord_y - (text_height / 2)\n if i % 2 == 1:\n coord_y -= 10\n base_coord_y += coord_y_increment\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n # Add number order and range text\n text = subtitle(get_request, resource)\n font = ImageFont.truetype(font_path, 110)\n text_width, text_height = draw.textsize(text, font=font)\n coord_x = 1472 - (text_width / 2)\n coord_y = 35 - (text_height / 2)\n draw.text(\n (coord_x, coord_y),\n text,\n font=font,\n fill='#000'\n )\n\n return image\n\n\ndef subtitle(get_request, resource):\n \"\"\"Returns the subtitle string of the resource.\n\n Used after the resource name in the filename, 
and\n also on the resource image.\n \"\"\"\n if get_request['prefilled_values'] == 'blank':\n text = 'blank'\n else:\n SUBTITLE_TEMPLATE = '{} - {} to {}'\n number_order_text = get_request['number_order'].title()\n range_min, range_max, font_size = number_range(get_request)\n text = SUBTITLE_TEMPLATE.format(number_order_text, range_min, range_max - 1)\n return text\n\n\ndef number_range(get_request):\n \"\"\"Returns a tuple of (range_min, range_max, font_size)\n for the requested resource.\n \"\"\"\n prefilled_values = get_request['prefilled_values']\n range_min = 0\n if prefilled_values == 'easy':\n range_max = 100\n font_size = 97\n elif prefilled_values == 'medium':\n range_max = 1000\n font_size = 80\n elif prefilled_values == 'hard':\n range_max = 10000\n font_size = 70\n return (range_min, range_max, font_size)\n", "path": "csunplugged/resources/views/treasure_hunt.py"}]} | 1,919 | 563 |
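The behavioural change captured here is confined to the two `subtitle()` functions: when the requested resource is blank, the subtitle short-circuits to the literal string `'blank'` instead of formatting a degenerate `0 to -1` range. A trimmed-down, dependency-free sketch of that branch, with fixed numbers standing in for `number_range()`:

```python
def subtitle(prefilled_values: str) -> str:
    """Return the filename/image subtitle for a sorting-network style resource."""
    if prefilled_values == "blank":
        return "blank"  # yields e.g. "Sorting Network (blank).pdf"
    range_min, range_max = 1, 10  # placeholder for number_range(get_request)
    return "{} to {}".format(range_min, range_max - 1)

print(subtitle("blank"))  # blank
print(subtitle("easy"))   # 1 to 9
```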
gh_patches_debug_21522 | rasdani/github-patches | git_diff | svthalia__concrexit-3070 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expand/improve document part of event endpoint
<!--
Please add the appropriate label for what change should be made:
docs: changes to the documentation)
refactor: refactoring production code, eg. renaming a variable or rewriting a function
test: adding missing tests, refactoring tests; no production code change
chore: updating poetry etc; no production code change
-->
### Describe the change
Change the current URL, or add a URL that links directly to the actual document (that is, one that ends with `/[name].pdf`).
### Motivation
It's very nice for the app to be able to just fetch files directly.
Also, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.
### Current implementation
There is a URL that leads to the file, but only after a redirect to `cdn`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/documents/api/v2/serializers/document.py`
Content:
```
1 from rest_framework.fields import SerializerMethodField
2 from rest_framework.reverse import reverse
3
4 from documents.models import Document
5 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
6 CleanedModelSerializer,
7 )
8
9
10 class DocumentSerializer(CleanedModelSerializer):
11 class Meta:
12 model = Document
13 fields = ("pk", "name", "url", "category", "members_only")
14
15 url = SerializerMethodField("_url")
16
17 def _url(self, instance):
18 return self.context["request"].build_absolute_uri(
19 reverse("documents:document", kwargs={"pk": instance.pk})
20 )
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py
--- a/website/documents/api/v2/serializers/document.py
+++ b/website/documents/api/v2/serializers/document.py
@@ -1,10 +1,10 @@
from rest_framework.fields import SerializerMethodField
-from rest_framework.reverse import reverse
from documents.models import Document
from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
CleanedModelSerializer,
)
+from utils.media.services import get_media_url
class DocumentSerializer(CleanedModelSerializer):
@@ -15,6 +15,12 @@
url = SerializerMethodField("_url")
def _url(self, instance):
- return self.context["request"].build_absolute_uri(
- reverse("documents:document", kwargs={"pk": instance.pk})
- )
+ if instance.members_only and (
+ not self.context["request"].user.is_authenticated
+ or not self.context["request"].member.has_active_membership()
+ ):
+ return self.context["request"].build_absolute_uri(
+ instance.get_absolute_url()
+ )
+
+ return get_media_url(instance.file, absolute_url=True)
| {"golden_diff": "diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py\n--- a/website/documents/api/v2/serializers/document.py\n+++ b/website/documents/api/v2/serializers/document.py\n@@ -1,10 +1,10 @@\n from rest_framework.fields import SerializerMethodField\n-from rest_framework.reverse import reverse\n \n from documents.models import Document\n from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n )\n+from utils.media.services import get_media_url\n \n \n class DocumentSerializer(CleanedModelSerializer):\n@@ -15,6 +15,12 @@\n url = SerializerMethodField(\"_url\")\n \n def _url(self, instance):\n- return self.context[\"request\"].build_absolute_uri(\n- reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n- )\n+ if instance.members_only and (\n+ not self.context[\"request\"].user.is_authenticated\n+ or not self.context[\"request\"].member.has_active_membership()\n+ ):\n+ return self.context[\"request\"].build_absolute_uri(\n+ instance.get_absolute_url()\n+ )\n+\n+ return get_media_url(instance.file, absolute_url=True)\n", "issue": "Expand/improve document part of event endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nChange the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).\r\n\r\n### Motivation\r\nIt's very nice for the app to be able to just fetch files directly.\r\nAlso, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.\r\n\r\n### Current implementation\r\nThere is a URL that leads to the file, but only after a redirect to `cdn`.\n", "before_files": [{"content": "from rest_framework.fields import SerializerMethodField\nfrom rest_framework.reverse import reverse\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n )\n", "path": "website/documents/api/v2/serializers/document.py"}], "after_files": [{"content": "from rest_framework.fields import SerializerMethodField\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.media.services import get_media_url\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n if instance.members_only and (\n not self.context[\"request\"].user.is_authenticated\n or not self.context[\"request\"].member.has_active_membership()\n ):\n return self.context[\"request\"].build_absolute_uri(\n instance.get_absolute_url()\n )\n\n return get_media_url(instance.file, absolute_url=True)\n", 
"path": "website/documents/api/v2/serializers/document.py"}]} | 608 | 266 |
gh_patches_debug_17667 | rasdani/github-patches | git_diff | mdn__kuma-6528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: brand
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7302494/
```
Resolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...
File "redirect_urls/middleware.py", line 14, in __call__
resolver_match = self.resolver.resolve(request.path_info)
File "newrelic/hooks/framework_django.py", line 600, in wrapper
return _wrapped(*args, **kwargs)
File "newrelic/hooks/framework_django.py", line 588, in _wrapped
result = wrapped(path)
File "newrelic/hooks/framework_django.py", line 575, in wrapper
return wrapped(*args, **kwargs)
File "django/urls/resolvers.py", line 394, in resolve
raise Resolver404({'tried': tried, 'path': new_path})
KeyError: 'brand'
File "stripe/stripe_object.py", line 90, in __getattr__
return self[k]
File "stripe/stripe_object.py", line 131, in __getitem__
raise err
File "stripe/stripe_object.py", line 119, in __getitem__
return super(StripeObject, self).__getitem__(k)
AttributeError: brand
(4 additional frame(s) were not displayed)
...
File "django/views/decorators/cache.py", line 57, in _wrapped_view_func
response = view_func(request, *args, **kwargs)
File "kuma/core/decorators.py", line 210, in wrapped
return func(request, *args, **kwargs)
File "kuma/users/views.py", line 472, in user_edit
"subscription_info": retrieve_stripe_subscription_info(edit_user,),
File "kuma/users/utils.py", line 53, in retrieve_stripe_subscription_info
"brand": source.brand,
File "stripe/stripe_object.py", line 92, in __getattr__
raise AttributeError(*err.args)
AttributeError: brand
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/utils.py`
Content:
```
1 from datetime import datetime
2
3 import stripe
4 from django.conf import settings
5
6
7 def retrieve_stripe_subscription(customer):
8 for subscription in customer.subscriptions.list().auto_paging_iter():
9 # We have to use array indexing syntax, as stripe uses dicts to
10 # represent its objects (dicts come with an .items method)
11 for item in subscription["items"].auto_paging_iter():
12 if item.plan.id == settings.STRIPE_PLAN_ID:
13 return subscription
14
15 return None
16
17
18 def create_stripe_customer_and_subscription_for_user(user, email, stripe_token):
19 customer = (
20 stripe.Customer.retrieve(user.stripe_customer_id)
21 if user.stripe_customer_id
22 else None
23 )
24 if not customer or customer.email != email:
25 customer = stripe.Customer.create(email=email, source=stripe_token,)
26 user.stripe_customer_id = customer.id
27 user.save()
28
29 if retrieve_stripe_subscription(customer) is None:
30 stripe.Subscription.create(
31 customer=customer.id, items=[{"plan": settings.STRIPE_PLAN_ID}],
32 )
33
34
35 def retrieve_stripe_subscription_info(user):
36 stripe_customer = (
37 stripe.Customer.retrieve(user.stripe_customer_id, expand=["default_source"],)
38 if settings.STRIPE_PLAN_ID and user.stripe_customer_id
39 else None
40 )
41
42 stripe_subscription = (
43 retrieve_stripe_subscription(stripe_customer)
44 if stripe_customer and stripe_customer.email == user.email
45 else None
46 )
47 if stripe_subscription:
48 source = stripe_customer.default_source
49 return {
50 "next_payment_at": datetime.fromtimestamp(
51 stripe_subscription.current_period_end
52 ),
53 "brand": source.brand,
54 "expires_at": f"{source.exp_month}/{source.exp_year}",
55 "last4": source.last4,
56 "zip": source.address_zip,
57 }
58
59 return None
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/utils.py b/kuma/users/utils.py
--- a/kuma/users/utils.py
+++ b/kuma/users/utils.py
@@ -46,14 +46,23 @@
)
if stripe_subscription:
source = stripe_customer.default_source
+ if source.object == "card":
+ card = source
+ elif source.object == "source":
+ card = source.card
+ else:
+ raise ValueError(
+ f"unexpected stripe customer default_source of type {source.object!r}"
+ )
+
return {
"next_payment_at": datetime.fromtimestamp(
stripe_subscription.current_period_end
),
- "brand": source.brand,
- "expires_at": f"{source.exp_month}/{source.exp_year}",
- "last4": source.last4,
- "zip": source.address_zip,
+ "brand": card.brand,
+ "expires_at": f"{card.exp_month}/{card.exp_year}",
+ "last4": card.last4,
+ "zip": card.address_zip,
}
return None
| {"golden_diff": "diff --git a/kuma/users/utils.py b/kuma/users/utils.py\n--- a/kuma/users/utils.py\n+++ b/kuma/users/utils.py\n@@ -46,14 +46,23 @@\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n+ if source.object == \"card\":\n+ card = source\n+ elif source.object == \"source\":\n+ card = source.card\n+ else:\n+ raise ValueError(\n+ f\"unexpected stripe customer default_source of type {source.object!r}\"\n+ )\n+\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n- \"brand\": source.brand,\n- \"expires_at\": f\"{source.exp_month}/{source.exp_year}\",\n- \"last4\": source.last4,\n- \"zip\": source.address_zip,\n+ \"brand\": card.brand,\n+ \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n+ \"last4\": card.last4,\n+ \"zip\": card.address_zip,\n }\n \n return None\n", "issue": "AttributeError: brand\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7302494/\n\n```\nResolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...\n File \"redirect_urls/middleware.py\", line 14, in __call__\n resolver_match = self.resolver.resolve(request.path_info)\n File \"newrelic/hooks/framework_django.py\", line 600, in wrapper\n return _wrapped(*args, **kwargs)\n File \"newrelic/hooks/framework_django.py\", line 588, in _wrapped\n result = wrapped(path)\n File \"newrelic/hooks/framework_django.py\", line 575, in wrapper\n return wrapped(*args, **kwargs)\n File \"django/urls/resolvers.py\", line 394, in resolve\n raise Resolver404({'tried': tried, 'path': new_path})\n\nKeyError: 'brand'\n File \"stripe/stripe_object.py\", line 90, in __getattr__\n return self[k]\n File \"stripe/stripe_object.py\", line 131, in __getitem__\n raise err\n File \"stripe/stripe_object.py\", line 119, in __getitem__\n return super(StripeObject, self).__getitem__(k)\n\nAttributeError: brand\n(4 additional frame(s) were not displayed)\n...\n File \"django/views/decorators/cache.py\", line 57, in _wrapped_view_func\n response = view_func(request, *args, **kwargs)\n File \"kuma/core/decorators.py\", line 210, in wrapped\n return func(request, *args, **kwargs)\n File \"kuma/users/views.py\", line 472, in user_edit\n \"subscription_info\": retrieve_stripe_subscription_info(edit_user,),\n File \"kuma/users/utils.py\", line 53, in retrieve_stripe_subscription_info\n \"brand\": source.brand,\n File \"stripe/stripe_object.py\", line 92, in __getattr__\n raise AttributeError(*err.args)\n\nAttributeError: brand\n```\n", "before_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return 
None\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n if retrieve_stripe_subscription(customer) is None:\n stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n\ndef retrieve_stripe_subscription_info(user):\n stripe_customer = (\n stripe.Customer.retrieve(user.stripe_customer_id, expand=[\"default_source\"],)\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id\n else None\n )\n\n stripe_subscription = (\n retrieve_stripe_subscription(stripe_customer)\n if stripe_customer and stripe_customer.email == user.email\n else None\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n \"brand\": source.brand,\n \"expires_at\": f\"{source.exp_month}/{source.exp_year}\",\n \"last4\": source.last4,\n \"zip\": source.address_zip,\n }\n\n return None\n", "path": "kuma/users/utils.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n if retrieve_stripe_subscription(customer) is None:\n stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n\ndef retrieve_stripe_subscription_info(user):\n stripe_customer = (\n stripe.Customer.retrieve(user.stripe_customer_id, expand=[\"default_source\"],)\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id\n else None\n )\n\n stripe_subscription = (\n retrieve_stripe_subscription(stripe_customer)\n if stripe_customer and stripe_customer.email == user.email\n else None\n )\n if stripe_subscription:\n source = stripe_customer.default_source\n if source.object == \"card\":\n card = source\n elif source.object == \"source\":\n card = source.card\n else:\n raise ValueError(\n f\"unexpected stripe customer default_source of type {source.object!r}\"\n )\n\n return {\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription.current_period_end\n ),\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n \"zip\": card.address_zip,\n }\n\n return None\n", "path": "kuma/users/utils.py"}]} | 2,438 | 237 |
gh_patches_debug_2468 | rasdani/github-patches | git_diff | learningequality__kolibri-4935 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
users should not be able to get 1000% on an exam, unfortunately
### Observed behavior
reported by @jtamiace re: @radinamatic's apparent good luck:

### Expected behavior
exams are scored between 0 and 100
### User-facing consequences
????
### Errors and logs
unknown
### Steps to reproduce
see http://kolibribeta.learningequality.org/coach/#/fa4cbfeda32c0c0fbf1832fc1ddd10c3/reports/learners
### Context
k 0.12.0 alpha 7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/plugins/coach/class_summary_api.py`
Content:
```
1 from django.db.models import Max
2 from django.db.models import Sum
3 from django.shortcuts import get_object_or_404
4 from rest_framework import serializers
5 from rest_framework import viewsets
6 from rest_framework.response import Response
7
8 from kolibri.core.auth import models as auth_models
9 from kolibri.core.content.models import ContentNode
10 from kolibri.core.exams.models import Exam
11 from kolibri.core.lessons.models import Lesson
12 from kolibri.core.logger import models as logger_models
13 from kolibri.core.notifications.models import LearnerProgressNotification
14 from kolibri.core.notifications.models import NotificationEventType
15
16
17 # Intended to match NotificationEventType
18 NOT_STARTED = "NotStarted"
19 STARTED = "Started"
20 HELP_NEEDED = "HelpNeeded"
21 COMPLETED = "Completed"
22
23
24 def content_status_serializer(lesson_data, learners_data, classroom):
25
26 # First generate a unique set of content node ids from all the lessons
27 lesson_node_ids = set()
28 for lesson in lesson_data:
29 lesson_node_ids |= set(lesson.get("node_ids"))
30
31 # Now create a map of content_id to node_id so that we can map between lessons, and notifications
32 # which use the node id, and summary logs, which use content_id
33 content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list("content_id", "id")}
34
35 # Get all the values we need from the summary logs to be able to summarize current status on the
36 # relevant content items.
37 content_log_values = logger_models.ContentSummaryLog.objects.filter(
38 content_id__in=set(content_map.keys()), user__in=[learner["id"] for learner in learners_data]
39 ).values("user_id", "content_id", "end_timestamp", "time_spent", "progress")
40
41 # In order to make the lookup speedy, generate a unique key for each user/node that we find
42 # listed in the needs help notifications that are relevant. We can then just check
43 # existence of this key in the set in order to see whether this user has been flagged as needing
44 # help.
45 lookup_key = "{user_id}-{node_id}"
46 needs_help = {
47 lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(
48 classroom_id=classroom.id,
49 notification_event=NotificationEventType.Help,
50 lesson_id__in=[lesson["id"] for lesson in lesson_data],
51 ).values_list("user_id", "contentnode_id", "timestamp")
52 }
53
54 # In case a previously flagged learner has since completed an exercise, check all the completed
55 # notifications also
56 completed = {
57 lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(
58 classroom_id=classroom.id,
59 notification_event=NotificationEventType.Completed,
60 lesson_id__in=[lesson["id"] for lesson in lesson_data],
61 ).values_list("user_id", "contentnode_id", "timestamp")
62 }
63
64 def get_status(log):
65 """
66 Read the dict from a content summary log values query and return the status
67 In the case that we have found a needs help notification for the user and content node
68 in question, return that they need help, otherwise return status based on their
69 current progress.
70 """
71 content_id = log["content_id"]
72 if content_id in content_map:
73 # Don't try to lookup anything if we don't know the content_id
74 # node_id mapping - might happen if a channel has since been deleted
75 key = lookup_key.format(user_id=log["user_id"], node_id=content_map[content_id])
76 if key in needs_help:
77 # Now check if we have not already registered completion of the content node
78 # or if we have and the timestamp is earlier than that on the needs_help event
79 if key not in completed or completed[key] < needs_help[key]:
80 return HELP_NEEDED
81 if log["progress"] == 1:
82 return COMPLETED
83 elif log["progress"] == 0:
84 return NOT_STARTED
85 return STARTED
86
87 def map_content_logs(log):
88 """
89 Parse the content logs to return objects in the expected format.
90 """
91 return {
92 "learner_id": log["user_id"],
93 "content_id": log["content_id"],
94 "status": get_status(log),
95 "last_activity": log["end_timestamp"],
96 "time_spent": log["time_spent"],
97 }
98
99 return map(map_content_logs, content_log_values)
100
101
102 class ExamStatusSerializer(serializers.ModelSerializer):
103 status = serializers.SerializerMethodField()
104 exam_id = serializers.PrimaryKeyRelatedField(source="exam", read_only=True)
105 learner_id = serializers.PrimaryKeyRelatedField(source="user", read_only=True)
106 last_activity = serializers.CharField()
107 num_correct = serializers.SerializerMethodField()
108
109 def get_status(self, exam_log):
110 if exam_log.closed:
111 return COMPLETED
112 else:
113 return STARTED
114
115 def get_num_correct(self, exam_log):
116 return (
117 exam_log.attemptlogs.values_list('item')
118 .order_by('completion_timestamp')
119 .distinct()
120 .aggregate(Sum('correct'))
121 .get('correct__sum')
122 )
123
124 class Meta:
125 model = logger_models.ExamLog
126 fields = ("exam_id", "learner_id", "status", "last_activity", "num_correct")
127
128
129 class GroupSerializer(serializers.ModelSerializer):
130 member_ids = serializers.SerializerMethodField()
131
132 def get_member_ids(self, group):
133 return group.get_members().values_list("id", flat=True)
134
135 class Meta:
136 model = auth_models.LearnerGroup
137 fields = ("id", "name", "member_ids")
138
139
140 class UserSerializer(serializers.ModelSerializer):
141 name = serializers.CharField(source="full_name")
142
143 class Meta:
144 model = auth_models.FacilityUser
145 fields = ("id", "name", "username")
146
147
148 class LessonNodeIdsField(serializers.Field):
149 def to_representation(self, values):
150 return [value["contentnode_id"] for value in values]
151
152
153 class LessonAssignmentsField(serializers.RelatedField):
154 def to_representation(self, assignment):
155 return assignment.collection.id
156
157
158 class LessonSerializer(serializers.ModelSerializer):
159 active = serializers.BooleanField(source="is_active")
160 node_ids = LessonNodeIdsField(default=[], source="resources")
161
162 # classrooms are in here, and filtered out later
163 groups = LessonAssignmentsField(
164 many=True, read_only=True, source="lesson_assignments"
165 )
166
167 class Meta:
168 model = Lesson
169 fields = ("id", "title", "active", "node_ids", "groups")
170
171
172 class ExamQuestionSourcesField(serializers.Field):
173 def to_representation(self, values):
174 return values
175
176
177 class ExamAssignmentsField(serializers.RelatedField):
178 def to_representation(self, assignment):
179 return assignment.collection.id
180
181
182 class ExamSerializer(serializers.ModelSerializer):
183
184 question_sources = ExamQuestionSourcesField(default=[])
185
186 # classes are in here, and filtered out later
187 groups = ExamAssignmentsField(many=True, read_only=True, source="assignments")
188
189 class Meta:
190 model = Exam
191 fields = ("id", "title", "active", "question_sources", "groups", "data_model_version")
192
193
194 class ContentSerializer(serializers.ModelSerializer):
195 node_id = serializers.CharField(source="id")
196
197 class Meta:
198 model = ContentNode
199 fields = ("node_id", "content_id", "title", "kind")
200
201
202 def data(Serializer, queryset):
203 return Serializer(queryset, many=True).data
204
205
206 class ClassSummaryViewSet(viewsets.ViewSet):
207 def retrieve(self, request, pk):
208 classroom = get_object_or_404(auth_models.Classroom, id=pk)
209 query_learners = classroom.get_members()
210 query_lesson = Lesson.objects.filter(collection=pk)
211 query_exams = Exam.objects.filter(collection=pk)
212 query_exam_logs = logger_models.ExamLog.objects.filter(
213 exam__in=query_exams
214 ).annotate(last_activity=Max("attemptlogs__end_timestamp"))
215
216 lesson_data = data(LessonSerializer, query_lesson)
217 exam_data = data(ExamSerializer, query_exams)
218
219 # filter classes out of exam assignments
220 for exam in exam_data:
221 exam["groups"] = [g for g in exam["groups"] if g != pk]
222
223 # filter classes out of lesson assignments
224 for lesson in lesson_data:
225 lesson["groups"] = [g for g in lesson["groups"] if g != pk]
226
227 all_node_ids = set()
228 for lesson in lesson_data:
229 all_node_ids |= set(lesson.get("node_ids"))
230 for exam in exam_data:
231 exam_node_ids = [question['exercise_id'] for question in exam.get("question_sources")]
232 all_node_ids |= set(exam_node_ids)
233
234 query_content = ContentNode.objects.filter(id__in=all_node_ids)
235
236 learners_data = data(UserSerializer, query_learners)
237
238 output = {
239 "id": pk,
240 "name": classroom.name,
241 "coaches": data(UserSerializer, classroom.get_coaches()),
242 "learners": learners_data,
243 "groups": data(GroupSerializer, classroom.get_learner_groups()),
244 "exams": exam_data,
245 "exam_learner_status": data(ExamStatusSerializer, query_exam_logs),
246 "content": data(ContentSerializer, query_content),
247 "content_learner_status": content_status_serializer(lesson_data, learners_data, classroom),
248 "lessons": lesson_data,
249 }
250
251 return Response(output)
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/plugins/coach/class_summary_api.py b/kolibri/plugins/coach/class_summary_api.py
--- a/kolibri/plugins/coach/class_summary_api.py
+++ b/kolibri/plugins/coach/class_summary_api.py
@@ -188,7 +188,7 @@
class Meta:
model = Exam
- fields = ("id", "title", "active", "question_sources", "groups", "data_model_version")
+ fields = ("id", "title", "active", "question_sources", "groups", "data_model_version", "question_count")
class ContentSerializer(serializers.ModelSerializer):
| {"golden_diff": "diff --git a/kolibri/plugins/coach/class_summary_api.py b/kolibri/plugins/coach/class_summary_api.py\n--- a/kolibri/plugins/coach/class_summary_api.py\n+++ b/kolibri/plugins/coach/class_summary_api.py\n@@ -188,7 +188,7 @@\n \n class Meta:\n model = Exam\n- fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\")\n+ fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\", \"question_count\")\n \n \n class ContentSerializer(serializers.ModelSerializer):\n", "issue": "users should not be able to get 1000% on an exam, unfortunately\n\r\n### Observed behavior\r\n\r\nreported by @jtamiace re: @radinamatic's apparent good luck:\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nexams are scored between 0 and 100\r\n\r\n### User-facing consequences\r\n\r\n????\r\n\r\n### Errors and logs\r\n\r\nunknown\r\n\r\n### Steps to reproduce\r\n\r\nsee http://kolibribeta.learningequality.org/coach/#/fa4cbfeda32c0c0fbf1832fc1ddd10c3/reports/learners\r\n\r\n### Context\r\n\r\nk 0.12.0 alpha 7\r\n\n", "before_files": [{"content": "from django.db.models import Max\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom kolibri.core.auth import models as auth_models\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger import models as logger_models\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.notifications.models import NotificationEventType\n\n\n# Intended to match NotificationEventType\nNOT_STARTED = \"NotStarted\"\nSTARTED = \"Started\"\nHELP_NEEDED = \"HelpNeeded\"\nCOMPLETED = \"Completed\"\n\n\ndef content_status_serializer(lesson_data, learners_data, classroom):\n\n # First generate a unique set of content node ids from all the lessons\n lesson_node_ids = set()\n for lesson in lesson_data:\n lesson_node_ids |= set(lesson.get(\"node_ids\"))\n\n # Now create a map of content_id to node_id so that we can map between lessons, and notifications\n # which use the node id, and summary logs, which use content_id\n content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list(\"content_id\", \"id\")}\n\n # Get all the values we need from the summary logs to be able to summarize current status on the\n # relevant content items.\n content_log_values = logger_models.ContentSummaryLog.objects.filter(\n content_id__in=set(content_map.keys()), user__in=[learner[\"id\"] for learner in learners_data]\n ).values(\"user_id\", \"content_id\", \"end_timestamp\", \"time_spent\", \"progress\")\n\n # In order to make the lookup speedy, generate a unique key for each user/node that we find\n # listed in the needs help notifications that are relevant. 
We can then just check\n # existence of this key in the set in order to see whether this user has been flagged as needing\n # help.\n lookup_key = \"{user_id}-{node_id}\"\n needs_help = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Help,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n # In case a previously flagged learner has since completed an exercise, check all the completed\n # notifications also\n completed = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Completed,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n def get_status(log):\n \"\"\"\n Read the dict from a content summary log values query and return the status\n In the case that we have found a needs help notification for the user and content node\n in question, return that they need help, otherwise return status based on their\n current progress.\n \"\"\"\n content_id = log[\"content_id\"]\n if content_id in content_map:\n # Don't try to lookup anything if we don't know the content_id\n # node_id mapping - might happen if a channel has since been deleted\n key = lookup_key.format(user_id=log[\"user_id\"], node_id=content_map[content_id])\n if key in needs_help:\n # Now check if we have not already registered completion of the content node\n # or if we have and the timestamp is earlier than that on the needs_help event\n if key not in completed or completed[key] < needs_help[key]:\n return HELP_NEEDED\n if log[\"progress\"] == 1:\n return COMPLETED\n elif log[\"progress\"] == 0:\n return NOT_STARTED\n return STARTED\n\n def map_content_logs(log):\n \"\"\"\n Parse the content logs to return objects in the expected format.\n \"\"\"\n return {\n \"learner_id\": log[\"user_id\"],\n \"content_id\": log[\"content_id\"],\n \"status\": get_status(log),\n \"last_activity\": log[\"end_timestamp\"],\n \"time_spent\": log[\"time_spent\"],\n }\n\n return map(map_content_logs, content_log_values)\n\n\nclass ExamStatusSerializer(serializers.ModelSerializer):\n status = serializers.SerializerMethodField()\n exam_id = serializers.PrimaryKeyRelatedField(source=\"exam\", read_only=True)\n learner_id = serializers.PrimaryKeyRelatedField(source=\"user\", read_only=True)\n last_activity = serializers.CharField()\n num_correct = serializers.SerializerMethodField()\n\n def get_status(self, exam_log):\n if exam_log.closed:\n return COMPLETED\n else:\n return STARTED\n\n def get_num_correct(self, exam_log):\n return (\n exam_log.attemptlogs.values_list('item')\n .order_by('completion_timestamp')\n .distinct()\n .aggregate(Sum('correct'))\n .get('correct__sum')\n )\n\n class Meta:\n model = logger_models.ExamLog\n fields = (\"exam_id\", \"learner_id\", \"status\", \"last_activity\", \"num_correct\")\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n member_ids = serializers.SerializerMethodField()\n\n def get_member_ids(self, group):\n return group.get_members().values_list(\"id\", flat=True)\n\n class Meta:\n model = auth_models.LearnerGroup\n fields = (\"id\", \"name\", \"member_ids\")\n\n\nclass UserSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source=\"full_name\")\n\n class Meta:\n 
model = auth_models.FacilityUser\n fields = (\"id\", \"name\", \"username\")\n\n\nclass LessonNodeIdsField(serializers.Field):\n def to_representation(self, values):\n return [value[\"contentnode_id\"] for value in values]\n\n\nclass LessonAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass LessonSerializer(serializers.ModelSerializer):\n active = serializers.BooleanField(source=\"is_active\")\n node_ids = LessonNodeIdsField(default=[], source=\"resources\")\n\n # classrooms are in here, and filtered out later\n groups = LessonAssignmentsField(\n many=True, read_only=True, source=\"lesson_assignments\"\n )\n\n class Meta:\n model = Lesson\n fields = (\"id\", \"title\", \"active\", \"node_ids\", \"groups\")\n\n\nclass ExamQuestionSourcesField(serializers.Field):\n def to_representation(self, values):\n return values\n\n\nclass ExamAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n\n question_sources = ExamQuestionSourcesField(default=[])\n\n # classes are in here, and filtered out later\n groups = ExamAssignmentsField(many=True, read_only=True, source=\"assignments\")\n\n class Meta:\n model = Exam\n fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\")\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n node_id = serializers.CharField(source=\"id\")\n\n class Meta:\n model = ContentNode\n fields = (\"node_id\", \"content_id\", \"title\", \"kind\")\n\n\ndef data(Serializer, queryset):\n return Serializer(queryset, many=True).data\n\n\nclass ClassSummaryViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk):\n classroom = get_object_or_404(auth_models.Classroom, id=pk)\n query_learners = classroom.get_members()\n query_lesson = Lesson.objects.filter(collection=pk)\n query_exams = Exam.objects.filter(collection=pk)\n query_exam_logs = logger_models.ExamLog.objects.filter(\n exam__in=query_exams\n ).annotate(last_activity=Max(\"attemptlogs__end_timestamp\"))\n\n lesson_data = data(LessonSerializer, query_lesson)\n exam_data = data(ExamSerializer, query_exams)\n\n # filter classes out of exam assignments\n for exam in exam_data:\n exam[\"groups\"] = [g for g in exam[\"groups\"] if g != pk]\n\n # filter classes out of lesson assignments\n for lesson in lesson_data:\n lesson[\"groups\"] = [g for g in lesson[\"groups\"] if g != pk]\n\n all_node_ids = set()\n for lesson in lesson_data:\n all_node_ids |= set(lesson.get(\"node_ids\"))\n for exam in exam_data:\n exam_node_ids = [question['exercise_id'] for question in exam.get(\"question_sources\")]\n all_node_ids |= set(exam_node_ids)\n\n query_content = ContentNode.objects.filter(id__in=all_node_ids)\n\n learners_data = data(UserSerializer, query_learners)\n\n output = {\n \"id\": pk,\n \"name\": classroom.name,\n \"coaches\": data(UserSerializer, classroom.get_coaches()),\n \"learners\": learners_data,\n \"groups\": data(GroupSerializer, classroom.get_learner_groups()),\n \"exams\": exam_data,\n \"exam_learner_status\": data(ExamStatusSerializer, query_exam_logs),\n \"content\": data(ContentSerializer, query_content),\n \"content_learner_status\": content_status_serializer(lesson_data, learners_data, classroom),\n \"lessons\": lesson_data,\n }\n\n return Response(output)\n", "path": "kolibri/plugins/coach/class_summary_api.py"}], "after_files": [{"content": "from django.db.models 
import Max\nfrom django.db.models import Sum\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom kolibri.core.auth import models as auth_models\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger import models as logger_models\nfrom kolibri.core.notifications.models import LearnerProgressNotification\nfrom kolibri.core.notifications.models import NotificationEventType\n\n\n# Intended to match NotificationEventType\nNOT_STARTED = \"NotStarted\"\nSTARTED = \"Started\"\nHELP_NEEDED = \"HelpNeeded\"\nCOMPLETED = \"Completed\"\n\n\ndef content_status_serializer(lesson_data, learners_data, classroom):\n\n # First generate a unique set of content node ids from all the lessons\n lesson_node_ids = set()\n for lesson in lesson_data:\n lesson_node_ids |= set(lesson.get(\"node_ids\"))\n\n # Now create a map of content_id to node_id so that we can map between lessons, and notifications\n # which use the node id, and summary logs, which use content_id\n content_map = {n[0]: n[1] for n in ContentNode.objects.filter(id__in=lesson_node_ids).values_list(\"content_id\", \"id\")}\n\n # Get all the values we need from the summary logs to be able to summarize current status on the\n # relevant content items.\n content_log_values = logger_models.ContentSummaryLog.objects.filter(\n content_id__in=set(content_map.keys()), user__in=[learner[\"id\"] for learner in learners_data]\n ).values(\"user_id\", \"content_id\", \"end_timestamp\", \"time_spent\", \"progress\")\n\n # In order to make the lookup speedy, generate a unique key for each user/node that we find\n # listed in the needs help notifications that are relevant. 
We can then just check\n # existence of this key in the set in order to see whether this user has been flagged as needing\n # help.\n lookup_key = \"{user_id}-{node_id}\"\n needs_help = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Help,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n # In case a previously flagged learner has since completed an exercise, check all the completed\n # notifications also\n completed = {\n lookup_key.format(user_id=n[0], node_id=n[1]): n[2] for n in LearnerProgressNotification.objects.filter(\n classroom_id=classroom.id,\n notification_event=NotificationEventType.Completed,\n lesson_id__in=[lesson[\"id\"] for lesson in lesson_data],\n ).values_list(\"user_id\", \"contentnode_id\", \"timestamp\")\n }\n\n def get_status(log):\n \"\"\"\n Read the dict from a content summary log values query and return the status\n In the case that we have found a needs help notification for the user and content node\n in question, return that they need help, otherwise return status based on their\n current progress.\n \"\"\"\n content_id = log[\"content_id\"]\n if content_id in content_map:\n # Don't try to lookup anything if we don't know the content_id\n # node_id mapping - might happen if a channel has since been deleted\n key = lookup_key.format(user_id=log[\"user_id\"], node_id=content_map[content_id])\n if key in needs_help:\n # Now check if we have not already registered completion of the content node\n # or if we have and the timestamp is earlier than that on the needs_help event\n if key not in completed or completed[key] < needs_help[key]:\n return HELP_NEEDED\n if log[\"progress\"] == 1:\n return COMPLETED\n elif log[\"progress\"] == 0:\n return NOT_STARTED\n return STARTED\n\n def map_content_logs(log):\n \"\"\"\n Parse the content logs to return objects in the expected format.\n \"\"\"\n return {\n \"learner_id\": log[\"user_id\"],\n \"content_id\": log[\"content_id\"],\n \"status\": get_status(log),\n \"last_activity\": log[\"end_timestamp\"],\n \"time_spent\": log[\"time_spent\"],\n }\n\n return map(map_content_logs, content_log_values)\n\n\nclass ExamStatusSerializer(serializers.ModelSerializer):\n status = serializers.SerializerMethodField()\n exam_id = serializers.PrimaryKeyRelatedField(source=\"exam\", read_only=True)\n learner_id = serializers.PrimaryKeyRelatedField(source=\"user\", read_only=True)\n last_activity = serializers.CharField()\n num_correct = serializers.SerializerMethodField()\n\n def get_status(self, exam_log):\n if exam_log.closed:\n return COMPLETED\n else:\n return STARTED\n\n def get_num_correct(self, exam_log):\n return (\n exam_log.attemptlogs.values_list('item')\n .order_by('completion_timestamp')\n .distinct()\n .aggregate(Sum('correct'))\n .get('correct__sum')\n )\n\n class Meta:\n model = logger_models.ExamLog\n fields = (\"exam_id\", \"learner_id\", \"status\", \"last_activity\", \"num_correct\")\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n member_ids = serializers.SerializerMethodField()\n\n def get_member_ids(self, group):\n return group.get_members().values_list(\"id\", flat=True)\n\n class Meta:\n model = auth_models.LearnerGroup\n fields = (\"id\", \"name\", \"member_ids\")\n\n\nclass UserSerializer(serializers.ModelSerializer):\n name = serializers.CharField(source=\"full_name\")\n\n class Meta:\n 
model = auth_models.FacilityUser\n fields = (\"id\", \"name\", \"username\")\n\n\nclass LessonNodeIdsField(serializers.Field):\n def to_representation(self, values):\n return [value[\"contentnode_id\"] for value in values]\n\n\nclass LessonAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass LessonSerializer(serializers.ModelSerializer):\n active = serializers.BooleanField(source=\"is_active\")\n node_ids = LessonNodeIdsField(default=[], source=\"resources\")\n\n # classrooms are in here, and filtered out later\n groups = LessonAssignmentsField(\n many=True, read_only=True, source=\"lesson_assignments\"\n )\n\n class Meta:\n model = Lesson\n fields = (\"id\", \"title\", \"active\", \"node_ids\", \"groups\")\n\n\nclass ExamQuestionSourcesField(serializers.Field):\n def to_representation(self, values):\n return values\n\n\nclass ExamAssignmentsField(serializers.RelatedField):\n def to_representation(self, assignment):\n return assignment.collection.id\n\n\nclass ExamSerializer(serializers.ModelSerializer):\n\n question_sources = ExamQuestionSourcesField(default=[])\n\n # classes are in here, and filtered out later\n groups = ExamAssignmentsField(many=True, read_only=True, source=\"assignments\")\n\n class Meta:\n model = Exam\n fields = (\"id\", \"title\", \"active\", \"question_sources\", \"groups\", \"data_model_version\", \"question_count\")\n\n\nclass ContentSerializer(serializers.ModelSerializer):\n node_id = serializers.CharField(source=\"id\")\n\n class Meta:\n model = ContentNode\n fields = (\"node_id\", \"content_id\", \"title\", \"kind\")\n\n\ndef data(Serializer, queryset):\n return Serializer(queryset, many=True).data\n\n\nclass ClassSummaryViewSet(viewsets.ViewSet):\n def retrieve(self, request, pk):\n classroom = get_object_or_404(auth_models.Classroom, id=pk)\n query_learners = classroom.get_members()\n query_lesson = Lesson.objects.filter(collection=pk)\n query_exams = Exam.objects.filter(collection=pk)\n query_exam_logs = logger_models.ExamLog.objects.filter(\n exam__in=query_exams\n ).annotate(last_activity=Max(\"attemptlogs__end_timestamp\"))\n\n lesson_data = data(LessonSerializer, query_lesson)\n exam_data = data(ExamSerializer, query_exams)\n\n # filter classes out of exam assignments\n for exam in exam_data:\n exam[\"groups\"] = [g for g in exam[\"groups\"] if g != pk]\n\n # filter classes out of lesson assignments\n for lesson in lesson_data:\n lesson[\"groups\"] = [g for g in lesson[\"groups\"] if g != pk]\n\n all_node_ids = set()\n for lesson in lesson_data:\n all_node_ids |= set(lesson.get(\"node_ids\"))\n for exam in exam_data:\n exam_node_ids = [question['exercise_id'] for question in exam.get(\"question_sources\")]\n all_node_ids |= set(exam_node_ids)\n\n query_content = ContentNode.objects.filter(id__in=all_node_ids)\n\n learners_data = data(UserSerializer, query_learners)\n\n output = {\n \"id\": pk,\n \"name\": classroom.name,\n \"coaches\": data(UserSerializer, classroom.get_coaches()),\n \"learners\": learners_data,\n \"groups\": data(GroupSerializer, classroom.get_learner_groups()),\n \"exams\": exam_data,\n \"exam_learner_status\": data(ExamStatusSerializer, query_exam_logs),\n \"content\": data(ContentSerializer, query_content),\n \"content_learner_status\": content_status_serializer(lesson_data, learners_data, classroom),\n \"lessons\": lesson_data,\n }\n\n return Response(output)\n", "path": "kolibri/plugins/coach/class_summary_api.py"}]} | 3,180 | 137 |
gh_patches_debug_22672 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-2252 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Half precision doesn't work with top_k > 1
## 🐛 Bug
When using predictions at half-precision, MulticlassAccuracy throws an error when using top_k > 1. Please see the code below:
### To Reproduce
Steps to reproduce the behavior...
```py
import torch
from torchmetrics.classification import MulticlassAccuracy
preds = torch.tensor([[1, 0, 0]]).half()
targets = torch.tensor([1])
metric = MulticlassAccuracy(num_classes=3, top_k=2)
metric(preds, targets)
metric.compute()
```
### Environment
- TorchMetrics version: 1.0.1
- Python & PyTorch Version (e.g., 1.0): 3.10 and 2.0.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/utilities/data.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import sys
15 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
16
17 import torch
18 from lightning_utilities import apply_to_collection
19 from torch import Tensor
20
21 from torchmetrics.utilities.exceptions import TorchMetricsUserWarning
22 from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE
23 from torchmetrics.utilities.prints import rank_zero_warn
24
25 METRIC_EPS = 1e-6
26
27
28 def dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:
29 """Concatenation along the zero dimension."""
30 if isinstance(x, torch.Tensor):
31 return x
32 x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]
33 if not x: # empty list
34 raise ValueError("No samples to concatenate")
35 return torch.cat(x, dim=0)
36
37
38 def dim_zero_sum(x: Tensor) -> Tensor:
39 """Summation along the zero dimension."""
40 return torch.sum(x, dim=0)
41
42
43 def dim_zero_mean(x: Tensor) -> Tensor:
44 """Average along the zero dimension."""
45 return torch.mean(x, dim=0)
46
47
48 def dim_zero_max(x: Tensor) -> Tensor:
49 """Max along the zero dimension."""
50 return torch.max(x, dim=0).values
51
52
53 def dim_zero_min(x: Tensor) -> Tensor:
54 """Min along the zero dimension."""
55 return torch.min(x, dim=0).values
56
57
58 def _flatten(x: Sequence) -> list:
59 """Flatten list of list into single list."""
60 return [item for sublist in x for item in sublist]
61
62
63 def _flatten_dict(x: Dict) -> Tuple[Dict, bool]:
64 """Flatten dict of dicts into single dict and checking for duplicates in keys along the way."""
65 new_dict = {}
66 duplicates = False
67 for key, value in x.items():
68 if isinstance(value, dict):
69 for k, v in value.items():
70 if k in new_dict:
71 duplicates = True
72 new_dict[k] = v
73 else:
74 if key in new_dict:
75 duplicates = True
76 new_dict[key] = value
77 return new_dict, duplicates
78
79
80 def to_onehot(
81 label_tensor: Tensor,
82 num_classes: Optional[int] = None,
83 ) -> Tensor:
84 """Convert a dense label tensor to one-hot format.
85
86 Args:
87 label_tensor: dense label tensor, with shape [N, d1, d2, ...]
88 num_classes: number of classes C
89
90 Returns:
91 A sparse label tensor with shape [N, C, d1, d2, ...]
92
93 Example:
94 >>> x = torch.tensor([1, 2, 3])
95 >>> to_onehot(x)
96 tensor([[0, 1, 0, 0],
97 [0, 0, 1, 0],
98 [0, 0, 0, 1]])
99
100 """
101 if num_classes is None:
102 num_classes = int(label_tensor.max().detach().item() + 1)
103
104 tensor_onehot = torch.zeros(
105 label_tensor.shape[0],
106 num_classes,
107 *label_tensor.shape[1:],
108 dtype=label_tensor.dtype,
109 device=label_tensor.device,
110 )
111 index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)
112 return tensor_onehot.scatter_(1, index, 1.0)
113
114
115 def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:
116 """Convert a probability tensor to binary by selecting top-k the highest entries.
117
118 Args:
119 prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the
120 position defined by the ``dim`` argument
121 topk: number of the highest entries to turn into 1s
122 dim: dimension on which to compare entries
123
124 Returns:
125 A binary tensor of the same shape as the input tensor of type ``torch.int32``
126
127 Example:
128 >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])
129 >>> select_topk(x, topk=2)
130 tensor([[0, 1, 1],
131 [1, 1, 0]], dtype=torch.int32)
132
133 """
134 zeros = torch.zeros_like(prob_tensor)
135 if topk == 1: # argmax has better performance than topk
136 topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)
137 else:
138 topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
139 return topk_tensor.int()
140
141
142 def to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:
143 """Convert a tensor of probabilities to a dense label tensor.
144
145 Args:
146 x: probabilities to get the categorical label [N, d1, d2, ...]
147 argmax_dim: dimension to apply
148
149 Return:
150 A tensor with categorical labels [N, d2, ...]
151
152 Example:
153 >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])
154 >>> to_categorical(x)
155 tensor([1, 0])
156
157 """
158 return torch.argmax(x, dim=argmax_dim)
159
160
161 def _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:
162 return x.squeeze() if x.numel() == 1 else x
163
164
165 def _squeeze_if_scalar(data: Any) -> Any:
166 return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)
167
168
169 def _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:
170 """Implement custom bincount.
171
172 PyTorch currently does not support ``torch.bincount`` when running in deterministic mode on GPU or when running
173 MPS devices or when running on XLA device. This implementation therefore falls back to using a combination of
174 `torch.arange` and `torch.eq` in these scenarios. A small performance hit can expected and higher memory consumption
175 as `[batch_size, mincount]` tensor needs to be initialized compared to native ``torch.bincount``.
176
177 Args:
178 x: tensor to count
179 minlength: minimum length to count
180
181 Returns:
182 Number of occurrences for each unique element in x
183
184 Example:
185 >>> x = torch.tensor([0,0,0,1,1,2,2,2,2])
186 >>> _bincount(x, minlength=3)
187 tensor([3, 2, 4])
188
189 """
190 if minlength is None:
191 minlength = len(torch.unique(x))
192
193 if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or _TORCH_GREATER_EQUAL_1_12 and x.is_mps:
194 mesh = torch.arange(minlength, device=x.device).repeat(len(x), 1)
195 return torch.eq(x.reshape(-1, 1), mesh).sum(dim=0)
196
197 return torch.bincount(x, minlength=minlength)
198
199
200 def _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:
201 if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != "win32":
202 rank_zero_warn(
203 "You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently "
204 "not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. "
205 "Expect some slowdowns.",
206 TorchMetricsUserWarning,
207 )
208 return x.cpu().cumsum(dim=dim, dtype=dtype).cuda()
209 return torch.cumsum(x, dim=dim, dtype=dtype)
210
211
212 def _flexible_bincount(x: Tensor) -> Tensor:
213 """Similar to `_bincount`, but works also with tensor that do not contain continuous values.
214
215 Args:
216 x: tensor to count
217
218 Returns:
219 Number of occurrences for each unique element in x
220
221 """
222 # make sure elements in x start from 0
223 x = x - x.min()
224 unique_x = torch.unique(x)
225
226 output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type]
227 # remove zeros from output tensor
228 return output[unique_x]
229
230
231 def allclose(tensor1: Tensor, tensor2: Tensor) -> bool:
232 """Wrap torch.allclose to be robust towards dtype difference."""
233 if tensor1.dtype != tensor2.dtype:
234 tensor2 = tensor2.to(dtype=tensor1.dtype)
235 return torch.allclose(tensor1, tensor2)
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/utilities/data.py b/src/torchmetrics/utilities/data.py
--- a/src/torchmetrics/utilities/data.py
+++ b/src/torchmetrics/utilities/data.py
@@ -112,6 +112,14 @@
return tensor_onehot.scatter_(1, index, 1.0)
+def _top_k_with_half_precision_support(x: Tensor, k: int = 1, dim: int = 1) -> Tensor:
+ """torch.top_k does not support half precision on CPU."""
+ if x.dtype == torch.half and not x.is_cuda:
+ idx = torch.argsort(x, dim=dim, descending=True)
+ return idx.narrow(dim, 0, k)
+ return x.topk(k=k, dim=dim).indices
+
+
def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:
"""Convert a probability tensor to binary by selecting top-k the highest entries.
@@ -135,7 +143,7 @@
if topk == 1: # argmax has better performance than topk
topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)
else:
- topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)
+ topk_tensor = zeros.scatter(dim, _top_k_with_half_precision_support(prob_tensor, k=topk, dim=dim), 1.0)
return topk_tensor.int()
| {"golden_diff": "diff --git a/src/torchmetrics/utilities/data.py b/src/torchmetrics/utilities/data.py\n--- a/src/torchmetrics/utilities/data.py\n+++ b/src/torchmetrics/utilities/data.py\n@@ -112,6 +112,14 @@\n return tensor_onehot.scatter_(1, index, 1.0)\n \n \n+def _top_k_with_half_precision_support(x: Tensor, k: int = 1, dim: int = 1) -> Tensor:\n+ \"\"\"torch.top_k does not support half precision on CPU.\"\"\"\n+ if x.dtype == torch.half and not x.is_cuda:\n+ idx = torch.argsort(x, dim=dim, descending=True)\n+ return idx.narrow(dim, 0, k)\n+ return x.topk(k=k, dim=dim).indices\n+\n+\n def select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n \n@@ -135,7 +143,7 @@\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n- topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n+ topk_tensor = zeros.scatter(dim, _top_k_with_half_precision_support(prob_tensor, k=topk, dim=dim), 1.0)\n return topk_tensor.int()\n", "issue": "Half precision doesn't work with top_k > 1\n## \ud83d\udc1b Bug\r\n\r\nWhen using predictions at half-precision, MulticlassAccuracy throws an error when using top_k > 1. Please see the code below:\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n```py\r\nimport torch\r\nfrom torchmetrics.classification import MulticlassAccuracy\r\n\r\npreds = torch.tensor([[1, 0, 0]]).half()\r\ntargets = torch.tensor([1])\r\n\r\nmetric = MulticlassAccuracy(num_classes=3, top_k=2)\r\nmetric(preds, targets)\r\nmetric.compute()\r\n```\r\n\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 1.0.1\r\n- Python & PyTorch Version (e.g., 1.0): 3.10 and 2.0.1\r\n\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom lightning_utilities import apply_to_collection\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.exceptions import TorchMetricsUserWarning\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE\nfrom torchmetrics.utilities.prints import rank_zero_warn\n\nMETRIC_EPS = 1e-6\n\n\ndef dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:\n \"\"\"Concatenation along the zero dimension.\"\"\"\n if isinstance(x, torch.Tensor):\n return x\n x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim == 0 else y for y in x]\n if not x: # empty list\n raise ValueError(\"No samples to concatenate\")\n return torch.cat(x, dim=0)\n\n\ndef dim_zero_sum(x: Tensor) -> Tensor:\n \"\"\"Summation along the zero dimension.\"\"\"\n return torch.sum(x, dim=0)\n\n\ndef dim_zero_mean(x: Tensor) -> Tensor:\n \"\"\"Average along the zero dimension.\"\"\"\n return torch.mean(x, dim=0)\n\n\ndef dim_zero_max(x: Tensor) -> Tensor:\n \"\"\"Max along the zero 
dimension.\"\"\"\n return torch.max(x, dim=0).values\n\n\ndef dim_zero_min(x: Tensor) -> Tensor:\n \"\"\"Min along the zero dimension.\"\"\"\n return torch.min(x, dim=0).values\n\n\ndef _flatten(x: Sequence) -> list:\n \"\"\"Flatten list of list into single list.\"\"\"\n return [item for sublist in x for item in sublist]\n\n\ndef _flatten_dict(x: Dict) -> Tuple[Dict, bool]:\n \"\"\"Flatten dict of dicts into single dict and checking for duplicates in keys along the way.\"\"\"\n new_dict = {}\n duplicates = False\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n if k in new_dict:\n duplicates = True\n new_dict[k] = v\n else:\n if key in new_dict:\n duplicates = True\n new_dict[key] = value\n return new_dict, duplicates\n\n\ndef to_onehot(\n label_tensor: Tensor,\n num_classes: Optional[int] = None,\n) -> Tensor:\n \"\"\"Convert a dense label tensor to one-hot format.\n\n Args:\n label_tensor: dense label tensor, with shape [N, d1, d2, ...]\n num_classes: number of classes C\n\n Returns:\n A sparse label tensor with shape [N, C, d1, d2, ...]\n\n Example:\n >>> x = torch.tensor([1, 2, 3])\n >>> to_onehot(x)\n tensor([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n \"\"\"\n if num_classes is None:\n num_classes = int(label_tensor.max().detach().item() + 1)\n\n tensor_onehot = torch.zeros(\n label_tensor.shape[0],\n num_classes,\n *label_tensor.shape[1:],\n dtype=label_tensor.dtype,\n device=label_tensor.device,\n )\n index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)\n return tensor_onehot.scatter_(1, index, 1.0)\n\n\ndef select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n\n Args:\n prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the\n position defined by the ``dim`` argument\n topk: number of the highest entries to turn into 1s\n dim: dimension on which to compare entries\n\n Returns:\n A binary tensor of the same shape as the input tensor of type ``torch.int32``\n\n Example:\n >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])\n >>> select_topk(x, topk=2)\n tensor([[0, 1, 1],\n [1, 1, 0]], dtype=torch.int32)\n\n \"\"\"\n zeros = torch.zeros_like(prob_tensor)\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n topk_tensor = zeros.scatter(dim, prob_tensor.topk(k=topk, dim=dim).indices, 1.0)\n return topk_tensor.int()\n\n\ndef to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:\n \"\"\"Convert a tensor of probabilities to a dense label tensor.\n\n Args:\n x: probabilities to get the categorical label [N, d1, d2, ...]\n argmax_dim: dimension to apply\n\n Return:\n A tensor with categorical labels [N, d2, ...]\n\n Example:\n >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])\n >>> to_categorical(x)\n tensor([1, 0])\n\n \"\"\"\n return torch.argmax(x, dim=argmax_dim)\n\n\ndef _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:\n return x.squeeze() if x.numel() == 1 else x\n\n\ndef _squeeze_if_scalar(data: Any) -> Any:\n return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)\n\n\ndef _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:\n \"\"\"Implement custom bincount.\n\n PyTorch currently does not support ``torch.bincount`` when running in deterministic mode on GPU or when running\n MPS devices or when running on XLA device. 
This implementation therefore falls back to using a combination of\n `torch.arange` and `torch.eq` in these scenarios. A small performance hit can expected and higher memory consumption\n as `[batch_size, mincount]` tensor needs to be initialized compared to native ``torch.bincount``.\n\n Args:\n x: tensor to count\n minlength: minimum length to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n Example:\n >>> x = torch.tensor([0,0,0,1,1,2,2,2,2])\n >>> _bincount(x, minlength=3)\n tensor([3, 2, 4])\n\n \"\"\"\n if minlength is None:\n minlength = len(torch.unique(x))\n\n if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or _TORCH_GREATER_EQUAL_1_12 and x.is_mps:\n mesh = torch.arange(minlength, device=x.device).repeat(len(x), 1)\n return torch.eq(x.reshape(-1, 1), mesh).sum(dim=0)\n\n return torch.bincount(x, minlength=minlength)\n\n\ndef _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:\n if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != \"win32\":\n rank_zero_warn(\n \"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently \"\n \"not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. \"\n \"Expect some slowdowns.\",\n TorchMetricsUserWarning,\n )\n return x.cpu().cumsum(dim=dim, dtype=dtype).cuda()\n return torch.cumsum(x, dim=dim, dtype=dtype)\n\n\ndef _flexible_bincount(x: Tensor) -> Tensor:\n \"\"\"Similar to `_bincount`, but works also with tensor that do not contain continuous values.\n\n Args:\n x: tensor to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n \"\"\"\n # make sure elements in x start from 0\n x = x - x.min()\n unique_x = torch.unique(x)\n\n output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type]\n # remove zeros from output tensor\n return output[unique_x]\n\n\ndef allclose(tensor1: Tensor, tensor2: Tensor) -> bool:\n \"\"\"Wrap torch.allclose to be robust towards dtype difference.\"\"\"\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2.to(dtype=tensor1.dtype)\n return torch.allclose(tensor1, tensor2)\n", "path": "src/torchmetrics/utilities/data.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport sys\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport torch\nfrom lightning_utilities import apply_to_collection\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.exceptions import TorchMetricsUserWarning\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_12, _XLA_AVAILABLE\nfrom torchmetrics.utilities.prints import rank_zero_warn\n\nMETRIC_EPS = 1e-6\n\n\ndef dim_zero_cat(x: Union[Tensor, List[Tensor]]) -> Tensor:\n \"\"\"Concatenation along the zero dimension.\"\"\"\n if isinstance(x, torch.Tensor):\n return x\n x = [y.unsqueeze(0) if y.numel() == 1 and y.ndim 
== 0 else y for y in x]\n if not x: # empty list\n raise ValueError(\"No samples to concatenate\")\n return torch.cat(x, dim=0)\n\n\ndef dim_zero_sum(x: Tensor) -> Tensor:\n \"\"\"Summation along the zero dimension.\"\"\"\n return torch.sum(x, dim=0)\n\n\ndef dim_zero_mean(x: Tensor) -> Tensor:\n \"\"\"Average along the zero dimension.\"\"\"\n return torch.mean(x, dim=0)\n\n\ndef dim_zero_max(x: Tensor) -> Tensor:\n \"\"\"Max along the zero dimension.\"\"\"\n return torch.max(x, dim=0).values\n\n\ndef dim_zero_min(x: Tensor) -> Tensor:\n \"\"\"Min along the zero dimension.\"\"\"\n return torch.min(x, dim=0).values\n\n\ndef _flatten(x: Sequence) -> list:\n \"\"\"Flatten list of list into single list.\"\"\"\n return [item for sublist in x for item in sublist]\n\n\ndef _flatten_dict(x: Dict) -> Tuple[Dict, bool]:\n \"\"\"Flatten dict of dicts into single dict and checking for duplicates in keys along the way.\"\"\"\n new_dict = {}\n duplicates = False\n for key, value in x.items():\n if isinstance(value, dict):\n for k, v in value.items():\n if k in new_dict:\n duplicates = True\n new_dict[k] = v\n else:\n if key in new_dict:\n duplicates = True\n new_dict[key] = value\n return new_dict, duplicates\n\n\ndef to_onehot(\n label_tensor: Tensor,\n num_classes: Optional[int] = None,\n) -> Tensor:\n \"\"\"Convert a dense label tensor to one-hot format.\n\n Args:\n label_tensor: dense label tensor, with shape [N, d1, d2, ...]\n num_classes: number of classes C\n\n Returns:\n A sparse label tensor with shape [N, C, d1, d2, ...]\n\n Example:\n >>> x = torch.tensor([1, 2, 3])\n >>> to_onehot(x)\n tensor([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n \"\"\"\n if num_classes is None:\n num_classes = int(label_tensor.max().detach().item() + 1)\n\n tensor_onehot = torch.zeros(\n label_tensor.shape[0],\n num_classes,\n *label_tensor.shape[1:],\n dtype=label_tensor.dtype,\n device=label_tensor.device,\n )\n index = label_tensor.long().unsqueeze(1).expand_as(tensor_onehot)\n return tensor_onehot.scatter_(1, index, 1.0)\n\n\ndef _top_k_with_half_precision_support(x: Tensor, k: int = 1, dim: int = 1) -> Tensor:\n \"\"\"torch.top_k does not support half precision on CPU.\"\"\"\n if x.dtype == torch.half and not x.is_cuda:\n idx = torch.argsort(x, dim=dim, descending=True)\n return idx.narrow(dim, 0, k)\n return x.topk(k=k, dim=dim).indices\n\n\ndef select_topk(prob_tensor: Tensor, topk: int = 1, dim: int = 1) -> Tensor:\n \"\"\"Convert a probability tensor to binary by selecting top-k the highest entries.\n\n Args:\n prob_tensor: dense tensor of shape ``[..., C, ...]``, where ``C`` is in the\n position defined by the ``dim`` argument\n topk: number of the highest entries to turn into 1s\n dim: dimension on which to compare entries\n\n Returns:\n A binary tensor of the same shape as the input tensor of type ``torch.int32``\n\n Example:\n >>> x = torch.tensor([[1.1, 2.0, 3.0], [2.0, 1.0, 0.5]])\n >>> select_topk(x, topk=2)\n tensor([[0, 1, 1],\n [1, 1, 0]], dtype=torch.int32)\n\n \"\"\"\n zeros = torch.zeros_like(prob_tensor)\n if topk == 1: # argmax has better performance than topk\n topk_tensor = zeros.scatter(dim, prob_tensor.argmax(dim=dim, keepdim=True), 1.0)\n else:\n topk_tensor = zeros.scatter(dim, _top_k_with_half_precision_support(prob_tensor, k=topk, dim=dim), 1.0)\n return topk_tensor.int()\n\n\ndef to_categorical(x: Tensor, argmax_dim: int = 1) -> Tensor:\n \"\"\"Convert a tensor of probabilities to a dense label tensor.\n\n Args:\n x: probabilities to get the categorical label [N, d1, d2, ...]\n 
argmax_dim: dimension to apply\n\n Return:\n A tensor with categorical labels [N, d2, ...]\n\n Example:\n >>> x = torch.tensor([[0.2, 0.5], [0.9, 0.1]])\n >>> to_categorical(x)\n tensor([1, 0])\n\n \"\"\"\n return torch.argmax(x, dim=argmax_dim)\n\n\ndef _squeeze_scalar_element_tensor(x: Tensor) -> Tensor:\n return x.squeeze() if x.numel() == 1 else x\n\n\ndef _squeeze_if_scalar(data: Any) -> Any:\n return apply_to_collection(data, Tensor, _squeeze_scalar_element_tensor)\n\n\ndef _bincount(x: Tensor, minlength: Optional[int] = None) -> Tensor:\n \"\"\"Implement custom bincount.\n\n PyTorch currently does not support ``torch.bincount`` when running in deterministic mode on GPU or when running\n MPS devices or when running on XLA device. This implementation therefore falls back to using a combination of\n `torch.arange` and `torch.eq` in these scenarios. A small performance hit can expected and higher memory consumption\n as `[batch_size, mincount]` tensor needs to be initialized compared to native ``torch.bincount``.\n\n Args:\n x: tensor to count\n minlength: minimum length to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n Example:\n >>> x = torch.tensor([0,0,0,1,1,2,2,2,2])\n >>> _bincount(x, minlength=3)\n tensor([3, 2, 4])\n\n \"\"\"\n if minlength is None:\n minlength = len(torch.unique(x))\n\n if torch.are_deterministic_algorithms_enabled() or _XLA_AVAILABLE or _TORCH_GREATER_EQUAL_1_12 and x.is_mps:\n mesh = torch.arange(minlength, device=x.device).repeat(len(x), 1)\n return torch.eq(x.reshape(-1, 1), mesh).sum(dim=0)\n\n return torch.bincount(x, minlength=minlength)\n\n\ndef _cumsum(x: Tensor, dim: Optional[int] = 0, dtype: Optional[torch.dtype] = None) -> Tensor:\n if torch.are_deterministic_algorithms_enabled() and x.is_cuda and x.is_floating_point() and sys.platform != \"win32\":\n rank_zero_warn(\n \"You are trying to use a metric in deterministic mode on GPU that uses `torch.cumsum`, which is currently \"\n \"not supported. The tensor will be copied to the CPU memory to compute it and then copied back to GPU. \"\n \"Expect some slowdowns.\",\n TorchMetricsUserWarning,\n )\n return x.cpu().cumsum(dim=dim, dtype=dtype).cuda()\n return torch.cumsum(x, dim=dim, dtype=dtype)\n\n\ndef _flexible_bincount(x: Tensor) -> Tensor:\n \"\"\"Similar to `_bincount`, but works also with tensor that do not contain continuous values.\n\n Args:\n x: tensor to count\n\n Returns:\n Number of occurrences for each unique element in x\n\n \"\"\"\n # make sure elements in x start from 0\n x = x - x.min()\n unique_x = torch.unique(x)\n\n output = _bincount(x, minlength=torch.max(unique_x) + 1) # type: ignore[arg-type]\n # remove zeros from output tensor\n return output[unique_x]\n\n\ndef allclose(tensor1: Tensor, tensor2: Tensor) -> bool:\n \"\"\"Wrap torch.allclose to be robust towards dtype difference.\"\"\"\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2.to(dtype=tensor1.dtype)\n return torch.allclose(tensor1, tensor2)\n", "path": "src/torchmetrics/utilities/data.py"}]} | 3,093 | 354 |
gh_patches_debug_8032 | rasdani/github-patches | git_diff | elastic__ecs-1115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug][ECS 1.7] Strip `index` attributes on `wildcard` mappings
**Description of the issue:**
The 1.7 experimental `wildcard` mappings need to have the `index` attribute stripped from them. The one that was causing problems for me is here:
https://github.com/elastic/ecs/blob/3edb6f2cf2c657f88875011625cd1801709c5d5b/experimental/generated/beats/fields.ecs.yml#L1154-L1163
Here's the error I'm getting from Elasticsearch:
```
Failed to parse mapping: unknown parameter [index] on mapper [stack_trace] of type [wildcard]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/schema/cleaner.py`
Content:
```
1 import copy
2
3 from generators import ecs_helpers
4 from schema import visitor
5
6 # This script performs a few cleanup functions in place, within the deeply nested
7 # 'fields' structure passed to `clean(fields)`.
8 #
9 # What happens here:
10 #
11 # - check that mandatory attributes are present, without which we can't do much.
12 # - cleans things up, like stripping spaces, sorting arrays
13 # - makes lots of defaults explicit
14 # - pre-calculate a few additional helpful fields
15 # - converts shorthands into full representation (e.g. reuse locations)
16 #
17 # This script only deals with field sets themselves and the fields defined
18 # inside them. It doesn't perform field reuse, and therefore doesn't
19 # deal with final field names either.
20
21
22 def clean(fields, strict=False):
23 global strict_mode
24 strict_mode = strict
25 visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)
26
27
28 # Schema level cleanup
29
30
31 def schema_cleanup(schema):
32 # Sanity check first
33 schema_mandatory_attributes(schema)
34 # trailing space cleanup
35 ecs_helpers.dict_clean_string_values(schema['schema_details'])
36 ecs_helpers.dict_clean_string_values(schema['field_details'])
37 # Some defaults
38 schema['schema_details'].setdefault('group', 2)
39 schema['schema_details'].setdefault('root', False)
40 schema['field_details'].setdefault('type', 'group')
41 schema['field_details'].setdefault('short', schema['field_details']['description'])
42 if 'reusable' in schema['schema_details']:
43 # order to perform chained reuses. Set to 1 if it needs to happen earlier.
44 schema['schema_details']['reusable'].setdefault('order', 2)
45 # Precalculate stuff. Those can't be set in the YAML.
46 if schema['schema_details']['root']:
47 schema['schema_details']['prefix'] = ''
48 else:
49 schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'
50 normalize_reuse_notation(schema)
51 # Final validity check if in strict mode
52 schema_assertions_and_warnings(schema)
53
54
55 SCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']
56
57
58 def schema_mandatory_attributes(schema):
59 '''Ensures for the presence of the mandatory schema attributes and raises if any are missing'''
60 current_schema_attributes = sorted(list(schema['field_details'].keys()) +
61 list(schema['schema_details'].keys()))
62 missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)
63 if len(missing_attributes) > 0:
64 msg = "Schema {} is missing the following mandatory attributes: {}.\nFound these: {}".format(
65 schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)
66 raise ValueError(msg)
67 if 'reusable' in schema['schema_details']:
68 reuse_attributes = sorted(schema['schema_details']['reusable'].keys())
69 missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)
70 if len(missing_reuse_attributes) > 0:
71 msg = "Reusable schema {} is missing the following reuse attributes: {}.\nFound these: {}".format(
72 schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)
73 raise ValueError(msg)
74
75
76 def schema_assertions_and_warnings(schema):
77 '''Additional checks on a fleshed out schema'''
78 single_line_short_description(schema, strict=strict_mode)
79
80
81 def normalize_reuse_notation(schema):
82 """
83 Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.
84
85 When marking "user" as reusable under "destination" with the shorthand entry
86 `- destination`, this is expanded to the complete entry
87 `- { "at": "destination", "as": "user" }`.
88 The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.
89
90 The dictionary notation enables nesting a field set as a different name.
91 An example is nesting "process" fields to capture parent process details
92 at `process.parent.*`.
93 The dictionary notation `- { "at": "process", "as": "parent" }` will yield
94 fields such as `process.parent.pid`.
95 """
96 if 'reusable' not in schema['schema_details']:
97 return
98 schema_name = schema['field_details']['name']
99 reuse_entries = []
100 for reuse_entry in schema['schema_details']['reusable']['expected']:
101 if type(reuse_entry) is dict: # Already explicit
102 if 'at' in reuse_entry and 'as' in reuse_entry:
103 explicit_entry = reuse_entry
104 else:
105 raise ValueError("When specifying reusable expected locations for {} " +
106 "with the dictionary notation, keys 'as' and 'at' are required. " +
107 "Got {}.".format(schema_name, reuse_entry))
108 else: # Make it explicit
109 explicit_entry = {'at': reuse_entry, 'as': schema_name}
110 explicit_entry['full'] = explicit_entry['at'] + '.' + explicit_entry['as']
111 reuse_entries.append(explicit_entry)
112 schema['schema_details']['reusable']['expected'] = reuse_entries
113
114
115 # Field level cleanup
116
117
118 def field_cleanup(field):
119 field_mandatory_attributes(field)
120 if ecs_helpers.is_intermediate(field):
121 return
122 ecs_helpers.dict_clean_string_values(field['field_details'])
123 if 'allowed_values' in field['field_details']:
124 for allowed_value in field['field_details']['allowed_values']:
125 ecs_helpers.dict_clean_string_values(allowed_value)
126 field_defaults(field)
127 field_assertions_and_warnings(field)
128
129
130 def field_defaults(field):
131 field['field_details'].setdefault('short', field['field_details']['description'])
132 field['field_details'].setdefault('normalize', [])
133 field_or_multi_field_datatype_defaults(field['field_details'])
134 if 'multi_fields' in field['field_details']:
135 for mf in field['field_details']['multi_fields']:
136 field_or_multi_field_datatype_defaults(mf)
137 if 'name' not in mf:
138 mf['name'] = mf['type']
139
140
141 def field_or_multi_field_datatype_defaults(field_details):
142 '''Sets datatype-related defaults on a canonical field or multi-field entries.'''
143 if field_details['type'] == 'keyword':
144 field_details.setdefault('ignore_above', 1024)
145 if field_details['type'] == 'text':
146 field_details.setdefault('norms', False)
147 if 'index' in field_details and not field_details['index']:
148 field_details.setdefault('doc_values', False)
149
150
151 FIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']
152 ACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']
153
154
155 def field_mandatory_attributes(field):
156 '''Ensures for the presence of the mandatory field attributes and raises if any are missing'''
157 if ecs_helpers.is_intermediate(field):
158 return
159 current_field_attributes = sorted(field['field_details'].keys())
160 missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)
161
162 # `alias` fields require a target `path` attribute.
163 if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:
164 missing_attributes.append('path')
165 # `scaled_float` fields require a `scaling_factor` attribute.
166 if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:
167 missing_attributes.append('scaling_factor')
168
169 if len(missing_attributes) > 0:
170 msg = "Field is missing the following mandatory attributes: {}.\nFound these: {}.\nField details: {}"
171 raise ValueError(msg.format(', '.join(missing_attributes),
172 current_field_attributes, field))
173
174
175 def field_assertions_and_warnings(field):
176 '''Additional checks on a fleshed out field'''
177 if not ecs_helpers.is_intermediate(field):
178 # check short description length if in strict mode
179 single_line_short_description(field, strict=strict_mode)
180 check_example_value(field, strict=strict_mode)
181 if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:
182 msg = "Invalid level for field '{}'.\nValue: {}\nAcceptable values: {}".format(
183 field['field_details']['name'], field['field_details']['level'],
184 ACCEPTABLE_FIELD_LEVELS)
185 raise ValueError(msg)
186
187 # Common
188
189
190 SHORT_LIMIT = 120
191
192
193 def single_line_short_description(schema_or_field, strict=True):
194 short_length = len(schema_or_field['field_details']['short'])
195 if "\n" in schema_or_field['field_details']['short'] or short_length > SHORT_LIMIT:
196 msg = "Short descriptions must be single line, and under {} characters (current length: {}).\n".format(
197 SHORT_LIMIT, short_length)
198 msg += "Offending field or field set: {}\nShort description:\n {}".format(
199 schema_or_field['field_details']['name'],
200 schema_or_field['field_details']['short'])
201 if strict:
202 raise ValueError(msg)
203 else:
204 ecs_helpers.strict_warning(msg)
205
206
207 def check_example_value(field, strict=True):
208 """
209 Checks if value of the example field is of type list or dict.
210 Fails or warns (depending on strict mode) if so.
211 """
212 example_value = field['field_details'].get('example', None)
213 if isinstance(example_value, (list, dict)):
214 name = field['field_details']['name']
215 msg = f"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation."
216 if strict:
217 raise ValueError(msg)
218 else:
219 ecs_helpers.strict_warning(msg)
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py
--- a/scripts/schema/cleaner.py
+++ b/scripts/schema/cleaner.py
@@ -144,6 +144,9 @@
field_details.setdefault('ignore_above', 1024)
if field_details['type'] == 'text':
field_details.setdefault('norms', False)
+ # wildcard needs the index param stripped
+ if field_details['type'] == 'wildcard':
+ field_details.pop('index', None)
if 'index' in field_details and not field_details['index']:
field_details.setdefault('doc_values', False)
| {"golden_diff": "diff --git a/scripts/schema/cleaner.py b/scripts/schema/cleaner.py\n--- a/scripts/schema/cleaner.py\n+++ b/scripts/schema/cleaner.py\n@@ -144,6 +144,9 @@\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n+ # wildcard needs the index param stripped\n+ if field_details['type'] == 'wildcard':\n+ field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n", "issue": "[Bug][ECS 1.7] Strip `index` attributes on `wildcard` mappings\n**Description of the issue:**\r\n\r\nThe 1.7 experimental `wildcard` mappings need to have the `index` attribute stripped from them. The one that was causing problems for me is here:\r\n\r\nhttps://github.com/elastic/ecs/blob/3edb6f2cf2c657f88875011625cd1801709c5d5b/experimental/generated/beats/fields.ecs.yml#L1154-L1163\r\n\r\nHere's the error I'm getting from Elasticsearch:\r\n\r\n```\r\nFailed to parse mapping: unknown parameter [index] on mapper [stack_trace] of type [wildcard]\r\n```\r\n\n", "before_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. 
Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n '''Ensures for the presence of the mandatory schema attributes and raises if any are missing'''\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n '''Additional checks on a fleshed out schema'''\n single_line_short_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' 
+ explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n '''Sets datatype-related defaults on a canonical field or multi-field entries.'''\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n '''Ensures for the presence of the mandatory field attributes and raises if any are missing'''\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n '''Additional checks on a fleshed out field'''\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n short_length = len(schema_or_field['field_details']['short'])\n if \"\\n\" in schema_or_field['field_details']['short'] or short_length > SHORT_LIMIT:\n msg = \"Short descriptions must be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += 
\"Offending field or field set: {}\\nShort description:\\n {}\".format(\n schema_or_field['field_details']['name'],\n schema_or_field['field_details']['short'])\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}], "after_files": [{"content": "import copy\n\nfrom generators import ecs_helpers\nfrom schema import visitor\n\n# This script performs a few cleanup functions in place, within the deeply nested\n# 'fields' structure passed to `clean(fields)`.\n#\n# What happens here:\n#\n# - check that mandatory attributes are present, without which we can't do much.\n# - cleans things up, like stripping spaces, sorting arrays\n# - makes lots of defaults explicit\n# - pre-calculate a few additional helpful fields\n# - converts shorthands into full representation (e.g. reuse locations)\n#\n# This script only deals with field sets themselves and the fields defined\n# inside them. It doesn't perform field reuse, and therefore doesn't\n# deal with final field names either.\n\n\ndef clean(fields, strict=False):\n global strict_mode\n strict_mode = strict\n visitor.visit_fields(fields, fieldset_func=schema_cleanup, field_func=field_cleanup)\n\n\n# Schema level cleanup\n\n\ndef schema_cleanup(schema):\n # Sanity check first\n schema_mandatory_attributes(schema)\n # trailing space cleanup\n ecs_helpers.dict_clean_string_values(schema['schema_details'])\n ecs_helpers.dict_clean_string_values(schema['field_details'])\n # Some defaults\n schema['schema_details'].setdefault('group', 2)\n schema['schema_details'].setdefault('root', False)\n schema['field_details'].setdefault('type', 'group')\n schema['field_details'].setdefault('short', schema['field_details']['description'])\n if 'reusable' in schema['schema_details']:\n # order to perform chained reuses. Set to 1 if it needs to happen earlier.\n schema['schema_details']['reusable'].setdefault('order', 2)\n # Precalculate stuff. 
Those can't be set in the YAML.\n if schema['schema_details']['root']:\n schema['schema_details']['prefix'] = ''\n else:\n schema['schema_details']['prefix'] = schema['field_details']['name'] + '.'\n normalize_reuse_notation(schema)\n # Final validity check if in strict mode\n schema_assertions_and_warnings(schema)\n\n\nSCHEMA_MANDATORY_ATTRIBUTES = ['name', 'title', 'description']\n\n\ndef schema_mandatory_attributes(schema):\n '''Ensures for the presence of the mandatory schema attributes and raises if any are missing'''\n current_schema_attributes = sorted(list(schema['field_details'].keys()) +\n list(schema['schema_details'].keys()))\n missing_attributes = ecs_helpers.list_subtract(SCHEMA_MANDATORY_ATTRIBUTES, current_schema_attributes)\n if len(missing_attributes) > 0:\n msg = \"Schema {} is missing the following mandatory attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_attributes), current_schema_attributes)\n raise ValueError(msg)\n if 'reusable' in schema['schema_details']:\n reuse_attributes = sorted(schema['schema_details']['reusable'].keys())\n missing_reuse_attributes = ecs_helpers.list_subtract(['expected', 'top_level'], reuse_attributes)\n if len(missing_reuse_attributes) > 0:\n msg = \"Reusable schema {} is missing the following reuse attributes: {}.\\nFound these: {}\".format(\n schema['field_details']['name'], ', '.join(missing_reuse_attributes), reuse_attributes)\n raise ValueError(msg)\n\n\ndef schema_assertions_and_warnings(schema):\n '''Additional checks on a fleshed out schema'''\n single_line_short_description(schema, strict=strict_mode)\n\n\ndef normalize_reuse_notation(schema):\n \"\"\"\n Replace single word reuse shorthands from the schema YAMLs with the explicit {at: , as:} notation.\n\n When marking \"user\" as reusable under \"destination\" with the shorthand entry\n `- destination`, this is expanded to the complete entry\n `- { \"at\": \"destination\", \"as\": \"user\" }`.\n The field set is thus nested at `destination.user.*`, with fields such as `destination.user.name`.\n\n The dictionary notation enables nesting a field set as a different name.\n An example is nesting \"process\" fields to capture parent process details\n at `process.parent.*`.\n The dictionary notation `- { \"at\": \"process\", \"as\": \"parent\" }` will yield\n fields such as `process.parent.pid`.\n \"\"\"\n if 'reusable' not in schema['schema_details']:\n return\n schema_name = schema['field_details']['name']\n reuse_entries = []\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n if type(reuse_entry) is dict: # Already explicit\n if 'at' in reuse_entry and 'as' in reuse_entry:\n explicit_entry = reuse_entry\n else:\n raise ValueError(\"When specifying reusable expected locations for {} \" +\n \"with the dictionary notation, keys 'as' and 'at' are required. \" +\n \"Got {}.\".format(schema_name, reuse_entry))\n else: # Make it explicit\n explicit_entry = {'at': reuse_entry, 'as': schema_name}\n explicit_entry['full'] = explicit_entry['at'] + '.' 
+ explicit_entry['as']\n reuse_entries.append(explicit_entry)\n schema['schema_details']['reusable']['expected'] = reuse_entries\n\n\n# Field level cleanup\n\n\ndef field_cleanup(field):\n field_mandatory_attributes(field)\n if ecs_helpers.is_intermediate(field):\n return\n ecs_helpers.dict_clean_string_values(field['field_details'])\n if 'allowed_values' in field['field_details']:\n for allowed_value in field['field_details']['allowed_values']:\n ecs_helpers.dict_clean_string_values(allowed_value)\n field_defaults(field)\n field_assertions_and_warnings(field)\n\n\ndef field_defaults(field):\n field['field_details'].setdefault('short', field['field_details']['description'])\n field['field_details'].setdefault('normalize', [])\n field_or_multi_field_datatype_defaults(field['field_details'])\n if 'multi_fields' in field['field_details']:\n for mf in field['field_details']['multi_fields']:\n field_or_multi_field_datatype_defaults(mf)\n if 'name' not in mf:\n mf['name'] = mf['type']\n\n\ndef field_or_multi_field_datatype_defaults(field_details):\n '''Sets datatype-related defaults on a canonical field or multi-field entries.'''\n if field_details['type'] == 'keyword':\n field_details.setdefault('ignore_above', 1024)\n if field_details['type'] == 'text':\n field_details.setdefault('norms', False)\n # wildcard needs the index param stripped\n if field_details['type'] == 'wildcard':\n field_details.pop('index', None)\n if 'index' in field_details and not field_details['index']:\n field_details.setdefault('doc_values', False)\n\n\nFIELD_MANDATORY_ATTRIBUTES = ['name', 'description', 'type', 'level']\nACCEPTABLE_FIELD_LEVELS = ['core', 'extended', 'custom']\n\n\ndef field_mandatory_attributes(field):\n '''Ensures for the presence of the mandatory field attributes and raises if any are missing'''\n if ecs_helpers.is_intermediate(field):\n return\n current_field_attributes = sorted(field['field_details'].keys())\n missing_attributes = ecs_helpers.list_subtract(FIELD_MANDATORY_ATTRIBUTES, current_field_attributes)\n\n # `alias` fields require a target `path` attribute.\n if field['field_details'].get('type') == 'alias' and 'path' not in current_field_attributes:\n missing_attributes.append('path')\n # `scaled_float` fields require a `scaling_factor` attribute.\n if field['field_details'].get('type') == 'scaled_float' and 'scaling_factor' not in current_field_attributes:\n missing_attributes.append('scaling_factor')\n\n if len(missing_attributes) > 0:\n msg = \"Field is missing the following mandatory attributes: {}.\\nFound these: {}.\\nField details: {}\"\n raise ValueError(msg.format(', '.join(missing_attributes),\n current_field_attributes, field))\n\n\ndef field_assertions_and_warnings(field):\n '''Additional checks on a fleshed out field'''\n if not ecs_helpers.is_intermediate(field):\n # check short description length if in strict mode\n single_line_short_description(field, strict=strict_mode)\n check_example_value(field, strict=strict_mode)\n if field['field_details']['level'] not in ACCEPTABLE_FIELD_LEVELS:\n msg = \"Invalid level for field '{}'.\\nValue: {}\\nAcceptable values: {}\".format(\n field['field_details']['name'], field['field_details']['level'],\n ACCEPTABLE_FIELD_LEVELS)\n raise ValueError(msg)\n\n# Common\n\n\nSHORT_LIMIT = 120\n\n\ndef single_line_short_description(schema_or_field, strict=True):\n short_length = len(schema_or_field['field_details']['short'])\n if \"\\n\" in schema_or_field['field_details']['short'] or short_length > SHORT_LIMIT:\n msg = \"Short descriptions must 
be single line, and under {} characters (current length: {}).\\n\".format(\n SHORT_LIMIT, short_length)\n msg += \"Offending field or field set: {}\\nShort description:\\n {}\".format(\n schema_or_field['field_details']['name'],\n schema_or_field['field_details']['short'])\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n\n\ndef check_example_value(field, strict=True):\n \"\"\"\n Checks if value of the example field is of type list or dict.\n Fails or warns (depending on strict mode) if so.\n \"\"\"\n example_value = field['field_details'].get('example', None)\n if isinstance(example_value, (list, dict)):\n name = field['field_details']['name']\n msg = f\"Example value for field `{name}` contains an object or array which must be quoted to avoid YAML interpretation.\"\n if strict:\n raise ValueError(msg)\n else:\n ecs_helpers.strict_warning(msg)\n", "path": "scripts/schema/cleaner.py"}]} | 3,065 | 143 |
gh_patches_debug_33132 | rasdani/github-patches | git_diff | huggingface__transformers-2065 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CamemBert] Tokenizer function add_tokens doesn't work
## ❓ Questions & Help
Hi,
I am trying to add new tokens to the CamemBert tokenizer, but when I run the function `tokenizer.add_tokens`, it doesn't seem to add any token at all:
`from transformers import CamembertTokenizer`
`tokenizer = CamembertTokenizer.from_pretrained('camembert-base')`
`tokenizer.add_tokens(['notfrenchword'])`
`Out[12]: 0`
Whereas with the Bert model it works perfectly. Is this a bug or am I doing something wrong?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `transformers/tokenization_camembert.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License
15 """ Tokenization classes for Camembert model."""
16 from __future__ import (absolute_import, division, print_function,
17 unicode_literals)
18
19 import logging
20 import os
21 from shutil import copyfile
22
23 import sentencepiece as spm
24 from transformers.tokenization_utils import PreTrainedTokenizer
25
26 logger = logging.getLogger(__name__)
27
28 VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
29
30 PRETRAINED_VOCAB_FILES_MAP = {
31 'vocab_file':
32 {
33 'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model",
34 }
35 }
36
37 PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
38 'camembert-base': None,
39 }
40
41 class CamembertTokenizer(PreTrainedTokenizer):
42 """
43 Adapted from RobertaTokenizer and XLNetTokenizer
44 SentencePiece based tokenizer. Peculiarities:
45
46 - requires `SentencePiece <https://github.com/google/sentencepiece>`_
47 """
48 vocab_files_names = VOCAB_FILES_NAMES
49 pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
50 max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
51
52 def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
53 cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>',
54 additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):
55 super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
56 sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
57 mask_token=mask_token, additional_special_tokens=additional_special_tokens,
58 **kwargs)
59 self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
60 self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
61 self.sp_model = spm.SentencePieceProcessor()
62 self.sp_model.Load(str(vocab_file))
63 self.vocab_file = vocab_file
64 # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
65 # sentencepiece vocabulary (this is the case for <s> and </s>
66 self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
67 self.fairseq_offset = len(self.fairseq_tokens_to_ids)
68 self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
69 self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
70
71 def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
72 """
73 Build model inputs from a sequence or a pair of sequence for sequence classification tasks
74 by concatenating and adding special tokens.
75 A RoBERTa sequence has the following format:
76 single sequence: <s> X </s>
77 pair of sequences: <s> A </s></s> B </s>
78 """
79 if token_ids_1 is None:
80 return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
81 cls = [self.cls_token_id]
82 sep = [self.sep_token_id]
83 return cls + token_ids_0 + sep + sep + token_ids_1 + sep
84
85 def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
86 """
87 Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
88 special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
89
90 Args:
91 token_ids_0: list of ids (must not contain special tokens)
92 token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
93 for sequence pairs
94 already_has_special_tokens: (default False) Set to True if the token list is already formated with
95 special tokens for the model
96
97 Returns:
98 A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
99 """
100 if already_has_special_tokens:
101 if token_ids_1 is not None:
102 raise ValueError("You should not supply a second sequence if the provided sequence of "
103 "ids is already formated with special tokens for the model.")
104 return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
105
106 if token_ids_1 is None:
107 return [1] + ([0] * len(token_ids_0)) + [1]
108 return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
109
110 def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
111 """
112 Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
113 A RoBERTa sequence pair mask has the following format:
114 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
115 | first sequence | second sequence
116
117 if token_ids_1 is None, only returns the first portion of the mask (0's).
118 """
119 sep = [self.sep_token_id]
120 cls = [self.cls_token_id]
121
122 if token_ids_1 is None:
123 return len(cls + token_ids_0 + sep) * [0]
124 return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]
125
126 @property
127 def vocab_size(self):
128 return self.fairseq_offset + len(self.sp_model)
129
130 def _tokenize(self, text):
131 return self.sp_model.EncodeAsPieces(text)
132
133 def _convert_token_to_id(self, token):
134 """ Converts a token (str/unicode) in an id using the vocab. """
135 if token in self.fairseq_tokens_to_ids:
136 return self.fairseq_tokens_to_ids[token]
137 return self.fairseq_offset + self.sp_model.PieceToId(token)
138
139 def _convert_id_to_token(self, index):
140 """Converts an index (integer) in a token (string/unicode) using the vocab."""
141 if index in self.fairseq_ids_to_tokens:
142 return self.fairseq_ids_to_tokens[index]
143 return self.sp_model.IdToPiece(index - self.fairseq_offset)
144
145 def save_vocabulary(self, save_directory):
146 """ Save the sentencepiece vocabulary (copy original file) and special tokens file
147 to a directory.
148 """
149 if not os.path.isdir(save_directory):
150 logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
151 return
152 out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
153
154 if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
155 copyfile(self.vocab_file, out_vocab_file)
156
157 return (out_vocab_file,)
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/transformers/tokenization_camembert.py b/transformers/tokenization_camembert.py
--- a/transformers/tokenization_camembert.py
+++ b/transformers/tokenization_camembert.py
@@ -51,7 +51,7 @@
def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>',
- additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):
+ additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
mask_token=mask_token, additional_special_tokens=additional_special_tokens,
@@ -125,7 +125,7 @@
@property
def vocab_size(self):
- return self.fairseq_offset + len(self.sp_model)
+ return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
@@ -134,6 +134,9 @@
""" Converts a token (str/unicode) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
+ elif self.sp_model.PieceToId(token) == 0:
+ # Convert sentence piece unk token to fairseq unk token index
+ return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
| {"golden_diff": "diff --git a/transformers/tokenization_camembert.py b/transformers/tokenization_camembert.py\n--- a/transformers/tokenization_camembert.py\n+++ b/transformers/tokenization_camembert.py\n@@ -51,7 +51,7 @@\n \n def __init__(self, vocab_file, bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\",\n cls_token=\"<s>\", unk_token=\"<unk>\", pad_token='<pad>', mask_token='<mask>',\n- additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):\n+ additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):\n super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,\n sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,\n mask_token=mask_token, additional_special_tokens=additional_special_tokens,\n@@ -125,7 +125,7 @@\n \n @property\n def vocab_size(self):\n- return self.fairseq_offset + len(self.sp_model)\n+ return len(self.fairseq_tokens_to_ids) + len(self.sp_model)\n \n def _tokenize(self, text):\n return self.sp_model.EncodeAsPieces(text)\n@@ -134,6 +134,9 @@\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n+ elif self.sp_model.PieceToId(token) == 0:\n+ # Convert sentence piece unk token to fairseq unk token index\n+ return self.unk_token_id\n return self.fairseq_offset + self.sp_model.PieceToId(token)\n \n def _convert_id_to_token(self, index):\n", "issue": "[CamemBert] Tokenizer function add_tokens doesn't work\n## \u2753 Questions & Help\r\n\r\nHi, \r\n\r\nI am trying to add new tokens to the CamemBert tokenizer, but when I run the function tokenizer.add_tokens, it doesn't seem to add any token at all :\r\n\r\n`from transformers import CamembertTokenizer`\r\n`tokenizer = CamembertTokenizer.from_pretrained('camembert-base')`\r\n`tokenizer.add_tokens(['notfrenchword'])`\r\n\r\n`Out[12]: 0`\r\n\r\nWhereas with Bert model it works perfectly. Is this a bug or am I doing something wrong ?\r\n\r\nThanks\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\" Tokenization classes for Camembert model.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport logging\nimport os\nfrom shutil import copyfile\n\nimport sentencepiece as spm\nfrom transformers.tokenization_utils import PreTrainedTokenizer\n\nlogger = logging.getLogger(__name__)\n\nVOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}\n\nPRETRAINED_VOCAB_FILES_MAP = {\n 'vocab_file':\n {\n 'camembert-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model\",\n }\n}\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n 'camembert-base': None,\n}\n\nclass CamembertTokenizer(PreTrainedTokenizer):\n \"\"\"\n Adapted from RobertaTokenizer and XLNetTokenizer\n SentencePiece based tokenizer. Peculiarities:\n\n - requires `SentencePiece <https://github.com/google/sentencepiece>`_\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(self, vocab_file, bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\",\n cls_token=\"<s>\", unk_token=\"<unk>\", pad_token='<pad>', mask_token='<mask>',\n additional_special_tokens=['<s>NOTUSED', '<s>NOTUSED'], **kwargs):\n super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,\n sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,\n mask_token=mask_token, additional_special_tokens=additional_special_tokens,\n **kwargs)\n self.max_len_single_sentence = self.max_len - 2 # take into account special tokens\n self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens\n self.sp_model = spm.SentencePieceProcessor()\n self.sp_model.Load(str(vocab_file))\n self.vocab_file = vocab_file\n # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual\n # sentencepiece vocabulary (this is the case for <s> and </s>\n self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}\n self.fairseq_offset = len(self.fairseq_tokens_to_ids)\n self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)\n self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A RoBERTa sequence has the following format:\n single sequence: <s> X </s>\n pair of sequences: <s> A </s></s> B </s>\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(self, token_ids_0, 
token_ids_1=None, already_has_special_tokens=False):\n \"\"\"\n Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.\n\n Args:\n token_ids_0: list of ids (must not contain special tokens)\n token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids\n for sequence pairs\n already_has_special_tokens: (default False) Set to True if the token list is already formated with\n special tokens for the model\n\n Returns:\n A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formated with special tokens for the model.\")\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is None:\n return [1] + ([0] * len(token_ids_0)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]\n\n def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A RoBERTa sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence\n\n if token_ids_1 is None, only returns the first portion of the mask (0's).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n @property\n def vocab_size(self):\n return self.fairseq_offset + len(self.sp_model)\n\n def _tokenize(self, text):\n return self.sp_model.EncodeAsPieces(text)\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n return self.fairseq_offset + self.sp_model.PieceToId(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (string/unicode) using the vocab.\"\"\"\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)\n\n def save_vocabulary(self, save_directory):\n \"\"\" Save the sentencepiece vocabulary (copy original file) and special tokens file\n to a directory.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n", "path": "transformers/tokenization_camembert.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\" Tokenization classes for Camembert model.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport logging\nimport os\nfrom shutil import copyfile\n\nimport sentencepiece as spm\nfrom transformers.tokenization_utils import PreTrainedTokenizer\n\nlogger = logging.getLogger(__name__)\n\nVOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}\n\nPRETRAINED_VOCAB_FILES_MAP = {\n 'vocab_file':\n {\n 'camembert-base': \"https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model\",\n }\n}\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n 'camembert-base': None,\n}\n\nclass CamembertTokenizer(PreTrainedTokenizer):\n \"\"\"\n Adapted from RobertaTokenizer and XLNetTokenizer\n SentencePiece based tokenizer. Peculiarities:\n\n - requires `SentencePiece <https://github.com/google/sentencepiece>`_\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(self, vocab_file, bos_token=\"<s>\", eos_token=\"</s>\", sep_token=\"</s>\",\n cls_token=\"<s>\", unk_token=\"<unk>\", pad_token='<pad>', mask_token='<mask>',\n additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):\n super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,\n sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,\n mask_token=mask_token, additional_special_tokens=additional_special_tokens,\n **kwargs)\n self.max_len_single_sentence = self.max_len - 2 # take into account special tokens\n self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens\n self.sp_model = spm.SentencePieceProcessor()\n self.sp_model.Load(str(vocab_file))\n self.vocab_file = vocab_file\n # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual\n # sentencepiece vocabulary (this is the case for <s> and </s>\n self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}\n self.fairseq_offset = len(self.fairseq_tokens_to_ids)\n self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)\n self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks\n by concatenating and adding special tokens.\n A RoBERTa sequence has the following format:\n single sequence: <s> X </s>\n pair of sequences: <s> A </s></s> B </s>\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(self, token_ids_0, 
token_ids_1=None, already_has_special_tokens=False):\n \"\"\"\n Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.\n\n Args:\n token_ids_0: list of ids (must not contain special tokens)\n token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids\n for sequence pairs\n already_has_special_tokens: (default False) Set to True if the token list is already formated with\n special tokens for the model\n\n Returns:\n A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formated with special tokens for the model.\")\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is None:\n return [1] + ([0] * len(token_ids_0)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]\n\n def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task.\n A RoBERTa sequence pair mask has the following format:\n 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence\n\n if token_ids_1 is None, only returns the first portion of the mask (0's).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n @property\n def vocab_size(self):\n return len(self.fairseq_tokens_to_ids) + len(self.sp_model)\n\n def _tokenize(self, text):\n return self.sp_model.EncodeAsPieces(text)\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n elif self.sp_model.PieceToId(token) == 0:\n # Convert sentence piece unk token to fairseq unk token index\n return self.unk_token_id\n return self.fairseq_offset + self.sp_model.PieceToId(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (string/unicode) using the vocab.\"\"\"\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)\n\n def save_vocabulary(self, save_directory):\n \"\"\" Save the sentencepiece vocabulary (copy original file) and special tokens file\n to a directory.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n", "path": "transformers/tokenization_camembert.py"}]} | 2,552 | 423 |
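The CamemBERT patch above is easiest to follow through the id mapping it repairs: in the unpatched tokenizer an out-of-vocabulary piece resolved to fairseq_offset + PieceToId(token) = 4 while unk_token_id is 3, so PreTrainedTokenizer.add_tokens of that era saw every new token as already known and returned 0. A minimal sketch of the patched behaviour, assuming a transformers 2.x-era CamembertTokenizer and access to the camembert-base files (an illustration, not a test from the repository):

```python
from transformers import CamembertTokenizer

tok = CamembertTokenizer.from_pretrained("camembert-base")

# After the patch an unknown piece maps onto the fairseq unk id ...
print(tok.convert_tokens_to_ids("notfrenchword"))  # 3, same as tok.unk_token_id
print(tok.unk_token_id)                            # 3

# ... so add_tokens recognises it as genuinely new and registers it.
print(tok.add_tokens(["notfrenchword"]))           # expected: 1
```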
gh_patches_debug_3907 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-10041 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request]: img2img batch should ignore non-image files
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What would your feature do ?
I may place some parameter files alongside the image files in the same folder.
Currently an exception is thrown for non-image files when "Image.open()" is called.
### Proposed workflow
1. Place a txt file along with the png file in folder A
2. Set batch input folder to A
3. Press generate
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/img2img.py`
Content:
```
1 import math
2 import os
3 import sys
4 import traceback
5
6 import numpy as np
7 from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
8
9 from modules import devices, sd_samplers
10 from modules.generation_parameters_copypaste import create_override_settings_dict
11 from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
12 from modules.shared import opts, state
13 import modules.shared as shared
14 import modules.processing as processing
15 from modules.ui import plaintext_to_html
16 import modules.images as images
17 import modules.scripts
18
19
20 def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
21 processing.fix_seed(p)
22
23 images = shared.listfiles(input_dir)
24
25 is_inpaint_batch = False
26 if inpaint_mask_dir:
27 inpaint_masks = shared.listfiles(inpaint_mask_dir)
28 is_inpaint_batch = len(inpaint_masks) > 0
29 if is_inpaint_batch:
30 print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
31
32 print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
33
34 save_normally = output_dir == ''
35
36 p.do_not_save_grid = True
37 p.do_not_save_samples = not save_normally
38
39 state.job_count = len(images) * p.n_iter
40
41 for i, image in enumerate(images):
42 state.job = f"{i+1} out of {len(images)}"
43 if state.skipped:
44 state.skipped = False
45
46 if state.interrupted:
47 break
48
49 try:
50 img = Image.open(image)
51 except UnidentifiedImageError:
52 continue
53 # Use the EXIF orientation of photos taken by smartphones.
54 img = ImageOps.exif_transpose(img)
55 p.init_images = [img] * p.batch_size
56
57 if is_inpaint_batch:
58 # try to find corresponding mask for an image using simple filename matching
59 mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
60 # if not found use first one ("same mask for all images" use-case)
61 if not mask_image_path in inpaint_masks:
62 mask_image_path = inpaint_masks[0]
63 mask_image = Image.open(mask_image_path)
64 p.image_mask = mask_image
65
66 proc = modules.scripts.scripts_img2img.run(p, *args)
67 if proc is None:
68 proc = process_images(p)
69
70 for n, processed_image in enumerate(proc.images):
71 filename = os.path.basename(image)
72
73 if n > 0:
74 left, right = os.path.splitext(filename)
75 filename = f"{left}-{n}{right}"
76
77 if not save_normally:
78 os.makedirs(output_dir, exist_ok=True)
79 if processed_image.mode == 'RGBA':
80 processed_image = processed_image.convert("RGB")
81 processed_image.save(os.path.join(output_dir, filename))
82
83
84 def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
85 override_settings = create_override_settings_dict(override_settings_texts)
86
87 is_batch = mode == 5
88
89 if mode == 0: # img2img
90 image = init_img.convert("RGB")
91 mask = None
92 elif mode == 1: # img2img sketch
93 image = sketch.convert("RGB")
94 mask = None
95 elif mode == 2: # inpaint
96 image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
97 alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
98 mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
99 image = image.convert("RGB")
100 elif mode == 3: # inpaint sketch
101 image = inpaint_color_sketch
102 orig = inpaint_color_sketch_orig or inpaint_color_sketch
103 pred = np.any(np.array(image) != np.array(orig), axis=-1)
104 mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
105 mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
106 blur = ImageFilter.GaussianBlur(mask_blur)
107 image = Image.composite(image.filter(blur), orig, mask.filter(blur))
108 image = image.convert("RGB")
109 elif mode == 4: # inpaint upload mask
110 image = init_img_inpaint
111 mask = init_mask_inpaint
112 else:
113 image = None
114 mask = None
115
116 # Use the EXIF orientation of photos taken by smartphones.
117 if image is not None:
118 image = ImageOps.exif_transpose(image)
119
120 if selected_scale_tab == 1:
121 assert image, "Can't scale by because no image is selected"
122
123 width = int(image.width * scale_by)
124 height = int(image.height * scale_by)
125
126 assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
127
128 p = StableDiffusionProcessingImg2Img(
129 sd_model=shared.sd_model,
130 outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
131 outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
132 prompt=prompt,
133 negative_prompt=negative_prompt,
134 styles=prompt_styles,
135 seed=seed,
136 subseed=subseed,
137 subseed_strength=subseed_strength,
138 seed_resize_from_h=seed_resize_from_h,
139 seed_resize_from_w=seed_resize_from_w,
140 seed_enable_extras=seed_enable_extras,
141 sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,
142 batch_size=batch_size,
143 n_iter=n_iter,
144 steps=steps,
145 cfg_scale=cfg_scale,
146 width=width,
147 height=height,
148 restore_faces=restore_faces,
149 tiling=tiling,
150 init_images=[image],
151 mask=mask,
152 mask_blur=mask_blur,
153 inpainting_fill=inpainting_fill,
154 resize_mode=resize_mode,
155 denoising_strength=denoising_strength,
156 image_cfg_scale=image_cfg_scale,
157 inpaint_full_res=inpaint_full_res,
158 inpaint_full_res_padding=inpaint_full_res_padding,
159 inpainting_mask_invert=inpainting_mask_invert,
160 override_settings=override_settings,
161 )
162
163 p.scripts = modules.scripts.scripts_img2img
164 p.script_args = args
165
166 if shared.cmd_opts.enable_console_prompts:
167 print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
168
169 if mask:
170 p.extra_generation_params["Mask blur"] = mask_blur
171
172 if is_batch:
173 assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
174
175 process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
176
177 processed = Processed(p, [], p.seed, "")
178 else:
179 processed = modules.scripts.scripts_img2img.run(p, *args)
180 if processed is None:
181 processed = process_images(p)
182
183 p.close()
184
185 shared.total_tqdm.clear()
186
187 generation_info_js = processed.js()
188 if opts.samples_log_stdout:
189 print(generation_info_js)
190
191 if opts.do_not_show_images:
192 processed.images = []
193
194 return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modules/img2img.py b/modules/img2img.py
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -48,7 +48,8 @@
try:
img = Image.open(image)
- except UnidentifiedImageError:
+ except UnidentifiedImageError as e:
+ print(e)
continue
# Use the EXIF orientation of photos taken by smartphones.
img = ImageOps.exif_transpose(img)
| {"golden_diff": "diff --git a/modules/img2img.py b/modules/img2img.py\n--- a/modules/img2img.py\n+++ b/modules/img2img.py\n@@ -48,7 +48,8 @@\n \r\n try:\r\n img = Image.open(image)\r\n- except UnidentifiedImageError:\r\n+ except UnidentifiedImageError as e:\r\n+ print(e)\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\n", "issue": "[Feature Request]: img2img batch should ignore non-image files\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What would your feature do ?\n\nI may place some parameter files along the image files in the same folder.\r\nCurrently an exception is throw for non-image files when do \"Image.open()\".\n\n### Proposed workflow\n\n1. Place a txt file along with the png file in folder A\r\n2. Set batch input folder to A\r\n3. Press generate\r\n\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import math\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError\r\n\r\nfrom modules import devices, sd_samplers\r\nfrom modules.generation_parameters_copypaste import create_override_settings_dict\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.images as images\r\nimport modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):\r\n processing.fix_seed(p)\r\n\r\n images = shared.listfiles(input_dir)\r\n\r\n is_inpaint_batch = False\r\n if inpaint_mask_dir:\r\n inpaint_masks = shared.listfiles(inpaint_mask_dir)\r\n is_inpaint_batch = len(inpaint_masks) > 0\r\n if is_inpaint_batch:\r\n print(f\"\\nInpaint batch is enabled. 
{len(inpaint_masks)} masks found.\")\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n try:\r\n img = Image.open(image)\r\n except UnidentifiedImageError:\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\r\n p.init_images = [img] * p.batch_size\r\n\r\n if is_inpaint_batch:\r\n # try to find corresponding mask for an image using simple filename matching\r\n mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))\r\n # if not found use first one (\"same mask for all images\" use-case)\r\n if not mask_image_path in inpaint_masks:\r\n mask_image_path = inpaint_masks[0]\r\n mask_image = Image.open(mask_image_path)\r\n p.image_mask = mask_image\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = os.path.basename(image)\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n os.makedirs(output_dir, exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n processed_image.save(os.path.join(output_dir, filename))\r\n\r\n\r\ndef img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):\r\n override_settings = create_override_settings_dict(override_settings_texts)\r\n\r\n is_batch = mode == 5\r\n\r\n if mode == 0: # img2img\r\n image = init_img.convert(\"RGB\")\r\n mask = None\r\n elif mode == 1: # img2img sketch\r\n image = sketch.convert(\"RGB\")\r\n mask = None\r\n elif mode == 2: # inpaint\r\n image, mask = init_img_with_mask[\"image\"], init_img_with_mask[\"mask\"]\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n image = image.convert(\"RGB\")\r\n elif mode == 3: # inpaint sketch\r\n image = inpaint_color_sketch\r\n orig = inpaint_color_sketch_orig or inpaint_color_sketch\r\n pred = np.any(np.array(image) != np.array(orig), axis=-1)\r\n mask = Image.fromarray(pred.astype(np.uint8) * 255, \"L\")\r\n mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)\r\n blur = ImageFilter.GaussianBlur(mask_blur)\r\n image = 
Image.composite(image.filter(blur), orig, mask.filter(blur))\r\n image = image.convert(\"RGB\")\r\n elif mode == 4: # inpaint upload mask\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = None\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n if image is not None:\r\n image = ImageOps.exif_transpose(image)\r\n\r\n if selected_scale_tab == 1:\r\n assert image, \"Can't scale by because no image is selected\"\r\n\r\n width = int(image.width * scale_by)\r\n height = int(image.height * scale_by)\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=prompt_styles,\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n image_cfg_scale=image_cfg_scale,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n override_settings=override_settings,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_img2img\r\n p.script_args = args\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n if mask:\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n p.close()\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)\r\n", "path": "modules/img2img.py"}], "after_files": [{"content": "import math\r\nimport os\r\nimport sys\r\nimport traceback\r\n\r\nimport numpy as np\r\nfrom PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError\r\n\r\nfrom modules import devices, sd_samplers\r\nfrom modules.generation_parameters_copypaste import create_override_settings_dict\r\nfrom modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images\r\nfrom modules.shared import opts, state\r\nimport modules.shared as shared\r\nimport modules.processing as processing\r\nfrom modules.ui import plaintext_to_html\r\nimport modules.images as images\r\nimport 
modules.scripts\r\n\r\n\r\ndef process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):\r\n processing.fix_seed(p)\r\n\r\n images = shared.listfiles(input_dir)\r\n\r\n is_inpaint_batch = False\r\n if inpaint_mask_dir:\r\n inpaint_masks = shared.listfiles(inpaint_mask_dir)\r\n is_inpaint_batch = len(inpaint_masks) > 0\r\n if is_inpaint_batch:\r\n print(f\"\\nInpaint batch is enabled. {len(inpaint_masks)} masks found.\")\r\n\r\n print(f\"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.\")\r\n\r\n save_normally = output_dir == ''\r\n\r\n p.do_not_save_grid = True\r\n p.do_not_save_samples = not save_normally\r\n\r\n state.job_count = len(images) * p.n_iter\r\n\r\n for i, image in enumerate(images):\r\n state.job = f\"{i+1} out of {len(images)}\"\r\n if state.skipped:\r\n state.skipped = False\r\n\r\n if state.interrupted:\r\n break\r\n\r\n try:\r\n img = Image.open(image)\r\n except UnidentifiedImageError as e:\r\n print(e)\r\n continue\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n img = ImageOps.exif_transpose(img)\r\n p.init_images = [img] * p.batch_size\r\n\r\n if is_inpaint_batch:\r\n # try to find corresponding mask for an image using simple filename matching\r\n mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))\r\n # if not found use first one (\"same mask for all images\" use-case)\r\n if not mask_image_path in inpaint_masks:\r\n mask_image_path = inpaint_masks[0]\r\n mask_image = Image.open(mask_image_path)\r\n p.image_mask = mask_image\r\n\r\n proc = modules.scripts.scripts_img2img.run(p, *args)\r\n if proc is None:\r\n proc = process_images(p)\r\n\r\n for n, processed_image in enumerate(proc.images):\r\n filename = os.path.basename(image)\r\n\r\n if n > 0:\r\n left, right = os.path.splitext(filename)\r\n filename = f\"{left}-{n}{right}\"\r\n\r\n if not save_normally:\r\n os.makedirs(output_dir, exist_ok=True)\r\n if processed_image.mode == 'RGBA':\r\n processed_image = processed_image.convert(\"RGB\")\r\n processed_image.save(os.path.join(output_dir, filename))\r\n\r\n\r\ndef img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):\r\n override_settings = create_override_settings_dict(override_settings_texts)\r\n\r\n is_batch = mode == 5\r\n\r\n if mode == 0: # img2img\r\n image = init_img.convert(\"RGB\")\r\n mask = None\r\n elif mode == 1: # img2img sketch\r\n image = sketch.convert(\"RGB\")\r\n mask = None\r\n elif mode == 2: # inpaint\r\n image, mask = init_img_with_mask[\"image\"], init_img_with_mask[\"mask\"]\r\n alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')\r\n mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')\r\n 
image = image.convert(\"RGB\")\r\n elif mode == 3: # inpaint sketch\r\n image = inpaint_color_sketch\r\n orig = inpaint_color_sketch_orig or inpaint_color_sketch\r\n pred = np.any(np.array(image) != np.array(orig), axis=-1)\r\n mask = Image.fromarray(pred.astype(np.uint8) * 255, \"L\")\r\n mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)\r\n blur = ImageFilter.GaussianBlur(mask_blur)\r\n image = Image.composite(image.filter(blur), orig, mask.filter(blur))\r\n image = image.convert(\"RGB\")\r\n elif mode == 4: # inpaint upload mask\r\n image = init_img_inpaint\r\n mask = init_mask_inpaint\r\n else:\r\n image = None\r\n mask = None\r\n\r\n # Use the EXIF orientation of photos taken by smartphones.\r\n if image is not None:\r\n image = ImageOps.exif_transpose(image)\r\n\r\n if selected_scale_tab == 1:\r\n assert image, \"Can't scale by because no image is selected\"\r\n\r\n width = int(image.width * scale_by)\r\n height = int(image.height * scale_by)\r\n\r\n assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'\r\n\r\n p = StableDiffusionProcessingImg2Img(\r\n sd_model=shared.sd_model,\r\n outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,\r\n outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,\r\n prompt=prompt,\r\n negative_prompt=negative_prompt,\r\n styles=prompt_styles,\r\n seed=seed,\r\n subseed=subseed,\r\n subseed_strength=subseed_strength,\r\n seed_resize_from_h=seed_resize_from_h,\r\n seed_resize_from_w=seed_resize_from_w,\r\n seed_enable_extras=seed_enable_extras,\r\n sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name,\r\n batch_size=batch_size,\r\n n_iter=n_iter,\r\n steps=steps,\r\n cfg_scale=cfg_scale,\r\n width=width,\r\n height=height,\r\n restore_faces=restore_faces,\r\n tiling=tiling,\r\n init_images=[image],\r\n mask=mask,\r\n mask_blur=mask_blur,\r\n inpainting_fill=inpainting_fill,\r\n resize_mode=resize_mode,\r\n denoising_strength=denoising_strength,\r\n image_cfg_scale=image_cfg_scale,\r\n inpaint_full_res=inpaint_full_res,\r\n inpaint_full_res_padding=inpaint_full_res_padding,\r\n inpainting_mask_invert=inpainting_mask_invert,\r\n override_settings=override_settings,\r\n )\r\n\r\n p.scripts = modules.scripts.scripts_img2img\r\n p.script_args = args\r\n\r\n if shared.cmd_opts.enable_console_prompts:\r\n print(f\"\\nimg2img: {prompt}\", file=shared.progress_print_out)\r\n\r\n if mask:\r\n p.extra_generation_params[\"Mask blur\"] = mask_blur\r\n\r\n if is_batch:\r\n assert not shared.cmd_opts.hide_ui_dir_config, \"Launched with --hide-ui-dir-config, batch img2img disabled\"\r\n\r\n process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)\r\n\r\n processed = Processed(p, [], p.seed, \"\")\r\n else:\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n if processed is None:\r\n processed = process_images(p)\r\n\r\n p.close()\r\n\r\n shared.total_tqdm.clear()\r\n\r\n generation_info_js = processed.js()\r\n if opts.samples_log_stdout:\r\n print(generation_info_js)\r\n\r\n if opts.do_not_show_images:\r\n processed.images = []\r\n\r\n return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments)\r\n", "path": "modules/img2img.py"}]} | 2,765 | 105 |
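The img2img patch above only adds a print around the existing skip, so the intended batch behaviour can be shown standalone. A minimal sketch of that pattern, assuming Pillow 7+ (which provides UnidentifiedImageError); it illustrates the idea rather than reproducing the webui's process_batch:

```python
import os
from PIL import Image, ImageOps, UnidentifiedImageError

def iter_batch_images(input_dir):
    """Yield (path, image) pairs, reporting and skipping non-image files."""
    for name in sorted(os.listdir(input_dir)):
        path = os.path.join(input_dir, name)
        try:
            img = Image.open(path)
        except UnidentifiedImageError as e:
            print(e)  # e.g. a parameter .txt next to the PNGs is logged, not fatal
            continue
        # Honour the EXIF orientation, as process_batch does.
        yield path, ImageOps.exif_transpose(img)
```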
gh_patches_debug_28907 | rasdani/github-patches | git_diff | ansible__ansible-43525 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing all_content param in ovirt_hosts_facts
##### SUMMARY
ovirt_hosts_facts misses the all_content param, so it's not possible to get back the full host details. ovirt_vms_facts, for instance, has it.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
ovirt_hosts_facts
##### ANSIBLE VERSION
```
ansible 2.6.1
config file = /etc/ansible/ansible.cfg
configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)]
```
##### CONFIGURATION
##### OS / ENVIRONMENT
N/A
##### STEPS TO REPRODUCE
<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used. -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- ovirt_vms_facts:
auth: "{{ ovirt_auth }}"
all_content: true
- debug: var=ovirt_vms
- ovirt_hosts_facts:
auth: "{{ ovirt_auth }}"
all_content: true
- debug: var=ovirt_hosts
```
##### EXPECTED RESULTS
a list of hosts with full detail for each of them
##### ACTUAL RESULTS
```
TASK [ovirt_hosts_facts] ******************************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "msg": "Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern"}
to retry, use: --limit @/root/test.retry
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (c) 2016 Red Hat, Inc.
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20 #
21
22 ANSIBLE_METADATA = {'metadata_version': '1.1',
23 'status': ['preview'],
24 'supported_by': 'community'}
25
26
27 DOCUMENTATION = '''
28 ---
29 module: ovirt_hosts_facts
30 short_description: Retrieve facts about one or more oVirt/RHV hosts
31 author: "Ondra Machacek (@machacekondra)"
32 version_added: "2.3"
33 description:
34 - "Retrieve facts about one or more oVirt/RHV hosts."
35 notes:
36 - "This module creates a new top-level C(ovirt_hosts) fact, which
37 contains a list of hosts."
38 options:
39 pattern:
40 description:
41 - "Search term which is accepted by oVirt/RHV search backend."
42 - "For example to search host X from datacenter Y use following pattern:
43 name=X and datacenter=Y"
44 extends_documentation_fragment: ovirt_facts
45 '''
46
47 EXAMPLES = '''
48 # Examples don't contain auth parameter for simplicity,
49 # look at ovirt_auth module to see how to reuse authentication:
50
51 # Gather facts about all hosts which names start with C(host) and
52 # belong to data center C(west):
53 - ovirt_hosts_facts:
54 pattern: name=host* and datacenter=west
55 - debug:
56 var: ovirt_hosts
57 '''
58
59 RETURN = '''
60 ovirt_hosts:
61 description: "List of dictionaries describing the hosts. Host attribues are mapped to dictionary keys,
62 all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
63 returned: On success.
64 type: list
65 '''
66
67 import traceback
68
69 from ansible.module_utils.basic import AnsibleModule
70 from ansible.module_utils.ovirt import (
71 check_sdk,
72 create_connection,
73 get_dict_of_struct,
74 ovirt_facts_full_argument_spec,
75 )
76
77
78 def main():
79 argument_spec = ovirt_facts_full_argument_spec(
80 pattern=dict(default='', required=False),
81 )
82 module = AnsibleModule(argument_spec)
83 check_sdk(module)
84
85 try:
86 auth = module.params.pop('auth')
87 connection = create_connection(auth)
88 hosts_service = connection.system_service().hosts_service()
89 hosts = hosts_service.list(search=module.params['pattern'])
90 module.exit_json(
91 changed=False,
92 ansible_facts=dict(
93 ovirt_hosts=[
94 get_dict_of_struct(
95 struct=c,
96 connection=connection,
97 fetch_nested=module.params.get('fetch_nested'),
98 attributes=module.params.get('nested_attributes'),
99 ) for c in hosts
100 ],
101 ),
102 )
103 except Exception as e:
104 module.fail_json(msg=str(e), exception=traceback.format_exc())
105 finally:
106 connection.close(logout=auth.get('token') is None)
107
108
109 if __name__ == '__main__':
110 main()
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py
--- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py
@@ -41,6 +41,12 @@
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search host X from datacenter Y use following pattern:
name=X and datacenter=Y"
+ all_content:
+ description:
+ - "If I(true) all the attributes of the hosts should be
+ included in the response."
+ default: False
+ version_added: "2.7"
extends_documentation_fragment: ovirt_facts
'''
@@ -78,6 +84,7 @@
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
+ all_content=dict(default=False, type='bool'),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
@@ -86,7 +93,10 @@
auth = module.params.pop('auth')
connection = create_connection(auth)
hosts_service = connection.system_service().hosts_service()
- hosts = hosts_service.list(search=module.params['pattern'])
+ hosts = hosts_service.list(
+ search=module.params['pattern'],
+ all_content=module.params['all_content'],
+ )
module.exit_json(
changed=False,
ansible_facts=dict(
| {"golden_diff": "diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n--- a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py\n@@ -41,6 +41,12 @@\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\n+ all_content:\n+ description:\n+ - \"If I(true) all the attributes of the hosts should be\n+ included in the response.\"\n+ default: False\n+ version_added: \"2.7\"\n extends_documentation_fragment: ovirt_facts\n '''\n \n@@ -78,6 +84,7 @@\n def main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n+ all_content=dict(default=False, type='bool'),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n@@ -86,7 +93,10 @@\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n- hosts = hosts_service.list(search=module.params['pattern'])\n+ hosts = hosts_service.list(\n+ search=module.params['pattern'],\n+ all_content=module.params['all_content'],\n+ )\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n", "issue": "Missing all_content param in ovirt_hosts_facts\n##### SUMMARY\r\novirt_hosts_facts misses the all_content param and so it's not possible to get back the whole host details. ovirt_vms_facts list, for instance, has it.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\novirt_hosts_facts\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.6.1\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'/home/stirabos/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.15 (default, May 16 2018, 17:50:09) [GCC 8.1.1 20180502 (Red Hat 8.1.1-1)]\r\n```\r\n\r\n##### CONFIGURATION\r\n\r\n\r\n##### OS / ENVIRONMENT\r\nN/A\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n - ovirt_vms_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_vms\r\n - ovirt_hosts_facts:\r\n auth: \"{{ ovirt_auth }}\"\r\n all_content: true\r\n - debug: var=ovirt_hosts\r\n```\r\n\r\n##### EXPECTED RESULTS\r\na list of hosts with full detail for each of them\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nTASK [ovirt_hosts_facts] ******************************************************************************************************************************************************************************************\r\nfatal: [localhost]: FAILED! 
=> {\"changed\": false, \"msg\": \"Unsupported parameters for (ovirt_hosts_facts) module: all_content Supported parameters include: auth, fetch_nested, nested_attributes, pattern\"}\r\n\tto retry, use: --limit @/root/test.retry\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_hosts_facts\nshort_description: Retrieve facts about one or more oVirt/RHV hosts\nauthor: \"Ondra Machacek (@machacekondra)\"\nversion_added: \"2.3\"\ndescription:\n - \"Retrieve facts about one or more oVirt/RHV hosts.\"\nnotes:\n - \"This module creates a new top-level C(ovirt_hosts) fact, which\n contains a list of hosts.\"\noptions:\n pattern:\n description:\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\nextends_documentation_fragment: ovirt_facts\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Gather facts about all hosts which names start with C(host) and\n# belong to data center C(west):\n- ovirt_hosts_facts:\n pattern: name=host* and datacenter=west\n- debug:\n var: ovirt_hosts\n'''\n\nRETURN = '''\novirt_hosts:\n description: \"List of dictionaries describing the hosts. 
Host attribues are mapped to dictionary keys,\n all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host.\"\n returned: On success.\n type: list\n'''\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n check_sdk,\n create_connection,\n get_dict_of_struct,\n ovirt_facts_full_argument_spec,\n)\n\n\ndef main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n hosts = hosts_service.list(search=module.params['pattern'])\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n ovirt_hosts=[\n get_dict_of_struct(\n struct=c,\n connection=connection,\n fetch_nested=module.params.get('fetch_nested'),\n attributes=module.params.get('nested_attributes'),\n ) for c in hosts\n ],\n ),\n )\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2016 Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ovirt_hosts_facts\nshort_description: Retrieve facts about one or more oVirt/RHV hosts\nauthor: \"Ondra Machacek (@machacekondra)\"\nversion_added: \"2.3\"\ndescription:\n - \"Retrieve facts about one or more oVirt/RHV hosts.\"\nnotes:\n - \"This module creates a new top-level C(ovirt_hosts) fact, which\n contains a list of hosts.\"\noptions:\n pattern:\n description:\n - \"Search term which is accepted by oVirt/RHV search backend.\"\n - \"For example to search host X from datacenter Y use following pattern:\n name=X and datacenter=Y\"\n all_content:\n description:\n - \"If I(true) all the attributes of the hosts should be\n included in the response.\"\n default: False\n version_added: \"2.7\"\nextends_documentation_fragment: ovirt_facts\n'''\n\nEXAMPLES = '''\n# Examples don't contain auth parameter for simplicity,\n# look at ovirt_auth module to see how to reuse authentication:\n\n# Gather facts about all hosts which names start with C(host) and\n# belong to data center C(west):\n- ovirt_hosts_facts:\n pattern: name=host* and datacenter=west\n- debug:\n var: ovirt_hosts\n'''\n\nRETURN = '''\novirt_hosts:\n description: \"List of dictionaries describing the hosts. 
Host attribues are mapped to dictionary keys,\n all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host.\"\n returned: On success.\n type: list\n'''\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ovirt import (\n check_sdk,\n create_connection,\n get_dict_of_struct,\n ovirt_facts_full_argument_spec,\n)\n\n\ndef main():\n argument_spec = ovirt_facts_full_argument_spec(\n pattern=dict(default='', required=False),\n all_content=dict(default=False, type='bool'),\n )\n module = AnsibleModule(argument_spec)\n check_sdk(module)\n\n try:\n auth = module.params.pop('auth')\n connection = create_connection(auth)\n hosts_service = connection.system_service().hosts_service()\n hosts = hosts_service.list(\n search=module.params['pattern'],\n all_content=module.params['all_content'],\n )\n module.exit_json(\n changed=False,\n ansible_facts=dict(\n ovirt_hosts=[\n get_dict_of_struct(\n struct=c,\n connection=connection,\n fetch_nested=module.params.get('fetch_nested'),\n attributes=module.params.get('nested_attributes'),\n ) for c in hosts\n ],\n ),\n )\n except Exception as e:\n module.fail_json(msg=str(e), exception=traceback.format_exc())\n finally:\n connection.close(logout=auth.get('token') is None)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py"}]} | 1,730 | 351 |
gh_patches_debug_5892 | rasdani/github-patches | git_diff | buildbot__buildbot-7318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Address PEP 706 - Filter for tarfile.extractall
The proposal improves the security of tarfile extraction and helps avoid CVE-2007-4559.
- In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses the `fully_trusted` filter.
- In Python 3.14+, extraction will default to the `data` filter.
The change has reportedly also been backported to older Python versions.
Reference: https://peps.python.org/pep-0706/
--- END ISSUE ---
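As a brief aside before the file listings: the pattern the issue is asking for can be sketched in a few lines. The helper below is illustrative only (the name `safe_extractall` and its arguments are placeholders, not part of the Buildbot code); it opts into the PEP 706 `data` filter when the running interpreter exposes it and otherwise falls back to the legacy call.
```python
import tarfile

def safe_extractall(archive_path, destroot):
    """Illustrative sketch: extract with the PEP 706 'data' filter when available."""
    with tarfile.open(name=archive_path, mode='r') as archive:
        if hasattr(tarfile, 'data_filter'):
            # Python 3.12+ (and backport releases): opt in to the safer 'data' filter.
            archive.extractall(path=destroot, filter='data')
        else:
            # Older interpreters without extraction filters: legacy behaviour.
            archive.extractall(path=destroot)
```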
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/process/remotetransfer.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 """
17 module for regrouping all FileWriterImpl and FileReaderImpl away from steps
18 """
19
20 import os
21 import tarfile
22 import tempfile
23 from io import BytesIO
24
25 from buildbot.util import bytes2unicode
26 from buildbot.util import unicode2bytes
27 from buildbot.worker.protocols import base
28
29
30 class FileWriter(base.FileWriterImpl):
31
32 """
33 Helper class that acts as a file-object with write access
34 """
35
36 def __init__(self, destfile, maxsize, mode):
37 # Create missing directories.
38 destfile = os.path.abspath(destfile)
39 dirname = os.path.dirname(destfile)
40 if not os.path.exists(dirname):
41 os.makedirs(dirname)
42
43 self.destfile = destfile
44 self.mode = mode
45 fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')
46 self.fp = os.fdopen(fd, 'wb')
47 self.remaining = maxsize
48
49 def remote_write(self, data):
50 """
51 Called from remote worker to write L{data} to L{fp} within boundaries
52 of L{maxsize}
53
54 @type data: C{string}
55 @param data: String of data to write
56 """
57 data = unicode2bytes(data)
58 if self.remaining is not None:
59 if len(data) > self.remaining:
60 data = data[:self.remaining]
61 self.fp.write(data)
62 self.remaining = self.remaining - len(data)
63 else:
64 self.fp.write(data)
65
66 def remote_utime(self, accessed_modified):
67 os.utime(self.destfile, accessed_modified)
68
69 def remote_close(self):
70 """
71 Called by remote worker to state that no more data will be transferred
72 """
73 self.fp.close()
74 self.fp = None
75 # on windows, os.rename does not automatically unlink, so do it
76 # manually
77 if os.path.exists(self.destfile):
78 os.unlink(self.destfile)
79 os.rename(self.tmpname, self.destfile)
80 self.tmpname = None
81 if self.mode is not None:
82 os.chmod(self.destfile, self.mode)
83
84 def cancel(self):
85 # unclean shutdown, the file is probably truncated, so delete it
86 # altogether rather than deliver a corrupted file
87 fp = getattr(self, "fp", None)
88 if fp:
89 fp.close()
90 if self.destfile and os.path.exists(self.destfile):
91 os.unlink(self.destfile)
92 if self.tmpname and os.path.exists(self.tmpname):
93 os.unlink(self.tmpname)
94
95
96 class DirectoryWriter(FileWriter):
97
98 """
99 A DirectoryWriter is implemented as a FileWriter, with an added post-processing
100 step to unpack the archive, once the transfer has completed.
101 """
102
103 def __init__(self, destroot, maxsize, compress, mode):
104 self.destroot = destroot
105 self.compress = compress
106
107 self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')
108 os.close(self.fd)
109
110 super().__init__(self.tarname, maxsize, mode)
111
112 def remote_unpack(self):
113 """
114 Called by remote worker to state that no more data will be transferred
115 """
116 # Make sure remote_close is called, otherwise atomic rename won't happen
117 self.remote_close()
118
119 # Map configured compression to a TarFile setting
120 if self.compress == 'bz2':
121 mode = 'r|bz2'
122 elif self.compress == 'gz':
123 mode = 'r|gz'
124 else:
125 mode = 'r'
126
127 # Unpack archive and clean up after self
128 with tarfile.open(name=self.tarname, mode=mode) as archive:
129 archive.extractall(path=self.destroot)
130 os.remove(self.tarname)
131
132
133 class FileReader(base.FileReaderImpl):
134
135 """
136 Helper class that acts as a file-object with read access
137 """
138
139 def __init__(self, fp):
140 self.fp = fp
141
142 def remote_read(self, maxlength):
143 """
144 Called from remote worker to read at most L{maxlength} bytes of data
145
146 @type maxlength: C{integer}
147 @param maxlength: Maximum number of data bytes that can be returned
148
149 @return: Data read from L{fp}
150 @rtype: C{string} of bytes read from file
151 """
152 if self.fp is None:
153 return ''
154
155 data = self.fp.read(maxlength)
156 return data
157
158 def remote_close(self):
159 """
160 Called by remote worker to state that no more data will be transferred
161 """
162 if self.fp is not None:
163 self.fp.close()
164 self.fp = None
165
166
167 class StringFileWriter(base.FileWriterImpl):
168
169 """
170 FileWriter class that just puts received data into a buffer.
171
172 Used to upload a file from worker for inline processing rather than
173 writing into a file on master.
174 """
175
176 def __init__(self):
177 self.buffer = ""
178
179 def remote_write(self, data):
180 self.buffer += bytes2unicode(data)
181
182 def remote_close(self):
183 pass
184
185
186 class StringFileReader(FileReader):
187
188 """
189 FileWriter class that just buid send data from a string.
190
191 Used to download a file to worker from local string rather than first
192 writing into a file on master.
193 """
194
195 def __init__(self, s):
196 s = unicode2bytes(s)
197 super().__init__(BytesIO(s))
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py
--- a/master/buildbot/process/remotetransfer.py
+++ b/master/buildbot/process/remotetransfer.py
@@ -126,7 +126,10 @@
# Unpack archive and clean up after self
with tarfile.open(name=self.tarname, mode=mode) as archive:
- archive.extractall(path=self.destroot)
+ if hasattr(tarfile, 'data_filter'):
+ archive.extractall(path=self.destroot, filter='data')
+ else:
+ archive.extractall(path=self.destroot)
os.remove(self.tarname)
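To illustrate what the patched call buys, the sketch below is editorial and not part of the repository or of the diff above. It builds a tarball containing a path-traversal member and shows the `data` filter refusing to extract it; it assumes an interpreter that ships the PEP 706 extraction filters, where `filter='data'` and `tarfile.FilterError` are available.
```python
import io
import tarfile
import tempfile

# Build an in-memory archive whose single member tries to escape the
# extraction directory via "..".
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tar:
    payload = b'pwned'
    info = tarfile.TarInfo(name='../evil.txt')
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))
buf.seek(0)

with tempfile.TemporaryDirectory() as dest, tarfile.open(fileobj=buf, mode='r') as tar:
    try:
        tar.extractall(path=dest, filter='data')
    except tarfile.FilterError as exc:
        # The 'data' filter refuses members that would land outside the destination.
        print('blocked:', exc)
```
Without the filter, the same member could be written outside the destination directory, which is exactly the CVE-2007-4559 scenario the issue references.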
| {"golden_diff": "diff --git a/master/buildbot/process/remotetransfer.py b/master/buildbot/process/remotetransfer.py\n--- a/master/buildbot/process/remotetransfer.py\n+++ b/master/buildbot/process/remotetransfer.py\n@@ -126,7 +126,10 @@\n \n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n- archive.extractall(path=self.destroot)\n+ if hasattr(tarfile, 'data_filter'):\n+ archive.extractall(path=self.destroot, filter='data')\n+ else:\n+ archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n", "issue": "Address PEP 706 - Filter for tarfile.extractall\nGiven proposal improves security of tarfile extraction to help avoid CVE-2007-4559.\r\n\r\n- In Python 3.12-3.13, a DeprecationWarning is emitted and extraction uses `fully_trusted` filter.\r\n- In Python 3.14+, it will use the `data` filter.\r\n\r\nIt seems given proposal was backported also to older version of Python.\r\n\r\nReference: https://peps.python.org/pep-0706/\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nmodule for regrouping all FileWriterImpl and FileReaderImpl away from steps\n\"\"\"\n\nimport os\nimport tarfile\nimport tempfile\nfrom io import BytesIO\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker.protocols import base\n\n\nclass FileWriter(base.FileWriterImpl):\n\n \"\"\"\n Helper class that acts as a file-object with write access\n \"\"\"\n\n def __init__(self, destfile, maxsize, mode):\n # Create missing directories.\n destfile = os.path.abspath(destfile)\n dirname = os.path.dirname(destfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.destfile = destfile\n self.mode = mode\n fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')\n self.fp = os.fdopen(fd, 'wb')\n self.remaining = maxsize\n\n def remote_write(self, data):\n \"\"\"\n Called from remote worker to write L{data} to L{fp} within boundaries\n of L{maxsize}\n\n @type data: C{string}\n @param data: String of data to write\n \"\"\"\n data = unicode2bytes(data)\n if self.remaining is not None:\n if len(data) > self.remaining:\n data = data[:self.remaining]\n self.fp.write(data)\n self.remaining = self.remaining - len(data)\n else:\n self.fp.write(data)\n\n def remote_utime(self, accessed_modified):\n os.utime(self.destfile, accessed_modified)\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n self.fp.close()\n self.fp = None\n # on windows, os.rename does not automatically unlink, so do it\n # manually\n if os.path.exists(self.destfile):\n os.unlink(self.destfile)\n os.rename(self.tmpname, self.destfile)\n self.tmpname = None\n if self.mode is not None:\n os.chmod(self.destfile, self.mode)\n\n def cancel(self):\n # unclean 
shutdown, the file is probably truncated, so delete it\n # altogether rather than deliver a corrupted file\n fp = getattr(self, \"fp\", None)\n if fp:\n fp.close()\n if self.destfile and os.path.exists(self.destfile):\n os.unlink(self.destfile)\n if self.tmpname and os.path.exists(self.tmpname):\n os.unlink(self.tmpname)\n\n\nclass DirectoryWriter(FileWriter):\n\n \"\"\"\n A DirectoryWriter is implemented as a FileWriter, with an added post-processing\n step to unpack the archive, once the transfer has completed.\n \"\"\"\n\n def __init__(self, destroot, maxsize, compress, mode):\n self.destroot = destroot\n self.compress = compress\n\n self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')\n os.close(self.fd)\n\n super().__init__(self.tarname, maxsize, mode)\n\n def remote_unpack(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n # Make sure remote_close is called, otherwise atomic rename won't happen\n self.remote_close()\n\n # Map configured compression to a TarFile setting\n if self.compress == 'bz2':\n mode = 'r|bz2'\n elif self.compress == 'gz':\n mode = 'r|gz'\n else:\n mode = 'r'\n\n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n archive.extractall(path=self.destroot)\n os.remove(self.tarname)\n\n\nclass FileReader(base.FileReaderImpl):\n\n \"\"\"\n Helper class that acts as a file-object with read access\n \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def remote_read(self, maxlength):\n \"\"\"\n Called from remote worker to read at most L{maxlength} bytes of data\n\n @type maxlength: C{integer}\n @param maxlength: Maximum number of data bytes that can be returned\n\n @return: Data read from L{fp}\n @rtype: C{string} of bytes read from file\n \"\"\"\n if self.fp is None:\n return ''\n\n data = self.fp.read(maxlength)\n return data\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n if self.fp is not None:\n self.fp.close()\n self.fp = None\n\n\nclass StringFileWriter(base.FileWriterImpl):\n\n \"\"\"\n FileWriter class that just puts received data into a buffer.\n\n Used to upload a file from worker for inline processing rather than\n writing into a file on master.\n \"\"\"\n\n def __init__(self):\n self.buffer = \"\"\n\n def remote_write(self, data):\n self.buffer += bytes2unicode(data)\n\n def remote_close(self):\n pass\n\n\nclass StringFileReader(FileReader):\n\n \"\"\"\n FileWriter class that just buid send data from a string.\n\n Used to download a file to worker from local string rather than first\n writing into a file on master.\n \"\"\"\n\n def __init__(self, s):\n s = unicode2bytes(s)\n super().__init__(BytesIO(s))\n", "path": "master/buildbot/process/remotetransfer.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\"\"\"\nmodule for regrouping all FileWriterImpl and FileReaderImpl away from steps\n\"\"\"\n\nimport os\nimport tarfile\nimport tempfile\nfrom io import BytesIO\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import unicode2bytes\nfrom buildbot.worker.protocols import base\n\n\nclass FileWriter(base.FileWriterImpl):\n\n \"\"\"\n Helper class that acts as a file-object with write access\n \"\"\"\n\n def __init__(self, destfile, maxsize, mode):\n # Create missing directories.\n destfile = os.path.abspath(destfile)\n dirname = os.path.dirname(destfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n self.destfile = destfile\n self.mode = mode\n fd, self.tmpname = tempfile.mkstemp(dir=dirname, prefix='buildbot-transfer-')\n self.fp = os.fdopen(fd, 'wb')\n self.remaining = maxsize\n\n def remote_write(self, data):\n \"\"\"\n Called from remote worker to write L{data} to L{fp} within boundaries\n of L{maxsize}\n\n @type data: C{string}\n @param data: String of data to write\n \"\"\"\n data = unicode2bytes(data)\n if self.remaining is not None:\n if len(data) > self.remaining:\n data = data[:self.remaining]\n self.fp.write(data)\n self.remaining = self.remaining - len(data)\n else:\n self.fp.write(data)\n\n def remote_utime(self, accessed_modified):\n os.utime(self.destfile, accessed_modified)\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n self.fp.close()\n self.fp = None\n # on windows, os.rename does not automatically unlink, so do it\n # manually\n if os.path.exists(self.destfile):\n os.unlink(self.destfile)\n os.rename(self.tmpname, self.destfile)\n self.tmpname = None\n if self.mode is not None:\n os.chmod(self.destfile, self.mode)\n\n def cancel(self):\n # unclean shutdown, the file is probably truncated, so delete it\n # altogether rather than deliver a corrupted file\n fp = getattr(self, \"fp\", None)\n if fp:\n fp.close()\n if self.destfile and os.path.exists(self.destfile):\n os.unlink(self.destfile)\n if self.tmpname and os.path.exists(self.tmpname):\n os.unlink(self.tmpname)\n\n\nclass DirectoryWriter(FileWriter):\n\n \"\"\"\n A DirectoryWriter is implemented as a FileWriter, with an added post-processing\n step to unpack the archive, once the transfer has completed.\n \"\"\"\n\n def __init__(self, destroot, maxsize, compress, mode):\n self.destroot = destroot\n self.compress = compress\n\n self.fd, self.tarname = tempfile.mkstemp(prefix='buildbot-transfer-')\n os.close(self.fd)\n\n super().__init__(self.tarname, maxsize, mode)\n\n def remote_unpack(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n # Make sure remote_close is called, otherwise atomic rename won't happen\n self.remote_close()\n\n # Map configured compression to a TarFile setting\n if self.compress == 'bz2':\n mode = 'r|bz2'\n elif self.compress == 'gz':\n mode = 'r|gz'\n else:\n mode = 'r'\n\n # Unpack archive and clean up after self\n with tarfile.open(name=self.tarname, mode=mode) as archive:\n if hasattr(tarfile, 'data_filter'):\n archive.extractall(path=self.destroot, filter='data')\n else:\n archive.extractall(path=self.destroot)\n 
os.remove(self.tarname)\n\n\nclass FileReader(base.FileReaderImpl):\n\n \"\"\"\n Helper class that acts as a file-object with read access\n \"\"\"\n\n def __init__(self, fp):\n self.fp = fp\n\n def remote_read(self, maxlength):\n \"\"\"\n Called from remote worker to read at most L{maxlength} bytes of data\n\n @type maxlength: C{integer}\n @param maxlength: Maximum number of data bytes that can be returned\n\n @return: Data read from L{fp}\n @rtype: C{string} of bytes read from file\n \"\"\"\n if self.fp is None:\n return ''\n\n data = self.fp.read(maxlength)\n return data\n\n def remote_close(self):\n \"\"\"\n Called by remote worker to state that no more data will be transferred\n \"\"\"\n if self.fp is not None:\n self.fp.close()\n self.fp = None\n\n\nclass StringFileWriter(base.FileWriterImpl):\n\n \"\"\"\n FileWriter class that just puts received data into a buffer.\n\n Used to upload a file from worker for inline processing rather than\n writing into a file on master.\n \"\"\"\n\n def __init__(self):\n self.buffer = \"\"\n\n def remote_write(self, data):\n self.buffer += bytes2unicode(data)\n\n def remote_close(self):\n pass\n\n\nclass StringFileReader(FileReader):\n\n \"\"\"\n FileWriter class that just buid send data from a string.\n\n Used to download a file to worker from local string rather than first\n writing into a file on master.\n \"\"\"\n\n def __init__(self, s):\n s = unicode2bytes(s)\n super().__init__(BytesIO(s))\n", "path": "master/buildbot/process/remotetransfer.py"}]} | 2,203 | 153 |