code (stringlengths 13–1.2M) | order_type (stringclasses 1 value) | original_example (dict) | step_ids (listlengths 1–5) |
---|---|---|---|
"""
Unit Tests for endpoints.py
"""
import unittest
import os # pylint: disable=unused-import
from mock import patch, call
from github_approval_checker.utils import util # pylint: disable=unused-import
from github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import
from github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import
from github_approval_checker.api import endpoints # pylint: disable=unused-import
class EndpointsUnitTests(unittest.TestCase):
"""
Test endpoints.py
"""
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
handler.get_statuses.return_value = [
{
"state": "error",
"context": "context2",
"target_url": "fake://status_target_2",
"description": "Status Check 2"
},
{
"state": "pending",
"context": "context3",
"target_url": "fake://status_target_3",
"description": "Status Check 3"
},
{
"state": "failure",
"context": "context1",
"target_url": "fake://status_target_1",
"description": "Status Check 1"
}
]
handler.is_authorized.return_value = True
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "approved",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
handler.post_status.side_effect = [
201,
400
]
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_called_once_with("repo-full-name", "review-commit-id")
self.assertEqual(handler.is_authorized.call_count, 2)
handler.post_status.assert_has_calls([
call(
"repo-full-name",
"review-commit-id",
"context2",
"fake://status_target_2",
"review-user-login",
"Status Check 2"
),
call(
"repo-full-name",
"review-commit-id",
"context1",
"fake://status_target_1",
"review-user-login",
"Status Check 1"
)
])
self.assertEqual(response, util.STATUS_OK)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_unapproved(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a review where the status is not approved.
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = {
"context1": [
"whitelist1"
],
"context2": [
"whitelist2"
]
}
validate_config.return_value = None
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
def test_post_pull_request_review_missing(
self,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a missing config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.side_effect = APIError("config-error", "{'message': 'bad-config'}")
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
self.assertEqual(response, "{'message': 'bad-config'}")
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_config(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with a bad config file
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.return_value = None
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
validate_config.side_effect = ConfigError(
'Config Validation Error',
({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)
)
data = {
"repository": {
"name": "repo-name",
"full_name": "repo-full-name",
"owner": {
"login": "repo-owner"
}
},
"review": {
"state": "changes-requested",
"commit_id": "review-commit-id",
"user": {
"login": "review-user-login"
}
}
}
response = endpoints.post_pull_request_review(data)
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_called_once_with("repo-full-name", None)
validate_config.assert_called_once_with("config-data")
self.assertEqual(
response,
(
{
'status': 'Config Validation Error',
'message': 'Bad config data'
},
500
)
)
@patch("github_approval_checker.utils.util.verify_signature")
@patch("github_approval_checker.api.endpoints.connexion")
@patch("github_approval_checker.api.endpoints.GithubHandler")
@patch("github_approval_checker.utils.util.validate_config")
def test_post_pull_request_review_bad_sign(
self,
validate_config,
handler_class,
conn,
verify_signature
):
"""
Test endpoints.post_pull_request_review with an incorrect signature
"""
conn.request.data.return_value = ''
conn.request.headers.get.return_value = 'sha1=signature'
verify_signature.side_effect = SignatureError("Error validating signature")
response = endpoints.post_pull_request_review({})
handler = handler_class.return_value
handler.get_config.return_value = "config-data"
handler.get_statuses.assert_not_called()
handler.is_authorized.assert_not_called()
handler.post_status.assert_not_called()
handler.get_config.assert_not_called()
validate_config.assert_not_called()
self.assertEqual(
response,
(
{
'status': 'Signature Validation Error',
'message': 'Error validating signature'
},
400
)
)
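
The assertions above pin down the control flow of endpoints.post_pull_request_review fairly tightly: signature verification first, then config fetch and validation, then the approved-state guard, then reposting of whitelisted statuses. The sketch below is a hypothetical reconstruction from those assertions only, not the project's actual endpoints.py; the signature header name, the GithubHandler constructor arguments, and the is_authorized arguments are guesses, since the tests never assert them.

# Hypothetical reconstruction of the endpoint exercised above -- inferred from the
# tests' patches and assertions, NOT the real github_approval_checker implementation.
import connexion

from github_approval_checker.utils import util
from github_approval_checker.utils.github_handler import GithubHandler
from github_approval_checker.utils.exceptions import APIError, ConfigError, SignatureError


def post_pull_request_review(data):
    # 1. Signature check: a SignatureError maps to a 400 tuple.
    try:
        util.verify_signature(
            connexion.request.data,
            connexion.request.headers.get("X-Hub-Signature"),  # header name assumed
        )
    except SignatureError as err:
        return {"status": "Signature Validation Error", "message": str(err)}, 400

    handler = GithubHandler()  # constructor arguments are not asserted by the tests

    # 2. Config fetch and validation happen before the review-state guard
    #    (the missing-config and bad-config tests both use non-approved reviews).
    try:
        config = handler.get_config(data["repository"]["full_name"], None)
        util.validate_config(config)
    except (APIError, ConfigError) as err:
        return err.args[1]  # the tests compare the response to the exception's second argument

    # 3. Non-approved reviews short-circuit with a 200.
    if data["review"]["state"] != "approved":
        return {"status": "OK", "message": "Review state is not approved"}, 200

    # 4. Repost each status whose context is whitelisted in the config. The exact
    #    is_authorized signature is unknown from the tests alone, so the call below
    #    is a placeholder guess; a real implementation may also filter on status state.
    for status in handler.get_statuses(data["repository"]["full_name"], data["review"]["commit_id"]):
        if status["context"] in config and handler.is_authorized(data["review"], config[status["context"]]):
            handler.post_status(
                data["repository"]["full_name"],
                data["review"]["commit_id"],
                status["context"],
                status["target_url"],
                data["review"]["user"]["login"],
                status["description"],
            )
    return util.STATUS_OK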
|
normal
|
{
"blob_id": "7626202d1e3ec7321addbb028be2275b882efda2",
"index": 6453,
"step-1": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, 
verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-2": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n <mask token>\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def 
test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-3": "<mask token>\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n 
@patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 
400))\n",
"step-4": "<mask token>\nimport unittest\nimport os\nfrom mock import patch, call\nfrom github_approval_checker.utils import util\nfrom github_approval_checker.utils.github_handler import GithubHandler\nfrom github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError\nfrom github_approval_checker.api import endpoints\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review(self, validate_config, handler_class,\n conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n handler.get_statuses.return_value = [{'state': 'error', 'context':\n 'context2', 'target_url': 'fake://status_target_2',\n 'description': 'Status Check 2'}, {'state': 'pending',\n 'context': 'context3', 'target_url': 'fake://status_target_3',\n 'description': 'Status Check 3'}, {'state': 'failure',\n 'context': 'context1', 'target_url': 'fake://status_target_1',\n 'description': 'Status Check 1'}]\n handler.is_authorized.return_value = True\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'approved', 'commit_id': 'review-commit-id', 'user':\n {'login': 'review-user-login'}}}\n handler.post_status.side_effect = [201, 400]\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_called_once_with('repo-full-name',\n 'review-commit-id')\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([call('repo-full-name',\n 'review-commit-id', 'context2', 'fake://status_target_2',\n 'review-user-login', 'Status Check 2'), call('repo-full-name',\n 'review-commit-id', 'context1', 'fake://status_target_1',\n 'review-user-login', 'Status Check 1')])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_unapproved(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = {'context1': ['whitelist1'],\n 'context2': ['whitelist2']}\n validate_config.return_value = None\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n 
handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message':\n 'Review state is not approved'}, 200))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n def test_post_pull_request_review_missing(self, handler_class, conn,\n verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError('config-error',\n \"{'message': 'bad-config'}\")\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_config(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n validate_config.side_effect = ConfigError('Config Validation Error',\n ({'status': 'Config Validation Error', 'message':\n 'Bad config data'}, 500))\n data = {'repository': {'name': 'repo-name', 'full_name':\n 'repo-full-name', 'owner': {'login': 'repo-owner'}}, 'review':\n {'state': 'changes-requested', 'commit_id': 'review-commit-id',\n 'user': {'login': 'review-user-login'}}}\n response = endpoints.post_pull_request_review(data)\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with('repo-full-name', None)\n validate_config.assert_called_once_with('config-data')\n self.assertEqual(response, ({'status': 'Config Validation Error',\n 'message': 'Bad config data'}, 500))\n\n @patch('github_approval_checker.utils.util.verify_signature')\n @patch('github_approval_checker.api.endpoints.connexion')\n @patch('github_approval_checker.api.endpoints.GithubHandler')\n @patch('github_approval_checker.utils.util.validate_config')\n def test_post_pull_request_review_bad_sign(self, validate_config,\n handler_class, conn, verify_signature):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\n 'Error validating signature')\n response = endpoints.post_pull_request_review({})\n handler = handler_class.return_value\n handler.get_config.return_value = 'config-data'\n 
handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(response, ({'status': 'Signature Validation Error',\n 'message': 'Error validating signature'}, 400))\n",
"step-5": "\"\"\"\nUnit Tests for endpoints.py\n\"\"\"\n\nimport unittest\nimport os # pylint: disable=unused-import\nfrom mock import patch, call\nfrom github_approval_checker.utils import util # pylint: disable=unused-import\nfrom github_approval_checker.utils.github_handler import GithubHandler # pylint: disable=unused-import\nfrom github_approval_checker.utils.exceptions import ConfigError, APIError, SignatureError # noqa pylint: disable=unused-import\nfrom github_approval_checker.api import endpoints # pylint: disable=unused-import\n\n\nclass EndpointsUnitTests(unittest.TestCase):\n \"\"\"\n Test endpoints.py\n \"\"\"\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.return_value = {\n \"context1\": [\n \"whitelist1\"\n ],\n \"context2\": [\n \"whitelist2\"\n ]\n }\n\n handler.get_statuses.return_value = [\n {\n \"state\": \"error\",\n \"context\": \"context2\",\n \"target_url\": \"fake://status_target_2\",\n \"description\": \"Status Check 2\"\n },\n {\n \"state\": \"pending\",\n \"context\": \"context3\",\n \"target_url\": \"fake://status_target_3\",\n \"description\": \"Status Check 3\"\n },\n {\n \"state\": \"failure\",\n \"context\": \"context1\",\n \"target_url\": \"fake://status_target_1\",\n \"description\": \"Status Check 1\"\n }\n ]\n\n handler.is_authorized.return_value = True\n\n validate_config.return_value = None\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"approved\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n handler.post_status.side_effect = [\n 201,\n 400\n ]\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_called_once_with(\"repo-full-name\", \"review-commit-id\")\n self.assertEqual(handler.is_authorized.call_count, 2)\n handler.post_status.assert_has_calls([\n call(\n \"repo-full-name\",\n \"review-commit-id\",\n \"context2\",\n \"fake://status_target_2\",\n \"review-user-login\",\n \"Status Check 2\"\n ),\n call(\n \"repo-full-name\",\n \"review-commit-id\",\n \"context1\",\n \"fake://status_target_1\",\n \"review-user-login\",\n \"Status Check 1\"\n )\n ])\n self.assertEqual(response, util.STATUS_OK)\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_unapproved(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a review where the status is not approved.\n \"\"\"\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n 
handler.get_config.return_value = {\n \"context1\": [\n \"whitelist1\"\n ],\n \"context2\": [\n \"whitelist2\"\n ]\n }\n\n validate_config.return_value = None\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, ({'status': 'OK', 'message': 'Review state is not approved'}, 200))\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n def test_post_pull_request_review_missing(\n self,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a missing config file\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.side_effect = APIError(\"config-error\", \"{'message': 'bad-config'}\")\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n self.assertEqual(response, \"{'message': 'bad-config'}\")\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_bad_config(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with a bad config file\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.return_value = None\n\n handler = handler_class.return_value\n handler.get_config.return_value = \"config-data\"\n\n validate_config.side_effect = ConfigError(\n 'Config Validation Error',\n ({'status': 'Config Validation Error', 'message': 'Bad config data'}, 500)\n )\n\n data = {\n \"repository\": {\n \"name\": \"repo-name\",\n \"full_name\": \"repo-full-name\",\n \"owner\": {\n \"login\": \"repo-owner\"\n }\n },\n \"review\": {\n \"state\": \"changes-requested\",\n \"commit_id\": \"review-commit-id\",\n \"user\": {\n \"login\": \"review-user-login\"\n }\n }\n }\n\n response = endpoints.post_pull_request_review(data)\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_called_once_with(\"repo-full-name\", None)\n validate_config.assert_called_once_with(\"config-data\")\n self.assertEqual(\n response,\n (\n {\n 'status': 'Config Validation Error',\n 'message': 'Bad config data'\n },\n 500\n )\n 
)\n\n @patch(\"github_approval_checker.utils.util.verify_signature\")\n @patch(\"github_approval_checker.api.endpoints.connexion\")\n @patch(\"github_approval_checker.api.endpoints.GithubHandler\")\n @patch(\"github_approval_checker.utils.util.validate_config\")\n def test_post_pull_request_review_bad_sign(\n self,\n validate_config,\n handler_class,\n conn,\n verify_signature\n ):\n \"\"\"\n Test endpoints.post_pull_request_review with an incorrect signature\n \"\"\"\n\n conn.request.data.return_value = ''\n conn.request.headers.get.return_value = 'sha1=signature'\n verify_signature.side_effect = SignatureError(\"Error validating signature\")\n\n response = endpoints.post_pull_request_review({})\n\n handler = handler_class.return_value\n handler.get_config.return_value = \"config-data\"\n\n handler.get_statuses.assert_not_called()\n handler.is_authorized.assert_not_called()\n handler.post_status.assert_not_called()\n handler.get_config.assert_not_called()\n validate_config.assert_not_called()\n self.assertEqual(\n response,\n (\n {\n 'status': 'Signature Validation Error',\n 'message': 'Error validating signature'\n },\n 400\n )\n )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import sys
def isPalin(s):
result = True
for i in range(len(s)/2):
if s[i] != s[-(i + 1)]:
result = False
break
return result
def main():
curr_large = 0
for i in xrange(900, 1000):
for j in xrange(900, 1000):
prod = i * j
            # Turns out a list comprehension is more succinct, but I
            # leave the traditional loop here anyway
if str(prod) == str(prod)[::-1] and prod > curr_large:
curr_large = prod
print curr_large
if __name__ == '__main__':
main()
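
As the in-code comment notes, the nested loops collapse into a single comprehension. A Python 3 equivalent of main() could look like the sketch below (the original uses Python 2 constructs such as xrange and the print statement); the function name is illustrative.

# Python 3 sketch of the same search, using the comprehension the comment alludes to.
def largest_palindrome_product():
    return max(
        i * j
        for i in range(900, 1000)
        for j in range(900, 1000)
        if str(i * j) == str(i * j)[::-1]
    )

# Example: print(largest_palindrome_product())  ->  906609 (913 * 993)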
|
normal
|
{
"blob_id": "1c171c67ca5ef0e9b5f2941eec7a625a8823271f",
"index": 8463,
"step-1": "import sys\n\ndef isPalin(s):\n result = True\n for i in range(len(s)/2):\n if s[i] != s[-(i + 1)]:\n result = False\n break\n return result\n\n\ndef main():\n curr_large = 0\n for i in xrange(900, 1000):\n for j in xrange(900, 1000):\n prod = i * j\n # Turns out list comprehension is more succint, but I \n # leave the traditional up method anyway\n if str(prod) == str(prod)[::-1] and prod > curr_large:\n curr_large = prod\n print curr_large\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import random
firstNames = ("Thomas", "Daniel", "James", "Aaron", "Tommy", "Terrell", "Jack", "Joseph", "Samuel", "Quinn", "Hunter", "Vince", "Young", "Ian", "Erving", "Leo")
lastNames = ("Smith", "Johnson", "Williams", "Kline","Brown", "Garcia", "Jones", "Miller", "Davis","Williams", "Alves", "Sobronsky", "Hall", "Murphy", "Morris")
# Verifies statistics are not negative
f = lambda x : 0 if (x < 0) else x
def improvementFunction(age, maxMu):
return (maxMu/-30) * (age - 17) * (age - 30)
class profile:
def __init__ (self):
self.name = firstNames[random.randrange(0,len(firstNames))] + " " + lastNames[random.randrange(0,len(lastNames))]
self.years = 2020
self.ppg = [f(round( random.gauss(10.5, 2.4), 1))]
self.apg = [f(round(random.gauss(5.2, 2.4), 1))]
self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]
self.bpg = [f(round(random.gauss(1, .8), 1))]
self.spg = [f(round(random.gauss(.9, 1.2), 1))]
self.tpg = [f(round(random.gauss(1.8, .5), 1))]
self.age = random.randrange(18,24)
self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]
self.tpp = [f(round(random.gauss(28.7, 6), 1))]
def getStats (self):
output = {"Age:" : self.age,
"name" : self.name,
"points per game" : self.ppg[-1],
"assists per game" : self.apg[-1],
"rebounds per game" : self.rpg[-1],
"blocks per game" : self.bpg[-1],
"steals per game" : self.spg[-1],
"turnovers per game" : self.tpg[-1],
"field goal percentage" : self.fgp[-1],
"three point percentage" : self.tpp[-1]}
return output
def incrementAge (self):
self.age += 1
def updateStats (self):
self.ppg.append(f(round(self.ppg[-1] + random.gauss(improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))
self.apg.append(f(round(self.apg[-1] + random.gauss(improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))
self.rpg.append(f(round(self.rpg[-1] + random.gauss(improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))
self.bpg.append(f(round(self.bpg[-1] + random.gauss(improvementFunction(self.age, self.bpg[-1] * 2 - 1), .5), 1)))
self.spg.append(f(round(self.spg[-1] + random.gauss(improvementFunction(self.age, self.spg[-1] * 2 - 1), .5), 1)))
self.tpg.append(f(round(self.tpg[-1] + random.gauss(improvementFunction(self.age, 2.5 - .5), .5), 1)))
self.fgp.append(f(round(self.fgp[-1] + random.gauss(improvementFunction(self.age, 10 - 3), 2.5), 1)))
self.tpp.append(f(round(self.tpp[-1] + random.gauss(improvementFunction(self.age, 8 - 3), 1.9), 1)))
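
A brief usage sketch of the class above, using only the methods defined in this file; it generates one player and simulates a few seasons of development:

# Illustrative use of the profile class defined above.
player = profile()
print(player.getStats())      # randomly generated rookie stat line

for _ in range(5):            # simulate five off-seasons of improvement
    player.incrementAge()
    player.updateStats()

print(player.getStats())      # stat line after development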
|
normal
|
{
"blob_id": "5607d4fea315fa7bf87337453fbef90a93a66516",
"index": 3968,
"step-1": "<mask token>\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-2": "<mask token>\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-3": "<mask token>\nfirstNames = ('Thomas', 'Daniel', 'James', 'Aaron', 'Tommy', 'Terrell',\n 'Jack', 'Joseph', 'Samuel', 'Quinn', 'Hunter', 'Vince', 'Young', 'Ian',\n 'Erving', 'Leo')\nlastNames = ('Smith', 'Johnson', 'Williams', 'Kline', 'Brown', 'Garcia',\n 'Jones', 'Miller', 'Davis', 'Williams', 'Alves', 'Sobronsky', 'Hall',\n 'Murphy', 'Morris')\nf = lambda x: 0 if x < 0 else x\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-4": "import random\nfirstNames = ('Thomas', 'Daniel', 'James', 'Aaron', 'Tommy', 'Terrell',\n 'Jack', 'Joseph', 'Samuel', 'Quinn', 'Hunter', 'Vince', 'Young', 'Ian',\n 'Erving', 'Leo')\nlastNames = ('Smith', 'Johnson', 'Williams', 'Kline', 'Brown', 'Garcia',\n 'Jones', 'Miller', 'Davis', 'Williams', 'Alves', 'Sobronsky', 'Hall',\n 'Murphy', 'Morris')\nf = lambda x: 0 if x < 0 else x\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-5": "import random\r\n\r\n\r\nfirstNames = (\"Thomas\", \"Daniel\", \"James\", \"Aaron\", \"Tommy\", \"Terrell\", \"Jack\", \"Joseph\", \"Samuel\", \"Quinn\", \"Hunter\", \"Vince\", \"Young\", \"Ian\", \"Erving\", \"Leo\")\r\nlastNames = (\"Smith\", \"Johnson\", \"Williams\", \"Kline\",\"Brown\", \"Garcia\", \"Jones\", \"Miller\", \"Davis\",\"Williams\", \"Alves\", \"Sobronsky\", \"Hall\", \"Murphy\", \"Morris\")\r\n\r\n# Verifies statistics are not negative\r\nf = lambda x : 0 if (x < 0) else x\r\n\r\n\r\ndef improvementFunction(age, maxMu):\r\n return (maxMu/-30) * (age - 17) * (age - 30)\r\n\r\n\r\nclass profile:\r\n def __init__ (self):\r\n self.name = firstNames[random.randrange(0,len(firstNames))] + \" \" + lastNames[random.randrange(0,len(lastNames))]\r\n self.years = 2020\r\n self.ppg = [f(round( random.gauss(10.5, 2.4), 1))]\r\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\r\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\r\n self.bpg = [f(round(random.gauss(1, .8), 1))]\r\n self.spg = [f(round(random.gauss(.9, 1.2), 1))]\r\n self.tpg = [f(round(random.gauss(1.8, .5), 1))]\r\n self.age = random.randrange(18,24)\r\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\r\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\r\n\r\n def getStats (self):\r\n output = {\"Age:\" : self.age,\r\n \"name\" : self.name,\r\n \"points per game\" : self.ppg[-1],\r\n \"assists per game\" : self.apg[-1],\r\n \"rebounds per game\" : self.rpg[-1],\r\n \"blocks per game\" : self.bpg[-1],\r\n \"steals per game\" : self.spg[-1],\r\n \"turnovers per game\" : self.tpg[-1],\r\n \"field goal percentage\" : self.fgp[-1],\r\n \"three point percentage\" : self.tpp[-1]}\r\n return output\r\n\r\n def incrementAge (self):\r\n self.age += 1\r\n\r\n def updateStats (self):\r\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\r\n self.apg.append(f(round(self.apg[-1] + random.gauss(improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\r\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\r\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(improvementFunction(self.age, self.bpg[-1] * 2 - 1), .5), 1)))\r\n self.spg.append(f(round(self.spg[-1] + random.gauss(improvementFunction(self.age, self.spg[-1] * 2 - 1), .5), 1)))\r\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(improvementFunction(self.age, 2.5 - .5), .5), 1)))\r\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(improvementFunction(self.age, 10 - 3), 2.5), 1)))\r\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(improvementFunction(self.age, 8 - 3), 1.9), 1)))\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
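
The step-4/step-5 fields above define a profile class that rolls random rookie-season averages and then simulates year-over-year development. A minimal driver could look like the sketch below; the module name player_profile is hypothetical, since the original file name is not part of the record.

from player_profile import profile   # hypothetical module holding the class above

rookie = profile()                # random name, age 18-23, rookie-season averages
for season in range(5):           # simulate five further seasons
    rookie.incrementAge()
    rookie.updateStats()          # appends a new season to every stat list
print(rookie.getStats())          # dict with the latest per-game averages
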
from rest_framework import serializers
from .models import data
from django.contrib.auth.models import User
class dataSerializer(serializers.ModelSerializer):
class Meta:
model = data
fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']
class userSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'email']
|
normal
|
{
"blob_id": "972c479ea40232e14fbf678ca2ccf9716e473fe8",
"index": 9736,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-3": "<mask token>\n\n\nclass dataSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = data\n fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-4": "from rest_framework import serializers\nfrom .models import data\nfrom django.contrib.auth.models import User\n\n\nclass dataSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = data\n fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
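
A hedged usage sketch for the dataSerializer above: the field values are illustrative only, and the snippet assumes it runs inside the same configured Django project, with task_user given the primary key of an existing User.

payload = {
    "task": "write report",      # illustrative values, not taken from the original record
    "duedate": "2021-06-01",
    "person": "alice",
    "done": False,
    "task_user": 1,              # hypothetical pk of an existing User row
}
serializer = dataSerializer(data=payload)
if serializer.is_valid():
    instance = serializer.save()   # creates a data row from the validated payload
else:
    print(serializer.errors)       # per-field validation messages
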
#! /usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess as sp
import requests
import xmltodict
import json
db = cgi.FieldStorage()
ch=db.getvalue("ch")
url =("http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>" .format(ch))
url=url.replace(" ","%20")
r = requests.get(url)
n = xmltodict.parse(r.content)
k = json.dumps(n)
df = json.loads(k)
l=df["Vehicle"]["vehicleJson"]
p=json.loads(l)
output="Your car's details are:\n"+"Owner name: "+str(p['Owner'])+"\n"+"Car Company: "+str(p['CarMake']['CurrentTextValue'])+"\n"+"Car Model: "+str(p['CarModel']['CurrentTextValue'])+"\n"+"Fuel Type: "+str(p['FuelType']['CurrentTextValue'])+"\n"+"Registration Year: "+str(p['RegistrationYear'])+"\n"+"Insurance: "+str(p['Insurance'])+"\n"+"Vehicle ID: "+str(p['VechileIdentificationNumber'])+"\n"+"Engine No.: "+str(p['EngineNumber'])+"\n"+"Location RTO: "+str(p['Location'])
print(output)
|
normal
|
{
"blob_id": "87a62f76027e0653f6966f76a42def2ce2a26ba3",
"index": 5893,
"step-1": "<mask token>\n",
"step-2": "print('content-type: text/html')\nprint()\n<mask token>\nprint(output)\n",
"step-3": "print('content-type: text/html')\nprint()\n<mask token>\ndb = cgi.FieldStorage()\nch = db.getvalue('ch')\nurl = (\n 'http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>'\n .format(ch))\nurl = url.replace(' ', '%20')\nr = requests.get(url)\nn = xmltodict.parse(r.content)\nk = json.dumps(n)\ndf = json.loads(k)\nl = df['Vehicle']['vehicleJson']\np = json.loads(l)\noutput = \"Your car's details are:\\n\" + 'Owner name: ' + str(p['Owner']\n ) + '\\n' + 'Car Company: ' + str(p['CarMake']['CurrentTextValue']\n ) + '\\n' + 'Car Model: ' + str(p['CarModel']['CurrentTextValue']\n ) + '\\n' + 'Fuel Type: ' + str(p['FuelType']['CurrentTextValue']\n ) + '\\n' + 'Registration Year: ' + str(p['RegistrationYear']\n ) + '\\n' + 'Insurance: ' + str(p['Insurance']\n ) + '\\n' + 'Vehicle ID: ' + str(p['VechileIdentificationNumber']\n ) + '\\n' + 'Engine No.: ' + str(p['EngineNumber']\n ) + '\\n' + 'Location RTO: ' + str(p['Location'])\nprint(output)\n",
"step-4": "print('content-type: text/html')\nprint()\nimport cgi\nimport subprocess as sp\nimport requests\nimport xmltodict\nimport json\ndb = cgi.FieldStorage()\nch = db.getvalue('ch')\nurl = (\n 'http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>'\n .format(ch))\nurl = url.replace(' ', '%20')\nr = requests.get(url)\nn = xmltodict.parse(r.content)\nk = json.dumps(n)\ndf = json.loads(k)\nl = df['Vehicle']['vehicleJson']\np = json.loads(l)\noutput = \"Your car's details are:\\n\" + 'Owner name: ' + str(p['Owner']\n ) + '\\n' + 'Car Company: ' + str(p['CarMake']['CurrentTextValue']\n ) + '\\n' + 'Car Model: ' + str(p['CarModel']['CurrentTextValue']\n ) + '\\n' + 'Fuel Type: ' + str(p['FuelType']['CurrentTextValue']\n ) + '\\n' + 'Registration Year: ' + str(p['RegistrationYear']\n ) + '\\n' + 'Insurance: ' + str(p['Insurance']\n ) + '\\n' + 'Vehicle ID: ' + str(p['VechileIdentificationNumber']\n ) + '\\n' + 'Engine No.: ' + str(p['EngineNumber']\n ) + '\\n' + 'Location RTO: ' + str(p['Location'])\nprint(output)\n",
"step-5": "#! /usr/bin/python3\r\n\r\nprint(\"content-type: text/html\")\r\nprint()\r\n\r\n\r\nimport cgi\r\nimport subprocess as sp\r\nimport requests\r\nimport xmltodict\r\nimport json\r\n\r\ndb = cgi.FieldStorage()\r\nch=db.getvalue(\"ch\")\r\nurl =(\"http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>\" .format(ch))\r\nurl=url.replace(\" \",\"%20\")\r\nr = requests.get(url)\r\nn = xmltodict.parse(r.content)\r\nk = json.dumps(n)\r\ndf = json.loads(k)\r\nl=df[\"Vehicle\"][\"vehicleJson\"]\r\np=json.loads(l)\r\noutput=\"Your car's details are:\\n\"+\"Owner name: \"+str(p['Owner'])+\"\\n\"+\"Car Company: \"+str(p['CarMake']['CurrentTextValue'])+\"\\n\"+\"Car Model: \"+str(p['CarModel']['CurrentTextValue'])+\"\\n\"+\"Fuel Type: \"+str(p['FuelType']['CurrentTextValue'])+\"\\n\"+\"Registration Year: \"+str(p['RegistrationYear'])+\"\\n\"+\"Insurance: \"+str(p['Insurance'])+\"\\n\"+\"Vehicle ID: \"+str(p['VechileIdentificationNumber'])+\"\\n\"+\"Engine No.: \"+str(p['EngineNumber'])+\"\\n\"+\"Location RTO: \"+str(p['Location'])\r\nprint(output)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
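
The script above unwraps the response through an XML-to-dict-to-JSON round trip before reading the vehicleJson payload. The standalone sketch below reproduces just that parsing step with a made-up response body, so it can run without calling the regcheck.org.uk API.

import json
import xmltodict

sample_xml = """<Vehicle>
  <vehicleJson>{"Owner": "A. Driver", "RegistrationYear": "2018"}</vehicleJson>
</Vehicle>"""

parsed = xmltodict.parse(sample_xml)                  # nested dict keyed by tag name
inner = json.loads(parsed["Vehicle"]["vehicleJson"])  # the JSON embedded in the XML
print(inner["Owner"], inner["RegistrationYear"])      # -> A. Driver 2018
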
from EdgeState import EdgeState
from rest_framework import serializers
from dzTrafico.BusinessLayer.TrafficAnalysis.TrafficAnalyzer import TrafficAnalyzer, VirtualRampMetering
from dzTrafico.BusinessEntities.Location import LocationSerializer
from dzTrafico.BusinessLayer.SimulationCreation.NetworkManager import NetworkManager
class Sink(object):
id = 0
trafficAnalyzer = None
incidents = []
def __init__(self):
self.id = Sink.id
Sink.id += 1
self.nodes = []
#print "--------nodes----------"
#print len(nodes)
def add_node(self, node):
self.nodes.append(node)
def get_sensors(self):
sensors = []
for node in self.nodes:
for sensor in node.sensors:
sensors.append(sensor)
return sensors
def change_lane(self):
for node in self.nodes:
if node.LC_is_activated:
node.change_lane()
def incident_change_lane(self):
for node in self.nodes:
if node.isCongested:
node.incident_change_lane()
def update_vsl(self):
vsl = []
index = 1
for node in self.nodes:
if node.VSL_is_activated:
Sink.trafficAnalyzer.update_vsl(self, node)
vsl_node = dict()
vsl_node["id"] = index
vsl_node["vsl"] = node.get_current_vsl()
vsl.append(vsl_node)
index += 1
return vsl
def deactivate_vsl(self):
for node in self.nodes:
node.deactivate_VSL()
def deactivate_lc(self):
for node in self.nodes:
node.deactivate_LC()
def set_sumo_LC_Model(self, mode):
for node in self.nodes:
if node.LC_is_activated:
node.set_sumo_LC_Model(mode)
def read_traffic_state(self):
traffic_state = []
for node in self.nodes:
congested_lanes = node.check_congested_lanes()
congestion_detected = len(congested_lanes) > 0
if congestion_detected:
for incident in Sink.incidents:
print "incident ===> ", incident.edge.getID()
congestion_detected = node.edge.getID() == incident.edge.getID()
if congestion_detected:
congested_lanes = [incident.lane]
break
if congestion_detected and not TrafficAnalyzer.isCongestionDetected:
print "--------notify_congestion_detected----------"
print node.edge.getID()
print congested_lanes
node.isCongested = True
node.set_congested_lanes(congested_lanes)
if TrafficAnalyzer.isLCControlActivated:
node.close_incident_lanes()
Sink.trafficAnalyzer.notify_congestion_detected(self, node, congested_lanes)
elif TrafficAnalyzer.congestionExists and node.isCongested and TrafficAnalyzer.isLCControlActivated:
if node.check_if_discharged():
Sink.trafficAnalyzer.clear_congestion()
node.isCongested = False
edge_coords = dict()
start, end = NetworkManager.get_edge_coords(node.edge)
edge_coords["start"] = LocationSerializer(start).data
edge_coords["end"] = LocationSerializer(end).data
traffic_state.append(
EdgeState(
node.edge.getID(),
edge_coords,
node.get_current_speed(),
node.get_current_vsl(),
node.get_current_density(),
node.VSL_is_activated,
congestion_detected
)
)
return traffic_state
def get_node_by_edgeID(self, edge_id):
for node in self.nodes:
if node.edge.getID() == edge_id:
return node
return None
def get_LC_recommendations(self):
lc_recommendations = []
index = VirtualRampMetering.num_vsl_controlled_sections + 1
for node in self.nodes:
lanes = []
if node.LC_is_activated:
for i in range(0,len(node.recommendations)):
for r in node.recommendations:
if r.lane == i:
lanes.append(
NodeLanesRcmd(
r.lane,
r.recommendation
)
)
lc_recommendations.extend(
[
NodeLCRcmd(
index,
lanes
)
]
)
index += 1
nodeLCRcmdSerializer = NodeLCRcmdSerializer(lc_recommendations, many=True)
return nodeLCRcmdSerializer.data
class NodeLCRcmd(object):
def __init__(self, id, lanes):
self.id = id
self.lanes = lanes
class NodeLanesRcmd(object):
def __init__(self, lane, recommendation):
self.lane = lane
self.recommendation = recommendation
class NodeLanesRcmdSerializer(serializers.Serializer):
lane = serializers.IntegerField()
recommendation = serializers.IntegerField()
class NodeLCRcmdSerializer(serializers.Serializer):
id = serializers.IntegerField()
lanes = NodeLanesRcmdSerializer(many=True)
|
normal
|
{
"blob_id": "892c363c247177deb3297af84a93819a69e16801",
"index": 8907,
"step-1": "from EdgeState import EdgeState\nfrom rest_framework import serializers\nfrom dzTrafico.BusinessLayer.TrafficAnalysis.TrafficAnalyzer import TrafficAnalyzer, VirtualRampMetering\nfrom dzTrafico.BusinessEntities.Location import LocationSerializer\nfrom dzTrafico.BusinessLayer.SimulationCreation.NetworkManager import NetworkManager\n\nclass Sink(object):\n\n id = 0\n trafficAnalyzer = None\n incidents = []\n\n def __init__(self):\n self.id = Sink.id\n Sink.id += 1\n self.nodes = []\n\n #print \"--------nodes----------\"\n #print len(nodes)\n\n def add_node(self, node):\n self.nodes.append(node)\n\n def get_sensors(self):\n sensors = []\n for node in self.nodes:\n for sensor in node.sensors:\n sensors.append(sensor)\n return sensors\n\n def change_lane(self):\n for node in self.nodes:\n if node.LC_is_activated:\n node.change_lane()\n\n def incident_change_lane(self):\n for node in self.nodes:\n if node.isCongested:\n node.incident_change_lane()\n\n def update_vsl(self):\n vsl = []\n index = 1\n for node in self.nodes:\n if node.VSL_is_activated:\n Sink.trafficAnalyzer.update_vsl(self, node)\n vsl_node = dict()\n vsl_node[\"id\"] = index\n vsl_node[\"vsl\"] = node.get_current_vsl()\n vsl.append(vsl_node)\n index += 1\n return vsl\n\n def deactivate_vsl(self):\n for node in self.nodes:\n node.deactivate_VSL()\n\n def deactivate_lc(self):\n for node in self.nodes:\n node.deactivate_LC()\n\n def set_sumo_LC_Model(self, mode):\n for node in self.nodes:\n if node.LC_is_activated:\n node.set_sumo_LC_Model(mode)\n\n def read_traffic_state(self):\n traffic_state = []\n for node in self.nodes:\n\n congested_lanes = node.check_congested_lanes()\n congestion_detected = len(congested_lanes) > 0\n if congestion_detected:\n for incident in Sink.incidents:\n print \"incident ===> \", incident.edge.getID()\n congestion_detected = node.edge.getID() == incident.edge.getID()\n if congestion_detected:\n congested_lanes = [incident.lane]\n break\n\n if congestion_detected and not TrafficAnalyzer.isCongestionDetected:\n print \"--------notify_congestion_detected----------\"\n print node.edge.getID()\n print congested_lanes\n node.isCongested = True\n node.set_congested_lanes(congested_lanes)\n if TrafficAnalyzer.isLCControlActivated:\n node.close_incident_lanes()\n Sink.trafficAnalyzer.notify_congestion_detected(self, node, congested_lanes)\n\n elif TrafficAnalyzer.congestionExists and node.isCongested and TrafficAnalyzer.isLCControlActivated:\n if node.check_if_discharged():\n Sink.trafficAnalyzer.clear_congestion()\n node.isCongested = False\n edge_coords = dict()\n start, end = NetworkManager.get_edge_coords(node.edge)\n edge_coords[\"start\"] = LocationSerializer(start).data\n edge_coords[\"end\"] = LocationSerializer(end).data\n traffic_state.append(\n EdgeState(\n node.edge.getID(),\n edge_coords,\n node.get_current_speed(),\n node.get_current_vsl(),\n node.get_current_density(),\n node.VSL_is_activated,\n congestion_detected\n )\n )\n return traffic_state\n\n def get_node_by_edgeID(self, edge_id):\n for node in self.nodes:\n if node.edge.getID() == edge_id:\n return node\n return None\n\n def get_LC_recommendations(self):\n lc_recommendations = []\n index = VirtualRampMetering.num_vsl_controlled_sections + 1\n for node in self.nodes:\n lanes = []\n if node.LC_is_activated:\n for i in range(0,len(node.recommendations)):\n for r in node.recommendations:\n if r.lane == i:\n lanes.append(\n NodeLanesRcmd(\n r.lane,\n r.recommendation\n )\n )\n lc_recommendations.extend(\n [\n NodeLCRcmd(\n index,\n 
lanes\n )\n ]\n )\n index += 1\n nodeLCRcmdSerializer = NodeLCRcmdSerializer(lc_recommendations, many=True)\n return nodeLCRcmdSerializer.data\n\nclass NodeLCRcmd(object):\n def __init__(self, id, lanes):\n self.id = id\n self.lanes = lanes\n\nclass NodeLanesRcmd(object):\n def __init__(self, lane, recommendation):\n self.lane = lane\n self.recommendation = recommendation\n\nclass NodeLanesRcmdSerializer(serializers.Serializer):\n lane = serializers.IntegerField()\n recommendation = serializers.IntegerField()\n\nclass NodeLCRcmdSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n lanes = NodeLanesRcmdSerializer(many=True)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
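
Most of the Sink class above needs the dzTrafico/SUMO runtime, but the small recommendation serializers at the bottom can be exercised on their own. A hedged sketch, assuming a Django project with rest_framework configured and purely illustrative lane values:

lane_rcmds = [NodeLanesRcmd(lane=0, recommendation=2),
              NodeLanesRcmd(lane=1, recommendation=-1)]
node_rcmd = NodeLCRcmd(id=1, lanes=lane_rcmds)

serialized = NodeLCRcmdSerializer([node_rcmd], many=True).data
print(serialized)  # roughly: [{'id': 1, 'lanes': [{'lane': 0, 'recommendation': 2}, ...]}]
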
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: 数据库迁移
@Author: Zpp
@Date: 2020-03-30 11:01:56
@LastEditors: Zpp
@LastEditTime: 2020-04-28 09:55:26
'''
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from conf.setting import Config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from models.salary import *
from models.system import *
from models.log import *
# 初始化 migrate
# 两个参数一个是 Flask 的 app,一个是数据库 db
migrate = Migrate(app, db)
# 初始化管理器
manager = Manager(app)
# 添加 db 命令,并与 MigrateCommand 绑定
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
normal
|
{
"blob_id": "69ebdab4cd1f0b5154305410381db252205ff97d",
"index": 9768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(rootPath)\n<mask token>\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-3": "<mask token>\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n<mask token>\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-4": "<mask token>\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom conf.setting import Config\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nfrom models.salary import *\nfrom models.system import *\nfrom models.log import *\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n'''\n@Description: 数据库迁移\n@Author: Zpp\n@Date: 2020-03-30 11:01:56\n@LastEditors: Zpp\n@LastEditTime: 2020-04-28 09:55:26\n'''\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom conf.setting import Config\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nfrom models.salary import *\nfrom models.system import *\nfrom models.log import *\n\n# 初始化 migrate\n# 两个参数一个是 Flask 的 app,一个是数据库 db\nmigrate = Migrate(app, db)\n\n# 初始化管理器\nmanager = Manager(app)\n# 添加 db 命令,并与 MigrateCommand 绑定\nmanager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
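
With the manager wired up as above, the usual Flask-Migrate workflow is driven from the command line: python migrate.py db init creates the migrations directory on first use, python migrate.py db migrate autogenerates a revision from the imported salary, system and log models, and python migrate.py db upgrade applies it to the database configured in conf.setting.
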
from mesa import Model
from mesa.space import SingleGrid
from mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation
from pdpython_model.fixed_model.agents import PDAgent
from mesa.datacollection import DataCollector
class PDModel(Model):
schedule_types = {"Sequential": BaseScheduler,
"Random": RandomActivation,
"Simultaneous": SimultaneousActivation}
def __init__(self, height=8, width=8,
number_of_agents=2,
schedule_type="Simultaneous",
rounds=1,):
# Model Parameters
self.height = height
self.width = width
self.number_of_agents = number_of_agents
self.step_count = 0
self.schedule_type = schedule_type
self.payoffs = {("C", "C"): 3,
("C", "D"): 0,
("D", "C"): 5,
("D", "D"): 2}
# Model Functions
self.schedule = self.schedule_types[self.schedule_type](self)
self.grid = SingleGrid(self.height, self.width, torus=True)
# Find list of empty cells
self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]
self.agentIDs = list(range(1, (number_of_agents + 1)))
self.make_agents()
self.running = True
def make_agents(self):
for i in range(self.number_of_agents):
x, y = self.coordinates.pop(0)
# print("x, y:", x, y)
# x, y = self.grid.find_empty()
pdagent = PDAgent((x, y), self, True)
self.grid.place_agent(pdagent, (x, y))
self.schedule.add(pdagent)
def step(self):
self.schedule.step()
self.step_count += 1
def run_model(self, rounds=200):
for i in range(rounds):
self.step()
|
normal
|
{
"blob_id": "446c438b79f9957289fa85f21516c13d67e2cfaf",
"index": 3270,
"step-1": "<mask token>\n\n\nclass PDModel(Model):\n <mask token>\n <mask token>\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PDModel(Model):\n <mask token>\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-3": "<mask token>\n\n\nclass PDModel(Model):\n schedule_types = {'Sequential': BaseScheduler, 'Random':\n RandomActivation, 'Simultaneous': SimultaneousActivation}\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-4": "from mesa import Model\nfrom mesa.space import SingleGrid\nfrom mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation\nfrom pdpython_model.fixed_model.agents import PDAgent\nfrom mesa.datacollection import DataCollector\n\n\nclass PDModel(Model):\n schedule_types = {'Sequential': BaseScheduler, 'Random':\n RandomActivation, 'Simultaneous': SimultaneousActivation}\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-5": "from mesa import Model\nfrom mesa.space import SingleGrid\nfrom mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation\nfrom pdpython_model.fixed_model.agents import PDAgent\n\nfrom mesa.datacollection import DataCollector\n\n\nclass PDModel(Model):\n\n schedule_types = {\"Sequential\": BaseScheduler,\n \"Random\": RandomActivation,\n \"Simultaneous\": SimultaneousActivation}\n\n def __init__(self, height=8, width=8,\n number_of_agents=2,\n schedule_type=\"Simultaneous\",\n rounds=1,):\n\n\n # Model Parameters\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {(\"C\", \"C\"): 3,\n (\"C\", \"D\"): 0,\n (\"D\", \"C\"): 5,\n (\"D\", \"D\"): 2}\n\n\n # Model Functions\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n\n # Find list of empty cells\n self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]\n\n self.agentIDs = list(range(1, (number_of_agents + 1)))\n\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n # print(\"x, y:\", x, y)\n # x, y = self.grid.find_empty()\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
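
A minimal driver for the PDModel above, assuming the pdpython_model package its imports come from is installed; ten rounds is an arbitrary choice for illustration.

model = PDModel(number_of_agents=2, schedule_type="Simultaneous")
model.run_model(rounds=10)         # steps the scheduler ten times
print(model.step_count)            # -> 10
print(len(model.schedule.agents))  # -> 2 PDAgent instances placed on the grid
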
# Generated by Django 2.1.3 on 2019-01-06 06:53
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Session",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("token", models.CharField(editable=False, max_length=64, unique=True)),
("description", models.CharField(blank=True, max_length=512)),
("created_at", models.DateTimeField(auto_now_add=True)),
(
"last_seen_at",
models.DateTimeField(blank=True, editable=False, null=True),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
|
normal
|
{
"blob_id": "a91d42764fa14111afca4551edd6c889903ed9bd",
"index": 8056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "import django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Session', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('token', models.CharField(editable=\n False, max_length=64, unique=True)), ('description', models.\n CharField(blank=True, max_length=512)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('last_seen_at', models.\n DateTimeField(blank=True, editable=False, null=True)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 2.1.3 on 2019-01-06 06:53\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Session\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"token\", models.CharField(editable=False, max_length=64, unique=True)),\n (\"description\", models.CharField(blank=True, max_length=512)),\n (\"created_at\", models.DateTimeField(auto_now_add=True)),\n (\n \"last_seen_at\",\n models.DateTimeField(blank=True, editable=False, null=True),\n ),\n (\n \"user\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
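
Once this file sits in an app's migrations package, running python manage.py migrate creates the Session table with its unique token, description, timestamps, and the foreign key to the project's configured user model.
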
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import load_model
from utils import resize_to_fit, clear_chunks, stack_windows
from imutils import paths
import numpy as np
import imutils
import cv2 as cv2
import pickle
from tqdm import tqdm
c1_correct = 0
c2_correct = 0
c3_correct = 0
c4_correct = 0
c5_correct = 0
total_correct = 0
incorrectly_segmented = 0
correct_guesses_dict = {}
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
CAPTCHA_IMAGE_FOLDER = "test captchas"
# Load up the model labels (so we can translate model predictions to actual letters)
with open(MODEL_LABELS_FILENAME, "rb") as f:
lb = pickle.load(f)
# Load the trained neural network
model = load_model(MODEL_FILENAME)
for root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):
for name in tqdm(files, desc='Solving captchas'):
        kernel = np.ones((5, 5), np.uint8)  # 5x5 structuring element for the erosion step below
#load image
image = cv2.imread(os.path.join(root, name))
image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)
#add padding
image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT, None, 255)
#blur
k = np.ones((5,5),np.float32)/25
image = cv2.filter2D(image,-1,k)
# threshhold image
ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
# clear white dots
clear_chunks(image,0,50)
# erosion
image = cv2.erode(image, kernel, iterations=1)
# get contours
contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
#segment letters
letter_image_regions = [] #(x, y, w ,h)
contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
contours = contours[:5]
for contour in contours:
if cv2.contourArea(contour) < 60:
continue
(x, y, w, h) = cv2.boundingRect(contour)
if w / h > 1.5:
half_width = int(w / 2)
letter_image_regions.append((x, y, half_width, h))
letter_image_regions.append((x + half_width, y, half_width, h))
else:
letter_image_regions.append((x, y, w, h))
        if len(letter_image_regions) != 5:
            # could not isolate exactly five letters, so skip this captcha
            print(f"Found {len(letter_image_regions)} letter regions instead of 5, skipping this captcha")
            incorrectly_segmented += 1
            continue
letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
chars = []
i=0
for (x,y,w,h) in letter_image_regions:
letter = image[y-2:y+h+2, x-2:x+w+2]
chars.append(letter)
i+=1
predictions = []
for letter in chars:
# Re-size the letter image to 20x20 pixels to match training data
letter = resize_to_fit(letter, 20, 20)
# Turn the single image into a 4d list of images to make Keras happy
letter = np.expand_dims(letter, axis=2)
letter = np.expand_dims(letter, axis=0)
# Ask the neural network to make a prediction
prediction = model.predict(letter)
# Convert the one-hot-encoded prediction back to a normal letter
letter_text = lb.inverse_transform(prediction)[0]
predictions.append(letter_text)
gc1, gc2, gc3, gc4, gc5 = predictions
c1, c2, c3, c4, c5, e1, e2, e3, e4 = name
correct_guesses = 0
if c1 == gc1:
c1_correct += 1
correct_guesses += 1
if c2 == gc2:
c2_correct += 1
correct_guesses += 1
if c3 == gc3:
c3_correct += 1
correct_guesses += 1
if c4 == gc4:
c4_correct += 1
correct_guesses += 1
if c5 == gc5:
c5_correct += 1
correct_guesses += 1
if ''.join(predictions) == ''.join([c1,c2,c3,c4,c5]):
total_correct += 1
n = correct_guesses_dict.get(correct_guesses, 0) + 1
correct_guesses_dict[correct_guesses] = n
print(f"Prediction for {name}: {''.join(predictions)}")
print(f"correct c1: {c1_correct}")
print(f"correct c2: {c2_correct}")
print(f"correct c3: {c3_correct}")
print(f"correct c4: {c4_correct}")
print(f"correct c5: {c5_correct}")
print(f"correct total: {total_correct}")
print(f"correctly segmented: {10000 - incorrectly_segmented}")
print(correct_guesses_dict)
|
normal
|
{
"blob_id": "c2ddf31bce4a5f3ae2b0d5455bbc9942f92bff40",
"index": 275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\n<mask token>\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n<mask token>\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\ntotal_correct = 0\nincorrectly_segmented = 0\ncorrect_guesses_dict = {}\nMODEL_FILENAME = 'captcha_model.hdf5'\nMODEL_LABELS_FILENAME = 'model_labels.dat'\nCAPTCHA_IMAGE_FOLDER = 'test captchas'\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\nmodel = load_model(MODEL_FILENAME)\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-4": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom keras.models import load_model\nfrom utils import resize_to_fit, clear_chunks, stack_windows\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2 as cv2\nimport pickle\nfrom tqdm import tqdm\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\ntotal_correct = 0\nincorrectly_segmented = 0\ncorrect_guesses_dict = {}\nMODEL_FILENAME = 'captcha_model.hdf5'\nMODEL_LABELS_FILENAME = 'model_labels.dat'\nCAPTCHA_IMAGE_FOLDER = 'test captchas'\nwith open(MODEL_LABELS_FILENAME, 'rb') as f:\n lb = pickle.load(f)\nmodel = load_model(MODEL_FILENAME)\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n kernel = 5, 5\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT,\n None, 255)\n k = np.ones((5, 5), np.float32) / 25\n image = cv2.filter2D(image, -1, k)\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n clear_chunks(image, 0, 50)\n image = cv2.erode(image, kernel, iterations=1)\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n letter_image_regions = []\n contours = sorted(contours, key=lambda x: cv2.contourArea(x),\n reverse=True)\n contours = contours[:5]\n for contour in contours:\n if cv2.contourArea(contour) < 60:\n continue\n x, y, w, h = cv2.boundingRect(contour)\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(\n f'Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect'\n )\n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n chars = []\n i = 0\n for x, y, w, h in letter_image_regions:\n letter = image[y - 2:y + h + 2, x - 2:x + w + 2]\n chars.append(letter)\n i += 1\n predictions = []\n for letter in chars:\n letter = resize_to_fit(letter, 20, 20)\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n prediction = model.predict(letter)\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name\n correct_guesses = 0\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n if ''.join(predictions) == ''.join([c1, c2, c3, c4, c5]):\n total_correct += 1\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n correct_guesses_dict[correct_guesses] = n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\nprint(f'correct c1: {c1_correct}')\nprint(f'correct c2: {c2_correct}')\nprint(f'correct c3: {c3_correct}')\nprint(f'correct c4: {c4_correct}')\nprint(f'correct c5: {c5_correct}')\nprint(f'correct total: {total_correct}')\nprint(f'correctly segmented: {10000 - incorrectly_segmented}')\nprint(correct_guesses_dict)\n",
"step-5": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nfrom keras.models import load_model\nfrom utils import resize_to_fit, clear_chunks, stack_windows\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2 as cv2\nimport pickle\nfrom tqdm import tqdm\n\nc1_correct = 0\nc2_correct = 0\nc3_correct = 0\nc4_correct = 0\nc5_correct = 0\n\ntotal_correct = 0\nincorrectly_segmented = 0\n\ncorrect_guesses_dict = {}\n\nMODEL_FILENAME = \"captcha_model.hdf5\"\nMODEL_LABELS_FILENAME = \"model_labels.dat\"\nCAPTCHA_IMAGE_FOLDER = \"test captchas\"\n\n\n# Load up the model labels (so we can translate model predictions to actual letters)\nwith open(MODEL_LABELS_FILENAME, \"rb\") as f:\n lb = pickle.load(f)\n\n# Load the trained neural network\nmodel = load_model(MODEL_FILENAME)\n\n\nfor root, dirs, files in os.walk(CAPTCHA_IMAGE_FOLDER):\n for name in tqdm(files, desc='Solving captchas'):\n \n kernel = (5,5)\n\n #load image\n image = cv2.imread(os.path.join(root, name))\n image = cv2.cvtColor(image, cv2.COLOR_RGBA2GRAY)\n \n #add padding\n image = cv2.copyMakeBorder(image, 8, 8, 8, 8, cv2.BORDER_CONSTANT, None, 255)\n\n #blur\n k = np.ones((5,5),np.float32)/25\n image = cv2.filter2D(image,-1,k)\n\n # threshhold image\n ret, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)\n\n # clear white dots\n clear_chunks(image,0,50)\n\n # erosion\n image = cv2.erode(image, kernel, iterations=1)\n\n # get contours\n contours = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n\n #segment letters\n letter_image_regions = [] #(x, y, w ,h)\n \n \n contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)\n contours = contours[:5]\n \n for contour in contours:\n \n if cv2.contourArea(contour) < 60:\n continue\n\n \n (x, y, w, h) = cv2.boundingRect(contour)\n\n if w / h > 1.5:\n half_width = int(w / 2)\n letter_image_regions.append((x, y, half_width, h))\n letter_image_regions.append((x + half_width, y, half_width, h))\n else:\n letter_image_regions.append((x, y, w, h))\n\n if len(letter_image_regions) != 5:\n incorrectly_segmented += 1\n continue\n print(f\"Found {len(letter_image_regions)} letter regions instead of 5 , the guess will likely be incorrect\")\n \n \n letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])\n\n chars = []\n i=0\n for (x,y,w,h) in letter_image_regions:\n letter = image[y-2:y+h+2, x-2:x+w+2]\n chars.append(letter)\n i+=1\n\n predictions = []\n\n for letter in chars:\n # Re-size the letter image to 20x20 pixels to match training data\n letter = resize_to_fit(letter, 20, 20)\n\n # Turn the single image into a 4d list of images to make Keras happy\n letter = np.expand_dims(letter, axis=2)\n letter = np.expand_dims(letter, axis=0)\n\n # Ask the neural network to make a prediction\n prediction = model.predict(letter)\n\n # Convert the one-hot-encoded prediction back to a normal letter\n letter_text = lb.inverse_transform(prediction)[0]\n predictions.append(letter_text)\n\n gc1, gc2, gc3, gc4, gc5 = predictions\n c1, c2, c3, c4, c5, e1, e2, e3, e4 = name \n\n correct_guesses = 0\n\n if c1 == gc1:\n c1_correct += 1\n correct_guesses += 1\n if c2 == gc2:\n c2_correct += 1\n correct_guesses += 1\n if c3 == gc3:\n c3_correct += 1\n correct_guesses += 1\n if c4 == gc4:\n c4_correct += 1\n correct_guesses += 1\n if c5 == gc5:\n c5_correct += 1\n correct_guesses += 1\n\n if ''.join(predictions) == ''.join([c1,c2,c3,c4,c5]):\n total_correct += 1\n\n n = correct_guesses_dict.get(correct_guesses, 0) + 1\n 
correct_guesses_dict[correct_guesses] = n\n\n print(f\"Prediction for {name}: {''.join(predictions)}\")\n \nprint(f\"correct c1: {c1_correct}\")\nprint(f\"correct c2: {c2_correct}\")\nprint(f\"correct c3: {c3_correct}\")\nprint(f\"correct c4: {c4_correct}\")\nprint(f\"correct c5: {c5_correct}\")\n\nprint(f\"correct total: {total_correct}\")\n\nprint(f\"correctly segmented: {10000 - incorrectly_segmented}\")\n\nprint(correct_guesses_dict)\n \n \n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
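
The solver above imports resize_to_fit from a local utils module that is not shown in this record. The helper below is an assumed reconstruction only (the real one may differ): it scales a letter crop along its longer side and pads it out to the 20x20 window the network expects.

import cv2
import imutils

def resize_to_fit_sketch(image, width, height):
    # scale the longer side first so the crop fits inside width x height
    h, w = image.shape[:2]
    if w > h:
        image = imutils.resize(image, width=width)
    else:
        image = imutils.resize(image, height=height)
    # pad the shorter side with black pixels, then force the exact target size
    pad_w = (width - image.shape[1]) // 2
    pad_h = (height - image.shape[0]) // 2
    image = cv2.copyMakeBorder(image, pad_h, pad_h, pad_w, pad_w,
                               cv2.BORDER_CONSTANT, value=0)
    return cv2.resize(image, (width, height))
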
import aiohttp
import asyncio
import base64
import discord
import json
from discord.ext import commands
class BasicMC(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name="stealskin", aliases=["skinsteal", "skin"])
@commands.cooldown(1, 4, commands.BucketType.user)
async def skinner(self, ctx, gamertag: str):
response = await self.session.get(f"https://api.mojang.com/users/profiles/minecraft/{gamertag}")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
uuid = json.loads(await response.text()).get("id")
if uuid is None:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
response = await self.session.get(
f"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false")
content = json.loads(await response.text())
if "error" in content:
if content["error"] == "TooManyRequestsException":
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Oops, we're being ratelimited by the Mojang API, try again later!"))
return
if len(content["properties"]) == 0:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="We can't get this person's skin for some reason..."))
return
undec = base64.b64decode(content["properties"][0]["value"])
try:
url = json.loads(undec)["textures"]["SKIN"]["url"]
except Exception:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="An error occurred while fetching that skin!"))
return
skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description=f"{gamertag}'s skin\n[**[Download]**]({url})")
skin_embed.set_thumbnail(url=url)
skin_embed.set_image(url=f"https://mc-heads.net/body/{gamertag}")
await ctx.send(embed=skin_embed)
@commands.command(name="nametouuid", aliases=["uuid", "getuuid"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_uuid(self, ctx, gamertag: str):
r = await self.session.post("https://api.mojang.com/profiles/minecraft", json=[gamertag])
j = json.loads(await r.text()) # [0]['id']
if not j:
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),
description="That user could not be found."))
return
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{gamertag}: ``{j[0]['id']}``"))
@commands.command(name="uuidtoname", aliases=["getgamertag"])
@commands.cooldown(1, 2, commands.BucketType.user)
async def get_gamertag(self, ctx, uuid: str):
response = await self.session.get(f"https://api.mojang.com/user/profiles/{uuid}/names")
if response.status == 204:
await ctx.send(
embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description="That player doesn't exist!"))
return
j = json.loads(await response.text())
name = j[len(j) - 1]["name"]
await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f"{uuid}: ``{name}``"))
@commands.command(name="colorcodes", aliases=["mccolorcodes", "colors", "cc"])
async def mc_color_codes(self, ctx):
embed = discord.Embed(color=await self.bot.cc(ctx.author.id),
description="Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.")
embed.set_author(name="Minecraft Formatting Codes")
embed.add_field(name="Color Codes", value="<:red:697541699706028083> **Red** ``§c``\n"
"<:yellow:697541699743776808> **Yellow** ``§e``\n"
"<:green:697541699316219967> **Green** ``§a``\n"
"<:aqua:697541699173613750> **Aqua** ``§b``\n"
"<:blue:697541699655696787> **Blue** ``§9``\n"
"<:light_purple:697541699546775612> **Light Purple** ``§d``\n"
"<:white:697541699785719838> **White** ``§f``\n"
"<:gray:697541699534061630> **Gray** ``§7``\n")
embed.add_field(name="Color Codes", value="<:dark_red:697541699488055426> **Dark Red** ``§4``\n"
"<:gold:697541699639050382> **Gold** ``§6``\n"
"<:dark_green:697541699500769420> **Dark Green** ``§2``\n"
"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n"
"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n"
"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n"
"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n"
"<:black:697541699496444025> **Black** ``§0``\n")
embed.add_field(name="Formatting Codes", value="<:bold:697541699488186419> **Bold** ``§l``\n"
"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n"
"<:underline:697541699806953583> __Underline__ ``§n``\n"
"<:italic:697541699152379995> *Italic* ``§o``\n"
"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n"
"<:reset:697541699697639446> Reset ``§r``\n")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(BasicMC(bot))
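
A hedged note on wiring the cog above into a bot: with discord.py 1.x the file is loaded as an extension, which calls the setup function at the bottom. The module path cogs.basic_mc and the token are placeholders for illustration.

from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.basic_mc")  # discord.py imports the module and calls setup(bot)
bot.run("YOUR_BOT_TOKEN")            # placeholder token
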
|
normal
|
{
"blob_id": "a6f242a0443ffbad835f86098b70ede41c03515b",
"index": 7652,
"step-1": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n <mask token>\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** 
``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section (``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n 
embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-4": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.session = aiohttp.ClientSession()\n\n @commands.command(name='stealskin', aliases=['skinsteal', 'skin'])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(\n f'https://api.mojang.com/users/profiles/minecraft/{gamertag}')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get('id')\n if uuid is None:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f'https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false'\n )\n content = json.loads(await response.text())\n if 'error' in content:\n if content['error'] == 'TooManyRequestsException':\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(\n ctx.author.id), description=\n \"Oops, we're being ratelimited by the Mojang API, try again later!\"\n ))\n return\n if len(content['properties']) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n \"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content['properties'][0]['value'])\n try:\n url = json.loads(undec)['textures']['SKIN']['url']\n except Exception:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\n 'An error occurred while fetching that skin!'))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"\"\"{gamertag}'s skin\n[**[Download]**]({url})\"\"\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f'https://mc-heads.net/body/{gamertag}')\n await ctx.send(embed=skin_embed)\n\n @commands.command(name='nametouuid', aliases=['uuid', 'getuuid'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post('https://api.mojang.com/profiles/minecraft'\n , json=[gamertag])\n j = json.loads(await r.text())\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description='That user could not be found.'))\n return\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name='uuidtoname', aliases=['getgamertag'])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(\n f'https://api.mojang.com/user/profiles/{uuid}/names')\n if response.status == 204:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1]['name']\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.\n author.id), description=f'{uuid}: ``{name}``'))\n\n @commands.command(name='colorcodes', aliases=['mccolorcodes', 'colors',\n 'cc'])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\n \"\"\"Text in Minecraft can be formatted using different codes and\nthe section 
(``§``) sign.\"\"\"\n )\n embed.set_author(name='Minecraft Formatting Codes')\n embed.add_field(name='Color Codes', value=\n \"\"\"<:red:697541699706028083> **Red** ``§c``\n<:yellow:697541699743776808> **Yellow** ``§e``\n<:green:697541699316219967> **Green** ``§a``\n<:aqua:697541699173613750> **Aqua** ``§b``\n<:blue:697541699655696787> **Blue** ``§9``\n<:light_purple:697541699546775612> **Light Purple** ``§d``\n<:white:697541699785719838> **White** ``§f``\n<:gray:697541699534061630> **Gray** ``§7``\n\"\"\"\n )\n embed.add_field(name='Color Codes', value=\n \"\"\"<:dark_red:697541699488055426> **Dark Red** ``§4``\n<:gold:697541699639050382> **Gold** ``§6``\n<:dark_green:697541699500769420> **Dark Green** ``§2``\n<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\n<:dark_blue:697541699488055437> **Dark Blue** ``§1``\n<:dark_purple:697541699437592666> **Dark Purple** ``§5``\n<:dark_gray:697541699471278120> **Dark Gray** ``§8``\n<:black:697541699496444025> **Black** ``§0``\n\"\"\"\n )\n embed.add_field(name='Formatting Codes', value=\n \"\"\"<:bold:697541699488186419> **Bold** ``§l``\n<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\n<:underline:697541699806953583> __Underline__ ``§n``\n<:italic:697541699152379995> *Italic* ``§o``\n<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\n<:reset:697541699697639446> Reset ``§r``\n\"\"\"\n )\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-5": "import aiohttp\nimport asyncio\nimport base64\nimport discord\nimport json\nfrom discord.ext import commands\n\n\nclass BasicMC(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n self.session = aiohttp.ClientSession()\n\n @commands.command(name=\"stealskin\", aliases=[\"skinsteal\", \"skin\"])\n @commands.cooldown(1, 4, commands.BucketType.user)\n async def skinner(self, ctx, gamertag: str):\n response = await self.session.get(f\"https://api.mojang.com/users/profiles/minecraft/{gamertag}\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n uuid = json.loads(await response.text()).get(\"id\")\n if uuid is None:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n response = await self.session.get(\n f\"https://sessionserver.mojang.com/session/minecraft/profile/{uuid}?unsigned=false\")\n content = json.loads(await response.text())\n if \"error\" in content:\n if content[\"error\"] == \"TooManyRequestsException\":\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Oops, we're being ratelimited by the Mojang API, try again later!\"))\n return\n if len(content[\"properties\"]) == 0:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"We can't get this person's skin for some reason...\"))\n return\n undec = base64.b64decode(content[\"properties\"][0][\"value\"])\n try:\n url = json.loads(undec)[\"textures\"][\"SKIN\"][\"url\"]\n except Exception:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"An error occurred while fetching that skin!\"))\n return\n skin_embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=f\"{gamertag}'s skin\\n[**[Download]**]({url})\")\n skin_embed.set_thumbnail(url=url)\n skin_embed.set_image(url=f\"https://mc-heads.net/body/{gamertag}\")\n await ctx.send(embed=skin_embed)\n\n @commands.command(name=\"nametouuid\", aliases=[\"uuid\", \"getuuid\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_uuid(self, ctx, gamertag: str):\n r = await self.session.post(\"https://api.mojang.com/profiles/minecraft\", json=[gamertag])\n j = json.loads(await r.text()) # [0]['id']\n if not j:\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"That user could not be found.\"))\n return\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{gamertag}: ``{j[0]['id']}``\"))\n\n @commands.command(name=\"uuidtoname\", aliases=[\"getgamertag\"])\n @commands.cooldown(1, 2, commands.BucketType.user)\n async def get_gamertag(self, ctx, uuid: str):\n response = await self.session.get(f\"https://api.mojang.com/user/profiles/{uuid}/names\")\n if response.status == 204:\n await ctx.send(\n embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=\"That player doesn't exist!\"))\n return\n j = json.loads(await response.text())\n name = j[len(j) - 1][\"name\"]\n await ctx.send(embed=discord.Embed(color=await self.bot.cc(ctx.author.id), description=f\"{uuid}: ``{name}``\"))\n\n @commands.command(name=\"colorcodes\", aliases=[\"mccolorcodes\", \"colors\", \"cc\"])\n async def mc_color_codes(self, ctx):\n embed = discord.Embed(color=await self.bot.cc(ctx.author.id),\n description=\"Text in Minecraft can be formatted using 
different codes and\\nthe section (``§``) sign.\")\n embed.set_author(name=\"Minecraft Formatting Codes\")\n embed.add_field(name=\"Color Codes\", value=\"<:red:697541699706028083> **Red** ``§c``\\n\"\n \"<:yellow:697541699743776808> **Yellow** ``§e``\\n\"\n \"<:green:697541699316219967> **Green** ``§a``\\n\"\n \"<:aqua:697541699173613750> **Aqua** ``§b``\\n\"\n \"<:blue:697541699655696787> **Blue** ``§9``\\n\"\n \"<:light_purple:697541699546775612> **Light Purple** ``§d``\\n\"\n \"<:white:697541699785719838> **White** ``§f``\\n\"\n \"<:gray:697541699534061630> **Gray** ``§7``\\n\")\n embed.add_field(name=\"Color Codes\", value=\"<:dark_red:697541699488055426> **Dark Red** ``§4``\\n\"\n \"<:gold:697541699639050382> **Gold** ``§6``\\n\"\n \"<:dark_green:697541699500769420> **Dark Green** ``§2``\\n\"\n \"<:dark_aqua:697541699475472436> **Dark Aqua** ``§3``\\n\"\n \"<:dark_blue:697541699488055437> **Dark Blue** ``§1``\\n\"\n \"<:dark_purple:697541699437592666> **Dark Purple** ``§5``\\n\"\n \"<:dark_gray:697541699471278120> **Dark Gray** ``§8``\\n\"\n \"<:black:697541699496444025> **Black** ``§0``\\n\")\n embed.add_field(name=\"Formatting Codes\", value=\"<:bold:697541699488186419> **Bold** ``§l``\\n\"\n \"<:strikethrough:697541699768942711> ~~Strikethrough~~ ``§m``\\n\"\n \"<:underline:697541699806953583> __Underline__ ``§n``\\n\"\n \"<:italic:697541699152379995> *Italic* ``§o``\\n\"\n \"<:obfuscated:697541699769204736> ||Obfuscated|| ``§k``\\n\"\n \"<:reset:697541699697639446> Reset ``§r``\\n\")\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(BasicMC(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
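A hedged side note on the BasicMC cog in the record above: it opens an aiohttp.ClientSession in __init__ and never closes it. A common discord.py (1.x-style) pattern is to close the session when the cog is unloaded; the sketch below is illustrative only and not part of the dataset row.

import aiohttp
from discord.ext import commands


class BasicMCWithCleanup(commands.Cog):

    def __init__(self, bot):
        self.bot = bot
        self.session = aiohttp.ClientSession()

    def cog_unload(self):
        # cog_unload is synchronous in discord.py 1.x, so schedule the close on the bot loop
        self.bot.loop.create_task(self.session.close())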
"""
Find two distinct numbers in values whose sum is equal to 100.
Assign one of them to value1 and the other one to value2.
If there are several solutions, any one will be marked as correct.
Optional step to check your answer:
Print the value of value1 and value2.
"""
values = [72, 50, 48, 50, 7, 66, 62, 32, 33, 75, 30, 85, 6, 85, 82, 88, 30, 32, 78, 39, 57, 96, 45, 57, 61, 10, 62, 48, 32, 96, 75, 15, 50, 50]
value1 = None
value2 = None
for x in values:
for y in values:
if x + y == 100 and x != y:
value1 = x
value2 = y
print(value1)
print(value2)
|
normal
|
{
"blob_id": "c0ebf10b8c0cb4af11608cafcdb85dbff4abdf90",
"index": 4755,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in values:\n for y in values:\n if x + y == 100 and x != y:\n value1 = x\n value2 = y\nprint(value1)\nprint(value2)\n",
"step-3": "<mask token>\nvalues = [72, 50, 48, 50, 7, 66, 62, 32, 33, 75, 30, 85, 6, 85, 82, 88, 30,\n 32, 78, 39, 57, 96, 45, 57, 61, 10, 62, 48, 32, 96, 75, 15, 50, 50]\nvalue1 = None\nvalue2 = None\nfor x in values:\n for y in values:\n if x + y == 100 and x != y:\n value1 = x\n value2 = y\nprint(value1)\nprint(value2)\n",
"step-4": "\"\"\"\r\nFind two distinct numbers in values whose sum is equal to 100.\r\nAssign one of them to value1 and the other one to value2.\r\nIf there are several solutions, any one will be marked as correct.\r\n\r\nOptional step to check your answer:\r\n\r\nPrint the value of value1 and value2.\r\n\"\"\"\r\n\r\n\r\nvalues = [72, 50, 48, 50, 7, 66, 62, 32, 33, 75, 30, 85, 6, 85, 82, 88, 30, 32, 78, 39, 57, 96, 45, 57, 61, 10, 62, 48, 32, 96, 75, 15, 50, 50]\r\n\r\nvalue1 = None\r\nvalue2 = None\r\nfor x in values:\r\n for y in values:\r\n if x + y == 100 and x != y:\r\n value1 = x\r\n value2 = y\r\n \r\nprint(value1)\r\nprint(value2)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
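A hedged alternative to the nested-loop search in the record above: the same pair can be found in a single pass using a set of previously seen values. This sketch is illustrative and not part of the dataset row.

values = [72, 50, 48, 50, 7, 66, 62, 32, 33, 75, 30, 85, 6, 85, 82, 88, 30,
          32, 78, 39, 57, 96, 45, 57, 61, 10, 62, 48, 32, 96, 75, 15, 50, 50]
value1 = None
value2 = None
seen = set()
for v in values:
    # look for a previously seen value that completes the sum, keeping the x != y rule
    if (100 - v) in seen and (100 - v) != v:
        value1, value2 = 100 - v, v
        break
    seen.add(v)
print(value1)
print(value2)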
import sys
import json
import eventlet
import datetime
import flask
from flask import Flask
from flask import render_template
__version__ = 0.1
PORT = 8000
HOST = '0.0.0.0'
DEBUG = False
RELDR = False
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretkey'
@app.route('/login/')
def login():
return render_template('login.html', name=None)
@app.route('/chat/')
def chat():
return render_template('chat.html', name=None)
@app.route('/messages/')
def msg_search():
return render_template('search.html', name=None)
from .event_handlers import *
socketio.run(app, host=HOST, port=PORT, use_reloader=RELDR, debug=DEBUG,
log_output=LOG)
|
normal
|
{
"blob_id": "a945d7f673d009a59e597cd3c99a886094ea9e57",
"index": 2639,
"step-1": "<mask token>\n\n\[email protected]('/login/')\ndef login():\n return render_template('login.html', name=None)\n\n\[email protected]('/chat/')\ndef chat():\n return render_template('chat.html', name=None)\n\n\[email protected]('/messages/')\ndef msg_search():\n return render_template('search.html', name=None)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/login/')\ndef login():\n return render_template('login.html', name=None)\n\n\[email protected]('/chat/')\ndef chat():\n return render_template('chat.html', name=None)\n\n\[email protected]('/messages/')\ndef msg_search():\n return render_template('search.html', name=None)\n\n\n<mask token>\nsocketio.run(app, host=HOST, port=PORT, use_reloader=RELDR, debug=DEBUG,\n log_output=LOG)\n",
"step-3": "<mask token>\n__version__ = 0.1\nPORT = 8000\nHOST = '0.0.0.0'\nDEBUG = False\nRELDR = False\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secretkey'\n\n\[email protected]('/login/')\ndef login():\n return render_template('login.html', name=None)\n\n\[email protected]('/chat/')\ndef chat():\n return render_template('chat.html', name=None)\n\n\[email protected]('/messages/')\ndef msg_search():\n return render_template('search.html', name=None)\n\n\n<mask token>\nsocketio.run(app, host=HOST, port=PORT, use_reloader=RELDR, debug=DEBUG,\n log_output=LOG)\n",
"step-4": "import sys\nimport json\nimport eventlet\nimport datetime\nimport flask\nfrom flask import Flask\nfrom flask import render_template\n__version__ = 0.1\nPORT = 8000\nHOST = '0.0.0.0'\nDEBUG = False\nRELDR = False\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secretkey'\n\n\[email protected]('/login/')\ndef login():\n return render_template('login.html', name=None)\n\n\[email protected]('/chat/')\ndef chat():\n return render_template('chat.html', name=None)\n\n\[email protected]('/messages/')\ndef msg_search():\n return render_template('search.html', name=None)\n\n\nfrom .event_handlers import *\nsocketio.run(app, host=HOST, port=PORT, use_reloader=RELDR, debug=DEBUG,\n log_output=LOG)\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
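A hedged note on the Flask record above: socketio and LOG are not defined in the snippet itself (socketio presumably arrives via the star import from .event_handlers). With Flask-SocketIO the setup usually looks like the sketch below; names such as LOG are assumptions, not part of the original module.

from flask import Flask
from flask_socketio import SocketIO

app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretkey'
socketio = SocketIO(app)
LOG = True  # hypothetical flag matching the log_output argument used above

if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=8000, use_reloader=False,
                 debug=False, log_output=LOG)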
'''
Created on Dec 23, 2011
@author: boatkrap
'''
import kombu
from kombu.common import maybe_declare
from . import queues
import logging
logger = logging.getLogger(__name__)
import threading
cc = threading.Condition()
class Publisher:
def __init__(self, exchange_name, channel, routing_key=None):
self.exchange_name = exchange_name
self._producer = None
self.exchange = None
self.channel = channel
self.routing_key_list = []
self.routing_key = routing_key
self.reconnect(channel)
def reconnect(self, channel):
cc.acquire()
self.exchange = kombu.Exchange(
self.exchange_name, type="direct", durable=True)
self.channel = channel
try:
self._producer = kombu.Producer(exchange=self.exchange,
channel=channel, serializer="json",
routing_key=self.routing_key)
if self.routing_key:
self.queue_declare(self.routing_key)
except Exception as e:
logger.exception(e)
cc.release()
def queue_declare(self, routing_key):
if routing_key is None:
return
if routing_key in self.routing_key_list:
return
self.routing_key_list.append(routing_key)
queue = queues.QueueFactory().get_queue(self.exchange, routing_key)
if queue:
queue(self.channel).declare()
def send(self, message, routing_key=None):
result = False
cc.acquire()
try:
self._producer.publish(message, routing_key=routing_key)
result = True
except Exception as e:
logger.exception(e)
logger.debug("wait for connection")
cc.release()
return result
def drop_routing_key(self, routing_key):
logger.debug("drop_routing_key: %s" % routing_key)
if routing_key in self.routing_key_list:
self.routing_key_list.remove(routing_key)
class TopicPublisher(Publisher):
def __init__(self, exchange_name, channel, routing_key=None):
super().__init__(exchange_name, channel, routing_key)
def reconnect(self, channel):
self.exchange = kombu.Exchange(
self.exchange_name, type="topic", durable=True)
self.channel = channel
self._producer = kombu.Producer(exchange=self.exchange,
channel=channel, serializer="json",
routing_key=self.routing_key)
class PublisherFactory:
def __init__(self, channel):
self.channel = channel
def get_publisher(self, key):
publisher = None
logger.debug("routing_key: %s" % key)
if key == "nokkhum_compute.update_status":
routing_key = "nokkhum_compute.update_status"
publisher = Publisher(
"nokkunm_compute.update_status", self.channel, routing_key)
return publisher
else:
import fnmatch
import re
regex = fnmatch.translate('nokkhum_compute.*.rpc_*')
reobj = re.compile(regex)
if reobj.match(key):
routing_key = key
if "nokkhum_compute.*.rpc_response" in routing_key:
publisher = TopicPublisher(
"nokkunm_compute.compute_rpc", self.channel, routing_key)
elif "nokkhum_compute.*.rpc_request":
publisher = TopicPublisher(
"nokkunm_compute.rpc", self.channel, routing_key)
# logger.debug("get pub: %s"%publisher)
return publisher
return publisher
|
normal
|
{
"blob_id": "8205541dcdd4627a535b14c6775f04b80e7c0d15",
"index": 3354,
"step-1": "<mask token>\n\n\nclass Publisher:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TopicPublisher(Publisher):\n\n def __init__(self, exchange_name, channel, routing_key=None):\n super().__init__(exchange_name, channel, routing_key)\n\n def reconnect(self, channel):\n self.exchange = kombu.Exchange(self.exchange_name, type='topic',\n durable=True)\n self.channel = channel\n self._producer = kombu.Producer(exchange=self.exchange, channel=\n channel, serializer='json', routing_key=self.routing_key)\n\n\nclass PublisherFactory:\n\n def __init__(self, channel):\n self.channel = channel\n\n def get_publisher(self, key):\n publisher = None\n logger.debug('routing_key: %s' % key)\n if key == 'nokkhum_compute.update_status':\n routing_key = 'nokkhum_compute.update_status'\n publisher = Publisher('nokkunm_compute.update_status', self.\n channel, routing_key)\n return publisher\n else:\n import fnmatch\n import re\n regex = fnmatch.translate('nokkhum_compute.*.rpc_*')\n reobj = re.compile(regex)\n if reobj.match(key):\n routing_key = key\n if 'nokkhum_compute.*.rpc_response' in routing_key:\n publisher = TopicPublisher('nokkunm_compute.compute_rpc',\n self.channel, routing_key)\n elif 'nokkhum_compute.*.rpc_request':\n publisher = TopicPublisher('nokkunm_compute.rpc', self.\n channel, routing_key)\n return publisher\n return publisher\n",
"step-2": "<mask token>\n\n\nclass Publisher:\n <mask token>\n <mask token>\n\n def queue_declare(self, routing_key):\n if routing_key is None:\n return\n if routing_key in self.routing_key_list:\n return\n self.routing_key_list.append(routing_key)\n queue = queues.QueueFactory().get_queue(self.exchange, routing_key)\n if queue:\n queue(self.channel).declare()\n <mask token>\n\n def drop_routing_key(self, routing_key):\n logger.debug('drop_routing_key: %s' % routing_key)\n if routing_key in self.routing_key_list:\n self.routing_key_list.remove(routing_key)\n\n\nclass TopicPublisher(Publisher):\n\n def __init__(self, exchange_name, channel, routing_key=None):\n super().__init__(exchange_name, channel, routing_key)\n\n def reconnect(self, channel):\n self.exchange = kombu.Exchange(self.exchange_name, type='topic',\n durable=True)\n self.channel = channel\n self._producer = kombu.Producer(exchange=self.exchange, channel=\n channel, serializer='json', routing_key=self.routing_key)\n\n\nclass PublisherFactory:\n\n def __init__(self, channel):\n self.channel = channel\n\n def get_publisher(self, key):\n publisher = None\n logger.debug('routing_key: %s' % key)\n if key == 'nokkhum_compute.update_status':\n routing_key = 'nokkhum_compute.update_status'\n publisher = Publisher('nokkunm_compute.update_status', self.\n channel, routing_key)\n return publisher\n else:\n import fnmatch\n import re\n regex = fnmatch.translate('nokkhum_compute.*.rpc_*')\n reobj = re.compile(regex)\n if reobj.match(key):\n routing_key = key\n if 'nokkhum_compute.*.rpc_response' in routing_key:\n publisher = TopicPublisher('nokkunm_compute.compute_rpc',\n self.channel, routing_key)\n elif 'nokkhum_compute.*.rpc_request':\n publisher = TopicPublisher('nokkunm_compute.rpc', self.\n channel, routing_key)\n return publisher\n return publisher\n",
"step-3": "<mask token>\n\n\nclass Publisher:\n\n def __init__(self, exchange_name, channel, routing_key=None):\n self.exchange_name = exchange_name\n self._producer = None\n self.exchange = None\n self.channel = channel\n self.routing_key_list = []\n self.routing_key = routing_key\n self.reconnect(channel)\n\n def reconnect(self, channel):\n cc.acquire()\n self.exchange = kombu.Exchange(self.exchange_name, type='direct',\n durable=True)\n self.channel = channel\n try:\n self._producer = kombu.Producer(exchange=self.exchange, channel\n =channel, serializer='json', routing_key=self.routing_key)\n if self.routing_key:\n self.queue_declare(self.routing_key)\n except Exception as e:\n logger.exception(e)\n cc.release()\n\n def queue_declare(self, routing_key):\n if routing_key is None:\n return\n if routing_key in self.routing_key_list:\n return\n self.routing_key_list.append(routing_key)\n queue = queues.QueueFactory().get_queue(self.exchange, routing_key)\n if queue:\n queue(self.channel).declare()\n\n def send(self, message, routing_key=None):\n result = False\n cc.acquire()\n try:\n self._producer.publish(message, routing_key=routing_key)\n result = True\n except Exception as e:\n logger.exception(e)\n logger.debug('wait for connection')\n cc.release()\n return result\n\n def drop_routing_key(self, routing_key):\n logger.debug('drop_routing_key: %s' % routing_key)\n if routing_key in self.routing_key_list:\n self.routing_key_list.remove(routing_key)\n\n\nclass TopicPublisher(Publisher):\n\n def __init__(self, exchange_name, channel, routing_key=None):\n super().__init__(exchange_name, channel, routing_key)\n\n def reconnect(self, channel):\n self.exchange = kombu.Exchange(self.exchange_name, type='topic',\n durable=True)\n self.channel = channel\n self._producer = kombu.Producer(exchange=self.exchange, channel=\n channel, serializer='json', routing_key=self.routing_key)\n\n\nclass PublisherFactory:\n\n def __init__(self, channel):\n self.channel = channel\n\n def get_publisher(self, key):\n publisher = None\n logger.debug('routing_key: %s' % key)\n if key == 'nokkhum_compute.update_status':\n routing_key = 'nokkhum_compute.update_status'\n publisher = Publisher('nokkunm_compute.update_status', self.\n channel, routing_key)\n return publisher\n else:\n import fnmatch\n import re\n regex = fnmatch.translate('nokkhum_compute.*.rpc_*')\n reobj = re.compile(regex)\n if reobj.match(key):\n routing_key = key\n if 'nokkhum_compute.*.rpc_response' in routing_key:\n publisher = TopicPublisher('nokkunm_compute.compute_rpc',\n self.channel, routing_key)\n elif 'nokkhum_compute.*.rpc_request':\n publisher = TopicPublisher('nokkunm_compute.rpc', self.\n channel, routing_key)\n return publisher\n return publisher\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\n<mask token>\ncc = threading.Condition()\n\n\nclass Publisher:\n\n def __init__(self, exchange_name, channel, routing_key=None):\n self.exchange_name = exchange_name\n self._producer = None\n self.exchange = None\n self.channel = channel\n self.routing_key_list = []\n self.routing_key = routing_key\n self.reconnect(channel)\n\n def reconnect(self, channel):\n cc.acquire()\n self.exchange = kombu.Exchange(self.exchange_name, type='direct',\n durable=True)\n self.channel = channel\n try:\n self._producer = kombu.Producer(exchange=self.exchange, channel\n =channel, serializer='json', routing_key=self.routing_key)\n if self.routing_key:\n self.queue_declare(self.routing_key)\n except Exception as e:\n logger.exception(e)\n cc.release()\n\n def queue_declare(self, routing_key):\n if routing_key is None:\n return\n if routing_key in self.routing_key_list:\n return\n self.routing_key_list.append(routing_key)\n queue = queues.QueueFactory().get_queue(self.exchange, routing_key)\n if queue:\n queue(self.channel).declare()\n\n def send(self, message, routing_key=None):\n result = False\n cc.acquire()\n try:\n self._producer.publish(message, routing_key=routing_key)\n result = True\n except Exception as e:\n logger.exception(e)\n logger.debug('wait for connection')\n cc.release()\n return result\n\n def drop_routing_key(self, routing_key):\n logger.debug('drop_routing_key: %s' % routing_key)\n if routing_key in self.routing_key_list:\n self.routing_key_list.remove(routing_key)\n\n\nclass TopicPublisher(Publisher):\n\n def __init__(self, exchange_name, channel, routing_key=None):\n super().__init__(exchange_name, channel, routing_key)\n\n def reconnect(self, channel):\n self.exchange = kombu.Exchange(self.exchange_name, type='topic',\n durable=True)\n self.channel = channel\n self._producer = kombu.Producer(exchange=self.exchange, channel=\n channel, serializer='json', routing_key=self.routing_key)\n\n\nclass PublisherFactory:\n\n def __init__(self, channel):\n self.channel = channel\n\n def get_publisher(self, key):\n publisher = None\n logger.debug('routing_key: %s' % key)\n if key == 'nokkhum_compute.update_status':\n routing_key = 'nokkhum_compute.update_status'\n publisher = Publisher('nokkunm_compute.update_status', self.\n channel, routing_key)\n return publisher\n else:\n import fnmatch\n import re\n regex = fnmatch.translate('nokkhum_compute.*.rpc_*')\n reobj = re.compile(regex)\n if reobj.match(key):\n routing_key = key\n if 'nokkhum_compute.*.rpc_response' in routing_key:\n publisher = TopicPublisher('nokkunm_compute.compute_rpc',\n self.channel, routing_key)\n elif 'nokkhum_compute.*.rpc_request':\n publisher = TopicPublisher('nokkunm_compute.rpc', self.\n channel, routing_key)\n return publisher\n return publisher\n",
"step-5": "'''\nCreated on Dec 23, 2011\n\n@author: boatkrap\n'''\n\nimport kombu\nfrom kombu.common import maybe_declare\n\nfrom . import queues\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport threading\ncc = threading.Condition()\n\n\nclass Publisher:\n\n def __init__(self, exchange_name, channel, routing_key=None):\n\n self.exchange_name = exchange_name\n self._producer = None\n\n self.exchange = None\n self.channel = channel\n self.routing_key_list = []\n self.routing_key = routing_key\n self.reconnect(channel)\n\n def reconnect(self, channel):\n cc.acquire()\n self.exchange = kombu.Exchange(\n self.exchange_name, type=\"direct\", durable=True)\n self.channel = channel\n try:\n self._producer = kombu.Producer(exchange=self.exchange,\n channel=channel, serializer=\"json\",\n routing_key=self.routing_key)\n\n if self.routing_key:\n self.queue_declare(self.routing_key)\n except Exception as e:\n logger.exception(e)\n\n cc.release()\n\n def queue_declare(self, routing_key):\n if routing_key is None:\n return\n\n if routing_key in self.routing_key_list:\n return\n\n self.routing_key_list.append(routing_key)\n\n queue = queues.QueueFactory().get_queue(self.exchange, routing_key)\n if queue:\n\n queue(self.channel).declare()\n\n def send(self, message, routing_key=None):\n result = False\n cc.acquire()\n try:\n self._producer.publish(message, routing_key=routing_key)\n result = True\n except Exception as e:\n logger.exception(e)\n logger.debug(\"wait for connection\")\n cc.release()\n return result\n\n def drop_routing_key(self, routing_key):\n logger.debug(\"drop_routing_key: %s\" % routing_key)\n if routing_key in self.routing_key_list:\n self.routing_key_list.remove(routing_key)\n\n\nclass TopicPublisher(Publisher):\n\n def __init__(self, exchange_name, channel, routing_key=None):\n super().__init__(exchange_name, channel, routing_key)\n\n def reconnect(self, channel):\n self.exchange = kombu.Exchange(\n self.exchange_name, type=\"topic\", durable=True)\n self.channel = channel\n self._producer = kombu.Producer(exchange=self.exchange,\n channel=channel, serializer=\"json\",\n routing_key=self.routing_key)\n\n\nclass PublisherFactory:\n\n def __init__(self, channel):\n self.channel = channel\n\n def get_publisher(self, key):\n\n publisher = None\n logger.debug(\"routing_key: %s\" % key)\n if key == \"nokkhum_compute.update_status\":\n routing_key = \"nokkhum_compute.update_status\"\n publisher = Publisher(\n \"nokkunm_compute.update_status\", self.channel, routing_key)\n\n return publisher\n\n else:\n import fnmatch\n import re\n regex = fnmatch.translate('nokkhum_compute.*.rpc_*')\n reobj = re.compile(regex)\n if reobj.match(key):\n routing_key = key\n\n if \"nokkhum_compute.*.rpc_response\" in routing_key:\n publisher = TopicPublisher(\n \"nokkunm_compute.compute_rpc\", self.channel, routing_key)\n elif \"nokkhum_compute.*.rpc_request\":\n publisher = TopicPublisher(\n \"nokkunm_compute.rpc\", self.channel, routing_key)\n # logger.debug(\"get pub: %s\"%publisher)\n return publisher\n\n return publisher\n",
"step-ids": [
7,
9,
12,
13,
15
]
}
|
[
7,
9,
12,
13,
15
] |
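A hedged usage sketch for the Publisher/PublisherFactory record above; it assumes a running AMQP broker and the package's queues helper, and the broker URL and message payload are placeholders rather than part of the original module.

import kombu

connection = kombu.Connection('amqp://guest:guest@localhost//')
channel = connection.channel()

factory = PublisherFactory(channel)
publisher = factory.get_publisher('nokkhum_compute.update_status')
if publisher:
    # Publisher.send returns True on success, False if publishing raised
    ok = publisher.send({'status': 'running'},
                        routing_key='nokkhum_compute.update_status')
    print(ok)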
import json
import sys
from pkg_resources import resource_string
# Load a package data file resource as a string. This
_conf = json.loads(resource_string(__name__, 'conf.json'))
# Load a data file specified in "package_data" setup option for this pkg.
_pkg_data = resource_string(__name__, 'data/pkg1.dat')
# Load a data file included in "data_files" setup option.
# FIXME
try:
_sys_data = open(sys.prefix + '/data/data1.dat').read()
except Exception as exc:
print(exc)
_sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'
def hello():
print(_conf['greeting'])
print(_pkg_data)
print(_sys_data)
if __name__ == '__main__':
hello()
|
normal
|
{
"blob_id": "4689ee7f7178cef16ac1f5375481a9ee8a48f924",
"index": 3780,
"step-1": "<mask token>\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-3": "<mask token>\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-4": "import json\nimport sys\nfrom pkg_resources import resource_string\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-5": "import json\nimport sys\nfrom pkg_resources import resource_string\n\n# Load a package data file resource as a string. This\n_conf = json.loads(resource_string(__name__, 'conf.json'))\n\n# Load a data file specified in \"package_data\" setup option for this pkg.\n_pkg_data = resource_string(__name__, 'data/pkg1.dat')\n\n# Load a data file included in \"data_files\" setup option.\n# FIXME\ntry:\n _sys_data = open(sys.prefix + '/data/data1.dat').read()\nexcept Exception as exc:\n print(exc)\n _sys_data = '(In editable mode?) Unable to load data file: data/data1.dat'\n\n\ndef hello():\n print(_conf['greeting'])\n print(_pkg_data)\n print(_sys_data)\n\n\nif __name__ == '__main__':\n hello()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
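A hedged side note on the pkg_resources record above: pkg_resources is deprecated in newer setuptools releases, and the same package-data reads can be done with importlib.resources (Python 3.9+). The paths below mirror the layout assumed by the original snippet and are not verified here.

import json
from importlib.resources import files

_conf = json.loads(files(__package__).joinpath('conf.json').read_text())
_pkg_data = files(__package__).joinpath('data/pkg1.dat').read_bytes()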
from app import db
class OrgStaff(db.Model):
__tablename__ = 'org_staff'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete="CASCADE"))
org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete="CASCADE"))
user = db.relationship("User", primaryjoin="User.id==OrgStaff.user_id")
referer = db.relationship("User", primaryjoin="User.id==OrgStaff.invited_by")
org = db.relationship("Organisation", primaryjoin="Organisation.id==OrgStaff.org_id", backref='staff')
created_at = db.Column(db.DateTime, default=db.func.now())
updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
|
normal
|
{
"blob_id": "b0f92b5e4cc972aca84a29b4568e85836f155273",
"index": 3774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass OrgStaff(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-4": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\n 'CASCADE'))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id',\n ondelete='CASCADE'))\n user = db.relationship('User', primaryjoin='User.id==OrgStaff.user_id')\n referer = db.relationship('User', primaryjoin=\n 'User.id==OrgStaff.invited_by')\n org = db.relationship('Organisation', primaryjoin=\n 'Organisation.id==OrgStaff.org_id', backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.\n func.now())\n",
"step-5": "from app import db\n\n\nclass OrgStaff(db.Model):\n __tablename__ = 'org_staff'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n invited_by = db.Column(db.Integer, db.ForeignKey('users.id', ondelete=\"CASCADE\"))\n org_id = db.Column(db.Integer, db.ForeignKey('organisations.id', ondelete=\"CASCADE\"))\n user = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.user_id\")\n referer = db.relationship(\"User\", primaryjoin=\"User.id==OrgStaff.invited_by\")\n org = db.relationship(\"Organisation\", primaryjoin=\"Organisation.id==OrgStaff.org_id\", backref='staff')\n created_at = db.Column(db.DateTime, default=db.func.now())\n updated_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
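A hedged usage sketch for the OrgStaff model above, assuming Flask-SQLAlchemy (which the "from app import db" import suggests); the ids used are placeholders.

member = OrgStaff(user_id=2, invited_by=1, org_id=1)
db.session.add(member)
db.session.commit()

for staff in OrgStaff.query.filter_by(org_id=1).all():
    # the three relationships resolve to User, User and Organisation rows respectively
    print(staff.user, staff.referer, staff.org)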
from access.ssh.session import Client
from access.ssh.datachannel import DataChannel
|
normal
|
{
"blob_id": "967c8348352c805b926643617b88b03a62df2d16",
"index": 2271,
"step-1": "<mask token>\n",
"step-2": "from access.ssh.session import Client\nfrom access.ssh.datachannel import DataChannel\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
pessoas = int(input('Digite o numero de pessoas que passa pela escada rolante:'))
for i in range(1, pessoas + 1, 1):
    tempo = int(input('Digite o tempo:'))
    if i == 1:
        tempo1 = tempo
    elif i == pessoas:
        f = tempo + 10
# total span: last person's exit time (entry + 10 s on the escalator) minus the first entry
x = f - tempo1
print(x)
|
normal
|
{
"blob_id": "f98120d191e9e4b92984a6b59b25b1331b5d8c3a",
"index": 1970,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1, 1):\n tempo = int(input('Digite o tempo:'))\n if i == 1:\n tempo1 = tempo\n elif i == n:\n f = tempo + 10\n<mask token>\nprint(x)\n",
"step-3": "pessoas = int(input('Digite o numero de pessoas que passa pela esada rolante:')\n )\nfor i in range(1, n + 1, 1):\n tempo = int(input('Digite o tempo:'))\n if i == 1:\n tempo1 = tempo\n elif i == n:\n f = tempo + 10\nX = f - tempo1\nprint(x)\n",
"step-4": "# -*- coding: utf-8 -*-\npessoas=int(input('Digite o numero de pessoas que passa pela esada rolante:'))\nfor i in range(1,n+1,1):\n tempo=int(input('Digite o tempo:'))\n if i==1:\n tempo1=tempo\n elif i==n:\n f=tempo+10\nX=f-tempo1\nprint(x)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
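A hedged reading of the escalator exercise above: the intended arithmetic appears to be the last person's exit time (entry plus a 10-second ride) minus the first person's entry time. The small helper below makes that calculation explicit; the entry times are made-up sample data.

def escalator_span(entry_times, ride_seconds=10):
    # time from the first entry until the last person steps off
    return (entry_times[-1] + ride_seconds) - entry_times[0]

print(escalator_span([0, 5, 12]))  # 12 + 10 - 0 = 22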
"""Datasets, Dataloaders, and utils for dataloading"""
from enum import Enum
import torch
from torch.utils.data import Dataset
class Partition(Enum):
"""Names of dataset partitions"""
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int = 2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
|
normal
|
{
"blob_id": "4c0c88f46c2d4607d9ac00755bf122e847ea2f6a",
"index": 6221,
"step-1": "<mask token>\n\n\nclass Partition(Enum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-2": "<mask token>\n\n\nclass Partition(Enum):\n <mask token>\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-3": "<mask token>\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-4": "<mask token>\nfrom enum import Enum\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-5": "\"\"\"Datasets, Dataloaders, and utils for dataloading\"\"\"\nfrom enum import Enum\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int = 2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
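A hedged usage sketch for the RandomClassData record above with a standard PyTorch DataLoader (torch.rand draws from a uniform distribution, so the features are uniform). The sizes below are arbitrary.

from torch.utils.data import DataLoader

ds = RandomClassData(n_samples=128, n_dim=8, n_classes=2)
loader = DataLoader(ds, batch_size=32, shuffle=True)
for features, targets in loader:
    print(features.shape, targets.shape)  # torch.Size([32, 8]) torch.Size([32])
    break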
# Generated by Django 2.2.10 on 2020-05-06 14:43
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('planner', '0023_auto_20191226_1330'),
]
operations = [
migrations.AddField(
model_name='employee',
name='coefficient',
field=models.PositiveSmallIntegerField(default=100, validators=[django.core.validators.MaxValueValidator(200)], verbose_name='Коєфіцієнт плану'),
),
]
|
normal
|
{
"blob_id": "c7558486fc50623f6e64b58668153b75bb6149b9",
"index": 6613,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('planner', '0023_auto_20191226_1330')]\n operations = [migrations.AddField(model_name='employee', name=\n 'coefficient', field=models.PositiveSmallIntegerField(default=100,\n validators=[django.core.validators.MaxValueValidator(200)],\n verbose_name='Коєфіцієнт плану'))]\n",
"step-4": "import django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('planner', '0023_auto_20191226_1330')]\n operations = [migrations.AddField(model_name='employee', name=\n 'coefficient', field=models.PositiveSmallIntegerField(default=100,\n validators=[django.core.validators.MaxValueValidator(200)],\n verbose_name='Коєфіцієнт плану'))]\n",
"step-5": "# Generated by Django 2.2.10 on 2020-05-06 14:43\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('planner', '0023_auto_20191226_1330'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='employee',\n name='coefficient',\n field=models.PositiveSmallIntegerField(default=100, validators=[django.core.validators.MaxValueValidator(200)], verbose_name='Коєфіцієнт плану'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
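A hedged sketch of the model change that the migration record above corresponds to; the field name, default and validator are taken from the AddField operation, while the surrounding Employee model is assumed.

from django.core.validators import MaxValueValidator
from django.db import models


class Employee(models.Model):
    coefficient = models.PositiveSmallIntegerField(
        default=100,
        validators=[MaxValueValidator(200)],
        verbose_name='Коєфіцієнт плану',
    )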
import multiprocessing
name = "flask_gunicorn"
workers = multiprocessing.cpu_count() * 2 + 1
loglevel = "debug"
bind = f"0.0.0.0:18080"
|
normal
|
{
"blob_id": "2ad326f739b42b9c7c252078b8c28e90da17b95d",
"index": 1802,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nname = 'flask_gunicorn'\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = 'debug'\nbind = f'0.0.0.0:18080'\n",
"step-3": "import multiprocessing\nname = 'flask_gunicorn'\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = 'debug'\nbind = f'0.0.0.0:18080'\n",
"step-4": "import multiprocessing\n\nname = \"flask_gunicorn\"\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = \"debug\"\nbind = f\"0.0.0.0:18080\"\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .. import dataclass # trigger the register in the dataclass package
|
normal
|
{
"blob_id": "681750dbf489a6a32e9ef1d6f64d493cc252b272",
"index": 6386,
"step-1": "<mask token>\n",
"step-2": "from .. import dataclass\n",
"step-3": "from .. import dataclass # trigger the register in the dataclass package\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.compute import multi_cell_list
from nova import test
class TestUtils(test.NoDBTestCase):
def test_compare_simple(self):
dt1 = datetime.datetime(2015, 11, 5, 20, 30, 00)
dt2 = datetime.datetime(1955, 10, 25, 1, 21, 00)
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}
# Equal key0, inst == inst2
ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])
self.assertEqual(0, ctx.compare_records(inst1, inst2))
# Equal key0, inst == inst2 (direction should not matter)
ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])
self.assertEqual(0, ctx.compare_records(inst1, inst2))
# Ascending by key1, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Descending by key1, inst2 < inst1
ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Ascending by key2, inst2 < inst1
ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Descending by key2, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Ascending by key4, inst1 > inst2
ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
# Descending by key4, inst1 < inst2
ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
def test_compare_multiple(self):
# key0 should not affect ordering, but key1 should
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
# Should be equivalent to ascending by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'asc'])
self.assertEqual(-1, ctx.compare_records(inst1, inst2))
# Should be equivalent to descending by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'desc'])
self.assertEqual(1, ctx.compare_records(inst1, inst2))
def test_wrapper(self):
inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}
inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}
# Should sort by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'asc'])
iw1 = multi_cell_list.RecordWrapper(ctx, inst1)
iw2 = multi_cell_list.RecordWrapper(ctx, inst2)
# Check this both ways to make sure we're comparing against -1
# and not just nonzero return from cmp()
self.assertTrue(iw1 < iw2)
self.assertFalse(iw2 < iw1)
# Should sort reverse by key1
ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],
['asc', 'desc'])
iw1 = multi_cell_list.RecordWrapper(ctx, inst1)
iw2 = multi_cell_list.RecordWrapper(ctx, inst2)
# Check this both ways to make sure we're comparing against -1
# and not just nonzero return from cmp()
self.assertTrue(iw1 > iw2)
self.assertFalse(iw2 > iw1)
|
normal
|
{
"blob_id": "9fa1dab7cb0debf363ae0864af1407c87aad063a",
"index": 4926,
"step-1": "<mask token>\n\n\nclass TestUtils(test.NoDBTestCase):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestUtils(test.NoDBTestCase):\n <mask token>\n\n def test_compare_multiple(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestUtils(test.NoDBTestCase):\n\n def test_compare_simple(self):\n dt1 = datetime.datetime(2015, 11, 5, 20, 30, 0)\n dt2 = datetime.datetime(1955, 10, 25, 1, 21, 0)\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}\n ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n def test_compare_multiple(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n def test_wrapper(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'asc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n self.assertTrue(iw1 < iw2)\n self.assertFalse(iw2 < iw1)\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'desc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n self.assertTrue(iw1 > iw2)\n self.assertFalse(iw2 > iw1)\n",
"step-4": "import datetime\nfrom nova.compute import multi_cell_list\nfrom nova import test\n\n\nclass TestUtils(test.NoDBTestCase):\n\n def test_compare_simple(self):\n dt1 = datetime.datetime(2015, 11, 5, 20, 30, 0)\n dt2 = datetime.datetime(1955, 10, 25, 1, 21, 0)\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}\n ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n def test_compare_multiple(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n def test_wrapper(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'asc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n self.assertTrue(iw1 < iw2)\n self.assertFalse(iw2 < iw1)\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'], ['asc',\n 'desc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n self.assertTrue(iw1 > iw2)\n self.assertFalse(iw2 > iw1)\n",
"step-5": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport datetime\n\nfrom nova.compute import multi_cell_list\nfrom nova import test\n\n\nclass TestUtils(test.NoDBTestCase):\n def test_compare_simple(self):\n dt1 = datetime.datetime(2015, 11, 5, 20, 30, 00)\n dt2 = datetime.datetime(1955, 10, 25, 1, 21, 00)\n\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456, 'key4': dt1}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123, 'key4': dt2}\n\n # Equal key0, inst == inst2\n ctx = multi_cell_list.RecordSortContext(['key0'], ['asc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n\n # Equal key0, inst == inst2 (direction should not matter)\n ctx = multi_cell_list.RecordSortContext(['key0'], ['desc'])\n self.assertEqual(0, ctx.compare_records(inst1, inst2))\n\n # Ascending by key1, inst1 < inst2\n ctx = multi_cell_list.RecordSortContext(['key1'], ['asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n # Descending by key1, inst2 < inst1\n ctx = multi_cell_list.RecordSortContext(['key1'], ['desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n # Ascending by key2, inst2 < inst1\n ctx = multi_cell_list.RecordSortContext(['key2'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n # Descending by key2, inst1 < inst2\n ctx = multi_cell_list.RecordSortContext(['key2'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n # Ascending by key4, inst1 > inst2\n ctx = multi_cell_list.RecordSortContext(['key4'], ['asc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n # Descending by key4, inst1 < inst2\n ctx = multi_cell_list.RecordSortContext(['key4'], ['desc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n def test_compare_multiple(self):\n # key0 should not affect ordering, but key1 should\n\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n\n # Should be equivalent to ascending by key1\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],\n ['asc', 'asc'])\n self.assertEqual(-1, ctx.compare_records(inst1, inst2))\n\n # Should be equivalent to descending by key1\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],\n ['asc', 'desc'])\n self.assertEqual(1, ctx.compare_records(inst1, inst2))\n\n def test_wrapper(self):\n inst1 = {'key0': 'foo', 'key1': 'd', 'key2': 456}\n inst2 = {'key0': 'foo', 'key1': 's', 'key2': 123}\n\n # Should sort by key1\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],\n ['asc', 'asc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n # Check this both ways to make sure we're comparing against -1\n # and not just nonzero return from cmp()\n self.assertTrue(iw1 < iw2)\n self.assertFalse(iw2 < iw1)\n\n # Should sort reverse by key1\n ctx = multi_cell_list.RecordSortContext(['key0', 'key1'],\n ['asc', 'desc'])\n iw1 = multi_cell_list.RecordWrapper(ctx, inst1)\n iw2 = multi_cell_list.RecordWrapper(ctx, inst2)\n # Check this both ways to make 
sure we're comparing against -1\n # and not just nonzero return from cmp()\n self.assertTrue(iw1 > iw2)\n self.assertFalse(iw2 > iw1)\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
from django import forms
from django.forms import inlineformset_factory
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Max
from auction.models import *
from datetime import *
from decimal import *
import re
class UserForm(forms.ModelForm):
error_email = {
        'email_exist': _("Email already exists."),
}
error_password = {
        'password_less': _("Password should be at least 6 characters."),
}
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['email'].required = True
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,
'autocomplete': 'off'})
def clean_email(self):
email = self.cleaned_data.get("email")
check = User.objects.filter(email=email)
if self.instance.email == email:
return email
else:
if len(check) > 0:
raise forms.ValidationError(
_("This email address is already in use. Please supply a different email address."))
return email
def clean_password(self):
password = self.cleaned_data.get("password")
if len(password) < 6:
raise forms.ValidationError(
_("Password should be more than 6 characters."))
return password
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
user.username = self.cleaned_data["email"]
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ('id',)
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,
'autocomplete': 'off'})
UserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm, extra=1, can_delete=False)
class AuctionForm(forms.ModelForm):
class Meta:
model = Auction
exclude = ('account', 'slug', 'status', 'winner', 'is_active',)
def __init__(self, *args, **kwargs):
super(AuctionForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})
if field and field_name == 'expire':
field.widget.attrs.update({'class': 'form-control input-lg datepicker'})
def clean_expire(self):
expire = self.cleaned_data.get("expire").date()
if expire < (date.today() + timedelta(days=3)):
raise forms.ValidationError(_("Expire should be 72 hour from now on."))
return expire
class BidAuction(forms.ModelForm):
class Meta:
model = Bid
exclude = ('id', 'auction', 'bidder',)
def __init__(self, *args, **kwargs):
self.auction = kwargs.pop('auction', None)
super(BidAuction, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})
def clean_bid_price(self):
qs = Bid.objects.filter(auction = self.auction).aggregate(Max('bid_price'))['bid_price__max']
if qs is None:
qs = self.auction.price.amount
price = self.cleaned_data.get("bid_price")
# min_price = qs + (self.auction.price.amount * 5) / 100
        min_price = qs + Decimal('0.05')  # string literal keeps the increment exactly 0.05
if price < min_price:
raise forms.ValidationError(_("Price should be more than %s." % "{0:.2f}".format(min_price)))
return price
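
# Hedged usage sketch (not part of the original module): a hypothetical view
# helper showing how BidAuction is intended to be constructed. The 'auction'
# kwarg popped in __init__ drives the minimum-price rule in clean_bid_price
# (highest existing bid, or the starting price if there are no bids, plus 0.05).
def place_bid(request, auction):
    form = BidAuction(request.POST or None, auction=auction)
    if form.is_valid():
        bid = form.save(commit=False)
        bid.auction = auction
        bid.bidder = request.user
        bid.save()
    return form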
|
normal
|
{
"blob_id": "5215b5e4efe2e126f18b3c4457dc3e3902923d49",
"index": 6360,
"step-1": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\n<mask token>\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-2": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n <mask token>\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. Please supply a different email address.'\n ))\n return email\n <mask token>\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\n<mask token>\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-3": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n error_email = {'email_exist': _('Email allready exist.')}\n error_password = {'password_less': _(\n 'Password should be more than 6 characters.')}\n password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. Please supply a different email address.'\n ))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get('password')\n if len(password) < 6:\n raise forms.ValidationError(_(\n 'Password should be more than 6 characters.'))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,\n extra=1, can_delete=False)\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-4": "from django import forms\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.db.models import Max\nfrom auction.models import *\nfrom datetime import *\nfrom decimal import *\nimport re\n\n\nclass UserForm(forms.ModelForm):\n error_email = {'email_exist': _('Email allready exist.')}\n error_password = {'password_less': _(\n 'Password should be more than 6 characters.')}\n password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. Please supply a different email address.'\n ))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get('password')\n if len(password) < 6:\n raise forms.ValidationError(_(\n 'Password should be more than 6 characters.'))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,\n extra=1, can_delete=False)\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = 
Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-5": "from django import forms\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.db.models import Max\nfrom auction.models import *\nfrom datetime import *\nfrom decimal import *\nimport re\n\n\nclass UserForm(forms.ModelForm):\n\n error_email = {\n 'email_exist': _(\"Email allready exist.\"),\n }\n\n error_password = {\n 'password_less': _(\"Password should be more than 6 characters.\"),\n }\n\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,\n 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n check = User.objects.filter(email=email)\n\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(\n _(\"This email address is already in use. Please supply a different email address.\"))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get(\"password\")\n if len(password) < 6:\n raise forms.ValidationError(\n _(\"Password should be more than 6 characters.\"))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data[\"email\"]\n user.set_password(self.cleaned_data[\"password\"])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = Profile\n exclude = ('id',)\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,\n 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm, extra=1, can_delete=False)\n\n\n\nclass AuctionForm(forms.ModelForm):\n\n class Meta:\n model = Auction\n exclude = ('account', 'slug', 'status', 'winner', 'is_active',)\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class': 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get(\"expire\").date()\n if expire < (date.today() + timedelta(days=3)):\n raise forms.ValidationError(_(\"Expire should be 72 hour from now on.\"))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n class Meta:\n model = Bid\n exclude = ('id', 'auction', 'bidder',)\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs 
= Bid.objects.filter(auction = self.auction).aggregate(Max('bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get(\"bid_price\")\n # min_price = qs + (self.auction.price.amount * 5) / 100\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_(\"Price should be more than %s.\" % \"{0:.2f}\".format(min_price)))\n return price\n",
"step-ids": [
10,
11,
15,
16,
17
]
}
|
[
10,
11,
15,
16,
17
] |
#!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
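
# For reference: the graded 6-max_integer module itself is not shown above.
# A minimal implementation that would satisfy these tests could look like the
# following sketch (an assumption, not the actual 6-max_integer file):
def max_integer(my_list=None):
    """Return the largest integer in a list, or None for an empty/missing list."""
    if not my_list:
        return None
    result = my_list[0]
    for item in my_list[1:]:
        if item > result:
            result = item
    return result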
|
normal
|
{
"blob_id": "f799fdfde537bbe8f6c49a5e1a15cf6f910a0d45",
"index": 889,
"step-1": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n <mask token>\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-3": "<mask token>\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python3\n\"\"\"Unittest for max_integer([..])\n\"\"\"\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
#!/usr/bin/python
#
# @name = 'fmsrutil.py'
#
# @description = "F-MSR utilities module."
#
# @author = ['YU Chiu Man', 'HU Yuchong', 'TANG Yang']
#
import sys
import os
import random
from finitefield import GF256int
from coeffvector import CoeffVector
from coeffvector import CoeffMatrix
import common
#Check if C library of F-MSR is installed:
import codings.clibfmsr.clibfmsr
useClibfmsr = True
def getNativeBlockNum(n, k):
'''Get number of native blocks.'''
return k*(n-k)
def getParityBlockNum(n, k):
'''Get number of parity blocks.'''
return n*(n-k)
def getNodeIdList(n, k):
'''Find the node id for a segment of blocks.'''
'''Return a list of node id for the blocks.'''
nodeidList = []
segmentSize = n-k
blockNum = getParityBlockNum(n, k)
for i in range(int(blockNum/segmentSize)):
for j in range(segmentSize):
nodeidList.append(i)
return nodeidList
def getParityCoeff(n, k):
'''Get the parity coefficients of the blocks.'''
nativeBlockNum = getNativeBlockNum(n, k)
parityBlockNum = getParityBlockNum(n, k)
parityCoeff = []
for i in range(parityBlockNum):
for j in range(nativeBlockNum):
parityCoeff.append(GF256int(i+1)**j)
return parityCoeff
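# Worked example (illustration): with the common fmsr(n=4, k=2) setup there are
# 4 native and 8 parity blocks, so getParityCoeff returns 32 values. Row i of
# the implied 8x4 coefficient matrix is [1, g, g**2, g**3] with generator
# g = GF256int(i+1), i.e. a Vandermonde-style matrix over GF(2^8).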
def encode(n, k, src, parityCoeff, setting, metadata):
'''Encode src file to parity chunks.'''
nativeBlockNum = getNativeBlockNum(n, k)
parityBlockNum = getParityBlockNum(n, k)
infile = open(src, 'rb')
indatalist = infile.read()
infile.close()
totalchunk = nativeBlockNum
filesize = len(indatalist)
#Generate info for big-chunk:
for i in range(metadata.totalnode):
fileNode = common.FileNodeMetadata(i)
fileNode.nodekey = setting.nodeInfo[i].nodekey
fileNode.nodetype = setting.nodeInfo[i].nodetype
fileNode.bucketname = setting.nodeInfo[i].bucketname
fileNode.bigchunksize = 0
fileNode.chunknum = 0
metadata.fileNodeInfo.append(fileNode)
#Encode indatalist to outdatalist
if filesize > 0:
chunksize = filesize/totalchunk + 1
indatalist += '\0'*(chunksize*totalchunk - filesize)
parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])
outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist, \
parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)
else:
chunksize = 0
#Generate info for small chunks:
nodeIdList = getNodeIdList(n, k)
for i in range(parityBlockNum):
chunk = common.ChunkMetadata(i)
chunk.chunkname = metadata.filename + '.chunk' + str(i)
chunk.chunksize = chunksize
chunk.chunktype = 'parity'
chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname
nodeid = nodeIdList[i]
chunk.nodeid = nodeid
chunk.nodekey = setting.nodeInfo[nodeid].nodekey
chunk.nodetype = setting.nodeInfo[nodeid].nodetype
chunk.bucketname = setting.nodeInfo[nodeid].bucketname
chunk.action = 'upload'
#Add chunk position inside big-chunk:
chunk.position = metadata.fileNodeInfo[nodeid].chunknum
metadata.chunkInfo.append(chunk)
#Add support for big-chunk:
metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize
metadata.fileNodeInfo[nodeid].chunknum += 1
metadata.totalchunk = parityBlockNum
metadata.parityCoeff = parityCoeff[:]
#Generate big-chunks:
startchunk = 0
writelen = 1048576
for i in range(metadata.totalnode):
dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
if chunksize > 0:
f = open(dest, 'wb')
numchunks = nodeIdList.count(i)
writenext = startchunk*chunksize
for j in range(startchunk*chunksize, (startchunk+numchunks)*chunksize-writelen, writelen):
writenext = j+writelen
f.write(outdatalist[j:writenext])
f.write(outdatalist[writenext:(startchunk+numchunks)*chunksize])
f.close()
startchunk += numchunks
else:
open(dest, 'wb').close()
metadata.fileNodeInfo[i].bigchunkpath = dest
metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
metadata.fileNodeInfo[i].action = 'upload'
def reversematrix(n, k, gj_matrix):
'''Reverse matrix.'''
## The first elimination: decoding matrix -> lower unit triangular matrix
nativeBlockNum = getNativeBlockNum(n, k)
parityBlockNum = getParityBlockNum(n, k)
for rowNo in range(nativeBlockNum):
        ##1. find the first row at or below rowNo whose coefficient in column rowNo is non-zero
A = GF256int(0)
for i in range(rowNo,nativeBlockNum,1):
if gj_matrix[i][rowNo]!=0:
A = gj_matrix[i][rowNo]
break
##2. permutation between the rowNo row vector and the ith row vector
temp_vector = [GF256int(0)]*(nativeBlockNum*2)
if i!= rowNo:
for j in range(nativeBlockNum*2):
temp_vector[j] = gj_matrix[i][j]
gj_matrix[i][j] = gj_matrix[rowNo][j]
gj_matrix[rowNo][j] = temp_vector[j]
##3. in rowNo-th row vector, all the coeffs/1st coeff
for m in range(nativeBlockNum*2):
gj_matrix[rowNo][m] = gj_matrix[rowNo][m]/A
##4. The row vectors below rowNo-th row vector eliminate the rowNo-th coeff
for j in range(rowNo+1,nativeBlockNum,1):
B = gj_matrix[j][rowNo]
for m in range(rowNo,nativeBlockNum*2,1):
gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*B
# The second elimination: decoding matrix -> unit matrix
##5. The row vectors above rowNo-th row vector eliminate the rowNo-th coeff
for rowNo in range(nativeBlockNum-1,0,-1):
for j in range(0,rowNo,1):
C = gj_matrix[j][rowNo]
for m in range(nativeBlockNum*2):
gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*C
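# After reversematrix returns, the left half of gj_matrix has been reduced to
# the identity and the right half holds the inverse of the original decoding
# matrix; decode() below reads that inverse back out via
# dec_matrix[i][j] = gj_matrix[i][j + nativeBlockNum].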
def decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):
'''Decode chunk files to dest file.'''
## special handling for 0B files
if filesize <= 0:
open(dest,'wb').close()
return
cv_temp=[]
nativeBlockNum = getNativeBlockNum(n, k)
parityBlockNum = getParityBlockNum(n, k)
enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(parityBlockNum)]
dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]
rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]
gj_matrix = [[GF256int(0) for col in range(nativeBlockNum*2)] for row in range(nativeBlockNum)]
## generate the encoding matrix
counter = 0
for i in range(parityBlockNum):
for j in range(nativeBlockNum):
enc_matrix[i][j] = GF256int(parityCoeff[counter])
counter += 1
cm1 = CoeffMatrix(nativeBlockNum)
for i in range(parityBlockNum):
cv_temp.append(CoeffVector(nativeBlockNum))
for j in range(nativeBlockNum):
cv_temp[i].coeff_[j] = enc_matrix[i][j]
cv_temp[i].first()
cm1.addcoeffvector(cv_temp[i])
## generate the decoding matrix
i=0
for selectChunkNo in blocknums:
for j in range(nativeBlockNum):
dec_matrix[i][j]=enc_matrix[selectChunkNo][j]
i += 1
## initialize the reverse matrix
for i in range(nativeBlockNum):
for j in range(nativeBlockNum):
if j==i:
rev_matrix[i][j]= GF256int(1)
## initialize the Gauss-Jordan matrix = [decoding,reverse]
for i in range(nativeBlockNum):
for j in range(nativeBlockNum*2):
if j<nativeBlockNum:
gj_matrix[i][j]= dec_matrix[i][j]
else:
gj_matrix[i][j]= rev_matrix[i][j-nativeBlockNum]
reversematrix(n, k, gj_matrix)
for i in range(nativeBlockNum):
for j in range(nativeBlockNum):
dec_matrix[i][j] = gj_matrix[i][j+nativeBlockNum]
##generate decode data chunks
selectchunk=[]
for filename in src:
infile = open(filename,'rb')
selectchunk.append(infile.read())
infile.close()
chunksize = os.path.getsize(src[0])
indatalist = ''.join(selectchunk)
##rebuild the original chunks
parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) \
for i in range(nativeBlockNum) \
for j in range(nativeBlockNum)])
outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist, \
parityCoeff_temp, nativeBlockNum, chunksize)
outfile = open(dest,'wb')
writelen = 1048576
writenext = 0
for i in range(0,filesize-writelen,writelen):
writenext = i+writelen
outfile.write(outdatalist[i:writenext])
outfile.write(outdatalist[writenext:filesize])
outfile.close()
def getCheckNum(parityBlockNum):
'''Get check number for checking strong MDS, for fmsr(k=n-2) only.'''
return int((parityBlockNum-2)*(parityBlockNum-2-1)/2 - ((parityBlockNum/2)-1))
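# Worked example (illustration): for fmsr(n=4, k=2) the parity block count is
# 8, so getCheckNum(8) = (6*5)/2 - (8/2 - 1) = 15 - 3 = 12 check combinations.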
def getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum, checkNum, enc_matrix):
'''Get strong MDS property degree.'''
currentStrongMDSPropertyDegree = 0
survivalcoeffvectorset = []
flag = 0
for i in range(parityBlockNum):
#get coeff vectors of survival parity blocks
if int(i/2)!= repairNodeno:
survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))
for j in range(nativeBlockNum):
survivalcoeffvectorset[i - flag*2].coeff_[j] = enc_matrix[i][j]
survivalcoeffvectorset[i - flag*2].first()
else:
flag =1
s = 0
for i in range(parityBlockNum-2):
for j in range(parityBlockNum-2):
if i<j:
checkmatrix = CoeffMatrix(nativeBlockNum)
for k in range (parityBlockNum-2):
if k!=i and k!=j:
checkmatrix.addcoeffvector(survivalcoeffvectorset[k].copy())
if checkmatrix.rank_ == nativeBlockNum:
currentStrongMDSPropertyDegree += 1
s += 1
return currentStrongMDSPropertyDegree
def checkMDS(MSR_n, MSR_k, enc_matrix):
'''Check MDS property, for fmsr(k=n-2) only.'''
    '''Return an MDS property value.'''
nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)
parityBlockNum = getParityBlockNum(MSR_n, MSR_k)
MDSpropery = True
allcoeffvectors = []
for i in range(parityBlockNum):
allcoeffvectors.append(CoeffVector(nativeBlockNum))
for j in range(nativeBlockNum):
allcoeffvectors[i].coeff_[j] = enc_matrix[i][j]
allcoeffvectors[i].first()
permutation = int(MSR_n * (MSR_n - 1) / 2)
#permutation of selecting n-2 nodes from n nodes
checkmatrix = [CoeffMatrix(nativeBlockNum) for col in range(permutation)]
s = 0
for i in range (MSR_n):
for j in range(MSR_n):
if i<j:
for b in range(MSR_n):
if b !=i and b!=j:
checkmatrix[s].addcoeffvector(allcoeffvectors[b*2].copy())
checkmatrix[s].addcoeffvector(allcoeffvectors[b*2+1].copy())
if checkmatrix[s].rank_ != nativeBlockNum:
MDSpropery = False
s += 1
return MDSpropery
def checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):
'''Check strong MDS property, for fmsr(k=n-2) only.'''
'''Return list of MDS property degrees.'''
strongMDSPropertyDegrees = []
#get check-combination number
checkNum = getCheckNum(parityBlockNum)
#Calculate total strong MDS property degree
for i in range(n):
strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i, \
nativeBlockNum, parityBlockNum, checkNum, enc_matrix))
return strongMDSPropertyDegrees
def testStrongMDSProperty(strongMDSPropertyDegrees, checkNum,n):
'''Decide whether the current parity coefficient set passes the strong MDS property.'''
result = True
#threshold = checkNum
threshold = 2*(n-1)*(n-2)-(n-2)*(n-3)/2
    #Important: currently the threshold value is hardcoded
for degree in strongMDSPropertyDegrees:
if degree < threshold:
result = False
return result
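# Worked example (illustration): for n=4 the hardcoded threshold evaluates to
# 2*3*2 - (2*1)/2 = 12 - 1 = 11, so each node's reported strong-MDS degree
# must be at least 11 for the coefficient set to pass.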
def functionalRepair(n, k, src, blocknums, failedNode, parityCoeff, repairChunks, setting, metadata):
'''Functional repair by generating new parity chunks.'''
nativeBlockNum = getNativeBlockNum(n, k)
parityBlockNum = getParityBlockNum(n, k)
checkNum = getCheckNum(parityBlockNum)
## read the encoding matrix and repair
enc_matrix = metadata.enc_matrix
repairCodingCoeff = metadata.repairCodingCoeff
indatalist = []
for filepath in src:
infile = open(filepath, 'rb')
indatalist.append(infile.read())
infile.close()
chunksize = os.path.getsize(src[0])
if chunksize > 0:
#Repair computation:
indatalist_temp = ''.join(indatalist)
parityCoeff_temp = []
for i in range(n-k):
for j in range(n-1):
parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))
parityCoeff_temp = ''.join(parityCoeff_temp)
outdatalist = codings.clibfmsr.clibfmsr.repairComputation(indatalist_temp, \
parityCoeff_temp, n, k, chunksize)
counter = 0
for i in range(parityBlockNum):
for j in range(nativeBlockNum):
parityCoeff[counter] = enc_matrix[i][j]
counter += 1
#Add support for big-chunk:
writelen = 1048576
writenext = 0
for i in range(metadata.totalnode):
if setting.nodeInfo[i].healthy == False:
dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)
filesize = metadata.fileNodeInfo[i].bigchunksize
if chunksize <= 0:
open(dest,'wb').close()
else:
outfile = open(dest, 'wb')
for j in range(0,filesize-writelen,writelen):
writenext = j+writelen
outfile.write(outdatalist[j:writenext])
outfile.write(outdatalist[writenext:filesize])
outfile.close()
metadata.fileNodeInfo[i].bigchunkpath = dest
metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)
metadata.fileNodeInfo[i].action = 'upload'
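
# Hedged usage sketch (illustration only, not part of the original module):
# basic block-layout helpers for the common fmsr(n=4, k=2) configuration.
if __name__ == '__main__':
    n, k = 4, 2
    print(getNativeBlockNum(n, k))    # 4 native blocks
    print(getParityBlockNum(n, k))    # 8 parity blocks
    print(getNodeIdList(n, k))        # [0, 0, 1, 1, 2, 2, 3, 3]
    print(len(getParityCoeff(n, k)))  # 32 coefficients (8 rows x 4 columns)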
|
normal
|
{
"blob_id": "0ebd19079a16a6e3da34da2ecfda0d159b8580b2",
"index": 9527,
"step-1": "<mask token>\n\n\ndef getNativeBlockNum(n, k):\n \"\"\"Get number of native blocks.\"\"\"\n return k * (n - k)\n\n\n<mask token>\n\n\ndef getNodeIdList(n, k):\n \"\"\"Find the node id for a segment of blocks.\"\"\"\n \"\"\"Return a list of node id for the blocks.\"\"\"\n nodeidList = []\n segmentSize = n - k\n blockNum = getParityBlockNum(n, k)\n for i in range(int(blockNum / segmentSize)):\n for j in range(segmentSize):\n nodeidList.append(i)\n return nodeidList\n\n\n<mask token>\n\n\ndef encode(n, k, src, parityCoeff, setting, metadata):\n \"\"\"Encode src file to parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n infile = open(src, 'rb')\n indatalist = infile.read()\n infile.close()\n totalchunk = nativeBlockNum\n filesize = len(indatalist)\n for i in range(metadata.totalnode):\n fileNode = common.FileNodeMetadata(i)\n fileNode.nodekey = setting.nodeInfo[i].nodekey\n fileNode.nodetype = setting.nodeInfo[i].nodetype\n fileNode.bucketname = setting.nodeInfo[i].bucketname\n fileNode.bigchunksize = 0\n fileNode.chunknum = 0\n metadata.fileNodeInfo.append(fileNode)\n if filesize > 0:\n chunksize = filesize / totalchunk + 1\n indatalist += '\\x00' * (chunksize * totalchunk - filesize)\n parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])\n outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)\n else:\n chunksize = 0\n nodeIdList = getNodeIdList(n, k)\n for i in range(parityBlockNum):\n chunk = common.ChunkMetadata(i)\n chunk.chunkname = metadata.filename + '.chunk' + str(i)\n chunk.chunksize = chunksize\n chunk.chunktype = 'parity'\n chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname\n nodeid = nodeIdList[i]\n chunk.nodeid = nodeid\n chunk.nodekey = setting.nodeInfo[nodeid].nodekey\n chunk.nodetype = setting.nodeInfo[nodeid].nodetype\n chunk.bucketname = setting.nodeInfo[nodeid].bucketname\n chunk.action = 'upload'\n chunk.position = metadata.fileNodeInfo[nodeid].chunknum\n metadata.chunkInfo.append(chunk)\n metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize\n metadata.fileNodeInfo[nodeid].chunknum += 1\n metadata.totalchunk = parityBlockNum\n metadata.parityCoeff = parityCoeff[:]\n startchunk = 0\n writelen = 1048576\n for i in range(metadata.totalnode):\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n if chunksize > 0:\n f = open(dest, 'wb')\n numchunks = nodeIdList.count(i)\n writenext = startchunk * chunksize\n for j in range(startchunk * chunksize, (startchunk + numchunks) *\n chunksize - writelen, writelen):\n writenext = j + writelen\n f.write(outdatalist[j:writenext])\n f.write(outdatalist[writenext:(startchunk + numchunks) * chunksize]\n )\n f.close()\n startchunk += numchunks\n else:\n open(dest, 'wb').close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n\ndef reversematrix(n, k, gj_matrix):\n \"\"\"Reverse matrix.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n for rowNo in range(nativeBlockNum):\n A = GF256int(0)\n for i in range(rowNo, nativeBlockNum, 1):\n if gj_matrix[i][rowNo] != 0:\n A = gj_matrix[i][rowNo]\n break\n temp_vector = [GF256int(0)] * (nativeBlockNum * 2)\n if i != rowNo:\n for j in range(nativeBlockNum * 2):\n temp_vector[j] = gj_matrix[i][j]\n gj_matrix[i][j] = gj_matrix[rowNo][j]\n 
gj_matrix[rowNo][j] = temp_vector[j]\n for m in range(nativeBlockNum * 2):\n gj_matrix[rowNo][m] = gj_matrix[rowNo][m] / A\n for j in range(rowNo + 1, nativeBlockNum, 1):\n B = gj_matrix[j][rowNo]\n for m in range(rowNo, nativeBlockNum * 2, 1):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * B\n for rowNo in range(nativeBlockNum - 1, 0, -1):\n for j in range(0, rowNo, 1):\n C = gj_matrix[j][rowNo]\n for m in range(nativeBlockNum * 2):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * C\n\n\ndef decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):\n \"\"\"Decode chunk files to dest file.\"\"\"\n if filesize <= 0:\n open(dest, 'wb').close()\n return\n cv_temp = []\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(parityBlockNum)]\n dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n gj_matrix = [[GF256int(0) for col in range(nativeBlockNum * 2)] for row in\n range(nativeBlockNum)]\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n enc_matrix[i][j] = GF256int(parityCoeff[counter])\n counter += 1\n cm1 = CoeffMatrix(nativeBlockNum)\n for i in range(parityBlockNum):\n cv_temp.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n cv_temp[i].coeff_[j] = enc_matrix[i][j]\n cv_temp[i].first()\n cm1.addcoeffvector(cv_temp[i])\n i = 0\n for selectChunkNo in blocknums:\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = enc_matrix[selectChunkNo][j]\n i += 1\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n if j == i:\n rev_matrix[i][j] = GF256int(1)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum * 2):\n if j < nativeBlockNum:\n gj_matrix[i][j] = dec_matrix[i][j]\n else:\n gj_matrix[i][j] = rev_matrix[i][j - nativeBlockNum]\n reversematrix(n, k, gj_matrix)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = gj_matrix[i][j + nativeBlockNum]\n selectchunk = []\n for filename in src:\n infile = open(filename, 'rb')\n selectchunk.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n indatalist = ''.join(selectchunk)\n parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) for i in range(\n nativeBlockNum) for j in range(nativeBlockNum)])\n outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, chunksize)\n outfile = open(dest, 'wb')\n writelen = 1048576\n writenext = 0\n for i in range(0, filesize - writelen, writelen):\n writenext = i + writelen\n outfile.write(outdatalist[i:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n\n\ndef getCheckNum(parityBlockNum):\n \"\"\"Get check number for checking strong MDS, for fmsr(k=n-2) only.\"\"\"\n return int((parityBlockNum - 2) * (parityBlockNum - 2 - 1) / 2 - (\n parityBlockNum / 2 - 1))\n\n\ndef getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum,\n checkNum, enc_matrix):\n \"\"\"Get strong MDS property degree.\"\"\"\n currentStrongMDSPropertyDegree = 0\n survivalcoeffvectorset = []\n flag = 0\n for i in range(parityBlockNum):\n if int(i / 2) != repairNodeno:\n survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n survivalcoeffvectorset[i - flag * 2].coeff_[j] = enc_matrix[i][\n 
j]\n survivalcoeffvectorset[i - flag * 2].first()\n else:\n flag = 1\n s = 0\n for i in range(parityBlockNum - 2):\n for j in range(parityBlockNum - 2):\n if i < j:\n checkmatrix = CoeffMatrix(nativeBlockNum)\n for k in range(parityBlockNum - 2):\n if k != i and k != j:\n checkmatrix.addcoeffvector(survivalcoeffvectorset[k\n ].copy())\n if checkmatrix.rank_ == nativeBlockNum:\n currentStrongMDSPropertyDegree += 1\n s += 1\n return currentStrongMDSPropertyDegree\n\n\n<mask token>\n\n\ndef checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):\n \"\"\"Check strong MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return list of MDS property degrees.\"\"\"\n strongMDSPropertyDegrees = []\n checkNum = getCheckNum(parityBlockNum)\n for i in range(n):\n strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i,\n nativeBlockNum, parityBlockNum, checkNum, enc_matrix))\n return strongMDSPropertyDegrees\n\n\ndef testStrongMDSProperty(strongMDSPropertyDegrees, checkNum, n):\n \"\"\"Decide whether the current parity coefficient set passes the strong MDS property.\"\"\"\n result = True\n threshold = 2 * (n - 1) * (n - 2) - (n - 2) * (n - 3) / 2\n for degree in strongMDSPropertyDegrees:\n if degree < threshold:\n result = False\n return result\n\n\ndef functionalRepair(n, k, src, blocknums, failedNode, parityCoeff,\n repairChunks, setting, metadata):\n \"\"\"Functional repair by generating new parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n checkNum = getCheckNum(parityBlockNum)\n enc_matrix = metadata.enc_matrix\n repairCodingCoeff = metadata.repairCodingCoeff\n indatalist = []\n for filepath in src:\n infile = open(filepath, 'rb')\n indatalist.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n if chunksize > 0:\n indatalist_temp = ''.join(indatalist)\n parityCoeff_temp = []\n for i in range(n - k):\n for j in range(n - 1):\n parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))\n parityCoeff_temp = ''.join(parityCoeff_temp)\n outdatalist = codings.clibfmsr.clibfmsr.repairComputation(\n indatalist_temp, parityCoeff_temp, n, k, chunksize)\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff[counter] = enc_matrix[i][j]\n counter += 1\n writelen = 1048576\n writenext = 0\n for i in range(metadata.totalnode):\n if setting.nodeInfo[i].healthy == False:\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i\n )\n filesize = metadata.fileNodeInfo[i].bigchunksize\n if chunksize <= 0:\n open(dest, 'wb').close()\n else:\n outfile = open(dest, 'wb')\n for j in range(0, filesize - writelen, writelen):\n writenext = j + writelen\n outfile.write(outdatalist[j:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n",
"step-2": "<mask token>\n\n\ndef getNativeBlockNum(n, k):\n \"\"\"Get number of native blocks.\"\"\"\n return k * (n - k)\n\n\ndef getParityBlockNum(n, k):\n \"\"\"Get number of parity blocks.\"\"\"\n return n * (n - k)\n\n\ndef getNodeIdList(n, k):\n \"\"\"Find the node id for a segment of blocks.\"\"\"\n \"\"\"Return a list of node id for the blocks.\"\"\"\n nodeidList = []\n segmentSize = n - k\n blockNum = getParityBlockNum(n, k)\n for i in range(int(blockNum / segmentSize)):\n for j in range(segmentSize):\n nodeidList.append(i)\n return nodeidList\n\n\n<mask token>\n\n\ndef encode(n, k, src, parityCoeff, setting, metadata):\n \"\"\"Encode src file to parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n infile = open(src, 'rb')\n indatalist = infile.read()\n infile.close()\n totalchunk = nativeBlockNum\n filesize = len(indatalist)\n for i in range(metadata.totalnode):\n fileNode = common.FileNodeMetadata(i)\n fileNode.nodekey = setting.nodeInfo[i].nodekey\n fileNode.nodetype = setting.nodeInfo[i].nodetype\n fileNode.bucketname = setting.nodeInfo[i].bucketname\n fileNode.bigchunksize = 0\n fileNode.chunknum = 0\n metadata.fileNodeInfo.append(fileNode)\n if filesize > 0:\n chunksize = filesize / totalchunk + 1\n indatalist += '\\x00' * (chunksize * totalchunk - filesize)\n parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])\n outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)\n else:\n chunksize = 0\n nodeIdList = getNodeIdList(n, k)\n for i in range(parityBlockNum):\n chunk = common.ChunkMetadata(i)\n chunk.chunkname = metadata.filename + '.chunk' + str(i)\n chunk.chunksize = chunksize\n chunk.chunktype = 'parity'\n chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname\n nodeid = nodeIdList[i]\n chunk.nodeid = nodeid\n chunk.nodekey = setting.nodeInfo[nodeid].nodekey\n chunk.nodetype = setting.nodeInfo[nodeid].nodetype\n chunk.bucketname = setting.nodeInfo[nodeid].bucketname\n chunk.action = 'upload'\n chunk.position = metadata.fileNodeInfo[nodeid].chunknum\n metadata.chunkInfo.append(chunk)\n metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize\n metadata.fileNodeInfo[nodeid].chunknum += 1\n metadata.totalchunk = parityBlockNum\n metadata.parityCoeff = parityCoeff[:]\n startchunk = 0\n writelen = 1048576\n for i in range(metadata.totalnode):\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n if chunksize > 0:\n f = open(dest, 'wb')\n numchunks = nodeIdList.count(i)\n writenext = startchunk * chunksize\n for j in range(startchunk * chunksize, (startchunk + numchunks) *\n chunksize - writelen, writelen):\n writenext = j + writelen\n f.write(outdatalist[j:writenext])\n f.write(outdatalist[writenext:(startchunk + numchunks) * chunksize]\n )\n f.close()\n startchunk += numchunks\n else:\n open(dest, 'wb').close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n\ndef reversematrix(n, k, gj_matrix):\n \"\"\"Reverse matrix.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n for rowNo in range(nativeBlockNum):\n A = GF256int(0)\n for i in range(rowNo, nativeBlockNum, 1):\n if gj_matrix[i][rowNo] != 0:\n A = gj_matrix[i][rowNo]\n break\n temp_vector = [GF256int(0)] * (nativeBlockNum * 2)\n if i != rowNo:\n for j in range(nativeBlockNum * 
2):\n temp_vector[j] = gj_matrix[i][j]\n gj_matrix[i][j] = gj_matrix[rowNo][j]\n gj_matrix[rowNo][j] = temp_vector[j]\n for m in range(nativeBlockNum * 2):\n gj_matrix[rowNo][m] = gj_matrix[rowNo][m] / A\n for j in range(rowNo + 1, nativeBlockNum, 1):\n B = gj_matrix[j][rowNo]\n for m in range(rowNo, nativeBlockNum * 2, 1):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * B\n for rowNo in range(nativeBlockNum - 1, 0, -1):\n for j in range(0, rowNo, 1):\n C = gj_matrix[j][rowNo]\n for m in range(nativeBlockNum * 2):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * C\n\n\ndef decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):\n \"\"\"Decode chunk files to dest file.\"\"\"\n if filesize <= 0:\n open(dest, 'wb').close()\n return\n cv_temp = []\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(parityBlockNum)]\n dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n gj_matrix = [[GF256int(0) for col in range(nativeBlockNum * 2)] for row in\n range(nativeBlockNum)]\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n enc_matrix[i][j] = GF256int(parityCoeff[counter])\n counter += 1\n cm1 = CoeffMatrix(nativeBlockNum)\n for i in range(parityBlockNum):\n cv_temp.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n cv_temp[i].coeff_[j] = enc_matrix[i][j]\n cv_temp[i].first()\n cm1.addcoeffvector(cv_temp[i])\n i = 0\n for selectChunkNo in blocknums:\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = enc_matrix[selectChunkNo][j]\n i += 1\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n if j == i:\n rev_matrix[i][j] = GF256int(1)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum * 2):\n if j < nativeBlockNum:\n gj_matrix[i][j] = dec_matrix[i][j]\n else:\n gj_matrix[i][j] = rev_matrix[i][j - nativeBlockNum]\n reversematrix(n, k, gj_matrix)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = gj_matrix[i][j + nativeBlockNum]\n selectchunk = []\n for filename in src:\n infile = open(filename, 'rb')\n selectchunk.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n indatalist = ''.join(selectchunk)\n parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) for i in range(\n nativeBlockNum) for j in range(nativeBlockNum)])\n outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, chunksize)\n outfile = open(dest, 'wb')\n writelen = 1048576\n writenext = 0\n for i in range(0, filesize - writelen, writelen):\n writenext = i + writelen\n outfile.write(outdatalist[i:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n\n\ndef getCheckNum(parityBlockNum):\n \"\"\"Get check number for checking strong MDS, for fmsr(k=n-2) only.\"\"\"\n return int((parityBlockNum - 2) * (parityBlockNum - 2 - 1) / 2 - (\n parityBlockNum / 2 - 1))\n\n\ndef getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum,\n checkNum, enc_matrix):\n \"\"\"Get strong MDS property degree.\"\"\"\n currentStrongMDSPropertyDegree = 0\n survivalcoeffvectorset = []\n flag = 0\n for i in range(parityBlockNum):\n if int(i / 2) != repairNodeno:\n survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))\n for j in 
range(nativeBlockNum):\n survivalcoeffvectorset[i - flag * 2].coeff_[j] = enc_matrix[i][\n j]\n survivalcoeffvectorset[i - flag * 2].first()\n else:\n flag = 1\n s = 0\n for i in range(parityBlockNum - 2):\n for j in range(parityBlockNum - 2):\n if i < j:\n checkmatrix = CoeffMatrix(nativeBlockNum)\n for k in range(parityBlockNum - 2):\n if k != i and k != j:\n checkmatrix.addcoeffvector(survivalcoeffvectorset[k\n ].copy())\n if checkmatrix.rank_ == nativeBlockNum:\n currentStrongMDSPropertyDegree += 1\n s += 1\n return currentStrongMDSPropertyDegree\n\n\ndef checkMDS(MSR_n, MSR_k, enc_matrix):\n \"\"\"Check MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return a MDS property value.\"\"\"\n nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)\n parityBlockNum = getParityBlockNum(MSR_n, MSR_k)\n MDSpropery = True\n allcoeffvectors = []\n for i in range(parityBlockNum):\n allcoeffvectors.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n allcoeffvectors[i].coeff_[j] = enc_matrix[i][j]\n allcoeffvectors[i].first()\n permutation = int(MSR_n * (MSR_n - 1) / 2)\n checkmatrix = [CoeffMatrix(nativeBlockNum) for col in range(permutation)]\n s = 0\n for i in range(MSR_n):\n for j in range(MSR_n):\n if i < j:\n for b in range(MSR_n):\n if b != i and b != j:\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2\n ].copy())\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2 +\n 1].copy())\n if checkmatrix[s].rank_ != nativeBlockNum:\n MDSpropery = False\n s += 1\n return MDSpropery\n\n\ndef checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):\n \"\"\"Check strong MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return list of MDS property degrees.\"\"\"\n strongMDSPropertyDegrees = []\n checkNum = getCheckNum(parityBlockNum)\n for i in range(n):\n strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i,\n nativeBlockNum, parityBlockNum, checkNum, enc_matrix))\n return strongMDSPropertyDegrees\n\n\ndef testStrongMDSProperty(strongMDSPropertyDegrees, checkNum, n):\n \"\"\"Decide whether the current parity coefficient set passes the strong MDS property.\"\"\"\n result = True\n threshold = 2 * (n - 1) * (n - 2) - (n - 2) * (n - 3) / 2\n for degree in strongMDSPropertyDegrees:\n if degree < threshold:\n result = False\n return result\n\n\ndef functionalRepair(n, k, src, blocknums, failedNode, parityCoeff,\n repairChunks, setting, metadata):\n \"\"\"Functional repair by generating new parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n checkNum = getCheckNum(parityBlockNum)\n enc_matrix = metadata.enc_matrix\n repairCodingCoeff = metadata.repairCodingCoeff\n indatalist = []\n for filepath in src:\n infile = open(filepath, 'rb')\n indatalist.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n if chunksize > 0:\n indatalist_temp = ''.join(indatalist)\n parityCoeff_temp = []\n for i in range(n - k):\n for j in range(n - 1):\n parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))\n parityCoeff_temp = ''.join(parityCoeff_temp)\n outdatalist = codings.clibfmsr.clibfmsr.repairComputation(\n indatalist_temp, parityCoeff_temp, n, k, chunksize)\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff[counter] = enc_matrix[i][j]\n counter += 1\n writelen = 1048576\n writenext = 0\n for i in range(metadata.totalnode):\n if setting.nodeInfo[i].healthy == False:\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i\n )\n filesize = 
metadata.fileNodeInfo[i].bigchunksize\n if chunksize <= 0:\n open(dest, 'wb').close()\n else:\n outfile = open(dest, 'wb')\n for j in range(0, filesize - writelen, writelen):\n writenext = j + writelen\n outfile.write(outdatalist[j:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n",
"step-3": "<mask token>\n\n\ndef getNativeBlockNum(n, k):\n \"\"\"Get number of native blocks.\"\"\"\n return k * (n - k)\n\n\ndef getParityBlockNum(n, k):\n \"\"\"Get number of parity blocks.\"\"\"\n return n * (n - k)\n\n\ndef getNodeIdList(n, k):\n \"\"\"Find the node id for a segment of blocks.\"\"\"\n \"\"\"Return a list of node id for the blocks.\"\"\"\n nodeidList = []\n segmentSize = n - k\n blockNum = getParityBlockNum(n, k)\n for i in range(int(blockNum / segmentSize)):\n for j in range(segmentSize):\n nodeidList.append(i)\n return nodeidList\n\n\ndef getParityCoeff(n, k):\n \"\"\"Get the parity coefficients of the blocks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n parityCoeff = []\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff.append(GF256int(i + 1) ** j)\n return parityCoeff\n\n\ndef encode(n, k, src, parityCoeff, setting, metadata):\n \"\"\"Encode src file to parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n infile = open(src, 'rb')\n indatalist = infile.read()\n infile.close()\n totalchunk = nativeBlockNum\n filesize = len(indatalist)\n for i in range(metadata.totalnode):\n fileNode = common.FileNodeMetadata(i)\n fileNode.nodekey = setting.nodeInfo[i].nodekey\n fileNode.nodetype = setting.nodeInfo[i].nodetype\n fileNode.bucketname = setting.nodeInfo[i].bucketname\n fileNode.bigchunksize = 0\n fileNode.chunknum = 0\n metadata.fileNodeInfo.append(fileNode)\n if filesize > 0:\n chunksize = filesize / totalchunk + 1\n indatalist += '\\x00' * (chunksize * totalchunk - filesize)\n parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])\n outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)\n else:\n chunksize = 0\n nodeIdList = getNodeIdList(n, k)\n for i in range(parityBlockNum):\n chunk = common.ChunkMetadata(i)\n chunk.chunkname = metadata.filename + '.chunk' + str(i)\n chunk.chunksize = chunksize\n chunk.chunktype = 'parity'\n chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname\n nodeid = nodeIdList[i]\n chunk.nodeid = nodeid\n chunk.nodekey = setting.nodeInfo[nodeid].nodekey\n chunk.nodetype = setting.nodeInfo[nodeid].nodetype\n chunk.bucketname = setting.nodeInfo[nodeid].bucketname\n chunk.action = 'upload'\n chunk.position = metadata.fileNodeInfo[nodeid].chunknum\n metadata.chunkInfo.append(chunk)\n metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize\n metadata.fileNodeInfo[nodeid].chunknum += 1\n metadata.totalchunk = parityBlockNum\n metadata.parityCoeff = parityCoeff[:]\n startchunk = 0\n writelen = 1048576\n for i in range(metadata.totalnode):\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n if chunksize > 0:\n f = open(dest, 'wb')\n numchunks = nodeIdList.count(i)\n writenext = startchunk * chunksize\n for j in range(startchunk * chunksize, (startchunk + numchunks) *\n chunksize - writelen, writelen):\n writenext = j + writelen\n f.write(outdatalist[j:writenext])\n f.write(outdatalist[writenext:(startchunk + numchunks) * chunksize]\n )\n f.close()\n startchunk += numchunks\n else:\n open(dest, 'wb').close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n\ndef reversematrix(n, k, gj_matrix):\n \"\"\"Reverse matrix.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n 
parityBlockNum = getParityBlockNum(n, k)\n for rowNo in range(nativeBlockNum):\n A = GF256int(0)\n for i in range(rowNo, nativeBlockNum, 1):\n if gj_matrix[i][rowNo] != 0:\n A = gj_matrix[i][rowNo]\n break\n temp_vector = [GF256int(0)] * (nativeBlockNum * 2)\n if i != rowNo:\n for j in range(nativeBlockNum * 2):\n temp_vector[j] = gj_matrix[i][j]\n gj_matrix[i][j] = gj_matrix[rowNo][j]\n gj_matrix[rowNo][j] = temp_vector[j]\n for m in range(nativeBlockNum * 2):\n gj_matrix[rowNo][m] = gj_matrix[rowNo][m] / A\n for j in range(rowNo + 1, nativeBlockNum, 1):\n B = gj_matrix[j][rowNo]\n for m in range(rowNo, nativeBlockNum * 2, 1):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * B\n for rowNo in range(nativeBlockNum - 1, 0, -1):\n for j in range(0, rowNo, 1):\n C = gj_matrix[j][rowNo]\n for m in range(nativeBlockNum * 2):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * C\n\n\ndef decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):\n \"\"\"Decode chunk files to dest file.\"\"\"\n if filesize <= 0:\n open(dest, 'wb').close()\n return\n cv_temp = []\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(parityBlockNum)]\n dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n gj_matrix = [[GF256int(0) for col in range(nativeBlockNum * 2)] for row in\n range(nativeBlockNum)]\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n enc_matrix[i][j] = GF256int(parityCoeff[counter])\n counter += 1\n cm1 = CoeffMatrix(nativeBlockNum)\n for i in range(parityBlockNum):\n cv_temp.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n cv_temp[i].coeff_[j] = enc_matrix[i][j]\n cv_temp[i].first()\n cm1.addcoeffvector(cv_temp[i])\n i = 0\n for selectChunkNo in blocknums:\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = enc_matrix[selectChunkNo][j]\n i += 1\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n if j == i:\n rev_matrix[i][j] = GF256int(1)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum * 2):\n if j < nativeBlockNum:\n gj_matrix[i][j] = dec_matrix[i][j]\n else:\n gj_matrix[i][j] = rev_matrix[i][j - nativeBlockNum]\n reversematrix(n, k, gj_matrix)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = gj_matrix[i][j + nativeBlockNum]\n selectchunk = []\n for filename in src:\n infile = open(filename, 'rb')\n selectchunk.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n indatalist = ''.join(selectchunk)\n parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) for i in range(\n nativeBlockNum) for j in range(nativeBlockNum)])\n outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, chunksize)\n outfile = open(dest, 'wb')\n writelen = 1048576\n writenext = 0\n for i in range(0, filesize - writelen, writelen):\n writenext = i + writelen\n outfile.write(outdatalist[i:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n\n\ndef getCheckNum(parityBlockNum):\n \"\"\"Get check number for checking strong MDS, for fmsr(k=n-2) only.\"\"\"\n return int((parityBlockNum - 2) * (parityBlockNum - 2 - 1) / 2 - (\n parityBlockNum / 2 - 1))\n\n\ndef getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, 
parityBlockNum,\n checkNum, enc_matrix):\n \"\"\"Get strong MDS property degree.\"\"\"\n currentStrongMDSPropertyDegree = 0\n survivalcoeffvectorset = []\n flag = 0\n for i in range(parityBlockNum):\n if int(i / 2) != repairNodeno:\n survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n survivalcoeffvectorset[i - flag * 2].coeff_[j] = enc_matrix[i][\n j]\n survivalcoeffvectorset[i - flag * 2].first()\n else:\n flag = 1\n s = 0\n for i in range(parityBlockNum - 2):\n for j in range(parityBlockNum - 2):\n if i < j:\n checkmatrix = CoeffMatrix(nativeBlockNum)\n for k in range(parityBlockNum - 2):\n if k != i and k != j:\n checkmatrix.addcoeffvector(survivalcoeffvectorset[k\n ].copy())\n if checkmatrix.rank_ == nativeBlockNum:\n currentStrongMDSPropertyDegree += 1\n s += 1\n return currentStrongMDSPropertyDegree\n\n\ndef checkMDS(MSR_n, MSR_k, enc_matrix):\n \"\"\"Check MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return a MDS property value.\"\"\"\n nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)\n parityBlockNum = getParityBlockNum(MSR_n, MSR_k)\n MDSpropery = True\n allcoeffvectors = []\n for i in range(parityBlockNum):\n allcoeffvectors.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n allcoeffvectors[i].coeff_[j] = enc_matrix[i][j]\n allcoeffvectors[i].first()\n permutation = int(MSR_n * (MSR_n - 1) / 2)\n checkmatrix = [CoeffMatrix(nativeBlockNum) for col in range(permutation)]\n s = 0\n for i in range(MSR_n):\n for j in range(MSR_n):\n if i < j:\n for b in range(MSR_n):\n if b != i and b != j:\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2\n ].copy())\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2 +\n 1].copy())\n if checkmatrix[s].rank_ != nativeBlockNum:\n MDSpropery = False\n s += 1\n return MDSpropery\n\n\ndef checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):\n \"\"\"Check strong MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return list of MDS property degrees.\"\"\"\n strongMDSPropertyDegrees = []\n checkNum = getCheckNum(parityBlockNum)\n for i in range(n):\n strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i,\n nativeBlockNum, parityBlockNum, checkNum, enc_matrix))\n return strongMDSPropertyDegrees\n\n\ndef testStrongMDSProperty(strongMDSPropertyDegrees, checkNum, n):\n \"\"\"Decide whether the current parity coefficient set passes the strong MDS property.\"\"\"\n result = True\n threshold = 2 * (n - 1) * (n - 2) - (n - 2) * (n - 3) / 2\n for degree in strongMDSPropertyDegrees:\n if degree < threshold:\n result = False\n return result\n\n\ndef functionalRepair(n, k, src, blocknums, failedNode, parityCoeff,\n repairChunks, setting, metadata):\n \"\"\"Functional repair by generating new parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n checkNum = getCheckNum(parityBlockNum)\n enc_matrix = metadata.enc_matrix\n repairCodingCoeff = metadata.repairCodingCoeff\n indatalist = []\n for filepath in src:\n infile = open(filepath, 'rb')\n indatalist.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n if chunksize > 0:\n indatalist_temp = ''.join(indatalist)\n parityCoeff_temp = []\n for i in range(n - k):\n for j in range(n - 1):\n parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))\n parityCoeff_temp = ''.join(parityCoeff_temp)\n outdatalist = codings.clibfmsr.clibfmsr.repairComputation(\n indatalist_temp, parityCoeff_temp, n, k, chunksize)\n counter = 0\n for i in range(parityBlockNum):\n for 
j in range(nativeBlockNum):\n parityCoeff[counter] = enc_matrix[i][j]\n counter += 1\n writelen = 1048576\n writenext = 0\n for i in range(metadata.totalnode):\n if setting.nodeInfo[i].healthy == False:\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i\n )\n filesize = metadata.fileNodeInfo[i].bigchunksize\n if chunksize <= 0:\n open(dest, 'wb').close()\n else:\n outfile = open(dest, 'wb')\n for j in range(0, filesize - writelen, writelen):\n writenext = j + writelen\n outfile.write(outdatalist[j:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n",
"step-4": "import sys\nimport os\nimport random\nfrom finitefield import GF256int\nfrom coeffvector import CoeffVector\nfrom coeffvector import CoeffMatrix\nimport common\nimport codings.clibfmsr.clibfmsr\nuseClibfmsr = True\n\n\ndef getNativeBlockNum(n, k):\n \"\"\"Get number of native blocks.\"\"\"\n return k * (n - k)\n\n\ndef getParityBlockNum(n, k):\n \"\"\"Get number of parity blocks.\"\"\"\n return n * (n - k)\n\n\ndef getNodeIdList(n, k):\n \"\"\"Find the node id for a segment of blocks.\"\"\"\n \"\"\"Return a list of node id for the blocks.\"\"\"\n nodeidList = []\n segmentSize = n - k\n blockNum = getParityBlockNum(n, k)\n for i in range(int(blockNum / segmentSize)):\n for j in range(segmentSize):\n nodeidList.append(i)\n return nodeidList\n\n\ndef getParityCoeff(n, k):\n \"\"\"Get the parity coefficients of the blocks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n parityCoeff = []\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff.append(GF256int(i + 1) ** j)\n return parityCoeff\n\n\ndef encode(n, k, src, parityCoeff, setting, metadata):\n \"\"\"Encode src file to parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n infile = open(src, 'rb')\n indatalist = infile.read()\n infile.close()\n totalchunk = nativeBlockNum\n filesize = len(indatalist)\n for i in range(metadata.totalnode):\n fileNode = common.FileNodeMetadata(i)\n fileNode.nodekey = setting.nodeInfo[i].nodekey\n fileNode.nodetype = setting.nodeInfo[i].nodetype\n fileNode.bucketname = setting.nodeInfo[i].bucketname\n fileNode.bigchunksize = 0\n fileNode.chunknum = 0\n metadata.fileNodeInfo.append(fileNode)\n if filesize > 0:\n chunksize = filesize / totalchunk + 1\n indatalist += '\\x00' * (chunksize * totalchunk - filesize)\n parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])\n outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)\n else:\n chunksize = 0\n nodeIdList = getNodeIdList(n, k)\n for i in range(parityBlockNum):\n chunk = common.ChunkMetadata(i)\n chunk.chunkname = metadata.filename + '.chunk' + str(i)\n chunk.chunksize = chunksize\n chunk.chunktype = 'parity'\n chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname\n nodeid = nodeIdList[i]\n chunk.nodeid = nodeid\n chunk.nodekey = setting.nodeInfo[nodeid].nodekey\n chunk.nodetype = setting.nodeInfo[nodeid].nodetype\n chunk.bucketname = setting.nodeInfo[nodeid].bucketname\n chunk.action = 'upload'\n chunk.position = metadata.fileNodeInfo[nodeid].chunknum\n metadata.chunkInfo.append(chunk)\n metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize\n metadata.fileNodeInfo[nodeid].chunknum += 1\n metadata.totalchunk = parityBlockNum\n metadata.parityCoeff = parityCoeff[:]\n startchunk = 0\n writelen = 1048576\n for i in range(metadata.totalnode):\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n if chunksize > 0:\n f = open(dest, 'wb')\n numchunks = nodeIdList.count(i)\n writenext = startchunk * chunksize\n for j in range(startchunk * chunksize, (startchunk + numchunks) *\n chunksize - writelen, writelen):\n writenext = j + writelen\n f.write(outdatalist[j:writenext])\n f.write(outdatalist[writenext:(startchunk + numchunks) * chunksize]\n )\n f.close()\n startchunk += numchunks\n else:\n open(dest, 'wb').close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = 
metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n\ndef reversematrix(n, k, gj_matrix):\n \"\"\"Reverse matrix.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n for rowNo in range(nativeBlockNum):\n A = GF256int(0)\n for i in range(rowNo, nativeBlockNum, 1):\n if gj_matrix[i][rowNo] != 0:\n A = gj_matrix[i][rowNo]\n break\n temp_vector = [GF256int(0)] * (nativeBlockNum * 2)\n if i != rowNo:\n for j in range(nativeBlockNum * 2):\n temp_vector[j] = gj_matrix[i][j]\n gj_matrix[i][j] = gj_matrix[rowNo][j]\n gj_matrix[rowNo][j] = temp_vector[j]\n for m in range(nativeBlockNum * 2):\n gj_matrix[rowNo][m] = gj_matrix[rowNo][m] / A\n for j in range(rowNo + 1, nativeBlockNum, 1):\n B = gj_matrix[j][rowNo]\n for m in range(rowNo, nativeBlockNum * 2, 1):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * B\n for rowNo in range(nativeBlockNum - 1, 0, -1):\n for j in range(0, rowNo, 1):\n C = gj_matrix[j][rowNo]\n for m in range(nativeBlockNum * 2):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m] * C\n\n\ndef decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):\n \"\"\"Decode chunk files to dest file.\"\"\"\n if filesize <= 0:\n open(dest, 'wb').close()\n return\n cv_temp = []\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(parityBlockNum)]\n dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in\n range(nativeBlockNum)]\n gj_matrix = [[GF256int(0) for col in range(nativeBlockNum * 2)] for row in\n range(nativeBlockNum)]\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n enc_matrix[i][j] = GF256int(parityCoeff[counter])\n counter += 1\n cm1 = CoeffMatrix(nativeBlockNum)\n for i in range(parityBlockNum):\n cv_temp.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n cv_temp[i].coeff_[j] = enc_matrix[i][j]\n cv_temp[i].first()\n cm1.addcoeffvector(cv_temp[i])\n i = 0\n for selectChunkNo in blocknums:\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = enc_matrix[selectChunkNo][j]\n i += 1\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n if j == i:\n rev_matrix[i][j] = GF256int(1)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum * 2):\n if j < nativeBlockNum:\n gj_matrix[i][j] = dec_matrix[i][j]\n else:\n gj_matrix[i][j] = rev_matrix[i][j - nativeBlockNum]\n reversematrix(n, k, gj_matrix)\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = gj_matrix[i][j + nativeBlockNum]\n selectchunk = []\n for filename in src:\n infile = open(filename, 'rb')\n selectchunk.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n indatalist = ''.join(selectchunk)\n parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) for i in range(\n nativeBlockNum) for j in range(nativeBlockNum)])\n outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist,\n parityCoeff_temp, nativeBlockNum, chunksize)\n outfile = open(dest, 'wb')\n writelen = 1048576\n writenext = 0\n for i in range(0, filesize - writelen, writelen):\n writenext = i + writelen\n outfile.write(outdatalist[i:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n\n\ndef getCheckNum(parityBlockNum):\n \"\"\"Get check number for checking strong 
MDS, for fmsr(k=n-2) only.\"\"\"\n return int((parityBlockNum - 2) * (parityBlockNum - 2 - 1) / 2 - (\n parityBlockNum / 2 - 1))\n\n\ndef getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum,\n checkNum, enc_matrix):\n \"\"\"Get strong MDS property degree.\"\"\"\n currentStrongMDSPropertyDegree = 0\n survivalcoeffvectorset = []\n flag = 0\n for i in range(parityBlockNum):\n if int(i / 2) != repairNodeno:\n survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n survivalcoeffvectorset[i - flag * 2].coeff_[j] = enc_matrix[i][\n j]\n survivalcoeffvectorset[i - flag * 2].first()\n else:\n flag = 1\n s = 0\n for i in range(parityBlockNum - 2):\n for j in range(parityBlockNum - 2):\n if i < j:\n checkmatrix = CoeffMatrix(nativeBlockNum)\n for k in range(parityBlockNum - 2):\n if k != i and k != j:\n checkmatrix.addcoeffvector(survivalcoeffvectorset[k\n ].copy())\n if checkmatrix.rank_ == nativeBlockNum:\n currentStrongMDSPropertyDegree += 1\n s += 1\n return currentStrongMDSPropertyDegree\n\n\ndef checkMDS(MSR_n, MSR_k, enc_matrix):\n \"\"\"Check MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return a MDS property value.\"\"\"\n nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)\n parityBlockNum = getParityBlockNum(MSR_n, MSR_k)\n MDSpropery = True\n allcoeffvectors = []\n for i in range(parityBlockNum):\n allcoeffvectors.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n allcoeffvectors[i].coeff_[j] = enc_matrix[i][j]\n allcoeffvectors[i].first()\n permutation = int(MSR_n * (MSR_n - 1) / 2)\n checkmatrix = [CoeffMatrix(nativeBlockNum) for col in range(permutation)]\n s = 0\n for i in range(MSR_n):\n for j in range(MSR_n):\n if i < j:\n for b in range(MSR_n):\n if b != i and b != j:\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2\n ].copy())\n checkmatrix[s].addcoeffvector(allcoeffvectors[b * 2 +\n 1].copy())\n if checkmatrix[s].rank_ != nativeBlockNum:\n MDSpropery = False\n s += 1\n return MDSpropery\n\n\ndef checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):\n \"\"\"Check strong MDS property, for fmsr(k=n-2) only.\"\"\"\n \"\"\"Return list of MDS property degrees.\"\"\"\n strongMDSPropertyDegrees = []\n checkNum = getCheckNum(parityBlockNum)\n for i in range(n):\n strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i,\n nativeBlockNum, parityBlockNum, checkNum, enc_matrix))\n return strongMDSPropertyDegrees\n\n\ndef testStrongMDSProperty(strongMDSPropertyDegrees, checkNum, n):\n \"\"\"Decide whether the current parity coefficient set passes the strong MDS property.\"\"\"\n result = True\n threshold = 2 * (n - 1) * (n - 2) - (n - 2) * (n - 3) / 2\n for degree in strongMDSPropertyDegrees:\n if degree < threshold:\n result = False\n return result\n\n\ndef functionalRepair(n, k, src, blocknums, failedNode, parityCoeff,\n repairChunks, setting, metadata):\n \"\"\"Functional repair by generating new parity chunks.\"\"\"\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n checkNum = getCheckNum(parityBlockNum)\n enc_matrix = metadata.enc_matrix\n repairCodingCoeff = metadata.repairCodingCoeff\n indatalist = []\n for filepath in src:\n infile = open(filepath, 'rb')\n indatalist.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n if chunksize > 0:\n indatalist_temp = ''.join(indatalist)\n parityCoeff_temp = []\n for i in range(n - k):\n for j in range(n - 1):\n parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))\n parityCoeff_temp = 
''.join(parityCoeff_temp)\n outdatalist = codings.clibfmsr.clibfmsr.repairComputation(\n indatalist_temp, parityCoeff_temp, n, k, chunksize)\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff[counter] = enc_matrix[i][j]\n counter += 1\n writelen = 1048576\n writenext = 0\n for i in range(metadata.totalnode):\n if setting.nodeInfo[i].healthy == False:\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i\n )\n filesize = metadata.fileNodeInfo[i].bigchunksize\n if chunksize <= 0:\n open(dest, 'wb').close()\n else:\n outfile = open(dest, 'wb')\n for j in range(0, filesize - writelen, writelen):\n writenext = j + writelen\n outfile.write(outdatalist[j:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i\n ].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n",
"step-5": "#!/usr/bin/python\n#\n# @name = 'fmsrutil.py'\n# \n# @description = \"F-MSR utilities module.\"\n#\n# @author = ['YU Chiu Man', 'HU Yuchong', 'TANG Yang']\n#\n\nimport sys\nimport os\nimport random\n\nfrom finitefield import GF256int\nfrom coeffvector import CoeffVector\nfrom coeffvector import CoeffMatrix\n\nimport common\n\n#Check if C library of F-MSR is installed:\nimport codings.clibfmsr.clibfmsr\nuseClibfmsr = True\n\n\ndef getNativeBlockNum(n, k):\n '''Get number of native blocks.'''\n return k*(n-k)\n\n\ndef getParityBlockNum(n, k):\n '''Get number of parity blocks.'''\n return n*(n-k)\n\n\ndef getNodeIdList(n, k):\n '''Find the node id for a segment of blocks.'''\n '''Return a list of node id for the blocks.'''\n nodeidList = []\n segmentSize = n-k\n blockNum = getParityBlockNum(n, k)\n for i in range(int(blockNum/segmentSize)):\n for j in range(segmentSize):\n nodeidList.append(i)\n return nodeidList\n\n\ndef getParityCoeff(n, k):\n '''Get the parity coefficients of the blocks.'''\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n parityCoeff = []\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff.append(GF256int(i+1)**j)\n return parityCoeff\n\n\ndef encode(n, k, src, parityCoeff, setting, metadata):\n '''Encode src file to parity chunks.'''\n\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n\n infile = open(src, 'rb')\n indatalist = infile.read()\n infile.close()\n totalchunk = nativeBlockNum\n filesize = len(indatalist)\n\n #Generate info for big-chunk:\n for i in range(metadata.totalnode):\n fileNode = common.FileNodeMetadata(i)\n fileNode.nodekey = setting.nodeInfo[i].nodekey\n fileNode.nodetype = setting.nodeInfo[i].nodetype\n fileNode.bucketname = setting.nodeInfo[i].bucketname\n fileNode.bigchunksize = 0\n fileNode.chunknum = 0\n metadata.fileNodeInfo.append(fileNode)\n\n #Encode indatalist to outdatalist\n if filesize > 0:\n chunksize = filesize/totalchunk + 1\n indatalist += '\\0'*(chunksize*totalchunk - filesize)\n parityCoeff_temp = ''.join([chr(parity) for parity in parityCoeff])\n outdatalist = codings.clibfmsr.clibfmsr.encodeComputation(indatalist, \\\n parityCoeff_temp, nativeBlockNum, parityBlockNum, chunksize)\n else:\n chunksize = 0\n\n #Generate info for small chunks:\n nodeIdList = getNodeIdList(n, k)\n for i in range(parityBlockNum):\n chunk = common.ChunkMetadata(i)\n chunk.chunkname = metadata.filename + '.chunk' + str(i)\n chunk.chunksize = chunksize\n chunk.chunktype = 'parity'\n chunk.chunkpath = setting.chunkdir + '/' + chunk.chunkname\n nodeid = nodeIdList[i]\n chunk.nodeid = nodeid\n chunk.nodekey = setting.nodeInfo[nodeid].nodekey\n chunk.nodetype = setting.nodeInfo[nodeid].nodetype\n chunk.bucketname = setting.nodeInfo[nodeid].bucketname\n chunk.action = 'upload'\n #Add chunk position inside big-chunk:\n chunk.position = metadata.fileNodeInfo[nodeid].chunknum\n metadata.chunkInfo.append(chunk)\n #Add support for big-chunk:\n metadata.fileNodeInfo[nodeid].bigchunksize += chunk.chunksize\n metadata.fileNodeInfo[nodeid].chunknum += 1\n metadata.totalchunk = parityBlockNum\n metadata.parityCoeff = parityCoeff[:]\n\n #Generate big-chunks:\n startchunk = 0\n writelen = 1048576\n for i in range(metadata.totalnode):\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n if chunksize > 0:\n f = open(dest, 'wb')\n numchunks = nodeIdList.count(i)\n writenext = startchunk*chunksize\n for j in range(startchunk*chunksize, 
(startchunk+numchunks)*chunksize-writelen, writelen):\n writenext = j+writelen\n f.write(outdatalist[j:writenext])\n f.write(outdatalist[writenext:(startchunk+numchunks)*chunksize])\n f.close()\n startchunk += numchunks\n else:\n open(dest, 'wb').close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n\ndef reversematrix(n, k, gj_matrix):\n '''Reverse matrix.'''\n\n ## The first elimination: decoding matrix -> lower unit triangular matrix\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n\n for rowNo in range(nativeBlockNum): \n ##1.find the rowNo row vector with 1st-coeff of valve non-zero \n A = GF256int(0) \n for i in range(rowNo,nativeBlockNum,1):\n if gj_matrix[i][rowNo]!=0:\n A = gj_matrix[i][rowNo]\n break\n\n ##2. permutation between the rowNo row vector and the ith row vector\n temp_vector = [GF256int(0)]*(nativeBlockNum*2)\n\n if i!= rowNo:\n for j in range(nativeBlockNum*2):\n temp_vector[j] = gj_matrix[i][j] \n gj_matrix[i][j] = gj_matrix[rowNo][j]\n gj_matrix[rowNo][j] = temp_vector[j] \n ##3. in rowNo-th row vector, all the coeffs/1st coeff\n\n for m in range(nativeBlockNum*2):\n gj_matrix[rowNo][m] = gj_matrix[rowNo][m]/A \n\n ##4. The row vectors below rowNo-th row vector eliminate the rowNo-th coeff\n for j in range(rowNo+1,nativeBlockNum,1):\n B = gj_matrix[j][rowNo]\n for m in range(rowNo,nativeBlockNum*2,1):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*B\n\n # The second elimination: decoding matrix -> unit matrix\n ##5. The row vectors above rowNo-th row vector eliminate the rowNo-th coeff \n for rowNo in range(nativeBlockNum-1,0,-1): \n for j in range(0,rowNo,1):\n C = gj_matrix[j][rowNo]\n for m in range(nativeBlockNum*2):\n gj_matrix[j][m] = gj_matrix[j][m] - gj_matrix[rowNo][m]*C\n\n\ndef decode(n, k, src, blocknums, parityCoeff, dest, filesize, setting):\n '''Decode chunk files to dest file.'''\n\n ## special handling for 0B files\n if filesize <= 0:\n open(dest,'wb').close()\n return\n\n cv_temp=[]\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n enc_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(parityBlockNum)]\n dec_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]\n rev_matrix = [[GF256int(0) for col in range(nativeBlockNum)] for row in range(nativeBlockNum)]\n gj_matrix = [[GF256int(0) for col in range(nativeBlockNum*2)] for row in range(nativeBlockNum)]\n\n ## generate the encoding matrix\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n enc_matrix[i][j] = GF256int(parityCoeff[counter])\n counter += 1\n\n cm1 = CoeffMatrix(nativeBlockNum)\n for i in range(parityBlockNum):\n cv_temp.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n cv_temp[i].coeff_[j] = enc_matrix[i][j]\n cv_temp[i].first()\n cm1.addcoeffvector(cv_temp[i])\n\n ## generate the decoding matrix\n i=0\n for selectChunkNo in blocknums:\n for j in range(nativeBlockNum):\n dec_matrix[i][j]=enc_matrix[selectChunkNo][j]\n i += 1\n\n ## initialize the reverse matrix\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n if j==i:\n rev_matrix[i][j]= GF256int(1)\n\n ## initialize the Gauss-Jordan matrix = [decoding,reverse]\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum*2):\n if j<nativeBlockNum:\n gj_matrix[i][j]= dec_matrix[i][j]\n 
else:\n gj_matrix[i][j]= rev_matrix[i][j-nativeBlockNum]\n\n reversematrix(n, k, gj_matrix)\n\n for i in range(nativeBlockNum):\n for j in range(nativeBlockNum):\n dec_matrix[i][j] = gj_matrix[i][j+nativeBlockNum]\n\n ##generate decode data chunks\n selectchunk=[]\n for filename in src:\n infile = open(filename,'rb')\n selectchunk.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n indatalist = ''.join(selectchunk)\n\n ##rebuild the original chunks\n parityCoeff_temp = ''.join([chr(dec_matrix[i][j]) \\\n for i in range(nativeBlockNum) \\\n for j in range(nativeBlockNum)])\n outdatalist = codings.clibfmsr.clibfmsr.decodeComputation(indatalist, \\\n parityCoeff_temp, nativeBlockNum, chunksize)\n\n outfile = open(dest,'wb')\n writelen = 1048576\n writenext = 0\n for i in range(0,filesize-writelen,writelen):\n writenext = i+writelen\n outfile.write(outdatalist[i:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n\n\ndef getCheckNum(parityBlockNum):\n '''Get check number for checking strong MDS, for fmsr(k=n-2) only.'''\n return int((parityBlockNum-2)*(parityBlockNum-2-1)/2 - ((parityBlockNum/2)-1))\n\ndef getStrongMDSPropertyDegree(repairNodeno, nativeBlockNum, parityBlockNum, checkNum, enc_matrix):\n '''Get strong MDS property degree.'''\n\n currentStrongMDSPropertyDegree = 0\n survivalcoeffvectorset = []\n flag = 0\n for i in range(parityBlockNum):\n #get coeff vectors of survival parity blocks\n if int(i/2)!= repairNodeno:\n survivalcoeffvectorset.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n survivalcoeffvectorset[i - flag*2].coeff_[j] = enc_matrix[i][j]\n survivalcoeffvectorset[i - flag*2].first() \n else:\n flag =1\n\n s = 0\n for i in range(parityBlockNum-2):\n for j in range(parityBlockNum-2):\n if i<j:\n checkmatrix = CoeffMatrix(nativeBlockNum)\n for k in range (parityBlockNum-2):\n if k!=i and k!=j:\n checkmatrix.addcoeffvector(survivalcoeffvectorset[k].copy())\n if checkmatrix.rank_ == nativeBlockNum:\n currentStrongMDSPropertyDegree += 1\n s += 1\n return currentStrongMDSPropertyDegree\n\ndef checkMDS(MSR_n, MSR_k, enc_matrix):\n '''Check MDS property, for fmsr(k=n-2) only.'''\n '''Return a MDS property value.'''\n\n nativeBlockNum = getNativeBlockNum(MSR_n, MSR_k)\n parityBlockNum = getParityBlockNum(MSR_n, MSR_k)\n MDSpropery = True\n allcoeffvectors = []\n for i in range(parityBlockNum):\n allcoeffvectors.append(CoeffVector(nativeBlockNum))\n for j in range(nativeBlockNum):\n allcoeffvectors[i].coeff_[j] = enc_matrix[i][j]\n allcoeffvectors[i].first()\n permutation = int(MSR_n * (MSR_n - 1) / 2)\n #permutation of selecting n-2 nodes from n nodes\n checkmatrix = [CoeffMatrix(nativeBlockNum) for col in range(permutation)]\n s = 0\n for i in range (MSR_n):\n for j in range(MSR_n):\n if i<j:\n for b in range(MSR_n):\n if b !=i and b!=j:\n checkmatrix[s].addcoeffvector(allcoeffvectors[b*2].copy())\n checkmatrix[s].addcoeffvector(allcoeffvectors[b*2+1].copy())\n if checkmatrix[s].rank_ != nativeBlockNum:\n MDSpropery = False\n s += 1\n return MDSpropery\n\ndef checkstongMDS(n, k, nativeBlockNum, parityBlockNum, enc_matrix):\n '''Check strong MDS property, for fmsr(k=n-2) only.'''\n '''Return list of MDS property degrees.'''\n\n strongMDSPropertyDegrees = []\n #get check-combination number\n checkNum = getCheckNum(parityBlockNum)\n #Calculate total strong MDS property degree\n for i in range(n):\n strongMDSPropertyDegrees.append(getStrongMDSPropertyDegree(i, \\\n nativeBlockNum, parityBlockNum, checkNum, 
enc_matrix))\n return strongMDSPropertyDegrees\n\n\ndef testStrongMDSProperty(strongMDSPropertyDegrees, checkNum,n):\n '''Decide whether the current parity coefficient set passes the strong MDS property.'''\n\n result = True\n #threshold = checkNum\n threshold = 2*(n-1)*(n-2)-(n-2)*(n-3)/2\n #Important: currently the threshold value is hardcode\n for degree in strongMDSPropertyDegrees:\n if degree < threshold:\n result = False\n return result\n\n\ndef functionalRepair(n, k, src, blocknums, failedNode, parityCoeff, repairChunks, setting, metadata):\n '''Functional repair by generating new parity chunks.'''\n\n nativeBlockNum = getNativeBlockNum(n, k)\n parityBlockNum = getParityBlockNum(n, k)\n checkNum = getCheckNum(parityBlockNum)\n\n ## read the encoding matrix and repair\n enc_matrix = metadata.enc_matrix\n repairCodingCoeff = metadata.repairCodingCoeff\n\n indatalist = []\n for filepath in src:\n infile = open(filepath, 'rb')\n indatalist.append(infile.read())\n infile.close()\n chunksize = os.path.getsize(src[0])\n\n if chunksize > 0:\n #Repair computation:\n indatalist_temp = ''.join(indatalist)\n parityCoeff_temp = []\n for i in range(n-k):\n for j in range(n-1):\n parityCoeff_temp.append(chr(repairCodingCoeff[i][j]))\n parityCoeff_temp = ''.join(parityCoeff_temp)\n outdatalist = codings.clibfmsr.clibfmsr.repairComputation(indatalist_temp, \\\n parityCoeff_temp, n, k, chunksize)\n\n counter = 0\n for i in range(parityBlockNum):\n for j in range(nativeBlockNum):\n parityCoeff[counter] = enc_matrix[i][j]\n counter += 1\n\n #Add support for big-chunk:\n writelen = 1048576\n writenext = 0\n for i in range(metadata.totalnode):\n if setting.nodeInfo[i].healthy == False:\n dest = setting.chunkdir + '/' + metadata.filename + '.node' + str(i)\n filesize = metadata.fileNodeInfo[i].bigchunksize\n if chunksize <= 0:\n open(dest,'wb').close()\n else:\n outfile = open(dest, 'wb')\n for j in range(0,filesize-writelen,writelen):\n writenext = j+writelen\n outfile.write(outdatalist[j:writenext])\n outfile.write(outdatalist[writenext:filesize])\n outfile.close()\n metadata.fileNodeInfo[i].bigchunkpath = dest\n metadata.fileNodeInfo[i].bigchunkname = metadata.filename + '.node' + str(i)\n metadata.fileNodeInfo[i].action = 'upload'\n\n",
"step-ids": [
10,
12,
13,
15,
16
]
}
|
[
10,
12,
13,
15,
16
] |
botName = "firstBot"
username = "mrthemafia"
password = "oblivion"
client_id = "Y3LQwponbEp07w"
client_secret = "R4oyCEj6hSTJWHfWMwb-DGUOBm8"
|
normal
|
{
"blob_id": "3031f695d57492cf3b29694fecd0a41c469a3e00",
"index": 7481,
"step-1": "<mask token>\n",
"step-2": "botName = 'firstBot'\nusername = 'mrthemafia'\npassword = 'oblivion'\nclient_id = 'Y3LQwponbEp07w'\nclient_secret = 'R4oyCEj6hSTJWHfWMwb-DGUOBm8'\n",
"step-3": "botName = \"firstBot\"\nusername = \"mrthemafia\"\npassword = \"oblivion\"\nclient_id = \"Y3LQwponbEp07w\"\nclient_secret = \"R4oyCEj6hSTJWHfWMwb-DGUOBm8\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# orm/relationships.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Heuristics related to join conditions as used in
:func:`_orm.relationship`.
Provides the :class:`.JoinCondition` object, which encapsulates
SQL annotation and aliasing behavior focused on the `primaryjoin`
and `secondaryjoin` aspects of :func:`_orm.relationship`.
"""
from __future__ import annotations
import collections
from collections import abc
import dataclasses
import inspect as _py_inspect
import re
import typing
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import NoReturn
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import weakref
from . import attributes
from . import strategy_options
from ._typing import insp_is_aliased_class
from ._typing import is_has_collection_adapter
from .base import _DeclarativeMapped
from .base import _is_mapped_class
from .base import class_mapper
from .base import DynamicMapped
from .base import LoaderCallableStatus
from .base import PassiveFlag
from .base import state_str
from .base import WriteOnlyMapped
from .interfaces import _AttributeOptions
from .interfaces import _IntrospectsAnnotations
from .interfaces import MANYTOMANY
from .interfaces import MANYTOONE
from .interfaces import ONETOMANY
from .interfaces import PropComparator
from .interfaces import RelationshipDirection
from .interfaces import StrategizedProperty
from .util import _orm_annotate
from .util import _orm_deannotate
from .util import CascadeOptions
from .. import exc as sa_exc
from .. import Exists
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..inspection import inspect
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import visitors
from ..sql._typing import _ColumnExpressionArgument
from ..sql._typing import _HasClauseElement
from ..sql.annotation import _safe_annotate
from ..sql.elements import ColumnClause
from ..sql.elements import ColumnElement
from ..sql.util import _deep_annotate
from ..sql.util import _deep_deannotate
from ..sql.util import _shallow_annotate
from ..sql.util import adapt_criterion_to_null
from ..sql.util import ClauseAdapter
from ..sql.util import join_condition
from ..sql.util import selectables_overlap
from ..sql.util import visit_binary_product
from ..util.typing import de_optionalize_union_types
from ..util.typing import Literal
from ..util.typing import resolve_name_to_real_class_name
if typing.TYPE_CHECKING:
from ._typing import _EntityType
from ._typing import _ExternalEntityType
from ._typing import _IdentityKeyType
from ._typing import _InstanceDict
from ._typing import _InternalEntityType
from ._typing import _O
from ._typing import _RegistryType
from .base import Mapped
from .clsregistry import _class_resolver
from .clsregistry import _ModNS
from .decl_base import _ClassScanMapperConfig
from .dependency import DependencyProcessor
from .mapper import Mapper
from .query import Query
from .session import Session
from .state import InstanceState
from .strategies import LazyLoader
from .util import AliasedClass
from .util import AliasedInsp
from ..sql._typing import _CoreAdapterProto
from ..sql._typing import _EquivalentColumnMap
from ..sql._typing import _InfoType
from ..sql.annotation import _AnnotationDict
from ..sql.annotation import SupportsAnnotations
from ..sql.elements import BinaryExpression
from ..sql.elements import BindParameter
from ..sql.elements import ClauseElement
from ..sql.schema import Table
from ..sql.selectable import FromClause
from ..util.typing import _AnnotationScanType
from ..util.typing import RODescriptorReference
_T = TypeVar("_T", bound=Any)
_T1 = TypeVar("_T1", bound=Any)
_T2 = TypeVar("_T2", bound=Any)
_PT = TypeVar("_PT", bound=Any)
_PT2 = TypeVar("_PT2", bound=Any)
_RelationshipArgumentType = Union[
str,
Type[_T],
Callable[[], Type[_T]],
"Mapper[_T]",
"AliasedClass[_T]",
Callable[[], "Mapper[_T]"],
Callable[[], "AliasedClass[_T]"],
]
_LazyLoadArgumentType = Literal[
"select",
"joined",
"selectin",
"subquery",
"raise",
"raise_on_sql",
"noload",
"immediate",
"write_only",
"dynamic",
True,
False,
None,
]
_RelationshipJoinConditionArgument = Union[
str, _ColumnExpressionArgument[bool]
]
_RelationshipSecondaryArgument = Union[
"FromClause", str, Callable[[], "FromClause"]
]
_ORMOrderByArgument = Union[
Literal[False],
str,
_ColumnExpressionArgument[Any],
Callable[[], _ColumnExpressionArgument[Any]],
Callable[[], Iterable[_ColumnExpressionArgument[Any]]],
Iterable[Union[str, _ColumnExpressionArgument[Any]]],
]
ORMBackrefArgument = Union[str, Tuple[str, Dict[str, Any]]]
_ORMColCollectionElement = Union[
ColumnClause[Any], _HasClauseElement, roles.DMLColumnRole, "Mapped[Any]"
]
_ORMColCollectionArgument = Union[
str,
Sequence[_ORMColCollectionElement],
Callable[[], Sequence[_ORMColCollectionElement]],
Callable[[], _ORMColCollectionElement],
_ORMColCollectionElement,
]
_CEA = TypeVar("_CEA", bound=_ColumnExpressionArgument[Any])
_CE = TypeVar("_CE", bound="ColumnElement[Any]")
_ColumnPairIterable = Iterable[Tuple[ColumnElement[Any], ColumnElement[Any]]]
_ColumnPairs = Sequence[Tuple[ColumnElement[Any], ColumnElement[Any]]]
_MutableColumnPairs = List[Tuple[ColumnElement[Any], ColumnElement[Any]]]
def remote(expr: _CEA) -> _CEA:
"""Annotate a portion of a primaryjoin expression
with a 'remote' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.foreign`
"""
return _annotate_columns( # type: ignore
coercions.expect(roles.ColumnArgumentRole, expr), {"remote": True}
)
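# Illustrative sketch (not part of the original module): remote() marks which
# columns of a primaryjoin belong to the "remote" (target) side, e.g. a
# self-referential many-to-one, comparable to the remote_side=[Node.id]
# shorthand (assumes a hypothetical declarative Node class):
#
#     class Node(Base):
#         __tablename__ = "node"
#         id = mapped_column(Integer, primary_key=True)
#         parent_id = mapped_column(ForeignKey("node.id"))
#         parent = relationship(
#             "Node",
#             primaryjoin="remote(Node.id) == foreign(Node.parent_id)",
#         )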
def foreign(expr: _CEA) -> _CEA:
"""Annotate a portion of a primaryjoin expression
with a 'foreign' annotation.
See the section :ref:`relationship_custom_foreign` for a
description of use.
.. seealso::
:ref:`relationship_custom_foreign`
:func:`.remote`
"""
return _annotate_columns( # type: ignore
coercions.expect(roles.ColumnArgumentRole, expr), {"foreign": True}
)
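# Illustrative sketch (not part of the original module): foreign() and remote()
# are usually combined when the join condition involves no real ForeignKey,
# e.g. a path-prefix join (assumes a hypothetical declarative Element class):
#
#     class Element(Base):
#         __tablename__ = "element"
#         path = mapped_column(String, primary_key=True)
#         descendants = relationship(
#             "Element",
#             primaryjoin=lambda: remote(foreign(Element.path)).like(
#                 Element.path.concat("/%")
#             ),
#             viewonly=True,
#         )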
@dataclasses.dataclass
class _RelationshipArg(Generic[_T1, _T2]):
"""stores a user-defined parameter value that must be resolved and
parsed later at mapper configuration time.
"""
__slots__ = "name", "argument", "resolved"
name: str
argument: _T1
resolved: Optional[_T2]
def _is_populated(self) -> bool:
return self.argument is not None
def _resolve_against_registry(
self, clsregistry_resolver: Callable[[str, bool], _class_resolver]
) -> None:
attr_value = self.argument
if isinstance(attr_value, str):
self.resolved = clsregistry_resolver(
attr_value, self.name == "secondary"
)()
elif callable(attr_value) and not _is_mapped_class(attr_value):
self.resolved = attr_value()
else:
self.resolved = attr_value
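# For illustration (not part of the original module): the user-facing values
# stored here may arrive as a registry name, a callable, or the target itself;
# all three forms end up in ``resolved`` (assumes a hypothetical mapped
# Address class):
#
#     relationship("Address")        # string, resolved via the class registry
#     relationship(lambda: Address)  # callable, invoked at configuration time
#     relationship(Address)          # already a mapped class, used as-is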
class _RelationshipArgs(NamedTuple):
"""stores user-passed parameters that are resolved at mapper configuration
time.
"""
secondary: _RelationshipArg[
Optional[_RelationshipSecondaryArgument],
Optional[FromClause],
]
primaryjoin: _RelationshipArg[
Optional[_RelationshipJoinConditionArgument],
Optional[ColumnElement[Any]],
]
secondaryjoin: _RelationshipArg[
Optional[_RelationshipJoinConditionArgument],
Optional[ColumnElement[Any]],
]
order_by: _RelationshipArg[
_ORMOrderByArgument,
Union[Literal[None, False], Tuple[ColumnElement[Any], ...]],
]
foreign_keys: _RelationshipArg[
Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]
]
remote_side: _RelationshipArg[
Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]
]
@log.class_logger
class RelationshipProperty(
_IntrospectsAnnotations, StrategizedProperty[_T], log.Identified
):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
"""
strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN
inherit_cache = True
""":meta private:"""
_links_to_entity = True
_is_relationship = True
_overlaps: Sequence[str]
_lazy_strategy: LazyLoader
_persistence_only = dict(
passive_deletes=False,
passive_updates=True,
enable_typechecks=True,
active_history=False,
cascade_backrefs=False,
)
_dependency_processor: Optional[DependencyProcessor] = None
primaryjoin: ColumnElement[bool]
secondaryjoin: Optional[ColumnElement[bool]]
secondary: Optional[FromClause]
_join_condition: JoinCondition
order_by: Union[Literal[False], Tuple[ColumnElement[Any], ...]]
_user_defined_foreign_keys: Set[ColumnElement[Any]]
_calculated_foreign_keys: Set[ColumnElement[Any]]
remote_side: Set[ColumnElement[Any]]
local_columns: Set[ColumnElement[Any]]
synchronize_pairs: _ColumnPairs
secondary_synchronize_pairs: Optional[_ColumnPairs]
local_remote_pairs: Optional[_ColumnPairs]
direction: RelationshipDirection
_init_args: _RelationshipArgs
def __init__(
self,
argument: Optional[_RelationshipArgumentType[_T]] = None,
secondary: Optional[_RelationshipSecondaryArgument] = None,
*,
uselist: Optional[bool] = None,
collection_class: Optional[
Union[Type[Collection[Any]], Callable[[], Collection[Any]]]
] = None,
primaryjoin: Optional[_RelationshipJoinConditionArgument] = None,
secondaryjoin: Optional[_RelationshipJoinConditionArgument] = None,
back_populates: Optional[str] = None,
order_by: _ORMOrderByArgument = False,
backref: Optional[ORMBackrefArgument] = None,
overlaps: Optional[str] = None,
post_update: bool = False,
cascade: str = "save-update, merge",
viewonly: bool = False,
attribute_options: Optional[_AttributeOptions] = None,
lazy: _LazyLoadArgumentType = "select",
passive_deletes: Union[Literal["all"], bool] = False,
passive_updates: bool = True,
active_history: bool = False,
enable_typechecks: bool = True,
foreign_keys: Optional[_ORMColCollectionArgument] = None,
remote_side: Optional[_ORMColCollectionArgument] = None,
join_depth: Optional[int] = None,
comparator_factory: Optional[
Type[RelationshipProperty.Comparator[Any]]
] = None,
single_parent: bool = False,
innerjoin: bool = False,
distinct_target_key: Optional[bool] = None,
load_on_pending: bool = False,
query_class: Optional[Type[Query[Any]]] = None,
info: Optional[_InfoType] = None,
omit_join: Literal[None, False] = None,
sync_backref: Optional[bool] = None,
doc: Optional[str] = None,
bake_queries: Literal[True] = True,
cascade_backrefs: Literal[False] = False,
_local_remote_pairs: Optional[_ColumnPairs] = None,
_legacy_inactive_history_style: bool = False,
):
super().__init__(attribute_options=attribute_options)
self.uselist = uselist
self.argument = argument
self._init_args = _RelationshipArgs(
_RelationshipArg("secondary", secondary, None),
_RelationshipArg("primaryjoin", primaryjoin, None),
_RelationshipArg("secondaryjoin", secondaryjoin, None),
_RelationshipArg("order_by", order_by, None),
_RelationshipArg("foreign_keys", foreign_keys, None),
_RelationshipArg("remote_side", remote_side, None),
)
self.post_update = post_update
self.viewonly = viewonly
if viewonly:
self._warn_for_persistence_only_flags(
passive_deletes=passive_deletes,
passive_updates=passive_updates,
enable_typechecks=enable_typechecks,
active_history=active_history,
cascade_backrefs=cascade_backrefs,
)
if viewonly and sync_backref:
raise sa_exc.ArgumentError(
"sync_backref and viewonly cannot both be True"
)
self.sync_backref = sync_backref
self.lazy = lazy
self.single_parent = single_parent
self.collection_class = collection_class
self.passive_deletes = passive_deletes
if cascade_backrefs:
raise sa_exc.ArgumentError(
"The 'cascade_backrefs' parameter passed to "
"relationship() may only be set to False."
)
self.passive_updates = passive_updates
self.enable_typechecks = enable_typechecks
self.query_class = query_class
self.innerjoin = innerjoin
self.distinct_target_key = distinct_target_key
self.doc = doc
self.active_history = active_history
self._legacy_inactive_history_style = _legacy_inactive_history_style
self.join_depth = join_depth
if omit_join:
util.warn(
"setting omit_join to True is not supported; selectin "
"loading of this relationship may not work correctly if this "
"flag is set explicitly. omit_join optimization is "
"automatically detected for conditions under which it is "
"supported."
)
self.omit_join = omit_join
self.local_remote_pairs = _local_remote_pairs
self.load_on_pending = load_on_pending
self.comparator_factory = (
comparator_factory or RelationshipProperty.Comparator
)
util.set_creation_order(self)
if info is not None:
self.info.update(info)
self.strategy_key = (("lazy", self.lazy),)
self._reverse_property: Set[RelationshipProperty[Any]] = set()
if overlaps:
self._overlaps = set(re.split(r"\s*,\s*", overlaps)) # type: ignore # noqa: E501
else:
self._overlaps = ()
# mypy ignoring the @property setter
self.cascade = cascade # type: ignore
self.back_populates = back_populates
if self.back_populates:
if backref:
raise sa_exc.ArgumentError(
"backref and back_populates keyword arguments "
"are mutually exclusive"
)
self.backref = None
else:
self.backref = backref
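    # Illustrative sketch (not part of the original module): these keyword
    # arguments are normally passed through :func:`_orm.relationship`, e.g. a
    # bidirectional one-to-many (assumes hypothetical declarative User /
    # Address classes):
    #
    #     class User(Base):
    #         __tablename__ = "user"
    #         id = mapped_column(Integer, primary_key=True)
    #         addresses = relationship(
    #             "Address",
    #             back_populates="user",
    #             cascade="all, delete-orphan",
    #             lazy="selectin",
    #             order_by="Address.id",
    #         )
    #
    #     class Address(Base):
    #         __tablename__ = "address"
    #         id = mapped_column(Integer, primary_key=True)
    #         user_id = mapped_column(ForeignKey("user.id"))
    #         user = relationship("User", back_populates="addresses")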
def _warn_for_persistence_only_flags(self, **kw: Any) -> None:
for k, v in kw.items():
if v != self._persistence_only[k]:
# we are warning here rather than warn deprecated as this is a
# configuration mistake, and Python shows regular warnings more
# aggressively than deprecation warnings by default. Unlike the
# case of setting viewonly with cascade, the settings being
# warned about here are not actively doing the wrong thing
# against viewonly=True, so it is not as urgent to have these
# raise an error.
util.warn(
"Setting %s on relationship() while also "
"setting viewonly=True does not make sense, as a "
"viewonly=True relationship does not perform persistence "
"operations. This configuration may raise an error "
"in a future release." % (k,)
)
def instrument_class(self, mapper: Mapper[Any]) -> None:
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
class Comparator(util.MemoizedSlots, PropComparator[_PT]):
"""Produce boolean, comparison, and other operators for
:class:`.RelationshipProperty` attributes.
See the documentation for :class:`.PropComparator` for a brief
overview of ORM level operator definition.
.. seealso::
:class:`.PropComparator`
:class:`.ColumnProperty.Comparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__slots__ = (
"entity",
"mapper",
"property",
"_of_type",
"_extra_criteria",
)
prop: RODescriptorReference[RelationshipProperty[_PT]]
_of_type: Optional[_EntityType[_PT]]
def __init__(
self,
prop: RelationshipProperty[_PT],
parentmapper: _InternalEntityType[Any],
adapt_to_entity: Optional[AliasedInsp[Any]] = None,
of_type: Optional[_EntityType[_PT]] = None,
extra_criteria: Tuple[ColumnElement[bool], ...] = (),
):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self._parententity = parentmapper
self._adapt_to_entity = adapt_to_entity
if of_type:
self._of_type = of_type
else:
self._of_type = None
self._extra_criteria = extra_criteria
def adapt_to_entity(
self, adapt_to_entity: AliasedInsp[Any]
) -> RelationshipProperty.Comparator[Any]:
return self.__class__(
self.prop,
self._parententity,
adapt_to_entity=adapt_to_entity,
of_type=self._of_type,
)
entity: _InternalEntityType[_PT]
"""The target entity referred to by this
:class:`.RelationshipProperty.Comparator`.
This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`
object.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
mapper: Mapper[_PT]
"""The target :class:`_orm.Mapper` referred to by this
:class:`.RelationshipProperty.Comparator`.
This is the "target" or "remote" side of the
:func:`_orm.relationship`.
"""
def _memoized_attr_entity(self) -> _InternalEntityType[_PT]:
if self._of_type:
return inspect(self._of_type) # type: ignore
else:
return self.prop.entity
def _memoized_attr_mapper(self) -> Mapper[_PT]:
return self.entity.mapper
def _source_selectable(self) -> FromClause:
if self._adapt_to_entity:
return self._adapt_to_entity.selectable
else:
return self.property.parent._with_polymorphic_selectable
def __clause_element__(self) -> ColumnElement[bool]:
adapt_from = self._source_selectable()
if self._of_type:
of_type_entity = inspect(self._of_type)
else:
of_type_entity = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.prop._create_joins(
source_selectable=adapt_from,
source_polymorphic=True,
of_type_entity=of_type_entity,
alias_secondary=True,
extra_criteria=self._extra_criteria,
)
if sj is not None:
return pj & sj
else:
return pj
def of_type(self, class_: _EntityType[Any]) -> PropComparator[_PT]:
r"""Redefine this object in terms of a polymorphic subclass.
See :meth:`.PropComparator.of_type` for an example.
"""
return RelationshipProperty.Comparator(
self.prop,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=class_,
extra_criteria=self._extra_criteria,
)
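# Illustrative sketch (hypothetical Company/Employee/Manager mappings):
# of_type() narrows the comparator to a subclass or aliased subclass so that
# subclass-specific criteria can be applied along the join.
#
#     stmt = (
#         select(Company)
#         .join(Company.employees.of_type(Manager))
#         .where(Manager.rank > 3)
#     )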
def and_(
self, *criteria: _ColumnExpressionArgument[bool]
) -> PropComparator[Any]:
"""Add AND criteria.
See :meth:`.PropComparator.and_` for an example.
.. versionadded:: 1.4
"""
exprs = tuple(
coercions.expect(roles.WhereHavingRole, clause)
for clause in util.coerce_generator_arg(criteria)
)
return RelationshipProperty.Comparator(
self.prop,
self._parententity,
adapt_to_entity=self._adapt_to_entity,
of_type=self._of_type,
extra_criteria=self._extra_criteria + exprs,
)
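# Illustrative sketch (hypothetical Parent/Child mappings): and_() appends
# extra ON criteria to the relationship, which also works inside loader
# options.
#
#     stmt = select(Parent).options(
#         selectinload(Parent.children.and_(Child.archived == False))
#     )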
def in_(self, other: Any) -> NoReturn:
"""Produce an IN clause - this is not implemented
for :func:`_orm.relationship`-based attributes at this time.
"""
raise NotImplementedError(
"in_() not yet supported for "
"relationships. For a simple "
"many-to-one, use in_() against "
"the set of foreign key values."
)
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501
"""Implement the ``==`` operator.
In a many-to-one context, such as::
MyClass.some_prop == <some object>
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.Relationship.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.Relationship.Comparator.has` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce a NOT EXISTS clause.
"""
if other is None or isinstance(other, expression.Null):
if self.property.direction in [ONETOMANY, MANYTOMANY]:
return ~self._criterion_exists()
else:
return _orm_annotate(
self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection to an object or collection; "
"use contains() to test for membership."
)
else:
return _orm_annotate(
self.property._optimized_compare(
other, adapt_source=self.adapter
)
)
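# Illustrative sketch (hypothetical Address.user many-to-one and
# User.addresses one-to-many) of the clauses described above:
#
#     Address.user == some_user   # address.user_id = :param
#     Address.user == None        # address.user_id IS NULL
#     User.addresses == None      # NOT EXISTS (SELECT 1 FROM address ...)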
def _criterion_exists(
self,
criterion: Optional[_ColumnExpressionArgument[bool]] = None,
**kwargs: Any,
) -> Exists:
where_criteria = (
coercions.expect(roles.WhereHavingRole, criterion)
if criterion is not None
else None
)
if getattr(self, "_of_type", None):
info: Optional[_InternalEntityType[Any]] = inspect(
self._of_type
)
assert info is not None
target_mapper, to_selectable, is_aliased_class = (
info.mapper,
info.selectable,
info.is_aliased_class,
)
if self.property._is_self_referential and not is_aliased_class:
to_selectable = to_selectable._anonymous_fromclause()
single_crit = target_mapper._single_table_criterion
if single_crit is not None:
if where_criteria is not None:
where_criteria = single_crit & where_criteria
else:
where_criteria = single_crit
else:
is_aliased_class = False
to_selectable = None
if self.adapter:
source_selectable = self._source_selectable()
else:
source_selectable = None
(
pj,
sj,
source,
dest,
secondary,
target_adapter,
) = self.property._create_joins(
dest_selectable=to_selectable,
source_selectable=source_selectable,
)
for k in kwargs:
crit = getattr(self.property.mapper.class_, k) == kwargs[k]
if where_criteria is None:
where_criteria = crit
else:
where_criteria = where_criteria & crit
# annotate the *local* side of the join condition; in the case
# of pj + sj this is the full primaryjoin, in the case of just
# pj it's the local side of the primaryjoin.
if sj is not None:
j = _orm_annotate(pj) & sj
else:
j = _orm_annotate(pj, exclude=self.property.remote_side)
if (
where_criteria is not None
and target_adapter
and not is_aliased_class
):
# limit this adapter to annotated only?
where_criteria = target_adapter.traverse(where_criteria)
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if where_criteria is not None:
where_criteria = where_criteria._annotate(
{"no_replacement_traverse": True}
)
crit = j & sql.True_._ifnone(where_criteria)
if secondary is not None:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest, secondary)
.correlate_except(dest, secondary)
)
else:
ex = (
sql.exists(1)
.where(crit)
.select_from(dest)
.correlate_except(dest)
)
return ex
def any(
self,
criterion: Optional[_ColumnExpressionArgument[bool]] = None,
**kwargs: Any,
) -> ColumnElement[bool]:
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.Relationship.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.Relationship.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT (EXISTS (SELECT 1 FROM related WHERE
related.my_id=my_table.id))
:meth:`~.Relationship.Comparator.any` is only
valid for collections, i.e. a :func:`_orm.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.Relationship.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
"'any()' not implemented for scalar "
"attributes. Use has()."
)
return self._criterion_exists(criterion, **kwargs)
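# A 2.0-style sketch of the same patterns using select() (hypothetical
# User/Address mappings):
#
#     stmt = select(User).where(
#         User.addresses.any(Address.email.like("%@example.com"))
#     )
#     no_addresses = select(User).where(~User.addresses.any())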
def has(
self,
criterion: Optional[_ColumnExpressionArgument[bool]] = None,
**kwargs: Any,
) -> ColumnElement[bool]:
"""Produce an expression that tests a scalar reference against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE
related.id==my_table.related_id AND related.x=2)
Because :meth:`~.Relationship.Comparator.has` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.Relationship.Comparator.has` is only
valid for scalar references, i.e. a :func:`_orm.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.Relationship.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
"'has()' not implemented for collections. " "Use any()."
)
return self._criterion_exists(criterion, **kwargs)
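# A 2.0-style sketch (hypothetical many-to-one Address.user):
#
#     stmt = select(Address).where(Address.user.has(User.name == "ed"))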
def contains(
self, other: _ColumnExpressionArgument[Any], **kwargs: Any
) -> ColumnElement[bool]:
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.Relationship.Comparator.contains` is
only valid for a collection, i.e. a
:func:`_orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.Relationship.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.Relationship.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.Relationship.Comparator.contains`
will **not** work with many-to-many collections when
used in queries that move beyond simple AND
conjunctions, such as multiple
:meth:`~.Relationship.Comparator.contains`
expressions joined by OR. In such cases subqueries or
explicit "outer joins" will need to be used instead.
See :meth:`~.Relationship.Comparator.any` for
a less-performant alternative using EXISTS, or refer
to :meth:`_query.Query.outerjoin`
as well as :ref:`orm_queryguide_joins`
for more details on constructing outer joins.
kwargs may be ignored by this operator but are required for API
conformance.
"""
if not self.prop.uselist:
raise sa_exc.InvalidRequestError(
"'contains' not implemented for scalar "
"attributes. Use =="
)
clause = self.prop._optimized_compare(
other, adapt_source=self.adapter
)
if self.prop.secondaryjoin is not None:
clause.negation_clause = self.__negated_contains_or_equals(
other
)
return clause
def __negated_contains_or_equals(
self, other: Any
) -> ColumnElement[bool]:
if self.prop.direction == MANYTOONE:
state = attributes.instance_state(other)
def state_bindparam(
local_col: ColumnElement[Any],
state: InstanceState[Any],
remote_col: ColumnElement[Any],
) -> BindParameter[Any]:
dict_ = state.dict
return sql.bindparam(
local_col.key,
type_=local_col.type,
unique=True,
callable_=self.prop._get_attr_w_warn_on_none(
self.prop.mapper, state, dict_, remote_col
),
)
def adapt(col: _CE) -> _CE:
if self.adapter:
return self.adapter(col)
else:
return col
if self.property._use_get:
return sql.and_(
*[
sql.or_(
adapt(x)
!= state_bindparam(adapt(x), state, y),
adapt(x) == None,
)
for (x, y) in self.property.local_remote_pairs
]
)
criterion = sql.and_(
*[
x == y
for (x, y) in zip(
self.property.mapper.primary_key,
self.property.mapper.primary_key_from_instance(other),
)
]
)
return ~self._criterion_exists(criterion)
def __ne__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.Relationship.Comparator.contains`
in conjunction with :func:`_expression.not_`.
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
main body of the query. This will not work for
queries that go beyond simple AND conjunctions of
comparisons, such as those which use OR. Use
explicit joins, outerjoins, or
:meth:`~.Relationship.Comparator.has` in
conjunction with :func:`_expression.not_` for
more comprehensive non-many-to-one scalar
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if other is None or isinstance(other, expression.Null):
if self.property.direction == MANYTOONE:
return _orm_annotate(
~self.property._optimized_compare(
None, adapt_source=self.adapter
)
)
else:
return self._criterion_exists()
elif self.property.uselist:
raise sa_exc.InvalidRequestError(
"Can't compare a collection"
" to an object or collection; use "
"contains() to test for membership."
)
else:
return _orm_annotate(self.__negated_contains_or_equals(other))
def _memoized_attr_property(self) -> RelationshipProperty[_PT]:
self.prop.parent._check_configure()
return self.prop
def _with_parent(
self,
instance: object,
alias_secondary: bool = True,
from_entity: Optional[_EntityType[Any]] = None,
) -> ColumnElement[bool]:
assert instance is not None
adapt_source: Optional[_CoreAdapterProto] = None
if from_entity is not None:
insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)
assert insp is not None
if insp_is_aliased_class(insp):
adapt_source = insp._adapter.adapt_clause
return self._optimized_compare(
instance,
value_is_parent=True,
adapt_source=adapt_source,
alias_secondary=alias_secondary,
)
def _optimized_compare(
self,
state: Any,
value_is_parent: bool = False,
adapt_source: Optional[_CoreAdapterProto] = None,
alias_secondary: bool = True,
) -> ColumnElement[bool]:
if state is not None:
try:
state = inspect(state)
except sa_exc.NoInspectionAvailable:
state = None
if state is None or not getattr(state, "is_instance", False):
raise sa_exc.ArgumentError(
"Mapped instance expected for relationship "
"comparison to object. Classes, queries and other "
"SQL elements are not accepted in this context; for "
"comparison with a subquery, "
"use %s.has(**criteria)." % self
)
reverse_direction = not value_is_parent
if state is None:
return self._lazy_none_clause(
reverse_direction, adapt_source=adapt_source
)
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
if reverse_direction:
mapper = self.mapper
else:
mapper = self.parent
dict_ = attributes.instance_dict(state.obj())
def visit_bindparam(bindparam: BindParameter[Any]) -> None:
if bindparam._identifying_key in bind_to_col:
bindparam.callable = self._get_attr_w_warn_on_none(
mapper,
state,
dict_,
bind_to_col[bindparam._identifying_key],
)
if self.secondary is not None and alias_secondary:
criterion = ClauseAdapter(
self.secondary._anonymous_fromclause()
).traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {"bindparam": visit_bindparam}
)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _get_attr_w_warn_on_none(
self,
mapper: Mapper[Any],
state: InstanceState[Any],
dict_: _InstanceDict,
column: ColumnElement[Any],
) -> Callable[[], Any]:
"""Create the callable that is used in a many-to-one expression.
E.g.::
u1 = s.query(User).get(5)
expr = Address.user == u1
Above, the SQL should be "address.user_id = 5". The callable
returned by this method produces the value "5" based on the identity
of ``u1``.
"""
# in this callable, we're trying to thread the needle through
# a wide variety of scenarios, including:
#
# * the object hasn't been flushed yet and there's no value for
# the attribute as of yet
#
# * the object hasn't been flushed yet but it has a user-defined
# value
#
# * the object has a value but it's expired and not locally present
#
# * the object has a value but it's expired and not locally present,
# and the object is also detached
#
# * The object hadn't been flushed yet, there was no value, but
# later, the object has been expired and detached, and *now*
# they're trying to evaluate it
#
# * the object had a value, but it was changed to a new value, and
# then expired
#
# * the object had a value, but it was changed to a new value, and
# then expired, then the object was detached
#
# * the object has a user-set value, but it's None and we don't
# handle that comparison correctly, so warn
#
prop = mapper.get_property_by_column(column)
# by invoking this method, InstanceState will track the last known
# value for this key each time the attribute is to be expired.
# this feature was added explicitly for use in this method.
state._track_last_known_value(prop.key)
lkv_fixed = state._last_known_values
def _go() -> Any:
assert lkv_fixed is not None
last_known = to_return = lkv_fixed[prop.key]
existing_is_available = (
last_known is not LoaderCallableStatus.NO_VALUE
)
# we support that the value may have changed. so here we
# try to get the most recent value including re-fetching.
# only if we can't get a value now due to detachment do we return
# the last known value
current_value = mapper._get_state_attr_by_column(
state,
dict_,
column,
passive=PassiveFlag.PASSIVE_OFF
if state.persistent
else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK,
)
if current_value is LoaderCallableStatus.NEVER_SET:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; no value has been set for this column"
% (column, state_str(state))
)
elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:
if not existing_is_available:
raise sa_exc.InvalidRequestError(
"Can't resolve value for column %s on object "
"%s; the object is detached and the value was "
"expired" % (column, state_str(state))
)
else:
to_return = current_value
if to_return is None:
util.warn(
"Got None for value of column %s; this is unsupported "
"for a relationship comparison and will not "
"currently produce an IS comparison "
"(but may in a future release)" % column
)
return to_return
return _go
def _lazy_none_clause(
self,
reverse_direction: bool = False,
adapt_source: Optional[_CoreAdapterProto] = None,
) -> ColumnElement[bool]:
if not reverse_direction:
criterion, bind_to_col = (
self._lazy_strategy._lazywhere,
self._lazy_strategy._bind_to_col,
)
else:
criterion, bind_to_col = (
self._lazy_strategy._rev_lazywhere,
self._lazy_strategy._rev_bind_to_col,
)
criterion = adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def __str__(self) -> str:
return str(self.parent.class_.__name__) + "." + self.key
def merge(
self,
session: Session,
source_state: InstanceState[Any],
source_dict: _InstanceDict,
dest_state: InstanceState[Any],
dest_dict: _InstanceDict,
load: bool,
_recursive: Dict[Any, object],
_resolve_conflict_map: Dict[_IdentityKeyType[Any], object],
) -> None:
if load:
for r in self._reverse_property:
if (source_state, r) in _recursive:
return
if "merge" not in self._cascade:
return
if self.key not in source_dict:
return
if self.uselist:
impl = source_state.get_impl(self.key)
assert is_has_collection_adapter(impl)
instances_iterable = impl.get_collection(source_state, source_dict)
# if this is a CollectionAttributeImpl, then empty should
# be False, otherwise "self.key in source_dict" should not be
# True
assert not instances_iterable.empty if impl.collection else True
if load:
# for a full merge, pre-load the destination collection,
# so that individual _merge of each item pulls from identity
# map for those already present.
# also assumes CollectionAttributeImpl behavior of loading
# "old" list in any case
dest_state.get_impl(self.key).get(
dest_state, dest_dict, passive=PassiveFlag.PASSIVE_MERGE
)
dest_list = []
for current in instances_iterable:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
if obj is not None:
dest_list.append(obj)
if not load:
coll = attributes.init_state_collection(
dest_state, dest_dict, self.key
)
for c in dest_list:
coll.append_without_event(c)
else:
dest_impl = dest_state.get_impl(self.key)
assert is_has_collection_adapter(dest_impl)
dest_impl.set(
dest_state,
dest_dict,
dest_list,
_adapt=False,
passive=PassiveFlag.PASSIVE_MERGE,
)
else:
current = source_dict[self.key]
if current is not None:
current_state = attributes.instance_state(current)
current_dict = attributes.instance_dict(current)
_recursive[(current_state, self)] = True
obj = session._merge(
current_state,
current_dict,
load=load,
_recursive=_recursive,
_resolve_conflict_map=_resolve_conflict_map,
)
else:
obj = None
if not load:
dest_dict[self.key] = obj
else:
dest_state.get_impl(self.key).set(
dest_state, dest_dict, obj, None
)
def _value_as_iterable(
self,
state: InstanceState[_O],
dict_: _InstanceDict,
key: str,
passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,
) -> Sequence[Tuple[InstanceState[_O], _O]]:
"""Return a list of tuples (state, obj) for the given
key.
returns an empty list if the value is None/empty/PASSIVE_NO_RESULT
"""
impl = state.manager[key].impl
x = impl.get(state, dict_, passive=passive)
if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:
return []
elif is_has_collection_adapter(impl):
return [
(attributes.instance_state(o), o)
for o in impl.get_collection(state, dict_, x, passive=passive)
]
else:
return [(attributes.instance_state(x), x)]
def cascade_iterator(
self,
type_: str,
state: InstanceState[Any],
dict_: _InstanceDict,
visited_states: Set[InstanceState[Any]],
halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None,
) -> Iterator[Tuple[Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:
# assert type_ in self._cascade
# only actively lazy load on the 'delete' cascade
if type_ != "delete" or self.passive_deletes:
passive = PassiveFlag.PASSIVE_NO_INITIALIZE
else:
passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE
if type_ == "save-update":
tuples = state.manager[self.key].impl.get_all_pending(state, dict_)
else:
tuples = self._value_as_iterable(
state, dict_, self.key, passive=passive
)
skip_pending = (
type_ == "refresh-expire" and "delete-orphan" not in self._cascade
)
for instance_state, c in tuples:
if instance_state in visited_states:
continue
if c is None:
# would like to emit a warning here, but that
# would not be consistent with the current behavior of
# collection.append(None), which silently skips None.
# see [ticket:2229]
continue
assert instance_state is not None
instance_dict = attributes.instance_dict(c)
if halt_on and halt_on(instance_state):
continue
if skip_pending and not instance_state.key:
continue
instance_mapper = instance_state.manager.mapper
if not instance_mapper.isa(self.mapper.class_manager.mapper):
raise AssertionError(
"Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'"
% (self.key, self.parent.class_, c.__class__)
)
visited_states.add(instance_state)
yield c, instance_mapper, instance_state, instance_dict
@property
def _effective_sync_backref(self) -> bool:
if self.viewonly:
return False
else:
return self.sync_backref is not False
@staticmethod
def _check_sync_backref(
rel_a: RelationshipProperty[Any], rel_b: RelationshipProperty[Any]
) -> None:
if rel_a.viewonly and rel_b.sync_backref:
raise sa_exc.InvalidRequestError(
"Relationship %s cannot specify sync_backref=True since %s "
"includes viewonly=True." % (rel_b, rel_a)
)
if (
rel_a.viewonly
and not rel_b.viewonly
and rel_b.sync_backref is not False
):
rel_b.sync_backref = False
def _add_reverse_property(self, key: str) -> None:
other = self.mapper.get_property(key, _configure_mappers=False)
if not isinstance(other, RelationshipProperty):
raise sa_exc.InvalidRequestError(
"back_populates on relationship '%s' refers to attribute '%s' "
"that is not a relationship. The back_populates parameter "
"should refer to the name of a relationship on the target "
"class." % (self, other)
)
# viewonly and sync_backref cases
# 1. self.viewonly==True and other.sync_backref==True -> error
# 2. self.viewonly==True and other.viewonly==False and
# other.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(self, other)
# 3. other.viewonly==True and self.sync_backref==True -> error
# 4. other.viewonly==True and self.viewonly==False and
# self.sync_backref==None -> warn sync_backref=False, set to False
self._check_sync_backref(other, self)
self._reverse_property.add(other)
other._reverse_property.add(self)
other._setup_entity()
if not other.mapper.common_parent(self.parent):
raise sa_exc.ArgumentError(
"reverse_property %r on "
"relationship %s references relationship %s, which "
"does not reference mapper %s"
% (key, self, other, self.parent)
)
if (
other._configure_started
and self.direction in (ONETOMANY, MANYTOONE)
and self.direction == other.direction
):
raise sa_exc.ArgumentError(
"%s and back-reference %s are "
"both of the same direction %r. Did you mean to "
"set remote_side on the many-to-one side ?"
% (other, self, self.direction)
)
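# Illustrative sketch of the back_populates pairing this method validates and
# links (hypothetical declarative mappings):
#
#     class Parent(Base):
#         __tablename__ = "parent"
#         id = mapped_column(Integer, primary_key=True)
#         children = relationship("Child", back_populates="parent")
#
#     class Child(Base):
#         __tablename__ = "child"
#         id = mapped_column(Integer, primary_key=True)
#         parent_id = mapped_column(ForeignKey("parent.id"))
#         parent = relationship("Parent", back_populates="children")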
@util.memoized_property
def entity(self) -> _InternalEntityType[_T]:
"""Return the target mapped entity, which is an inspect() of the
class or aliased class that is referred towards.
"""
self.parent._check_configure()
return self.entity
@util.memoized_property
def mapper(self) -> Mapper[_T]:
"""Return the targeted :class:`_orm.Mapper` for this
:class:`.RelationshipProperty`.
"""
return self.entity.mapper
def do_init(self) -> None:
self._check_conflicts()
self._process_dependent_arguments()
self._setup_entity()
self._setup_registry_dependencies()
self._setup_join_conditions()
self._check_cascade_settings(self._cascade)
self._post_init()
self._generate_backref()
self._join_condition._warn_for_conflicting_sync_targets()
super().do_init()
self._lazy_strategy = cast(
"LazyLoader", self._get_strategy((("lazy", "select"),))
)
def _setup_registry_dependencies(self) -> None:
self.parent.mapper.registry._set_depends_on(
self.entity.mapper.registry
)
def _process_dependent_arguments(self) -> None:
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
# by declarative "string configs" and some recipes.
init_args = self._init_args
for attr in (
"order_by",
"primaryjoin",
"secondaryjoin",
"secondary",
"foreign_keys",
"remote_side",
):
rel_arg = getattr(init_args, attr)
rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])
# remove "annotations" which are present if mapped class
# descriptors are used to create the join expression.
for attr in "primaryjoin", "secondaryjoin":
rel_arg = getattr(init_args, attr)
val = rel_arg.resolved
if val is not None:
rel_arg.resolved = _orm_deannotate(
coercions.expect(
roles.ColumnArgumentRole, val, argname=attr
)
)
secondary = init_args.secondary.resolved
if secondary is not None and _is_mapped_class(secondary):
raise sa_exc.ArgumentError(
"secondary argument %s passed to to relationship() %s must "
"be a Table object or other FROM clause; can't send a mapped "
"class directly as rows in 'secondary' are persisted "
"independently of a class that is mapped "
"to that same table." % (secondary, self)
)
# ensure expressions in self.order_by, foreign_keys,
# remote_side are all columns, not strings.
if (
init_args.order_by.resolved is not False
and init_args.order_by.resolved is not None
):
self.order_by = tuple(
coercions.expect(
roles.ColumnArgumentRole, x, argname="order_by"
)
for x in util.to_list(init_args.order_by.resolved)
)
else:
self.order_by = False
self._user_defined_foreign_keys = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="foreign_keys"
)
for x in util.to_column_set(init_args.foreign_keys.resolved)
)
self.remote_side = util.column_set(
coercions.expect(
roles.ColumnArgumentRole, x, argname="remote_side"
)
for x in util.to_column_set(init_args.remote_side.resolved)
)
def declarative_scan(
self,
decl_scan: _ClassScanMapperConfig,
registry: _RegistryType,
cls: Type[Any],
originating_module: Optional[str],
key: str,
mapped_container: Optional[Type[Mapped[Any]]],
annotation: Optional[_AnnotationScanType],
extracted_mapped_annotation: Optional[_AnnotationScanType],
is_dataclass_field: bool,
) -> None:
argument = extracted_mapped_annotation
if extracted_mapped_annotation is None:
if self.argument is None:
self._raise_for_required(key, cls)
else:
return
argument = extracted_mapped_annotation
assert originating_module is not None
is_write_only = mapped_container is not None and issubclass(
mapped_container, WriteOnlyMapped
)
if is_write_only:
self.lazy = "write_only"
self.strategy_key = (("lazy", self.lazy),)
is_dynamic = mapped_container is not None and issubclass(
mapped_container, DynamicMapped
)
if is_dynamic:
self.lazy = "dynamic"
self.strategy_key = (("lazy", self.lazy),)
argument = de_optionalize_union_types(argument)
if hasattr(argument, "__origin__"):
arg_origin = argument.__origin__ # type: ignore
if isinstance(arg_origin, type) and issubclass(
arg_origin, abc.Collection
):
if self.collection_class is None:
if _py_inspect.isabstract(arg_origin):
raise sa_exc.ArgumentError(
f"Collection annotation type {arg_origin} cannot "
"be instantiated; please provide an explicit "
"'collection_class' parameter "
"(e.g. list, set, etc.) to the "
"relationship() function to accompany this "
"annotation"
)
self.collection_class = arg_origin
elif not is_write_only and not is_dynamic:
self.uselist = False
if argument.__args__: # type: ignore
if isinstance(arg_origin, type) and issubclass(
arg_origin, typing.Mapping # type: ignore
):
type_arg = argument.__args__[-1] # type: ignore
else:
type_arg = argument.__args__[0] # type: ignore
if hasattr(type_arg, "__forward_arg__"):
str_argument = type_arg.__forward_arg__
argument = resolve_name_to_real_class_name(
str_argument, originating_module
)
else:
argument = type_arg
else:
raise sa_exc.ArgumentError(
f"Generic alias {argument} requires an argument"
)
elif hasattr(argument, "__forward_arg__"):
argument = argument.__forward_arg__ # type: ignore
argument = resolve_name_to_real_class_name(
argument, originating_module
)
# we don't allow the collection class to be a
# __forward_arg__ right now, so if we see a forward arg here,
# we know there was no collection class either
if (
self.collection_class is None
and not is_write_only
and not is_dynamic
):
self.uselist = False
# ticket #8759
# if a lead argument was given to relationship(), like
# `relationship("B")`, use that, don't replace it with class we
# found in the annotation. The declarative_scan() method call here is
# still useful, as we continue to derive collection type and do
# checking of the annotation in any case.
if self.argument is None:
self.argument = cast("_RelationshipArgumentType[_T]", argument)
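# Illustrative sketch of the annotation forms interpreted above (hypothetical
# 2.0-style declarative attributes):
#
#     children: Mapped[List["Child"]] = relationship()        # collection, uselist=True
#     tags: Mapped[Set["Tag"]] = relationship()                # collection_class=set
#     parent: Mapped[Optional["Parent"]] = relationship()      # scalar, uselist=False
#     entries: DynamicMapped["Entry"] = relationship()         # lazy="dynamic"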
@util.preload_module("sqlalchemy.orm.mapper")
def _setup_entity(self, __argument: Any = None) -> None:
if "entity" in self.__dict__:
return
mapperlib = util.preloaded.orm_mapper
if __argument:
argument = __argument
else:
argument = self.argument
resolved_argument: _ExternalEntityType[Any]
if isinstance(argument, str):
# we might want to cleanup clsregistry API to make this
# more straightforward
resolved_argument = cast(
"_ExternalEntityType[Any]",
self._clsregistry_resolve_name(argument)(),
)
elif callable(argument) and not isinstance(
argument, (type, mapperlib.Mapper)
):
resolved_argument = argument()
else:
resolved_argument = argument
entity: _InternalEntityType[Any]
if isinstance(resolved_argument, type):
entity = class_mapper(resolved_argument, configure=False)
else:
try:
entity = inspect(resolved_argument)
except sa_exc.NoInspectionAvailable:
entity = None # type: ignore
if not hasattr(entity, "mapper"):
raise sa_exc.ArgumentError(
"relationship '%s' expects "
"a class or a mapper argument (received: %s)"
% (self.key, type(resolved_argument))
)
self.entity = entity # type: ignore
self.target = self.entity.persist_selectable
def _setup_join_conditions(self) -> None:
self._join_condition = jc = JoinCondition(
parent_persist_selectable=self.parent.persist_selectable,
child_persist_selectable=self.entity.persist_selectable,
parent_local_selectable=self.parent.local_table,
child_local_selectable=self.entity.local_table,
primaryjoin=self._init_args.primaryjoin.resolved,
secondary=self._init_args.secondary.resolved,
secondaryjoin=self._init_args.secondaryjoin.resolved,
parent_equivalents=self.parent._equivalent_columns,
child_equivalents=self.mapper._equivalent_columns,
consider_as_foreign_keys=self._user_defined_foreign_keys,
local_remote_pairs=self.local_remote_pairs,
remote_side=self.remote_side,
self_referential=self._is_self_referential,
prop=self,
support_sync=not self.viewonly,
can_be_synced_fn=self._columns_are_mapped,
)
self.primaryjoin = jc.primaryjoin
self.secondaryjoin = jc.secondaryjoin
self.secondary = jc.secondary
self.direction = jc.direction
self.local_remote_pairs = jc.local_remote_pairs
self.remote_side = jc.remote_columns
self.local_columns = jc.local_columns
self.synchronize_pairs = jc.synchronize_pairs
self._calculated_foreign_keys = jc.foreign_key_columns
self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs
@property
def _clsregistry_resolve_arg(
self,
) -> Callable[[str, bool], _class_resolver]:
return self._clsregistry_resolvers[1]
@property
def _clsregistry_resolve_name(
self,
) -> Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]]:
return self._clsregistry_resolvers[0]
@util.memoized_property
@util.preload_module("sqlalchemy.orm.clsregistry")
def _clsregistry_resolvers(
self,
) -> Tuple[
Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]],
Callable[[str, bool], _class_resolver],
]:
_resolver = util.preloaded.orm_clsregistry._resolver
return _resolver(self.parent.class_, self)
def _check_conflicts(self) -> None:
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if self.parent.non_primary and not class_mapper(
self.parent.class_, configure=False
).has_property(self.key):
raise sa_exc.ArgumentError(
"Attempting to assign a new "
"relationship '%s' to a non-primary mapper on "
"class '%s'. New relationships can only be added "
"to the primary mapper, i.e. the very first mapper "
"created for class '%s' "
% (
self.key,
self.parent.class_.__name__,
self.parent.class_.__name__,
)
)
@property
def cascade(self) -> CascadeOptions:
"""Return the current cascade setting for this
:class:`.RelationshipProperty`.
"""
return self._cascade
@cascade.setter
def cascade(self, cascade: Union[str, CascadeOptions]) -> None:
self._set_cascade(cascade)
def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) -> None:
cascade = CascadeOptions(cascade_arg)
if self.viewonly:
cascade = CascadeOptions(
cascade.intersection(CascadeOptions._viewonly_cascades)
)
if "mapper" in self.__dict__:
self._check_cascade_settings(cascade)
self._cascade = cascade
if self._dependency_processor:
self._dependency_processor.cascade = cascade
def _check_cascade_settings(self, cascade: CascadeOptions) -> None:
if (
cascade.delete_orphan
and not self.single_parent
and (self.direction is MANYTOMANY or self.direction is MANYTOONE)
):
raise sa_exc.ArgumentError(
"For %(direction)s relationship %(rel)s, delete-orphan "
"cascade is normally "
'configured only on the "one" side of a one-to-many '
"relationship, "
'and not on the "many" side of a many-to-one or many-to-many '
"relationship. "
"To force this relationship to allow a particular "
'"%(relatedcls)s" object to be referred towards by only '
'a single "%(clsname)s" object at a time via the '
"%(rel)s relationship, which "
"would allow "
"delete-orphan cascade to take place in this direction, set "
"the single_parent=True flag."
% {
"rel": self,
"direction": "many-to-one"
if self.direction is MANYTOONE
else "many-to-many",
"clsname": self.parent.class_.__name__,
"relatedcls": self.mapper.class_.__name__,
},
code="bbf0",
)
if self.passive_deletes == "all" and (
"delete" in cascade or "delete-orphan" in cascade
):
raise sa_exc.ArgumentError(
"On %s, can't set passive_deletes='all' in conjunction "
"with 'delete' or 'delete-orphan' cascade" % self
)
if cascade.delete_orphan:
self.mapper.primary_mapper()._delete_orphans.append(
(self.key, self.parent.class_)
)
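# Illustrative sketch (hypothetical many-to-one): delete-orphan normally lives
# on the "one" side; to use it from the "many" side, single_parent must be set
# as the error message above describes.
#
#     parent = relationship(
#         "Parent", cascade="all, delete-orphan", single_parent=True
#     )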
def _persists_for(self, mapper: Mapper[Any]) -> bool:
"""Return True if this property will persist values on behalf
of the given mapper.
"""
return (
self.key in mapper.relationships
and mapper.relationships[self.key] is self
)
def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.RelationshipProperty`.
"""
secondary = self._init_args.secondary.resolved
for c in cols:
if secondary is not None and secondary.c.contains_column(c):
continue
if not self.parent.persist_selectable.c.contains_column(
c
) and not self.target.c.contains_column(c):
return False
return True
def _generate_backref(self) -> None:
"""Interpret the 'backref' instruction to create a
:func:`_orm.relationship` complementary to this one."""
if self.parent.non_primary:
return
if self.backref is not None and not self.back_populates:
kwargs: Dict[str, Any]
if isinstance(self.backref, str):
backref_key, kwargs = self.backref, {}
else:
backref_key, kwargs = self.backref
mapper = self.mapper.primary_mapper()
if not mapper.concrete:
check = set(mapper.iterate_to_root()).union(
mapper.self_and_descendants
)
for m in check:
if m.has_property(backref_key) and not m.concrete:
raise sa_exc.ArgumentError(
"Error creating backref "
"'%s' on relationship '%s': property of that "
"name exists on mapper '%s'"
% (backref_key, self, m)
)
# determine primaryjoin/secondaryjoin for the
# backref. Use the one we had, so that
# a custom join doesn't have to be specified in
# both directions.
if self.secondary is not None:
# for many to many, just switch primaryjoin/
# secondaryjoin. use the annotated
# pj/sj on the _join_condition.
pj = kwargs.pop(
"primaryjoin",
self._join_condition.secondaryjoin_minus_local,
)
sj = kwargs.pop(
"secondaryjoin",
self._join_condition.primaryjoin_minus_local,
)
else:
pj = kwargs.pop(
"primaryjoin",
self._join_condition.primaryjoin_reverse_remote,
)
sj = kwargs.pop("secondaryjoin", None)
if sj:
raise sa_exc.InvalidRequestError(
"Can't assign 'secondaryjoin' on a backref "
"against a non-secondary relationship."
)
foreign_keys = kwargs.pop(
"foreign_keys", self._user_defined_foreign_keys
)
parent = self.parent.primary_mapper()
kwargs.setdefault("viewonly", self.viewonly)
kwargs.setdefault("post_update", self.post_update)
kwargs.setdefault("passive_updates", self.passive_updates)
kwargs.setdefault("sync_backref", self.sync_backref)
self.back_populates = backref_key
relationship = RelationshipProperty(
parent,
self.secondary,
primaryjoin=pj,
secondaryjoin=sj,
foreign_keys=foreign_keys,
back_populates=self.key,
**kwargs,
)
mapper._configure_property(
backref_key, relationship, warn_for_existing=True
)
if self.back_populates:
self._add_reverse_property(self.back_populates)
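# Illustrative sketch (hypothetical User/Address mappings): a plain string
# backref versus the backref() construct, whose keyword arguments become the
# kwargs consumed above.
#
#     addresses = relationship("Address", backref="user")
#     addresses = relationship(
#         "Address", backref=backref("user", lazy="joined")
#     )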
@util.preload_module("sqlalchemy.orm.dependency")
def _post_init(self) -> None:
dependency = util.preloaded.orm_dependency
if self.uselist is None:
self.uselist = self.direction is not MANYTOONE
if not self.viewonly:
self._dependency_processor = ( # type: ignore
dependency.DependencyProcessor.from_relationship
)(self)
@util.memoized_property
def _use_get(self) -> bool:
"""memoize the 'use_get' attribute of this RelationshipLoader's
lazyloader."""
strategy = self._lazy_strategy
return strategy.use_get
@util.memoized_property
def _is_self_referential(self) -> bool:
return self.mapper.common_parent(self.parent)
def _create_joins(
self,
source_polymorphic: bool = False,
source_selectable: Optional[FromClause] = None,
dest_selectable: Optional[FromClause] = None,
of_type_entity: Optional[_InternalEntityType[Any]] = None,
alias_secondary: bool = False,
extra_criteria: Tuple[ColumnElement[bool], ...] = (),
) -> Tuple[
ColumnElement[bool],
Optional[ColumnElement[bool]],
FromClause,
FromClause,
Optional[FromClause],
Optional[ClauseAdapter],
]:
aliased = False
if alias_secondary and self.secondary is not None:
aliased = True
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
source_selectable = self.parent._with_polymorphic_selectable
if of_type_entity:
dest_mapper = of_type_entity.mapper
if dest_selectable is None:
dest_selectable = of_type_entity.selectable
aliased = True
else:
dest_mapper = self.mapper
if dest_selectable is None:
dest_selectable = self.entity.selectable
if self.mapper.with_polymorphic:
aliased = True
if self._is_self_referential and source_selectable is None:
dest_selectable = dest_selectable._anonymous_fromclause()
aliased = True
elif (
dest_selectable is not self.mapper._with_polymorphic_selectable
or self.mapper.with_polymorphic
):
aliased = True
single_crit = dest_mapper._single_table_criterion
aliased = aliased or (
source_selectable is not None
and (
source_selectable
is not self.parent._with_polymorphic_selectable
or source_selectable._is_subquery
)
)
(
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
) = self._join_condition.join_targets(
source_selectable,
dest_selectable,
aliased,
single_crit,
extra_criteria,
)
if source_selectable is None:
source_selectable = self.parent.local_table
if dest_selectable is None:
dest_selectable = self.entity.local_table
return (
primaryjoin,
secondaryjoin,
source_selectable,
dest_selectable,
secondary,
target_adapter,
)
def _annotate_columns(element: _CE, annotations: _AnnotationDict) -> _CE:
def clone(elem: _CE) -> _CE:
if isinstance(elem, expression.ColumnClause):
elem = elem._annotate(annotations.copy()) # type: ignore
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
clone = None # type: ignore # remove gc cycles
return element
class JoinCondition:
primaryjoin_initial: Optional[ColumnElement[bool]]
primaryjoin: ColumnElement[bool]
secondaryjoin: Optional[ColumnElement[bool]]
secondary: Optional[FromClause]
prop: RelationshipProperty[Any]
synchronize_pairs: _ColumnPairs
secondary_synchronize_pairs: _ColumnPairs
direction: RelationshipDirection
parent_persist_selectable: FromClause
child_persist_selectable: FromClause
parent_local_selectable: FromClause
child_local_selectable: FromClause
_local_remote_pairs: Optional[_ColumnPairs]
def __init__(
self,
parent_persist_selectable: FromClause,
child_persist_selectable: FromClause,
parent_local_selectable: FromClause,
child_local_selectable: FromClause,
*,
primaryjoin: Optional[ColumnElement[bool]] = None,
secondary: Optional[FromClause] = None,
secondaryjoin: Optional[ColumnElement[bool]] = None,
parent_equivalents: Optional[_EquivalentColumnMap] = None,
child_equivalents: Optional[_EquivalentColumnMap] = None,
consider_as_foreign_keys: Any = None,
local_remote_pairs: Optional[_ColumnPairs] = None,
remote_side: Any = None,
self_referential: Any = False,
prop: RelationshipProperty[Any],
support_sync: bool = True,
can_be_synced_fn: Callable[..., bool] = lambda *c: True,
):
self.parent_persist_selectable = parent_persist_selectable
self.parent_local_selectable = parent_local_selectable
self.child_persist_selectable = child_persist_selectable
self.child_local_selectable = child_local_selectable
self.parent_equivalents = parent_equivalents
self.child_equivalents = child_equivalents
self.primaryjoin_initial = primaryjoin
self.secondaryjoin = secondaryjoin
self.secondary = secondary
self.consider_as_foreign_keys = consider_as_foreign_keys
self._local_remote_pairs = local_remote_pairs
self._remote_side = remote_side
self.prop = prop
self.self_referential = self_referential
self.support_sync = support_sync
self.can_be_synced_fn = can_be_synced_fn
self._determine_joins()
assert self.primaryjoin is not None
self._sanitize_joins()
self._annotate_fks()
self._annotate_remote()
self._annotate_local()
self._annotate_parentmapper()
self._setup_pairs()
self._check_foreign_cols(self.primaryjoin, True)
if self.secondaryjoin is not None:
self._check_foreign_cols(self.secondaryjoin, False)
self._determine_direction()
self._check_remote_side()
self._log_joins()
def _log_joins(self) -> None:
log = self.prop.logger
log.info("%s setup primary join %s", self.prop, self.primaryjoin)
log.info("%s setup secondary join %s", self.prop, self.secondaryjoin)
log.info(
"%s synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r) for (l, r) in self.synchronize_pairs
),
)
log.info(
"%s secondary synchronize pairs [%s]",
self.prop,
",".join(
"(%s => %s)" % (l, r)
for (l, r) in self.secondary_synchronize_pairs or []
),
)
log.info(
"%s local/remote pairs [%s]",
self.prop,
",".join(
"(%s / %s)" % (l, r) for (l, r) in self.local_remote_pairs
),
)
log.info(
"%s remote columns [%s]",
self.prop,
",".join("%s" % col for col in self.remote_columns),
)
log.info(
"%s local columns [%s]",
self.prop,
",".join("%s" % col for col in self.local_columns),
)
log.info("%s relationship direction %s", self.prop, self.direction)
def _sanitize_joins(self) -> None:
"""remove the parententity annotation from our join conditions which
can leak in here based on some declarative patterns and maybe others.
"parentmapper" is relied upon both by the ORM evaluator as well as
the use case in _join_fixture_inh_selfref_w_entity
that relies upon it being present, see :ticket:`3364`.
"""
self.primaryjoin = _deep_deannotate(
self.primaryjoin, values=("parententity", "proxy_key")
)
if self.secondaryjoin is not None:
self.secondaryjoin = _deep_deannotate(
self.secondaryjoin, values=("parententity", "proxy_key")
)
def _determine_joins(self) -> None:
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError(
"Property %s specified with secondary "
"join condition but "
"no secondary argument" % self.prop
)
# find a join between the given mapper's mapped table and
# the given table. will try the mapper's local table first
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
try:
consider_as_foreign_keys = self.consider_as_foreign_keys or None
if self.secondary is not None:
if self.secondaryjoin is None:
self.secondaryjoin = join_condition(
self.child_persist_selectable,
self.secondary,
a_subset=self.child_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
if self.primaryjoin_initial is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.secondary,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
self.primaryjoin = self.primaryjoin_initial
else:
if self.primaryjoin_initial is None:
self.primaryjoin = join_condition(
self.parent_persist_selectable,
self.child_persist_selectable,
a_subset=self.parent_local_selectable,
consider_as_foreign_keys=consider_as_foreign_keys,
)
else:
self.primaryjoin = self.primaryjoin_initial
except sa_exc.NoForeignKeysError as nfe:
if self.secondary is not None:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables via secondary table '%s'. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify 'primaryjoin' and 'secondaryjoin' "
"expressions." % (self.prop, self.secondary)
) from nfe
else:
raise sa_exc.NoForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are no foreign keys "
"linking these tables. "
"Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or "
"specify a 'primaryjoin' expression." % self.prop
) from nfe
except sa_exc.AmbiguousForeignKeysError as afe:
if self.secondary is not None:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables via secondary table '%s'. "
"Specify the 'foreign_keys' "
"argument, providing a list of those columns which "
"should be counted as containing a foreign key "
"reference from the secondary table to each of the "
"parent and child tables." % (self.prop, self.secondary)
) from afe
else:
raise sa_exc.AmbiguousForeignKeysError(
"Could not determine join "
"condition between parent/child tables on "
"relationship %s - there are multiple foreign key "
"paths linking the tables. Specify the "
"'foreign_keys' argument, providing a list of those "
"columns which should be counted as containing a "
"foreign key reference to the parent table." % self.prop
) from afe
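# Illustrative sketch (hypothetical Order with two FKs to Address): either of
# the configurations below resolves the ambiguity described in the errors
# above.
#
#     billing_address = relationship(
#         "Address", foreign_keys=[billing_address_id]
#     )
#     shipping_address = relationship(
#         "Address", primaryjoin="Order.shipping_address_id == Address.id"
#     )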
@property
def primaryjoin_minus_local(self) -> ColumnElement[bool]:
return _deep_deannotate(self.primaryjoin, values=("local", "remote"))
@property
def secondaryjoin_minus_local(self) -> ColumnElement[bool]:
assert self.secondaryjoin is not None
return _deep_deannotate(self.secondaryjoin, values=("local", "remote"))
@util.memoized_property
def primaryjoin_reverse_remote(self) -> ColumnElement[bool]:
"""Return the primaryjoin condition suitable for the
"reverse" direction.
If the primaryjoin was delivered here with pre-existing
"remote" annotations, the local/remote annotations
are reversed. Otherwise, the local/remote annotations
are removed.
"""
if self._has_remote_annotations:
def replace(element: _CE, **kw: Any) -> Optional[_CE]:
if "remote" in element._annotations:
v = dict(element._annotations)
del v["remote"]
v["local"] = True
return element._with_annotations(v)
elif "local" in element._annotations:
v = dict(element._annotations)
del v["local"]
v["remote"] = True
return element._with_annotations(v)
return None
return visitors.replacement_traverse(self.primaryjoin, {}, replace)
else:
if self._has_foreign_annotations:
# TODO: coverage
return _deep_deannotate(
self.primaryjoin, values=("local", "remote")
)
else:
return _deep_deannotate(self.primaryjoin)
def _has_annotation(self, clause: ClauseElement, annotation: str) -> bool:
for col in visitors.iterate(clause, {}):
if annotation in col._annotations:
return True
else:
return False
@util.memoized_property
def _has_foreign_annotations(self) -> bool:
return self._has_annotation(self.primaryjoin, "foreign")
@util.memoized_property
def _has_remote_annotations(self) -> bool:
return self._has_annotation(self.primaryjoin, "remote")
def _annotate_fks(self) -> None:
"""Annotate the primaryjoin and secondaryjoin
structures with 'foreign' annotations marking columns
considered as foreign.
"""
if self._has_foreign_annotations:
return
if self.consider_as_foreign_keys:
self._annotate_from_fk_list()
else:
self._annotate_present_fks()
def _annotate_from_fk_list(self) -> None:
def check_fk(element: _CE, **kw: Any) -> Optional[_CE]:
if element in self.consider_as_foreign_keys:
return element._annotate({"foreign": True})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, check_fk
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, check_fk
)
def _annotate_present_fks(self) -> None:
if self.secondary is not None:
secondarycols = util.column_set(self.secondary.c)
else:
secondarycols = set()
def is_foreign(
a: ColumnElement[Any], b: ColumnElement[Any]
) -> Optional[ColumnElement[Any]]:
if isinstance(a, schema.Column) and isinstance(b, schema.Column):
if a.references(b):
return a
elif b.references(a):
return b
if secondarycols:
if a in secondarycols and b not in secondarycols:
return a
elif b in secondarycols and a not in secondarycols:
return b
return None
def visit_binary(binary: BinaryExpression[Any]) -> None:
if not isinstance(
binary.left, sql.ColumnElement
) or not isinstance(binary.right, sql.ColumnElement):
return
if (
"foreign" not in binary.left._annotations
and "foreign" not in binary.right._annotations
):
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
binary.left = binary.left._annotate({"foreign": True})
elif col.compare(binary.right):
binary.right = binary.right._annotate(
{"foreign": True}
)
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
if self.secondaryjoin is not None:
self.secondaryjoin = visitors.cloned_traverse(
self.secondaryjoin, {}, {"binary": visit_binary}
)
def _refers_to_parent_table(self) -> bool:
"""Return True if the join condition contains column
comparisons where both columns are in both tables.
"""
pt = self.parent_persist_selectable
mt = self.child_persist_selectable
result = False
def visit_binary(binary: BinaryExpression[Any]) -> None:
nonlocal result
c, f = binary.left, binary.right
if (
isinstance(c, expression.ColumnClause)
and isinstance(f, expression.ColumnClause)
and pt.is_derived_from(c.table)
and pt.is_derived_from(f.table)
and mt.is_derived_from(c.table)
and mt.is_derived_from(f.table)
):
result = True
visitors.traverse(self.primaryjoin, {}, {"binary": visit_binary})
return result
def _tables_overlap(self) -> bool:
"""Return True if parent/child tables have some overlap."""
return selectables_overlap(
self.parent_persist_selectable, self.child_persist_selectable
)
def _annotate_remote(self) -> None:
"""Annotate the primaryjoin and secondaryjoin
structures with 'remote' annotations marking columns
considered as part of the 'remote' side.
"""
if self._has_remote_annotations:
return
if self.secondary is not None:
self._annotate_remote_secondary()
elif self._local_remote_pairs or self._remote_side:
self._annotate_remote_from_args()
elif self._refers_to_parent_table():
self._annotate_selfref(
lambda col: "foreign" in col._annotations, False
)
elif self._tables_overlap():
self._annotate_remote_with_overlap()
else:
self._annotate_remote_distinct_selectables()
def _annotate_remote_secondary(self) -> None:
"""annotate 'remote' in primaryjoin, secondaryjoin
when 'secondary' is present.
"""
assert self.secondary is not None
fixed_secondary = self.secondary
def repl(element: _CE, **kw: Any) -> Optional[_CE]:
if fixed_secondary.c.contains_column(element):
return element._annotate({"remote": True})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
assert self.secondaryjoin is not None
self.secondaryjoin = visitors.replacement_traverse(
self.secondaryjoin, {}, repl
)
def _annotate_selfref(
self, fn: Callable[[ColumnElement[Any]], bool], remote_side_given: bool
) -> None:
"""annotate 'remote' in primaryjoin, secondaryjoin
when the relationship is detected as self-referential.
"""
def visit_binary(binary: BinaryExpression[Any]) -> None:
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and isinstance(
binary.right, expression.ColumnClause
):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
if fn(binary.right) and not equated:
binary.right = binary.right._annotate({"remote": True})
elif not remote_side_given:
self._warn_non_column_elements()
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
def _annotate_remote_from_args(self) -> None:
"""annotate 'remote' in primaryjoin, secondaryjoin
when the 'remote_side' or '_local_remote_pairs'
arguments are used.
"""
if self._local_remote_pairs:
if self._remote_side:
raise sa_exc.ArgumentError(
"remote_side argument is redundant "
"against more detailed _local_remote_side "
"argument."
)
remote_side = [r for (l, r) in self._local_remote_pairs]
else:
remote_side = self._remote_side
if self._refers_to_parent_table():
self._annotate_selfref(lambda col: col in remote_side, True)
else:
def repl(element: _CE, **kw: Any) -> Optional[_CE]:
# use set() to avoid generating ``__eq__()`` expressions
# against each element
if element in set(remote_side):
return element._annotate({"remote": True})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
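        # Editorial note (hedged, same hypothetical adjacency-list mapping):
        # supplying remote_side=[node.c.id] routes through this method
        # instead and marks ``id`` as the remote column, so the identical
        # join condition resolves to MANYTOONE (a "parent"-style
        # relationship).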
def _annotate_remote_with_overlap(self) -> None:
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables have some set of
        tables in common, though the relationship is not fully
        self-referential.
"""
def visit_binary(binary: BinaryExpression[Any]) -> None:
binary.left, binary.right = proc_left_right(
binary.left, binary.right
)
binary.right, binary.left = proc_left_right(
binary.right, binary.left
)
check_entities = (
self.prop is not None and self.prop.mapper is not self.prop.parent
)
def proc_left_right(
left: ColumnElement[Any], right: ColumnElement[Any]
) -> Tuple[ColumnElement[Any], ColumnElement[Any]]:
if isinstance(left, expression.ColumnClause) and isinstance(
right, expression.ColumnClause
):
if self.child_persist_selectable.c.contains_column(
right
) and self.parent_persist_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
elif (
check_entities
and right._annotations.get("parentmapper") is self.prop.mapper
):
right = right._annotate({"remote": True})
elif (
check_entities
and left._annotations.get("parentmapper") is self.prop.mapper
):
left = left._annotate({"remote": True})
else:
self._warn_non_column_elements()
return left, right
self.primaryjoin = visitors.cloned_traverse(
self.primaryjoin, {}, {"binary": visit_binary}
)
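        # Editorial note (hedged): this path typically covers joins between
        # selectables that share a table, such as two joined-table
        # inheritance mappings over a common base.  A column belonging to
        # the child selectable compared against a column of the parent
        # selectable is taken as remote; when containment alone cannot
        # decide, the "parentmapper" annotation placed earlier is consulted.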
def _annotate_remote_distinct_selectables(self) -> None:
"""annotate 'remote' in primaryjoin, secondaryjoin
when the parent/child tables are entirely
separate.
"""
def repl(element: _CE, **kw: Any) -> Optional[_CE]:
if self.child_persist_selectable.c.contains_column(element) and (
not self.parent_local_selectable.c.contains_column(element)
or self.child_local_selectable.c.contains_column(element)
):
return element._annotate({"remote": True})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl
)
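        # Editorial note (hedged): for the common two-table case with fully
        # distinct selectables, this reduces to marking any primaryjoin
        # column that belongs to the child selectable (and not solely to the
        # parent's local selectable) as remote.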
def _warn_non_column_elements(self) -> None:
util.warn(
"Non-simple column elements in primary "
"join condition for property %s - consider using "
"remote() annotations to mark the remote side." % self.prop
)
def _annotate_local(self) -> None:
"""Annotate the primaryjoin and secondaryjoin
structures with 'local' annotations.
This annotates all column elements found
simultaneously in the parent table
and the join condition that don't have a
'remote' annotation set up from
_annotate_remote() or user-defined.
"""
if self._has_annotation(self.primaryjoin, "local"):
return
if self._local_remote_pairs:
local_side = util.column_set(
[l for (l, r) in self._local_remote_pairs]
)
else:
local_side = util.column_set(self.parent_persist_selectable.c)
def locals_(element: _CE, **kw: Any) -> Optional[_CE]:
if "remote" not in element._annotations and element in local_side:
return element._annotate({"local": True})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
)
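        # Editorial note (hedged, hypothetical user/address mapping): "local"
        # lands on primaryjoin columns of the parent selectable that were not
        # already tagged remote, e.g. ``user.c.id`` in a join against
        # ``address.c.user_id``; these are the columns the lazy-clause
        # builder further below replaces with bind parameters.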
def _annotate_parentmapper(self) -> None:
def parentmappers_(element: _CE, **kw: Any) -> Optional[_CE]:
if "remote" in element._annotations:
return element._annotate({"parentmapper": self.prop.mapper})
elif "local" in element._annotations:
return element._annotate({"parentmapper": self.prop.parent})
return None
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, parentmappers_
)
def _check_remote_side(self) -> None:
if not self.local_remote_pairs:
raise sa_exc.ArgumentError(
"Relationship %s could "
"not determine any unambiguous local/remote column "
"pairs based on join condition and remote_side "
"arguments. "
"Consider using the remote() annotation to "
"accurately mark those elements of the join "
"condition that are on the remote side of "
"the relationship." % (self.prop,)
)
else:
not_target = util.column_set(
self.parent_persist_selectable.c
).difference(self.child_persist_selectable.c)
for _, rmt in self.local_remote_pairs:
if rmt in not_target:
util.warn(
"Expression %s is marked as 'remote', but these "
"column(s) are local to the local side. The "
"remote() annotation is needed only for a "
"self-referential relationship where both sides "
"of the relationship refer to the same tables."
% (rmt,)
)
def _check_foreign_cols(
self, join_condition: ColumnElement[bool], primary: bool
) -> None:
"""Check the foreign key columns collected and emit error
messages."""
can_sync = False
foreign_cols = self._gather_columns_with_annotation(
join_condition, "foreign"
)
has_foreign = bool(foreign_cols)
if primary:
can_sync = bool(self.synchronize_pairs)
else:
can_sync = bool(self.secondary_synchronize_pairs)
if (
self.support_sync
and can_sync
or (not self.support_sync and has_foreign)
):
return
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if self.support_sync and has_foreign and not can_sync:
err = (
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for "
"%s join condition "
"'%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation. To allow comparison operators other than "
"'==', the relationship can be marked as viewonly=True."
)
raise sa_exc.ArgumentError(err)
else:
err = (
"Could not locate any relevant foreign key columns "
"for %s join condition '%s' on relationship %s."
% (
primary and "primary" or "secondary",
join_condition,
self.prop,
)
)
err += (
" Ensure that referencing columns are associated "
"with a ForeignKey or ForeignKeyConstraint, or are "
"annotated in the join condition with the foreign() "
"annotation."
)
raise sa_exc.ArgumentError(err)
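        # Illustrative note (editorial; table names are hypothetical): a join
        # built only from non-equality operators, e.g.
        # ``foreign(interval.c.start) < point.c.value``, yields foreign()
        # columns but no synchronize_pairs and lands in the first error
        # branch above; marking the relationship viewonly=True is the
        # suggested resolution, since no persistence sync is then required.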
def _determine_direction(self) -> None:
"""Determine if this relationship is one to many, many to one,
many to many.
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
else:
parentcols = util.column_set(self.parent_persist_selectable.c)
targetcols = util.column_set(self.child_persist_selectable.c)
# fk collection which suggests ONETOMANY.
onetomany_fk = targetcols.intersection(self.foreign_key_columns)
# fk collection which suggests MANYTOONE.
manytoone_fk = parentcols.intersection(self.foreign_key_columns)
if onetomany_fk and manytoone_fk:
# fks on both sides. test for overlap of local/remote
# with foreign key.
# we will gather columns directly from their annotations
# without deannotating, so that we can distinguish on a column
# that refers to itself.
# 1. columns that are both remote and FK suggest
# onetomany.
onetomany_local = self._gather_columns_with_annotation(
self.primaryjoin, "remote", "foreign"
)
# 2. columns that are FK but are not remote (e.g. local)
# suggest manytoone.
manytoone_local = {
c
for c in self._gather_columns_with_annotation(
self.primaryjoin, "foreign"
)
if "remote" not in c._annotations
}
# 3. if both collections are present, remove columns that
# refer to themselves. This is for the case of
# and_(Me.id == Me.remote_id, Me.version == Me.version)
if onetomany_local and manytoone_local:
self_equated = self.remote_columns.intersection(
self.local_columns
)
onetomany_local = onetomany_local.difference(self_equated)
manytoone_local = manytoone_local.difference(self_equated)
# at this point, if only one or the other collection is
# present, we know the direction, otherwise it's still
# ambiguous.
if onetomany_local and not manytoone_local:
self.direction = ONETOMANY
elif manytoone_local and not onetomany_local:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship"
" direction for relationship '%s' - foreign "
"key columns within the join condition are present "
"in both the parent and the child's mapped tables. "
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
"via the foreign_keys argument." % self.prop
)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
self.direction = MANYTOONE
else:
raise sa_exc.ArgumentError(
"Can't determine relationship "
"direction for relationship '%s' - foreign "
"key columns are present in neither the parent "
"nor the child's mapped tables" % self.prop
)
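        # Illustrative example (hypothetical user/address mapping): with
        # ``address.c.user_id`` carrying a ForeignKey to ``user.c.id``, the
        # foreign key column appears only in the child selectable for the
        # User-side relationship, so onetomany_fk is non-empty and the
        # direction is ONETOMANY; the reverse relationship on Address sees
        # the same column in its parent selectable only and becomes
        # MANYTOONE.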
def _deannotate_pairs(
self, collection: _ColumnPairIterable
) -> _MutableColumnPairs:
"""provide deannotation for the various lists of
pairs, so that using them in hashes doesn't incur
high-overhead __eq__() comparisons against
        the originally mapped columns.
"""
return [(x._deannotate(), y._deannotate()) for x, y in collection]
def _setup_pairs(self) -> None:
sync_pairs: _MutableColumnPairs = []
lrp: util.OrderedSet[
Tuple[ColumnElement[Any], ColumnElement[Any]]
] = util.OrderedSet([])
secondary_sync_pairs: _MutableColumnPairs = []
def go(
joincond: ColumnElement[bool],
collection: _MutableColumnPairs,
) -> None:
def visit_binary(
binary: BinaryExpression[Any],
left: ColumnElement[Any],
right: ColumnElement[Any],
) -> None:
if (
"remote" in right._annotations
and "remote" not in left._annotations
and self.can_be_synced_fn(left)
):
lrp.add((left, right))
elif (
"remote" in left._annotations
and "remote" not in right._annotations
and self.can_be_synced_fn(right)
):
lrp.add((right, left))
if binary.operator is operators.eq and self.can_be_synced_fn(
left, right
):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
collection.append((right, left))
visit_binary_product(visit_binary, joincond)
for joincond, collection in [
(self.primaryjoin, sync_pairs),
(self.secondaryjoin, secondary_sync_pairs),
]:
if joincond is None:
continue
go(joincond, collection)
self.local_remote_pairs = self._deannotate_pairs(lrp)
self.synchronize_pairs = self._deannotate_pairs(sync_pairs)
self.secondary_synchronize_pairs = self._deannotate_pairs(
secondary_sync_pairs
)
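        # Illustrative note (hypothetical user/address mapping): for a
        # primaryjoin of ``user.c.id == foreign(address.c.user_id)`` the
        # visitor records (user.c.id, address.c.user_id) both as a
        # local/remote pair and as a synchronize pair, meaning the parent's
        # id value is copied onto the child's user_id during a flush.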
_track_overlapping_sync_targets: weakref.WeakKeyDictionary[
ColumnElement[Any],
weakref.WeakKeyDictionary[
RelationshipProperty[Any], ColumnElement[Any]
],
] = weakref.WeakKeyDictionary()
def _warn_for_conflicting_sync_targets(self) -> None:
if not self.support_sync:
return
# we would like to detect if we are synchronizing any column
# pairs in conflict with another relationship that wishes to sync
# an entirely different column to the same target. This is a
# very rare edge case so we will try to minimize the memory/overhead
# impact of this check
for from_, to_ in [
(from_, to_) for (from_, to_) in self.synchronize_pairs
] + [
(from_, to_) for (from_, to_) in self.secondary_synchronize_pairs
]:
# save ourselves a ton of memory and overhead by only
            # considering columns that are subject to overlapping
# FK constraints at the core level. This condition can arise
# if multiple relationships overlap foreign() directly, but
# we're going to assume it's typically a ForeignKeyConstraint-
# level configuration that benefits from this warning.
if to_ not in self._track_overlapping_sync_targets:
self._track_overlapping_sync_targets[
to_
] = weakref.WeakKeyDictionary({self.prop: from_})
else:
other_props = []
prop_to_from = self._track_overlapping_sync_targets[to_]
for pr, fr_ in prop_to_from.items():
if (
not pr.mapper._dispose_called
and pr not in self.prop._reverse_property
and pr.key not in self.prop._overlaps
and self.prop.key not in pr._overlaps
# note: the "__*" symbol is used internally by
# SQLAlchemy as a general means of suppressing the
# overlaps warning for some extension cases, however
# this is not currently
# a publicly supported symbol and may change at
# any time.
and "__*" not in self.prop._overlaps
and "__*" not in pr._overlaps
and not self.prop.parent.is_sibling(pr.parent)
and not self.prop.mapper.is_sibling(pr.mapper)
and not self.prop.parent.is_sibling(pr.mapper)
and not self.prop.mapper.is_sibling(pr.parent)
and (
self.prop.key != pr.key
or not self.prop.parent.common_parent(pr.parent)
)
):
other_props.append((pr, fr_))
if other_props:
util.warn(
"relationship '%s' will copy column %s to column %s, "
"which conflicts with relationship(s): %s. "
"If this is not the intention, consider if these "
"relationships should be linked with "
"back_populates, or if viewonly=True should be "
"applied to one or more if they are read-only. "
"For the less common case that foreign key "
"constraints are partially overlapping, the "
"orm.foreign() "
"annotation can be used to isolate the columns that "
"should be written towards. To silence this "
"warning, add the parameter 'overlaps=\"%s\"' to the "
"'%s' relationship."
% (
self.prop,
from_,
to_,
", ".join(
sorted(
"'%s' (copies %s to %s)" % (pr, fr_, to_)
for (pr, fr_) in other_props
)
),
",".join(sorted(pr.key for pr, fr in other_props)),
self.prop,
),
code="qzyx",
)
self._track_overlapping_sync_targets[to_][self.prop] = from_
@util.memoized_property
def remote_columns(self) -> Set[ColumnElement[Any]]:
return self._gather_join_annotations("remote")
@util.memoized_property
def local_columns(self) -> Set[ColumnElement[Any]]:
return self._gather_join_annotations("local")
@util.memoized_property
def foreign_key_columns(self) -> Set[ColumnElement[Any]]:
return self._gather_join_annotations("foreign")
def _gather_join_annotations(
self, annotation: str
) -> Set[ColumnElement[Any]]:
s = set(
self._gather_columns_with_annotation(self.primaryjoin, annotation)
)
if self.secondaryjoin is not None:
s.update(
self._gather_columns_with_annotation(
self.secondaryjoin, annotation
)
)
return {x._deannotate() for x in s}
def _gather_columns_with_annotation(
self, clause: ColumnElement[Any], *annotation: Iterable[str]
) -> Set[ColumnElement[Any]]:
annotation_set = set(annotation)
return {
cast(ColumnElement[Any], col)
for col in visitors.iterate(clause, {})
if annotation_set.issubset(col._annotations)
}
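        # Editorial note: passing several annotation names, e.g.
        # ("remote", "foreign"), returns only columns carrying all of them;
        # _determine_direction() above relies on exactly that to find
        # columns that are simultaneously remote and foreign.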
def join_targets(
self,
source_selectable: Optional[FromClause],
dest_selectable: FromClause,
aliased: bool,
single_crit: Optional[ColumnElement[bool]] = None,
extra_criteria: Tuple[ColumnElement[bool], ...] = (),
) -> Tuple[
ColumnElement[bool],
Optional[ColumnElement[bool]],
Optional[FromClause],
Optional[ClauseAdapter],
FromClause,
]:
"""Given a source and destination selectable, create a
join between them.
This takes into account aliasing the join clause
to reference the appropriate corresponding columns
in the target objects, as well as the extra child
criterion, equivalent column sets, etc.
"""
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable, {"no_replacement_traverse": True}
)
primaryjoin, secondaryjoin, secondary = (
self.primaryjoin,
self.secondaryjoin,
self.secondary,
)
# adjust the join condition for single table inheritance,
# in the case that the join is to a subclass
# this is analogous to the
# "_adjust_for_single_table_inheritance()" method in Query.
if single_crit is not None:
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & single_crit
else:
primaryjoin = primaryjoin & single_crit
if extra_criteria:
def mark_unrelated_columns_as_ok_to_adapt(
elem: SupportsAnnotations, annotations: _AnnotationDict
) -> SupportsAnnotations:
"""note unrelated columns in the "extra criteria" as OK
to adapt, even though they are not part of our "local"
or "remote" side.
see #9779 for this case
"""
parentmapper_for_element = elem._annotations.get(
"parentmapper", None
)
if (
parentmapper_for_element is not self.prop.parent
and parentmapper_for_element is not self.prop.mapper
):
return _safe_annotate(elem, annotations)
else:
return elem
extra_criteria = tuple(
_deep_annotate(
elem,
{"ok_to_adapt_in_join_condition": True},
annotate_callable=mark_unrelated_columns_as_ok_to_adapt,
)
for elem in extra_criteria
)
if secondaryjoin is not None:
secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)
else:
primaryjoin = primaryjoin & sql.and_(*extra_criteria)
if aliased:
if secondary is not None:
secondary = secondary._anonymous_fromclause(flat=True)
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
)
secondary_aliasizer = ClauseAdapter(
dest_selectable, equivalents=self.child_equivalents
).chain(primary_aliasizer)
if source_selectable is not None:
primary_aliasizer = ClauseAdapter(
secondary, exclude_fn=_ColInAnnotations("local")
).chain(
ClauseAdapter(
source_selectable,
equivalents=self.parent_equivalents,
)
)
secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)
else:
primary_aliasizer = ClauseAdapter(
dest_selectable,
exclude_fn=_ColInAnnotations("local"),
equivalents=self.child_equivalents,
)
if source_selectable is not None:
primary_aliasizer.chain(
ClauseAdapter(
source_selectable,
exclude_fn=_ColInAnnotations("remote"),
equivalents=self.parent_equivalents,
)
)
secondary_aliasizer = None
primaryjoin = primary_aliasizer.traverse(primaryjoin)
target_adapter = secondary_aliasizer or primary_aliasizer
target_adapter.exclude_fn = None
else:
target_adapter = None
return (
primaryjoin,
secondaryjoin,
secondary,
target_adapter,
dest_selectable,
)
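        # Editorial note (hedged): the aliased=True branch is what eager
        # loads and aliased joins go through -- the join conditions are
        # rewritten against the destination selectable (and against an
        # anonymous alias of the secondary table, when one is present),
        # while the resulting target_adapter is handed back so callers can
        # adapt any further criteria consistently.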
def create_lazy_clause(
self, reverse_direction: bool = False
) -> Tuple[
ColumnElement[bool],
Dict[str, ColumnElement[Any]],
Dict[ColumnElement[Any], ColumnElement[Any]],
]:
binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}
equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}
has_secondary = self.secondaryjoin is not None
if has_secondary:
lookup = collections.defaultdict(list)
for l, r in self.local_remote_pairs:
lookup[l].append((l, r))
equated_columns[r] = l
elif not reverse_direction:
for l, r in self.local_remote_pairs:
equated_columns[r] = l
else:
for l, r in self.local_remote_pairs:
equated_columns[l] = r
def col_to_bind(
element: ColumnElement[Any], **kw: Any
) -> Optional[BindParameter[Any]]:
if (
(not reverse_direction and "local" in element._annotations)
or reverse_direction
and (
(has_secondary and element in lookup)
or (not has_secondary and "remote" in element._annotations)
)
):
if element not in binds:
binds[element] = sql.bindparam(
None, None, type_=element.type, unique=True
)
return binds[element]
return None
lazywhere = self.primaryjoin
if self.secondaryjoin is None or not reverse_direction:
lazywhere = visitors.replacement_traverse(
lazywhere, {}, col_to_bind
)
if self.secondaryjoin is not None:
secondaryjoin = self.secondaryjoin
if reverse_direction:
secondaryjoin = visitors.replacement_traverse(
secondaryjoin, {}, col_to_bind
)
lazywhere = sql.and_(lazywhere, secondaryjoin)
bind_to_col = {binds[col].key: col for col in binds}
return lazywhere, bind_to_col, equated_columns
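        # Illustrative sketch (hypothetical user/address mapping): for the
        # primaryjoin ``user.c.id == address.c.user_id``, the non-reversed
        # lazy clause comes out roughly as ``:param = address.user_id`` with
        # bind_to_col mapping the generated parameter name back to
        # ``user.c.id``, letting the lazy loader plug in the parent's
        # primary key value at load time.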
class _ColInAnnotations:
"""Serializable object that tests for a name in c._annotations."""
__slots__ = ("name",)
def __init__(self, name: str):
self.name = name
def __call__(self, c: ClauseElement) -> bool:
return (
self.name in c._annotations
or "ok_to_adapt_in_join_condition" in c._annotations
)
class Relationship( # type: ignore
RelationshipProperty[_T],
_DeclarativeMapped[_T],
WriteOnlyMapped[_T], # not compatible with Mapped[_T]
DynamicMapped[_T], # not compatible with Mapped[_T]
):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`_orm.relationship` function.
.. seealso::
:ref:`relationship_config_toplevel`
.. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative
compatible subclass for :class:`_orm.RelationshipProperty`.
"""
inherit_cache = True
""":meta private:"""
getattr(self.property.mapper.class_, k) == kwargs[k]\n if where_criteria is None:\n where_criteria = crit\n else:\n where_criteria = where_criteria & crit\n if sj is not None:\n j = _orm_annotate(pj) & sj\n else:\n j = _orm_annotate(pj, exclude=self.property.remote_side)\n if (where_criteria is not None and target_adapter and not\n is_aliased_class):\n where_criteria = target_adapter.traverse(where_criteria)\n if where_criteria is not None:\n where_criteria = where_criteria._annotate({\n 'no_replacement_traverse': True})\n crit = j & sql.True_._ifnone(where_criteria)\n if secondary is not None:\n ex = sql.exists(1).where(crit).select_from(dest, secondary\n ).correlate_except(dest, secondary)\n else:\n ex = sql.exists(1).where(crit).select_from(dest\n ).correlate_except(dest)\n return ex\n\n def any(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a collection against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.any(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id\n AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.any` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.any` is particularly\n useful for testing for empty collections::\n\n session.query(MyClass).filter(\n ~MyClass.somereference.any()\n )\n\n will produce::\n\n SELECT * FROM my_table WHERE\n NOT (EXISTS (SELECT 1 FROM related WHERE\n related.my_id=my_table.id))\n\n :meth:`~.Relationship.Comparator.any` is only\n valid for collections, i.e. a :func:`_orm.relationship`\n that has ``uselist=True``. For scalar references,\n use :meth:`~.Relationship.Comparator.has`.\n\n \"\"\"\n if not self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'any()' not implemented for scalar attributes. Use has().\"\n )\n return self._criterion_exists(criterion, **kwargs)\n\n def has(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a scalar reference against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.has(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE\n related.id==my_table.related_id AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.has` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.has` is only\n valid for scalar references, i.e. a :func:`_orm.relationship`\n that has ``uselist=False``. For collection references,\n use :meth:`~.Relationship.Comparator.any`.\n\n \"\"\"\n if self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'has()' not implemented for collections. Use any().\")\n return self._criterion_exists(criterion, **kwargs)\n\n def contains(self, other: _ColumnExpressionArgument[Any], **kwargs: Any\n ) ->ColumnElement[bool]:\n \"\"\"Return a simple expression that tests a collection for\n containment of a particular item.\n\n :meth:`~.Relationship.Comparator.contains` is\n only valid for a collection, i.e. 
a\n :func:`_orm.relationship` that implements\n one-to-many or many-to-many with ``uselist=True``.\n\n When used in a simple one-to-many context, an\n expression like::\n\n MyClass.contains(other)\n\n Produces a clause like::\n\n mytable.id == <some id>\n\n Where ``<some id>`` is the value of the foreign key\n attribute on ``other`` which refers to the primary\n key of its parent object. From this it follows that\n :meth:`~.Relationship.Comparator.contains` is\n very useful when used with simple one-to-many\n operations.\n\n For many-to-many operations, the behavior of\n :meth:`~.Relationship.Comparator.contains`\n has more caveats. The association table will be\n rendered in the statement, producing an \"implicit\"\n join, that is, includes multiple tables in the FROM\n clause which are equated in the WHERE clause::\n\n query(MyClass).filter(MyClass.contains(other))\n\n Produces a query like::\n\n SELECT * FROM my_table, my_association_table AS\n my_association_table_1 WHERE\n my_table.id = my_association_table_1.parent_id\n AND my_association_table_1.child_id = <some id>\n\n Where ``<some id>`` would be the primary key of\n ``other``. From the above, it is clear that\n :meth:`~.Relationship.Comparator.contains`\n will **not** work with many-to-many collections when\n used in queries that move beyond simple AND\n conjunctions, such as multiple\n :meth:`~.Relationship.Comparator.contains`\n expressions joined by OR. In such cases subqueries or\n explicit \"outer joins\" will need to be used instead.\n See :meth:`~.Relationship.Comparator.any` for\n a less-performant alternative using EXISTS, or refer\n to :meth:`_query.Query.outerjoin`\n as well as :ref:`orm_queryguide_joins`\n for more details on constructing outer joins.\n\n kwargs may be ignored by this operator but are required for API\n conformance.\n \"\"\"\n if not self.prop.uselist:\n raise sa_exc.InvalidRequestError(\n \"'contains' not implemented for scalar attributes. 
Use ==\"\n )\n clause = self.prop._optimized_compare(other, adapt_source=self.\n adapter)\n if self.prop.secondaryjoin is not None:\n clause.negation_clause = self.__negated_contains_or_equals(\n other)\n return clause\n\n def __negated_contains_or_equals(self, other: Any) ->ColumnElement[bool\n ]:\n if self.prop.direction == MANYTOONE:\n state = attributes.instance_state(other)\n\n def state_bindparam(local_col: ColumnElement[Any], state:\n InstanceState[Any], remote_col: ColumnElement[Any]\n ) ->BindParameter[Any]:\n dict_ = state.dict\n return sql.bindparam(local_col.key, type_=local_col.\n type, unique=True, callable_=self.prop.\n _get_attr_w_warn_on_none(self.prop.mapper, state,\n dict_, remote_col))\n\n def adapt(col: _CE) ->_CE:\n if self.adapter:\n return self.adapter(col)\n else:\n return col\n if self.property._use_get:\n return sql.and_(*[sql.or_(adapt(x) != state_bindparam(\n adapt(x), state, y), adapt(x) == None) for x, y in\n self.property.local_remote_pairs])\n criterion = sql.and_(*[(x == y) for x, y in zip(self.property.\n mapper.primary_key, self.property.mapper.\n primary_key_from_instance(other))])\n return ~self._criterion_exists(criterion)\n\n def __ne__(self, other: Any) ->ColumnElement[bool]:\n \"\"\"Implement the ``!=`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop != <some object>\n\n This will typically produce a clause such as::\n\n mytable.related_id != <some id>\n\n Where ``<some id>`` is the primary key of the\n given object.\n\n The ``!=`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use\n :meth:`~.Relationship.Comparator.contains`\n in conjunction with :func:`_expression.not_`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. 
Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` in\n conjunction with :func:`_expression.not_` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce an EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction == MANYTOONE:\n return _orm_annotate(~self.property._optimized_compare(\n None, adapt_source=self.adapter))\n else:\n return self._criterion_exists()\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.__negated_contains_or_equals(other))\n\n def _memoized_attr_property(self) ->RelationshipProperty[_PT]:\n self.prop.parent._check_configure()\n return self.prop\n\n def _with_parent(self, instance: object, alias_secondary: bool=True,\n from_entity: Optional[_EntityType[Any]]=None) ->ColumnElement[bool]:\n assert instance is not None\n adapt_source: Optional[_CoreAdapterProto] = None\n if from_entity is not None:\n insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)\n assert insp is not None\n if insp_is_aliased_class(insp):\n adapt_source = insp._adapter.adapt_clause\n return self._optimized_compare(instance, value_is_parent=True,\n adapt_source=adapt_source, alias_secondary=alias_secondary)\n <mask token>\n\n def _get_attr_w_warn_on_none(self, mapper: Mapper[Any], state:\n InstanceState[Any], dict_: _InstanceDict, column: ColumnElement[Any]\n ) ->Callable[[], Any]:\n \"\"\"Create the callable that is used in a many-to-one expression.\n\n E.g.::\n\n u1 = s.query(User).get(5)\n\n expr = Address.user == u1\n\n Above, the SQL should be \"address.user_id = 5\". 
The callable\n returned by this method produces the value \"5\" based on the identity\n of ``u1``.\n\n \"\"\"\n prop = mapper.get_property_by_column(column)\n state._track_last_known_value(prop.key)\n lkv_fixed = state._last_known_values\n\n def _go() ->Any:\n assert lkv_fixed is not None\n last_known = to_return = lkv_fixed[prop.key]\n existing_is_available = (last_known is not LoaderCallableStatus\n .NO_VALUE)\n current_value = mapper._get_state_attr_by_column(state, dict_,\n column, passive=PassiveFlag.PASSIVE_OFF if state.persistent\n else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK)\n if current_value is LoaderCallableStatus.NEVER_SET:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; no value has been set for this column\"\n % (column, state_str(state)))\n elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; the object is detached and the value was expired\"\n % (column, state_str(state)))\n else:\n to_return = current_value\n if to_return is None:\n util.warn(\n 'Got None for value of column %s; this is unsupported for a relationship comparison and will not currently produce an IS comparison (but may in a future release)'\n % column)\n return to_return\n return _go\n\n def _lazy_none_clause(self, reverse_direction: bool=False, adapt_source:\n Optional[_CoreAdapterProto]=None) ->ColumnElement[bool]:\n if not reverse_direction:\n criterion, bind_to_col = (self._lazy_strategy._lazywhere, self.\n _lazy_strategy._bind_to_col)\n else:\n criterion, bind_to_col = (self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col)\n criterion = adapt_criterion_to_null(criterion, bind_to_col)\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n <mask token>\n\n def merge(self, session: Session, source_state: InstanceState[Any],\n source_dict: _InstanceDict, dest_state: InstanceState[Any],\n dest_dict: _InstanceDict, load: bool, _recursive: Dict[Any, object],\n _resolve_conflict_map: Dict[_IdentityKeyType[Any], object]) ->None:\n if load:\n for r in self._reverse_property:\n if (source_state, r) in _recursive:\n return\n if 'merge' not in self._cascade:\n return\n if self.key not in source_dict:\n return\n if self.uselist:\n impl = source_state.get_impl(self.key)\n assert is_has_collection_adapter(impl)\n instances_iterable = impl.get_collection(source_state, source_dict)\n assert not instances_iterable.empty if impl.collection else True\n if load:\n dest_state.get_impl(self.key).get(dest_state, dest_dict,\n passive=PassiveFlag.PASSIVE_MERGE)\n dest_list = []\n for current in instances_iterable:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n if obj is not None:\n dest_list.append(obj)\n if not load:\n coll = attributes.init_state_collection(dest_state,\n dest_dict, self.key)\n for c in dest_list:\n coll.append_without_event(c)\n else:\n dest_impl = dest_state.get_impl(self.key)\n assert is_has_collection_adapter(dest_impl)\n dest_impl.set(dest_state, dest_dict, dest_list, _adapt=\n False, passive=PassiveFlag.PASSIVE_MERGE)\n else:\n current = source_dict[self.key]\n if current is not None:\n current_state = 
attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n else:\n obj = None\n if not load:\n dest_dict[self.key] = obj\n else:\n dest_state.get_impl(self.key).set(dest_state, dest_dict,\n obj, None)\n\n def _value_as_iterable(self, state: InstanceState[_O], dict_:\n _InstanceDict, key: str, passive: PassiveFlag=PassiveFlag.PASSIVE_OFF\n ) ->Sequence[Tuple[InstanceState[_O], _O]]:\n \"\"\"Return a list of tuples (state, obj) for the given\n key.\n\n returns an empty list if the value is None/empty/PASSIVE_NO_RESULT\n \"\"\"\n impl = state.manager[key].impl\n x = impl.get(state, dict_, passive=passive)\n if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:\n return []\n elif is_has_collection_adapter(impl):\n return [(attributes.instance_state(o), o) for o in impl.\n get_collection(state, dict_, x, passive=passive)]\n else:\n return [(attributes.instance_state(x), x)]\n\n def cascade_iterator(self, type_: str, state: InstanceState[Any], dict_:\n _InstanceDict, visited_states: Set[InstanceState[Any]], halt_on:\n Optional[Callable[[InstanceState[Any]], bool]]=None) ->Iterator[Tuple\n [Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:\n if type_ != 'delete' or self.passive_deletes:\n passive = PassiveFlag.PASSIVE_NO_INITIALIZE\n else:\n passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE\n if type_ == 'save-update':\n tuples = state.manager[self.key].impl.get_all_pending(state, dict_)\n else:\n tuples = self._value_as_iterable(state, dict_, self.key,\n passive=passive)\n skip_pending = (type_ == 'refresh-expire' and 'delete-orphan' not in\n self._cascade)\n for instance_state, c in tuples:\n if instance_state in visited_states:\n continue\n if c is None:\n continue\n assert instance_state is not None\n instance_dict = attributes.instance_dict(c)\n if halt_on and halt_on(instance_state):\n continue\n if skip_pending and not instance_state.key:\n continue\n instance_mapper = instance_state.manager.mapper\n if not instance_mapper.isa(self.mapper.class_manager.mapper):\n raise AssertionError(\n \"Attribute '%s' on class '%s' doesn't handle objects of type '%s'\"\n % (self.key, self.parent.class_, c.__class__))\n visited_states.add(instance_state)\n yield c, instance_mapper, instance_state, instance_dict\n <mask token>\n\n @staticmethod\n def _check_sync_backref(rel_a: RelationshipProperty[Any], rel_b:\n RelationshipProperty[Any]) ->None:\n if rel_a.viewonly and rel_b.sync_backref:\n raise sa_exc.InvalidRequestError(\n 'Relationship %s cannot specify sync_backref=True since %s includes viewonly=True.'\n % (rel_b, rel_a))\n if (rel_a.viewonly and not rel_b.viewonly and rel_b.sync_backref is not\n False):\n rel_b.sync_backref = False\n\n def _add_reverse_property(self, key: str) ->None:\n other = self.mapper.get_property(key, _configure_mappers=False)\n if not isinstance(other, RelationshipProperty):\n raise sa_exc.InvalidRequestError(\n \"back_populates on relationship '%s' refers to attribute '%s' that is not a relationship. 
The back_populates parameter should refer to the name of a relationship on the target class.\"\n % (self, other))\n self._check_sync_backref(self, other)\n self._check_sync_backref(other, self)\n self._reverse_property.add(other)\n other._reverse_property.add(self)\n other._setup_entity()\n if not other.mapper.common_parent(self.parent):\n raise sa_exc.ArgumentError(\n 'reverse_property %r on relationship %s references relationship %s, which does not reference mapper %s'\n % (key, self, other, self.parent))\n if other._configure_started and self.direction in (ONETOMANY, MANYTOONE\n ) and self.direction == other.direction:\n raise sa_exc.ArgumentError(\n '%s and back-reference %s are both of the same direction %r. Did you mean to set remote_side on the many-to-one side ?'\n % (other, self, self.direction))\n\n @util.memoized_property\n def entity(self) ->_InternalEntityType[_T]:\n \"\"\"Return the target mapped entity, which is an inspect() of the\n class or aliased class that is referred towards.\n\n \"\"\"\n self.parent._check_configure()\n return self.entity\n\n @util.memoized_property\n def mapper(self) ->Mapper[_T]:\n \"\"\"Return the targeted :class:`_orm.Mapper` for this\n :class:`.RelationshipProperty`.\n\n \"\"\"\n return self.entity.mapper\n\n def do_init(self) ->None:\n self._check_conflicts()\n self._process_dependent_arguments()\n self._setup_entity()\n self._setup_registry_dependencies()\n self._setup_join_conditions()\n self._check_cascade_settings(self._cascade)\n self._post_init()\n self._generate_backref()\n self._join_condition._warn_for_conflicting_sync_targets()\n super().do_init()\n self._lazy_strategy = cast('LazyLoader', self._get_strategy(((\n 'lazy', 'select'),)))\n\n def _setup_registry_dependencies(self) ->None:\n self.parent.mapper.registry._set_depends_on(self.entity.mapper.registry\n )\n\n def _process_dependent_arguments(self) ->None:\n \"\"\"Convert incoming configuration arguments to their\n proper form.\n\n Callables are resolved, ORM annotations removed.\n\n \"\"\"\n init_args = self._init_args\n for attr in ('order_by', 'primaryjoin', 'secondaryjoin',\n 'secondary', 'foreign_keys', 'remote_side'):\n rel_arg = getattr(init_args, attr)\n rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])\n for attr in ('primaryjoin', 'secondaryjoin'):\n rel_arg = getattr(init_args, attr)\n val = rel_arg.resolved\n if val is not None:\n rel_arg.resolved = _orm_deannotate(coercions.expect(roles.\n ColumnArgumentRole, val, argname=attr))\n secondary = init_args.secondary.resolved\n if secondary is not None and _is_mapped_class(secondary):\n raise sa_exc.ArgumentError(\n \"secondary argument %s passed to to relationship() %s must be a Table object or other FROM clause; can't send a mapped class directly as rows in 'secondary' are persisted independently of a class that is mapped to that same table.\"\n % (secondary, self))\n if (init_args.order_by.resolved is not False and init_args.order_by\n .resolved is not None):\n self.order_by = tuple(coercions.expect(roles.ColumnArgumentRole,\n x, argname='order_by') for x in util.to_list(init_args.\n order_by.resolved))\n else:\n self.order_by = False\n self._user_defined_foreign_keys = util.column_set(coercions.expect(\n roles.ColumnArgumentRole, x, argname='foreign_keys') for x in\n util.to_column_set(init_args.foreign_keys.resolved))\n self.remote_side = util.column_set(coercions.expect(roles.\n ColumnArgumentRole, x, argname='remote_side') for x in util.\n to_column_set(init_args.remote_side.resolved))\n\n def 
declarative_scan(self, decl_scan: _ClassScanMapperConfig, registry:\n _RegistryType, cls: Type[Any], originating_module: Optional[str],\n key: str, mapped_container: Optional[Type[Mapped[Any]]], annotation:\n Optional[_AnnotationScanType], extracted_mapped_annotation:\n Optional[_AnnotationScanType], is_dataclass_field: bool) ->None:\n argument = extracted_mapped_annotation\n if extracted_mapped_annotation is None:\n if self.argument is None:\n self._raise_for_required(key, cls)\n else:\n return\n argument = extracted_mapped_annotation\n assert originating_module is not None\n is_write_only = mapped_container is not None and issubclass(\n mapped_container, WriteOnlyMapped)\n if is_write_only:\n self.lazy = 'write_only'\n self.strategy_key = ('lazy', self.lazy),\n is_dynamic = mapped_container is not None and issubclass(\n mapped_container, DynamicMapped)\n if is_dynamic:\n self.lazy = 'dynamic'\n self.strategy_key = ('lazy', self.lazy),\n argument = de_optionalize_union_types(argument)\n if hasattr(argument, '__origin__'):\n arg_origin = argument.__origin__\n if isinstance(arg_origin, type) and issubclass(arg_origin, abc.\n Collection):\n if self.collection_class is None:\n if _py_inspect.isabstract(arg_origin):\n raise sa_exc.ArgumentError(\n f\"Collection annotation type {arg_origin} cannot be instantiated; please provide an explicit 'collection_class' parameter (e.g. list, set, etc.) to the relationship() function to accompany this annotation\"\n )\n self.collection_class = arg_origin\n elif not is_write_only and not is_dynamic:\n self.uselist = False\n if argument.__args__:\n if isinstance(arg_origin, type) and issubclass(arg_origin,\n typing.Mapping):\n type_arg = argument.__args__[-1]\n else:\n type_arg = argument.__args__[0]\n if hasattr(type_arg, '__forward_arg__'):\n str_argument = type_arg.__forward_arg__\n argument = resolve_name_to_real_class_name(str_argument,\n originating_module)\n else:\n argument = type_arg\n else:\n raise sa_exc.ArgumentError(\n f'Generic alias {argument} requires an argument')\n elif hasattr(argument, '__forward_arg__'):\n argument = argument.__forward_arg__\n argument = resolve_name_to_real_class_name(argument,\n originating_module)\n if (self.collection_class is None and not is_write_only and not\n is_dynamic):\n self.uselist = False\n if self.argument is None:\n self.argument = cast('_RelationshipArgumentType[_T]', argument)\n\n @util.preload_module('sqlalchemy.orm.mapper')\n def _setup_entity(self, __argument: Any=None) ->None:\n if 'entity' in self.__dict__:\n return\n mapperlib = util.preloaded.orm_mapper\n if __argument:\n argument = __argument\n else:\n argument = self.argument\n resolved_argument: _ExternalEntityType[Any]\n if isinstance(argument, str):\n resolved_argument = cast('_ExternalEntityType[Any]', self.\n _clsregistry_resolve_name(argument)())\n elif callable(argument) and not isinstance(argument, (type,\n mapperlib.Mapper)):\n resolved_argument = argument()\n else:\n resolved_argument = argument\n entity: _InternalEntityType[Any]\n if isinstance(resolved_argument, type):\n entity = class_mapper(resolved_argument, configure=False)\n else:\n try:\n entity = inspect(resolved_argument)\n except sa_exc.NoInspectionAvailable:\n entity = None\n if not hasattr(entity, 'mapper'):\n raise sa_exc.ArgumentError(\n \"relationship '%s' expects a class or a mapper argument (received: %s)\"\n % (self.key, type(resolved_argument)))\n self.entity = entity\n self.target = self.entity.persist_selectable\n\n def _setup_join_conditions(self) ->None:\n 
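        # Descriptive note: the JoinCondition constructed below is handed the
        # parent/child persist- and local-selectables together with any
        # user-supplied primaryjoin, secondary, secondaryjoin, foreign_keys and
        # remote_side arguments; the primaryjoin, secondaryjoin, direction,
        # local/remote pairs and synchronization pairs it derives are then
        # copied back onto this RelationshipProperty.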
self._join_condition = jc = JoinCondition(parent_persist_selectable\n =self.parent.persist_selectable, child_persist_selectable=self.\n entity.persist_selectable, parent_local_selectable=self.parent.\n local_table, child_local_selectable=self.entity.local_table,\n primaryjoin=self._init_args.primaryjoin.resolved, secondary=\n self._init_args.secondary.resolved, secondaryjoin=self.\n _init_args.secondaryjoin.resolved, parent_equivalents=self.\n parent._equivalent_columns, child_equivalents=self.mapper.\n _equivalent_columns, consider_as_foreign_keys=self.\n _user_defined_foreign_keys, local_remote_pairs=self.\n local_remote_pairs, remote_side=self.remote_side,\n self_referential=self._is_self_referential, prop=self,\n support_sync=not self.viewonly, can_be_synced_fn=self.\n _columns_are_mapped)\n self.primaryjoin = jc.primaryjoin\n self.secondaryjoin = jc.secondaryjoin\n self.secondary = jc.secondary\n self.direction = jc.direction\n self.local_remote_pairs = jc.local_remote_pairs\n self.remote_side = jc.remote_columns\n self.local_columns = jc.local_columns\n self.synchronize_pairs = jc.synchronize_pairs\n self._calculated_foreign_keys = jc.foreign_key_columns\n self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs\n\n @property\n def _clsregistry_resolve_arg(self) ->Callable[[str, bool], _class_resolver\n ]:\n return self._clsregistry_resolvers[1]\n\n @property\n def _clsregistry_resolve_name(self) ->Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]]:\n return self._clsregistry_resolvers[0]\n\n @util.memoized_property\n @util.preload_module('sqlalchemy.orm.clsregistry')\n def _clsregistry_resolvers(self) ->Tuple[Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]], Callable[[str, bool],\n _class_resolver]]:\n _resolver = util.preloaded.orm_clsregistry._resolver\n return _resolver(self.parent.class_, self)\n\n def _check_conflicts(self) ->None:\n \"\"\"Test that this relationship is legal, warn about\n inheritance conflicts.\"\"\"\n if self.parent.non_primary and not class_mapper(self.parent.class_,\n configure=False).has_property(self.key):\n raise sa_exc.ArgumentError(\n \"Attempting to assign a new relationship '%s' to a non-primary mapper on class '%s'. New relationships can only be added to the primary mapper, i.e. the very first mapper created for class '%s' \"\n % (self.key, self.parent.class_.__name__, self.parent.\n class_.__name__))\n\n @property\n def cascade(self) ->CascadeOptions:\n \"\"\"Return the current cascade setting for this\n :class:`.RelationshipProperty`.\n \"\"\"\n return self._cascade\n <mask token>\n\n def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) ->None:\n cascade = CascadeOptions(cascade_arg)\n if self.viewonly:\n cascade = CascadeOptions(cascade.intersection(CascadeOptions.\n _viewonly_cascades))\n if 'mapper' in self.__dict__:\n self._check_cascade_settings(cascade)\n self._cascade = cascade\n if self._dependency_processor:\n self._dependency_processor.cascade = cascade\n\n def _check_cascade_settings(self, cascade: CascadeOptions) ->None:\n if cascade.delete_orphan and not self.single_parent and (self.\n direction is MANYTOMANY or self.direction is MANYTOONE):\n raise sa_exc.ArgumentError(\n 'For %(direction)s relationship %(rel)s, delete-orphan cascade is normally configured only on the \"one\" side of a one-to-many relationship, and not on the \"many\" side of a many-to-one or many-to-many relationship. 
To force this relationship to allow a particular \"%(relatedcls)s\" object to be referred towards by only a single \"%(clsname)s\" object at a time via the %(rel)s relationship, which would allow delete-orphan cascade to take place in this direction, set the single_parent=True flag.'\n % {'rel': self, 'direction': 'many-to-one' if self.\n direction is MANYTOONE else 'many-to-many', 'clsname': self\n .parent.class_.__name__, 'relatedcls': self.mapper.class_.\n __name__}, code='bbf0')\n if self.passive_deletes == 'all' and ('delete' in cascade or \n 'delete-orphan' in cascade):\n raise sa_exc.ArgumentError(\n \"On %s, can't set passive_deletes='all' in conjunction with 'delete' or 'delete-orphan' cascade\"\n % self)\n if cascade.delete_orphan:\n self.mapper.primary_mapper()._delete_orphans.append((self.key,\n self.parent.class_))\n\n def _persists_for(self, mapper: Mapper[Any]) ->bool:\n \"\"\"Return True if this property will persist values on behalf\n of the given mapper.\n\n \"\"\"\n return self.key in mapper.relationships and mapper.relationships[self\n .key] is self\n\n def _columns_are_mapped(self, *cols: ColumnElement[Any]) ->bool:\n \"\"\"Return True if all columns in the given collection are\n mapped by the tables referenced by this :class:`.RelationshipProperty`.\n\n \"\"\"\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(c\n ) and not self.target.c.contains_column(c):\n return False\n return True\n\n def _generate_backref(self) ->None:\n \"\"\"Interpret the 'backref' instruction to create a\n :func:`_orm.relationship` complementary to this one.\"\"\"\n if self.parent.non_primary:\n return\n if self.backref is not None and not self.back_populates:\n kwargs: Dict[str, Any]\n if isinstance(self.backref, str):\n backref_key, kwargs = self.backref, {}\n else:\n backref_key, kwargs = self.backref\n mapper = self.mapper.primary_mapper()\n if not mapper.concrete:\n check = set(mapper.iterate_to_root()).union(mapper.\n self_and_descendants)\n for m in check:\n if m.has_property(backref_key) and not m.concrete:\n raise sa_exc.ArgumentError(\n \"Error creating backref '%s' on relationship '%s': property of that name exists on mapper '%s'\"\n % (backref_key, self, m))\n if self.secondary is not None:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n secondaryjoin_minus_local)\n sj = kwargs.pop('secondaryjoin', self._join_condition.\n primaryjoin_minus_local)\n else:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n primaryjoin_reverse_remote)\n sj = kwargs.pop('secondaryjoin', None)\n if sj:\n raise sa_exc.InvalidRequestError(\n \"Can't assign 'secondaryjoin' on a backref against a non-secondary relationship.\"\n )\n foreign_keys = kwargs.pop('foreign_keys', self.\n _user_defined_foreign_keys)\n parent = self.parent.primary_mapper()\n kwargs.setdefault('viewonly', self.viewonly)\n kwargs.setdefault('post_update', self.post_update)\n kwargs.setdefault('passive_updates', self.passive_updates)\n kwargs.setdefault('sync_backref', self.sync_backref)\n self.back_populates = backref_key\n relationship = RelationshipProperty(parent, self.secondary,\n primaryjoin=pj, secondaryjoin=sj, foreign_keys=foreign_keys,\n back_populates=self.key, **kwargs)\n mapper._configure_property(backref_key, relationship,\n warn_for_existing=True)\n if self.back_populates:\n self._add_reverse_property(self.back_populates)\n\n 
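# ---------------------------------------------------------------------------
# Illustrative sketch (standalone, not part of this module): _generate_backref()
# above expands a backref="..." argument into the complementary relationship
# that could also be declared explicitly with back_populates on both sides.
# The Parent/Child mapping and the in-memory SQLite engine below are
# assumptions made purely for illustration.
# ---------------------------------------------------------------------------
from sqlalchemy import ForeignKey, create_engine
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    Session,
    mapped_column,
    relationship,
)


class Base(DeclarativeBase):
    pass


class Parent(Base):
    __tablename__ = "parent"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Explicit two-sided form; backref="parent" on Child.parent would arrange
    # the equivalent pair of relationships automatically.
    children: Mapped[list["Child"]] = relationship(back_populates="parent")


class Child(Base):
    __tablename__ = "child"

    id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[int] = mapped_column(ForeignKey("parent.id"))
    parent: Mapped["Parent"] = relationship(back_populates="children")


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    parent = Parent(children=[Child()])
    session.add(parent)
    # The reverse attribute is maintained in memory by the two-way linkage,
    # before any flush occurs.
    assert parent.children[0].parent is parent
# ---------------------------------------------------------------------------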
@util.preload_module('sqlalchemy.orm.dependency')\n def _post_init(self) ->None:\n dependency = util.preloaded.orm_dependency\n if self.uselist is None:\n self.uselist = self.direction is not MANYTOONE\n if not self.viewonly:\n self._dependency_processor = (dependency.DependencyProcessor.\n from_relationship(self))\n\n @util.memoized_property\n def _use_get(self) ->bool:\n \"\"\"memoize the 'use_get' attribute of this RelationshipLoader's\n lazyloader.\"\"\"\n strategy = self._lazy_strategy\n return strategy.use_get\n\n @util.memoized_property\n def _is_self_referential(self) ->bool:\n return self.mapper.common_parent(self.parent)\n <mask token>\n\n\n<mask token>\n\n\nclass JoinCondition:\n primaryjoin_initial: Optional[ColumnElement[bool]]\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n prop: RelationshipProperty[Any]\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: _ColumnPairs\n direction: RelationshipDirection\n parent_persist_selectable: FromClause\n child_persist_selectable: FromClause\n parent_local_selectable: FromClause\n child_local_selectable: FromClause\n _local_remote_pairs: Optional[_ColumnPairs]\n\n def __init__(self, parent_persist_selectable: FromClause,\n child_persist_selectable: FromClause, parent_local_selectable:\n FromClause, child_local_selectable: FromClause, *, primaryjoin:\n Optional[ColumnElement[bool]]=None, secondary: Optional[FromClause]\n =None, secondaryjoin: Optional[ColumnElement[bool]]=None,\n parent_equivalents: Optional[_EquivalentColumnMap]=None,\n child_equivalents: Optional[_EquivalentColumnMap]=None,\n consider_as_foreign_keys: Any=None, local_remote_pairs: Optional[\n _ColumnPairs]=None, remote_side: Any=None, self_referential: Any=\n False, prop: RelationshipProperty[Any], support_sync: bool=True,\n can_be_synced_fn: Callable[..., bool]=lambda *c: True):\n self.parent_persist_selectable = parent_persist_selectable\n self.parent_local_selectable = parent_local_selectable\n self.child_persist_selectable = child_persist_selectable\n self.child_local_selectable = child_local_selectable\n self.parent_equivalents = parent_equivalents\n self.child_equivalents = child_equivalents\n self.primaryjoin_initial = primaryjoin\n self.secondaryjoin = secondaryjoin\n self.secondary = secondary\n self.consider_as_foreign_keys = consider_as_foreign_keys\n self._local_remote_pairs = local_remote_pairs\n self._remote_side = remote_side\n self.prop = prop\n self.self_referential = self_referential\n self.support_sync = support_sync\n self.can_be_synced_fn = can_be_synced_fn\n self._determine_joins()\n assert self.primaryjoin is not None\n self._sanitize_joins()\n self._annotate_fks()\n self._annotate_remote()\n self._annotate_local()\n self._annotate_parentmapper()\n self._setup_pairs()\n self._check_foreign_cols(self.primaryjoin, True)\n if self.secondaryjoin is not None:\n self._check_foreign_cols(self.secondaryjoin, False)\n self._determine_direction()\n self._check_remote_side()\n self._log_joins()\n\n def _log_joins(self) ->None:\n log = self.prop.logger\n log.info('%s setup primary join %s', self.prop, self.primaryjoin)\n log.info('%s setup secondary join %s', self.prop, self.secondaryjoin)\n log.info('%s synchronize pairs [%s]', self.prop, ','.join(\n '(%s => %s)' % (l, r) for l, r in self.synchronize_pairs))\n log.info('%s secondary synchronize pairs [%s]', self.prop, ','.join\n ('(%s => %s)' % (l, r) for l, r in self.\n secondary_synchronize_pairs or []))\n log.info('%s 
local/remote pairs [%s]', self.prop, ','.join(\n '(%s / %s)' % (l, r) for l, r in self.local_remote_pairs))\n log.info('%s remote columns [%s]', self.prop, ','.join('%s' % col for\n col in self.remote_columns))\n log.info('%s local columns [%s]', self.prop, ','.join('%s' % col for\n col in self.local_columns))\n log.info('%s relationship direction %s', self.prop, self.direction)\n\n def _sanitize_joins(self) ->None:\n \"\"\"remove the parententity annotation from our join conditions which\n can leak in here based on some declarative patterns and maybe others.\n\n \"parentmapper\" is relied upon both by the ORM evaluator as well as\n the use case in _join_fixture_inh_selfref_w_entity\n that relies upon it being present, see :ticket:`3364`.\n\n \"\"\"\n self.primaryjoin = _deep_deannotate(self.primaryjoin, values=(\n 'parententity', 'proxy_key'))\n if self.secondaryjoin is not None:\n self.secondaryjoin = _deep_deannotate(self.secondaryjoin,\n values=('parententity', 'proxy_key'))\n\n def _determine_joins(self) ->None:\n \"\"\"Determine the 'primaryjoin' and 'secondaryjoin' attributes,\n if not passed to the constructor already.\n\n This is based on analysis of the foreign key relationships\n between the parent and target mapped selectables.\n\n \"\"\"\n if self.secondaryjoin is not None and self.secondary is None:\n raise sa_exc.ArgumentError(\n 'Property %s specified with secondary join condition but no secondary argument'\n % self.prop)\n try:\n consider_as_foreign_keys = self.consider_as_foreign_keys or None\n if self.secondary is not None:\n if self.secondaryjoin is None:\n self.secondaryjoin = join_condition(self.\n child_persist_selectable, self.secondary, a_subset=\n self.child_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.secondary, a_subset\n =self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n elif self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.\n child_persist_selectable, a_subset=self.\n parent_local_selectable, consider_as_foreign_keys=\n consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n except sa_exc.NoForeignKeysError as nfe:\n if self.secondary is not None:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables via secondary table '%s'. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify 'primaryjoin' and 'secondaryjoin' expressions.\"\n % (self.prop, self.secondary)) from nfe\n else:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify a 'primaryjoin' expression.\"\n % self.prop) from nfe\n except sa_exc.AmbiguousForeignKeysError as afe:\n if self.secondary is not None:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables via secondary table '%s'. 
Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference from the secondary table to each of the parent and child tables.\"\n % (self.prop, self.secondary)) from afe\n else:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference to the parent table.\"\n % self.prop) from afe\n\n @property\n def primaryjoin_minus_local(self) ->ColumnElement[bool]:\n return _deep_deannotate(self.primaryjoin, values=('local', 'remote'))\n\n @property\n def secondaryjoin_minus_local(self) ->ColumnElement[bool]:\n assert self.secondaryjoin is not None\n return _deep_deannotate(self.secondaryjoin, values=('local', 'remote'))\n\n @util.memoized_property\n def primaryjoin_reverse_remote(self) ->ColumnElement[bool]:\n \"\"\"Return the primaryjoin condition suitable for the\n \"reverse\" direction.\n\n If the primaryjoin was delivered here with pre-existing\n \"remote\" annotations, the local/remote annotations\n are reversed. Otherwise, the local/remote annotations\n are removed.\n\n \"\"\"\n if self._has_remote_annotations:\n\n def replace(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n v = dict(element._annotations)\n del v['remote']\n v['local'] = True\n return element._with_annotations(v)\n elif 'local' in element._annotations:\n v = dict(element._annotations)\n del v['local']\n v['remote'] = True\n return element._with_annotations(v)\n return None\n return visitors.replacement_traverse(self.primaryjoin, {}, replace)\n elif self._has_foreign_annotations:\n return _deep_deannotate(self.primaryjoin, values=('local',\n 'remote'))\n else:\n return _deep_deannotate(self.primaryjoin)\n\n def _has_annotation(self, clause: ClauseElement, annotation: str) ->bool:\n for col in visitors.iterate(clause, {}):\n if annotation in col._annotations:\n return True\n else:\n return False\n\n @util.memoized_property\n def _has_foreign_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'foreign')\n\n @util.memoized_property\n def _has_remote_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'remote')\n\n def _annotate_fks(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'foreign' annotations marking columns\n considered as foreign.\n\n \"\"\"\n if self._has_foreign_annotations:\n return\n if self.consider_as_foreign_keys:\n self._annotate_from_fk_list()\n else:\n self._annotate_present_fks()\n\n def _annotate_from_fk_list(self) ->None:\n\n def check_fk(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in self.consider_as_foreign_keys:\n return element._annotate({'foreign': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, check_fk)\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, check_fk)\n\n def _annotate_present_fks(self) ->None:\n if self.secondary is not None:\n secondarycols = util.column_set(self.secondary.c)\n else:\n secondarycols = set()\n\n def is_foreign(a: ColumnElement[Any], b: ColumnElement[Any]\n ) ->Optional[ColumnElement[Any]]:\n if isinstance(a, schema.Column) and isinstance(b, schema.Column):\n if a.references(b):\n return a\n elif 
b.references(a):\n return b\n if secondarycols:\n if a in secondarycols and b not in secondarycols:\n return a\n elif b in secondarycols and a not in secondarycols:\n return b\n return None\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n if not isinstance(binary.left, sql.ColumnElement\n ) or not isinstance(binary.right, sql.ColumnElement):\n return\n if ('foreign' not in binary.left._annotations and 'foreign' not in\n binary.right._annotations):\n col = is_foreign(binary.left, binary.right)\n if col is not None:\n if col.compare(binary.left):\n binary.left = binary.left._annotate({'foreign': True})\n elif col.compare(binary.right):\n binary.right = binary.right._annotate({'foreign': True}\n )\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.cloned_traverse(self.\n secondaryjoin, {}, {'binary': visit_binary})\n\n def _refers_to_parent_table(self) ->bool:\n \"\"\"Return True if the join condition contains column\n comparisons where both columns are in both tables.\n\n \"\"\"\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n nonlocal result\n c, f = binary.left, binary.right\n if isinstance(c, expression.ColumnClause) and isinstance(f,\n expression.ColumnClause) and pt.is_derived_from(c.table\n ) and pt.is_derived_from(f.table) and mt.is_derived_from(c.\n table) and mt.is_derived_from(f.table):\n result = True\n visitors.traverse(self.primaryjoin, {}, {'binary': visit_binary})\n return result\n\n def _tables_overlap(self) ->bool:\n \"\"\"Return True if parent/child tables have some overlap.\"\"\"\n return selectables_overlap(self.parent_persist_selectable, self.\n child_persist_selectable)\n\n def _annotate_remote(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'remote' annotations marking columns\n considered as part of the 'remote' side.\n\n \"\"\"\n if self._has_remote_annotations:\n return\n if self.secondary is not None:\n self._annotate_remote_secondary()\n elif self._local_remote_pairs or self._remote_side:\n self._annotate_remote_from_args()\n elif self._refers_to_parent_table():\n self._annotate_selfref(lambda col: 'foreign' in col.\n _annotations, False)\n elif self._tables_overlap():\n self._annotate_remote_with_overlap()\n else:\n self._annotate_remote_distinct_selectables()\n\n def _annotate_remote_secondary(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when 'secondary' is present.\n\n \"\"\"\n assert self.secondary is not None\n fixed_secondary = self.secondary\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if fixed_secondary.c.contains_column(element):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n assert self.secondaryjoin is not None\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, repl)\n\n def _annotate_selfref(self, fn: Callable[[ColumnElement[Any]], bool],\n remote_side_given: bool) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the relationship is detected as self-referential.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n equated = binary.left.compare(binary.right)\n if isinstance(binary.left, expression.ColumnClause) and isinstance(\n binary.right, expression.ColumnClause):\n if 
fn(binary.left):\n binary.left = binary.left._annotate({'remote': True})\n if fn(binary.right) and not equated:\n binary.right = binary.right._annotate({'remote': True})\n elif not remote_side_given:\n self._warn_non_column_elements()\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_from_args(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the 'remote_side' or '_local_remote_pairs'\n arguments are used.\n\n \"\"\"\n if self._local_remote_pairs:\n if self._remote_side:\n raise sa_exc.ArgumentError(\n 'remote_side argument is redundant against more detailed _local_remote_side argument.'\n )\n remote_side = [r for l, r in self._local_remote_pairs]\n else:\n remote_side = self._remote_side\n if self._refers_to_parent_table():\n self._annotate_selfref(lambda col: col in remote_side, True)\n else:\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in set(remote_side):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.\n primaryjoin, {}, repl)\n\n def _annotate_remote_with_overlap(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables have some set of\n tables in common, though is not a fully self-referential\n relationship.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n binary.left, binary.right = proc_left_right(binary.left, binary\n .right)\n binary.right, binary.left = proc_left_right(binary.right,\n binary.left)\n check_entities = (self.prop is not None and self.prop.mapper is not\n self.prop.parent)\n\n def proc_left_right(left: ColumnElement[Any], right: ColumnElement[Any]\n ) ->Tuple[ColumnElement[Any], ColumnElement[Any]]:\n if isinstance(left, expression.ColumnClause) and isinstance(right,\n expression.ColumnClause):\n if self.child_persist_selectable.c.contains_column(right\n ) and self.parent_persist_selectable.c.contains_column(left\n ):\n right = right._annotate({'remote': True})\n elif check_entities and right._annotations.get('parentmapper'\n ) is self.prop.mapper:\n right = right._annotate({'remote': True})\n elif check_entities and left._annotations.get('parentmapper'\n ) is self.prop.mapper:\n left = left._annotate({'remote': True})\n else:\n self._warn_non_column_elements()\n return left, right\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_distinct_selectables(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables are entirely\n separate.\n\n \"\"\"\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if self.child_persist_selectable.c.contains_column(element) and (\n not self.parent_local_selectable.c.contains_column(element) or\n self.child_local_selectable.c.contains_column(element)):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n\n def _warn_non_column_elements(self) ->None:\n util.warn(\n 'Non-simple column elements in primary join condition for property %s - consider using remote() annotations to mark the remote side.'\n % self.prop)\n\n def _annotate_local(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'local' annotations.\n\n This annotates all column elements found\n simultaneously in the parent table\n and the join condition that don't have a\n 'remote' annotation 
set up from\n _annotate_remote() or user-defined.\n\n \"\"\"\n if self._has_annotation(self.primaryjoin, 'local'):\n return\n if self._local_remote_pairs:\n local_side = util.column_set([l for l, r in self.\n _local_remote_pairs])\n else:\n local_side = util.column_set(self.parent_persist_selectable.c)\n\n def locals_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' not in element._annotations and element in local_side:\n return element._annotate({'local': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, locals_)\n\n def _annotate_parentmapper(self) ->None:\n\n def parentmappers_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n return element._annotate({'parentmapper': self.prop.mapper})\n elif 'local' in element._annotations:\n return element._annotate({'parentmapper': self.prop.parent})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, parentmappers_)\n\n def _check_remote_side(self) ->None:\n if not self.local_remote_pairs:\n raise sa_exc.ArgumentError(\n 'Relationship %s could not determine any unambiguous local/remote column pairs based on join condition and remote_side arguments. Consider using the remote() annotation to accurately mark those elements of the join condition that are on the remote side of the relationship.'\n % (self.prop,))\n else:\n not_target = util.column_set(self.parent_persist_selectable.c\n ).difference(self.child_persist_selectable.c)\n for _, rmt in self.local_remote_pairs:\n if rmt in not_target:\n util.warn(\n \"Expression %s is marked as 'remote', but these column(s) are local to the local side. The remote() annotation is needed only for a self-referential relationship where both sides of the relationship refer to the same tables.\"\n % (rmt,))\n\n def _check_foreign_cols(self, join_condition: ColumnElement[bool],\n primary: bool) ->None:\n \"\"\"Check the foreign key columns collected and emit error\n messages.\"\"\"\n can_sync = False\n foreign_cols = self._gather_columns_with_annotation(join_condition,\n 'foreign')\n has_foreign = bool(foreign_cols)\n if primary:\n can_sync = bool(self.synchronize_pairs)\n else:\n can_sync = bool(self.secondary_synchronize_pairs)\n if (self.support_sync and can_sync or not self.support_sync and\n has_foreign):\n return\n if self.support_sync and has_foreign and not can_sync:\n err = (\n \"Could not locate any simple equality expressions involving locally mapped foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n \" Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation. 
To allow comparison operators other than '==', the relationship can be marked as viewonly=True.\"\n )\n raise sa_exc.ArgumentError(err)\n else:\n err = (\n \"Could not locate any relevant foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n ' Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation.'\n )\n raise sa_exc.ArgumentError(err)\n\n def _determine_direction(self) ->None:\n \"\"\"Determine if this relationship is one to many, many to one,\n many to many.\n\n \"\"\"\n if self.secondaryjoin is not None:\n self.direction = MANYTOMANY\n else:\n parentcols = util.column_set(self.parent_persist_selectable.c)\n targetcols = util.column_set(self.child_persist_selectable.c)\n onetomany_fk = targetcols.intersection(self.foreign_key_columns)\n manytoone_fk = parentcols.intersection(self.foreign_key_columns)\n if onetomany_fk and manytoone_fk:\n onetomany_local = self._gather_columns_with_annotation(self\n .primaryjoin, 'remote', 'foreign')\n manytoone_local = {c for c in self.\n _gather_columns_with_annotation(self.primaryjoin,\n 'foreign') if 'remote' not in c._annotations}\n if onetomany_local and manytoone_local:\n self_equated = self.remote_columns.intersection(self.\n local_columns)\n onetomany_local = onetomany_local.difference(self_equated)\n manytoone_local = manytoone_local.difference(self_equated)\n if onetomany_local and not manytoone_local:\n self.direction = ONETOMANY\n elif manytoone_local and not onetomany_local:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns within the join condition are present in both the parent and the child's mapped tables. 
Ensure that only those columns referring to a parent column are marked as foreign, either via the foreign() annotation or via the foreign_keys argument.\"\n % self.prop)\n elif onetomany_fk:\n self.direction = ONETOMANY\n elif manytoone_fk:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns are present in neither the parent nor the child's mapped tables\"\n % self.prop)\n\n def _deannotate_pairs(self, collection: _ColumnPairIterable\n ) ->_MutableColumnPairs:\n \"\"\"provide deannotation for the various lists of\n pairs, so that using them in hashes doesn't incur\n high-overhead __eq__() comparisons against\n original columns mapped.\n\n \"\"\"\n return [(x._deannotate(), y._deannotate()) for x, y in collection]\n\n def _setup_pairs(self) ->None:\n sync_pairs: _MutableColumnPairs = []\n lrp: util.OrderedSet[Tuple[ColumnElement[Any], ColumnElement[Any]]\n ] = util.OrderedSet([])\n secondary_sync_pairs: _MutableColumnPairs = []\n\n def go(joincond: ColumnElement[bool], collection: _MutableColumnPairs\n ) ->None:\n\n def visit_binary(binary: BinaryExpression[Any], left:\n ColumnElement[Any], right: ColumnElement[Any]) ->None:\n if ('remote' in right._annotations and 'remote' not in left\n ._annotations and self.can_be_synced_fn(left)):\n lrp.add((left, right))\n elif 'remote' in left._annotations and 'remote' not in right._annotations and self.can_be_synced_fn(\n right):\n lrp.add((right, left))\n if binary.operator is operators.eq and self.can_be_synced_fn(\n left, right):\n if 'foreign' in right._annotations:\n collection.append((left, right))\n elif 'foreign' in left._annotations:\n collection.append((right, left))\n visit_binary_product(visit_binary, joincond)\n for joincond, collection in [(self.primaryjoin, sync_pairs), (self.\n secondaryjoin, secondary_sync_pairs)]:\n if joincond is None:\n continue\n go(joincond, collection)\n self.local_remote_pairs = self._deannotate_pairs(lrp)\n self.synchronize_pairs = self._deannotate_pairs(sync_pairs)\n self.secondary_synchronize_pairs = self._deannotate_pairs(\n secondary_sync_pairs)\n _track_overlapping_sync_targets: weakref.WeakKeyDictionary[\n ColumnElement[Any], weakref.WeakKeyDictionary[RelationshipProperty[\n Any], ColumnElement[Any]]] = weakref.WeakKeyDictionary()\n\n def _warn_for_conflicting_sync_targets(self) ->None:\n if not self.support_sync:\n return\n for from_, to_ in ([(from_, to_) for from_, to_ in self.\n synchronize_pairs] + [(from_, to_) for from_, to_ in self.\n secondary_synchronize_pairs]):\n if to_ not in self._track_overlapping_sync_targets:\n self._track_overlapping_sync_targets[to_\n ] = weakref.WeakKeyDictionary({self.prop: from_})\n else:\n other_props = []\n prop_to_from = self._track_overlapping_sync_targets[to_]\n for pr, fr_ in prop_to_from.items():\n if (not pr.mapper._dispose_called and pr not in self.\n prop._reverse_property and pr.key not in self.prop.\n _overlaps and self.prop.key not in pr._overlaps and\n '__*' not in self.prop._overlaps and '__*' not in\n pr._overlaps and not self.prop.parent.is_sibling(pr\n .parent) and not self.prop.mapper.is_sibling(pr.\n mapper) and not self.prop.parent.is_sibling(pr.\n mapper) and not self.prop.mapper.is_sibling(pr.\n parent) and (self.prop.key != pr.key or not self.\n prop.parent.common_parent(pr.parent))):\n other_props.append((pr, fr_))\n if other_props:\n util.warn(\n 'relationship \\'%s\\' will copy column %s to column %s, which conflicts with relationship(s): 
%s. If this is not the intention, consider if these relationships should be linked with back_populates, or if viewonly=True should be applied to one or more if they are read-only. For the less common case that foreign key constraints are partially overlapping, the orm.foreign() annotation can be used to isolate the columns that should be written towards. To silence this warning, add the parameter \\'overlaps=\"%s\"\\' to the \\'%s\\' relationship.'\n % (self.prop, from_, to_, ', '.join(sorted(\n \"'%s' (copies %s to %s)\" % (pr, fr_, to_) for pr,\n fr_ in other_props)), ','.join(sorted(pr.key for pr,\n fr in other_props)), self.prop), code='qzyx')\n self._track_overlapping_sync_targets[to_][self.prop] = from_\n\n @util.memoized_property\n def remote_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('remote')\n\n @util.memoized_property\n def local_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('local')\n\n @util.memoized_property\n def foreign_key_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('foreign')\n\n def _gather_join_annotations(self, annotation: str) ->Set[ColumnElement\n [Any]]:\n s = set(self._gather_columns_with_annotation(self.primaryjoin,\n annotation))\n if self.secondaryjoin is not None:\n s.update(self._gather_columns_with_annotation(self.\n secondaryjoin, annotation))\n return {x._deannotate() for x in s}\n\n def _gather_columns_with_annotation(self, clause: ColumnElement[Any], *\n annotation: Iterable[str]) ->Set[ColumnElement[Any]]:\n annotation_set = set(annotation)\n return {cast(ColumnElement[Any], col) for col in visitors.iterate(\n clause, {}) if annotation_set.issubset(col._annotations)}\n\n def join_targets(self, source_selectable: Optional[FromClause],\n dest_selectable: FromClause, aliased: bool, single_crit: Optional[\n ColumnElement[bool]]=None, extra_criteria: Tuple[ColumnElement[bool\n ], ...]=()) ->Tuple[ColumnElement[bool], Optional[ColumnElement[\n bool]], Optional[FromClause], Optional[ClauseAdapter], FromClause]:\n \"\"\"Given a source and destination selectable, create a\n join between them.\n\n This takes into account aliasing the join clause\n to reference the appropriate corresponding columns\n in the target objects, as well as the extra child\n criterion, equivalent column sets, etc.\n\n \"\"\"\n dest_selectable = _shallow_annotate(dest_selectable, {\n 'no_replacement_traverse': True})\n primaryjoin, secondaryjoin, secondary = (self.primaryjoin, self.\n secondaryjoin, self.secondary)\n if single_crit is not None:\n if secondaryjoin is not None:\n secondaryjoin = secondaryjoin & single_crit\n else:\n primaryjoin = primaryjoin & single_crit\n if extra_criteria:\n\n def mark_unrelated_columns_as_ok_to_adapt(elem:\n SupportsAnnotations, annotations: _AnnotationDict\n ) ->SupportsAnnotations:\n \"\"\"note unrelated columns in the \"extra criteria\" as OK\n to adapt, even though they are not part of our \"local\"\n or \"remote\" side.\n\n see #9779 for this case\n\n \"\"\"\n parentmapper_for_element = elem._annotations.get('parentmapper'\n , None)\n if (parentmapper_for_element is not self.prop.parent and \n parentmapper_for_element is not self.prop.mapper):\n return _safe_annotate(elem, annotations)\n else:\n return elem\n extra_criteria = tuple(_deep_annotate(elem, {\n 'ok_to_adapt_in_join_condition': True}, annotate_callable=\n mark_unrelated_columns_as_ok_to_adapt) for elem in\n extra_criteria)\n if secondaryjoin is not None:\n secondaryjoin = 
secondaryjoin & sql.and_(*extra_criteria)\n else:\n primaryjoin = primaryjoin & sql.and_(*extra_criteria)\n if aliased:\n if secondary is not None:\n secondary = secondary._anonymous_fromclause(flat=True)\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn=\n _ColInAnnotations('local'))\n secondary_aliasizer = ClauseAdapter(dest_selectable,\n equivalents=self.child_equivalents).chain(primary_aliasizer\n )\n if source_selectable is not None:\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn\n =_ColInAnnotations('local')).chain(ClauseAdapter(\n source_selectable, equivalents=self.parent_equivalents)\n )\n secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)\n else:\n primary_aliasizer = ClauseAdapter(dest_selectable,\n exclude_fn=_ColInAnnotations('local'), equivalents=self\n .child_equivalents)\n if source_selectable is not None:\n primary_aliasizer.chain(ClauseAdapter(source_selectable,\n exclude_fn=_ColInAnnotations('remote'), equivalents\n =self.parent_equivalents))\n secondary_aliasizer = None\n primaryjoin = primary_aliasizer.traverse(primaryjoin)\n target_adapter = secondary_aliasizer or primary_aliasizer\n target_adapter.exclude_fn = None\n else:\n target_adapter = None\n return (primaryjoin, secondaryjoin, secondary, target_adapter,\n dest_selectable)\n\n def create_lazy_clause(self, reverse_direction: bool=False) ->Tuple[\n ColumnElement[bool], Dict[str, ColumnElement[Any]], Dict[\n ColumnElement[Any], ColumnElement[Any]]]:\n binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}\n equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}\n has_secondary = self.secondaryjoin is not None\n if has_secondary:\n lookup = collections.defaultdict(list)\n for l, r in self.local_remote_pairs:\n lookup[l].append((l, r))\n equated_columns[r] = l\n elif not reverse_direction:\n for l, r in self.local_remote_pairs:\n equated_columns[r] = l\n else:\n for l, r in self.local_remote_pairs:\n equated_columns[l] = r\n\n def col_to_bind(element: ColumnElement[Any], **kw: Any) ->Optional[\n BindParameter[Any]]:\n if (not reverse_direction and 'local' in element._annotations or\n reverse_direction and (has_secondary and element in lookup or\n not has_secondary and 'remote' in element._annotations)):\n if element not in binds:\n binds[element] = sql.bindparam(None, None, type_=\n element.type, unique=True)\n return binds[element]\n return None\n lazywhere = self.primaryjoin\n if self.secondaryjoin is None or not reverse_direction:\n lazywhere = visitors.replacement_traverse(lazywhere, {},\n col_to_bind)\n if self.secondaryjoin is not None:\n secondaryjoin = self.secondaryjoin\n if reverse_direction:\n secondaryjoin = visitors.replacement_traverse(secondaryjoin,\n {}, col_to_bind)\n lazywhere = sql.and_(lazywhere, secondaryjoin)\n bind_to_col = {binds[col].key: col for col in binds}\n return lazywhere, bind_to_col, equated_columns\n\n\nclass _ColInAnnotations:\n \"\"\"Serializable object that tests for a name in c._annotations.\"\"\"\n __slots__ = 'name',\n\n def __init__(self, name: str):\n self.name = name\n\n def __call__(self, c: ClauseElement) ->bool:\n return (self.name in c._annotations or \n 'ok_to_adapt_in_join_condition' in c._annotations)\n\n\nclass Relationship(RelationshipProperty[_T], _DeclarativeMapped[_T],\n WriteOnlyMapped[_T], DynamicMapped[_T]):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. 
seealso::\n\n :ref:`relationship_config_toplevel`\n\n .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative\n compatible subclass for :class:`_orm.RelationshipProperty`.\n\n \"\"\"\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n",
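Illustrative note (not part of the dumped source): the escaped code above includes JoinCondition._warn_for_conflicting_sync_targets, whose warning suggests linking overlapping relationships with back_populates or marking one of them viewonly=True. What follows is a minimal, hypothetical sketch of that recommended configuration, assuming SQLAlchemy 2.0; the Parent/Child models and the in-memory SQLite engine are invented for illustration and do not appear in the dumped source.

from typing import List, Optional

from sqlalchemy import ForeignKey, create_engine
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    Session,
    mapped_column,
    relationship,
)


class Base(DeclarativeBase):
    pass


class Parent(Base):
    __tablename__ = "parent"

    id: Mapped[int] = mapped_column(primary_key=True)
    # One-to-many side, linked to Child.parent via back_populates so the ORM
    # treats the two relationships as two sides of the same association.
    children: Mapped[List["Child"]] = relationship(back_populates="parent")


class Child(Base):
    __tablename__ = "child"

    id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[Optional[int]] = mapped_column(ForeignKey("parent.id"))
    # Many-to-one side; because both relationships are linked, writes to the
    # shared parent_id sync target do not trigger the "conflicts with
    # relationship(s)" warning quoted in the dumped source.
    parent: Mapped[Optional["Parent"]] = relationship(back_populates="children")


if __name__ == "__main__":
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        parent = Parent(children=[Child(), Child()])
        session.add(parent)
        session.commit()
        print(len(parent.children))  # prints 2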
"step-3": "<mask token>\n\n\nclass _RelationshipArgs(NamedTuple):\n \"\"\"stores user-passed parameters that are resolved at mapper configuration\n time.\n\n \"\"\"\n secondary: _RelationshipArg[Optional[_RelationshipSecondaryArgument],\n Optional[FromClause]]\n primaryjoin: _RelationshipArg[Optional[\n _RelationshipJoinConditionArgument], Optional[ColumnElement[Any]]]\n secondaryjoin: _RelationshipArg[Optional[\n _RelationshipJoinConditionArgument], Optional[ColumnElement[Any]]]\n order_by: _RelationshipArg[_ORMOrderByArgument, Union[Literal[None, \n False], Tuple[ColumnElement[Any], ...]]]\n foreign_keys: _RelationshipArg[Optional[_ORMColCollectionArgument], Set\n [ColumnElement[Any]]]\n remote_side: _RelationshipArg[Optional[_ORMColCollectionArgument], Set[\n ColumnElement[Any]]]\n\n\[email protected]_logger\nclass RelationshipProperty(_IntrospectsAnnotations, StrategizedProperty[_T],\n log.Identified):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. seealso::\n\n :ref:`relationship_config_toplevel`\n\n \"\"\"\n strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n _links_to_entity = True\n _is_relationship = True\n _overlaps: Sequence[str]\n _lazy_strategy: LazyLoader\n _persistence_only = dict(passive_deletes=False, passive_updates=True,\n enable_typechecks=True, active_history=False, cascade_backrefs=False)\n _dependency_processor: Optional[DependencyProcessor] = None\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n _join_condition: JoinCondition\n order_by: Union[Literal[False], Tuple[ColumnElement[Any], ...]]\n _user_defined_foreign_keys: Set[ColumnElement[Any]]\n _calculated_foreign_keys: Set[ColumnElement[Any]]\n remote_side: Set[ColumnElement[Any]]\n local_columns: Set[ColumnElement[Any]]\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: Optional[_ColumnPairs]\n local_remote_pairs: Optional[_ColumnPairs]\n direction: RelationshipDirection\n _init_args: _RelationshipArgs\n\n def __init__(self, argument: Optional[_RelationshipArgumentType[_T]]=\n None, secondary: Optional[_RelationshipSecondaryArgument]=None, *,\n uselist: Optional[bool]=None, collection_class: Optional[Union[Type\n [Collection[Any]], Callable[[], Collection[Any]]]]=None,\n primaryjoin: Optional[_RelationshipJoinConditionArgument]=None,\n secondaryjoin: Optional[_RelationshipJoinConditionArgument]=None,\n back_populates: Optional[str]=None, order_by: _ORMOrderByArgument=\n False, backref: Optional[ORMBackrefArgument]=None, overlaps:\n Optional[str]=None, post_update: bool=False, cascade: str=\n 'save-update, merge', viewonly: bool=False, attribute_options:\n Optional[_AttributeOptions]=None, lazy: _LazyLoadArgumentType=\n 'select', passive_deletes: Union[Literal['all'], bool]=False,\n passive_updates: bool=True, active_history: bool=False,\n enable_typechecks: bool=True, foreign_keys: Optional[\n _ORMColCollectionArgument]=None, remote_side: Optional[\n _ORMColCollectionArgument]=None, join_depth: Optional[int]=None,\n comparator_factory: Optional[Type[RelationshipProperty.Comparator[\n Any]]]=None, single_parent: bool=False, innerjoin: bool=False,\n distinct_target_key: Optional[bool]=None, load_on_pending: bool=\n False, query_class: Optional[Type[Query[Any]]]=None, info: Optional\n [_InfoType]=None, omit_join: 
Literal[None, False]=None,\n sync_backref: Optional[bool]=None, doc: Optional[str]=None,\n bake_queries: Literal[True]=True, cascade_backrefs: Literal[False]=\n False, _local_remote_pairs: Optional[_ColumnPairs]=None,\n _legacy_inactive_history_style: bool=False):\n super().__init__(attribute_options=attribute_options)\n self.uselist = uselist\n self.argument = argument\n self._init_args = _RelationshipArgs(_RelationshipArg('secondary',\n secondary, None), _RelationshipArg('primaryjoin', primaryjoin,\n None), _RelationshipArg('secondaryjoin', secondaryjoin, None),\n _RelationshipArg('order_by', order_by, None), _RelationshipArg(\n 'foreign_keys', foreign_keys, None), _RelationshipArg(\n 'remote_side', remote_side, None))\n self.post_update = post_update\n self.viewonly = viewonly\n if viewonly:\n self._warn_for_persistence_only_flags(passive_deletes=\n passive_deletes, passive_updates=passive_updates,\n enable_typechecks=enable_typechecks, active_history=\n active_history, cascade_backrefs=cascade_backrefs)\n if viewonly and sync_backref:\n raise sa_exc.ArgumentError(\n 'sync_backref and viewonly cannot both be True')\n self.sync_backref = sync_backref\n self.lazy = lazy\n self.single_parent = single_parent\n self.collection_class = collection_class\n self.passive_deletes = passive_deletes\n if cascade_backrefs:\n raise sa_exc.ArgumentError(\n \"The 'cascade_backrefs' parameter passed to relationship() may only be set to False.\"\n )\n self.passive_updates = passive_updates\n self.enable_typechecks = enable_typechecks\n self.query_class = query_class\n self.innerjoin = innerjoin\n self.distinct_target_key = distinct_target_key\n self.doc = doc\n self.active_history = active_history\n self._legacy_inactive_history_style = _legacy_inactive_history_style\n self.join_depth = join_depth\n if omit_join:\n util.warn(\n 'setting omit_join to True is not supported; selectin loading of this relationship may not work correctly if this flag is set explicitly. omit_join optimization is automatically detected for conditions under which it is supported.'\n )\n self.omit_join = omit_join\n self.local_remote_pairs = _local_remote_pairs\n self.load_on_pending = load_on_pending\n self.comparator_factory = (comparator_factory or\n RelationshipProperty.Comparator)\n util.set_creation_order(self)\n if info is not None:\n self.info.update(info)\n self.strategy_key = ('lazy', self.lazy),\n self._reverse_property: Set[RelationshipProperty[Any]] = set()\n if overlaps:\n self._overlaps = set(re.split('\\\\s*,\\\\s*', overlaps))\n else:\n self._overlaps = ()\n self.cascade = cascade\n self.back_populates = back_populates\n if self.back_populates:\n if backref:\n raise sa_exc.ArgumentError(\n 'backref and back_populates keyword arguments are mutually exclusive'\n )\n self.backref = None\n else:\n self.backref = backref\n\n def _warn_for_persistence_only_flags(self, **kw: Any) ->None:\n for k, v in kw.items():\n if v != self._persistence_only[k]:\n util.warn(\n 'Setting %s on relationship() while also setting viewonly=True does not make sense, as a viewonly=True relationship does not perform persistence operations. 
This configuration may raise an error in a future release.'\n % (k,))\n\n def instrument_class(self, mapper: Mapper[Any]) ->None:\n attributes.register_descriptor(mapper.class_, self.key, comparator=\n self.comparator_factory(self, mapper), parententity=mapper, doc\n =self.doc)\n\n\n class Comparator(util.MemoizedSlots, PropComparator[_PT]):\n \"\"\"Produce boolean, comparison, and other operators for\n :class:`.RelationshipProperty` attributes.\n\n See the documentation for :class:`.PropComparator` for a brief\n overview of ORM level operator definition.\n\n .. seealso::\n\n :class:`.PropComparator`\n\n :class:`.ColumnProperty.Comparator`\n\n :class:`.ColumnOperators`\n\n :ref:`types_operators`\n\n :attr:`.TypeEngine.comparator_factory`\n\n \"\"\"\n __slots__ = ('entity', 'mapper', 'property', '_of_type',\n '_extra_criteria')\n prop: RODescriptorReference[RelationshipProperty[_PT]]\n _of_type: Optional[_EntityType[_PT]]\n\n def __init__(self, prop: RelationshipProperty[_PT], parentmapper:\n _InternalEntityType[Any], adapt_to_entity: Optional[AliasedInsp\n [Any]]=None, of_type: Optional[_EntityType[_PT]]=None,\n extra_criteria: Tuple[ColumnElement[bool], ...]=()):\n \"\"\"Construction of :class:`.RelationshipProperty.Comparator`\n is internal to the ORM's attribute mechanics.\n\n \"\"\"\n self.prop = prop\n self._parententity = parentmapper\n self._adapt_to_entity = adapt_to_entity\n if of_type:\n self._of_type = of_type\n else:\n self._of_type = None\n self._extra_criteria = extra_criteria\n\n def adapt_to_entity(self, adapt_to_entity: AliasedInsp[Any]\n ) ->RelationshipProperty.Comparator[Any]:\n return self.__class__(self.prop, self._parententity,\n adapt_to_entity=adapt_to_entity, of_type=self._of_type)\n entity: _InternalEntityType[_PT]\n \"\"\"The target entity referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`\n object.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n mapper: Mapper[_PT]\n \"\"\"The target :class:`_orm.Mapper` referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n\n def _memoized_attr_entity(self) ->_InternalEntityType[_PT]:\n if self._of_type:\n return inspect(self._of_type)\n else:\n return self.prop.entity\n\n def _memoized_attr_mapper(self) ->Mapper[_PT]:\n return self.entity.mapper\n\n def _source_selectable(self) ->FromClause:\n if self._adapt_to_entity:\n return self._adapt_to_entity.selectable\n else:\n return self.property.parent._with_polymorphic_selectable\n\n def __clause_element__(self) ->ColumnElement[bool]:\n adapt_from = self._source_selectable()\n if self._of_type:\n of_type_entity = inspect(self._of_type)\n else:\n of_type_entity = None\n pj, sj, source, dest, secondary, target_adapter = (self.prop.\n _create_joins(source_selectable=adapt_from,\n source_polymorphic=True, of_type_entity=of_type_entity,\n alias_secondary=True, extra_criteria=self._extra_criteria))\n if sj is not None:\n return pj & sj\n else:\n return pj\n\n def of_type(self, class_: _EntityType[Any]) ->PropComparator[_PT]:\n \"\"\"Redefine this object in terms of a polymorphic subclass.\n\n See :meth:`.PropComparator.of_type` for an example.\n\n\n \"\"\"\n return RelationshipProperty.Comparator(self.prop, self.\n _parententity, adapt_to_entity=self._adapt_to_entity,\n of_type=class_, extra_criteria=self._extra_criteria)\n\n def and_(self, *criteria: 
_ColumnExpressionArgument[bool]\n ) ->PropComparator[Any]:\n \"\"\"Add AND criteria.\n\n See :meth:`.PropComparator.and_` for an example.\n\n .. versionadded:: 1.4\n\n \"\"\"\n exprs = tuple(coercions.expect(roles.WhereHavingRole, clause) for\n clause in util.coerce_generator_arg(criteria))\n return RelationshipProperty.Comparator(self.prop, self.\n _parententity, adapt_to_entity=self._adapt_to_entity,\n of_type=self._of_type, extra_criteria=self._extra_criteria +\n exprs)\n\n def in_(self, other: Any) ->NoReturn:\n \"\"\"Produce an IN clause - this is not implemented\n for :func:`_orm.relationship`-based attributes at this time.\n\n \"\"\"\n raise NotImplementedError(\n 'in_() not yet supported for relationships. For a simple many-to-one, use in_() against the set of foreign key values.'\n )\n __hash__ = None\n\n def __eq__(self, other: Any) ->ColumnElement[bool]:\n \"\"\"Implement the ``==`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop == <some object>\n\n this will typically produce a\n clause such as::\n\n mytable.related_id == <some id>\n\n Where ``<some id>`` is the primary key of the given\n object.\n\n The ``==`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use :meth:`~.Relationship.Comparator.contains`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce a NOT EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction in [ONETOMANY, MANYTOMANY]:\n return ~self._criterion_exists()\n else:\n return _orm_annotate(self.property._optimized_compare(\n None, adapt_source=self.adapter))\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.property._optimized_compare(other,\n adapt_source=self.adapter))\n\n def _criterion_exists(self, criterion: Optional[\n _ColumnExpressionArgument[bool]]=None, **kwargs: Any) ->Exists:\n where_criteria = coercions.expect(roles.WhereHavingRole, criterion\n ) if criterion is not None else None\n if getattr(self, '_of_type', None):\n info: Optional[_InternalEntityType[Any]] = inspect(self.\n _of_type)\n assert info is not None\n target_mapper, to_selectable, is_aliased_class = (info.\n mapper, info.selectable, info.is_aliased_class)\n if self.property._is_self_referential and not is_aliased_class:\n to_selectable = to_selectable._anonymous_fromclause()\n single_crit = target_mapper._single_table_criterion\n if single_crit is not None:\n if where_criteria is not None:\n where_criteria = single_crit & where_criteria\n else:\n where_criteria = single_crit\n else:\n is_aliased_class = False\n to_selectable = None\n if self.adapter:\n source_selectable = self._source_selectable()\n else:\n source_selectable = None\n pj, sj, source, dest, secondary, 
target_adapter = (self.\n property._create_joins(dest_selectable=to_selectable,\n source_selectable=source_selectable))\n for k in kwargs:\n crit = getattr(self.property.mapper.class_, k) == kwargs[k]\n if where_criteria is None:\n where_criteria = crit\n else:\n where_criteria = where_criteria & crit\n if sj is not None:\n j = _orm_annotate(pj) & sj\n else:\n j = _orm_annotate(pj, exclude=self.property.remote_side)\n if (where_criteria is not None and target_adapter and not\n is_aliased_class):\n where_criteria = target_adapter.traverse(where_criteria)\n if where_criteria is not None:\n where_criteria = where_criteria._annotate({\n 'no_replacement_traverse': True})\n crit = j & sql.True_._ifnone(where_criteria)\n if secondary is not None:\n ex = sql.exists(1).where(crit).select_from(dest, secondary\n ).correlate_except(dest, secondary)\n else:\n ex = sql.exists(1).where(crit).select_from(dest\n ).correlate_except(dest)\n return ex\n\n def any(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a collection against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.any(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id\n AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.any` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.any` is particularly\n useful for testing for empty collections::\n\n session.query(MyClass).filter(\n ~MyClass.somereference.any()\n )\n\n will produce::\n\n SELECT * FROM my_table WHERE\n NOT (EXISTS (SELECT 1 FROM related WHERE\n related.my_id=my_table.id))\n\n :meth:`~.Relationship.Comparator.any` is only\n valid for collections, i.e. a :func:`_orm.relationship`\n that has ``uselist=True``. For scalar references,\n use :meth:`~.Relationship.Comparator.has`.\n\n \"\"\"\n if not self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'any()' not implemented for scalar attributes. Use has().\"\n )\n return self._criterion_exists(criterion, **kwargs)\n\n def has(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a scalar reference against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.has(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE\n related.id==my_table.related_id AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.has` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.has` is only\n valid for scalar references, i.e. a :func:`_orm.relationship`\n that has ``uselist=False``. For collection references,\n use :meth:`~.Relationship.Comparator.any`.\n\n \"\"\"\n if self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'has()' not implemented for collections. 
Use any().\")\n return self._criterion_exists(criterion, **kwargs)\n\n def contains(self, other: _ColumnExpressionArgument[Any], **kwargs: Any\n ) ->ColumnElement[bool]:\n \"\"\"Return a simple expression that tests a collection for\n containment of a particular item.\n\n :meth:`~.Relationship.Comparator.contains` is\n only valid for a collection, i.e. a\n :func:`_orm.relationship` that implements\n one-to-many or many-to-many with ``uselist=True``.\n\n When used in a simple one-to-many context, an\n expression like::\n\n MyClass.contains(other)\n\n Produces a clause like::\n\n mytable.id == <some id>\n\n Where ``<some id>`` is the value of the foreign key\n attribute on ``other`` which refers to the primary\n key of its parent object. From this it follows that\n :meth:`~.Relationship.Comparator.contains` is\n very useful when used with simple one-to-many\n operations.\n\n For many-to-many operations, the behavior of\n :meth:`~.Relationship.Comparator.contains`\n has more caveats. The association table will be\n rendered in the statement, producing an \"implicit\"\n join, that is, includes multiple tables in the FROM\n clause which are equated in the WHERE clause::\n\n query(MyClass).filter(MyClass.contains(other))\n\n Produces a query like::\n\n SELECT * FROM my_table, my_association_table AS\n my_association_table_1 WHERE\n my_table.id = my_association_table_1.parent_id\n AND my_association_table_1.child_id = <some id>\n\n Where ``<some id>`` would be the primary key of\n ``other``. From the above, it is clear that\n :meth:`~.Relationship.Comparator.contains`\n will **not** work with many-to-many collections when\n used in queries that move beyond simple AND\n conjunctions, such as multiple\n :meth:`~.Relationship.Comparator.contains`\n expressions joined by OR. In such cases subqueries or\n explicit \"outer joins\" will need to be used instead.\n See :meth:`~.Relationship.Comparator.any` for\n a less-performant alternative using EXISTS, or refer\n to :meth:`_query.Query.outerjoin`\n as well as :ref:`orm_queryguide_joins`\n for more details on constructing outer joins.\n\n kwargs may be ignored by this operator but are required for API\n conformance.\n \"\"\"\n if not self.prop.uselist:\n raise sa_exc.InvalidRequestError(\n \"'contains' not implemented for scalar attributes. 
Use ==\"\n )\n clause = self.prop._optimized_compare(other, adapt_source=self.\n adapter)\n if self.prop.secondaryjoin is not None:\n clause.negation_clause = self.__negated_contains_or_equals(\n other)\n return clause\n\n def __negated_contains_or_equals(self, other: Any) ->ColumnElement[bool\n ]:\n if self.prop.direction == MANYTOONE:\n state = attributes.instance_state(other)\n\n def state_bindparam(local_col: ColumnElement[Any], state:\n InstanceState[Any], remote_col: ColumnElement[Any]\n ) ->BindParameter[Any]:\n dict_ = state.dict\n return sql.bindparam(local_col.key, type_=local_col.\n type, unique=True, callable_=self.prop.\n _get_attr_w_warn_on_none(self.prop.mapper, state,\n dict_, remote_col))\n\n def adapt(col: _CE) ->_CE:\n if self.adapter:\n return self.adapter(col)\n else:\n return col\n if self.property._use_get:\n return sql.and_(*[sql.or_(adapt(x) != state_bindparam(\n adapt(x), state, y), adapt(x) == None) for x, y in\n self.property.local_remote_pairs])\n criterion = sql.and_(*[(x == y) for x, y in zip(self.property.\n mapper.primary_key, self.property.mapper.\n primary_key_from_instance(other))])\n return ~self._criterion_exists(criterion)\n\n def __ne__(self, other: Any) ->ColumnElement[bool]:\n \"\"\"Implement the ``!=`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop != <some object>\n\n This will typically produce a clause such as::\n\n mytable.related_id != <some id>\n\n Where ``<some id>`` is the primary key of the\n given object.\n\n The ``!=`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use\n :meth:`~.Relationship.Comparator.contains`\n in conjunction with :func:`_expression.not_`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. 
Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` in\n conjunction with :func:`_expression.not_` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce an EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction == MANYTOONE:\n return _orm_annotate(~self.property._optimized_compare(\n None, adapt_source=self.adapter))\n else:\n return self._criterion_exists()\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.__negated_contains_or_equals(other))\n\n def _memoized_attr_property(self) ->RelationshipProperty[_PT]:\n self.prop.parent._check_configure()\n return self.prop\n\n def _with_parent(self, instance: object, alias_secondary: bool=True,\n from_entity: Optional[_EntityType[Any]]=None) ->ColumnElement[bool]:\n assert instance is not None\n adapt_source: Optional[_CoreAdapterProto] = None\n if from_entity is not None:\n insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)\n assert insp is not None\n if insp_is_aliased_class(insp):\n adapt_source = insp._adapter.adapt_clause\n return self._optimized_compare(instance, value_is_parent=True,\n adapt_source=adapt_source, alias_secondary=alias_secondary)\n\n def _optimized_compare(self, state: Any, value_is_parent: bool=False,\n adapt_source: Optional[_CoreAdapterProto]=None, alias_secondary:\n bool=True) ->ColumnElement[bool]:\n if state is not None:\n try:\n state = inspect(state)\n except sa_exc.NoInspectionAvailable:\n state = None\n if state is None or not getattr(state, 'is_instance', False):\n raise sa_exc.ArgumentError(\n 'Mapped instance expected for relationship comparison to object. Classes, queries and other SQL elements are not accepted in this context; for comparison with a subquery, use %s.has(**criteria).'\n % self)\n reverse_direction = not value_is_parent\n if state is None:\n return self._lazy_none_clause(reverse_direction, adapt_source=\n adapt_source)\n if not reverse_direction:\n criterion, bind_to_col = (self._lazy_strategy._lazywhere, self.\n _lazy_strategy._bind_to_col)\n else:\n criterion, bind_to_col = (self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col)\n if reverse_direction:\n mapper = self.mapper\n else:\n mapper = self.parent\n dict_ = attributes.instance_dict(state.obj())\n\n def visit_bindparam(bindparam: BindParameter[Any]) ->None:\n if bindparam._identifying_key in bind_to_col:\n bindparam.callable = self._get_attr_w_warn_on_none(mapper,\n state, dict_, bind_to_col[bindparam._identifying_key])\n if self.secondary is not None and alias_secondary:\n criterion = ClauseAdapter(self.secondary._anonymous_fromclause()\n ).traverse(criterion)\n criterion = visitors.cloned_traverse(criterion, {}, {'bindparam':\n visit_bindparam})\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def _get_attr_w_warn_on_none(self, mapper: Mapper[Any], state:\n InstanceState[Any], dict_: _InstanceDict, column: ColumnElement[Any]\n ) ->Callable[[], Any]:\n \"\"\"Create the callable that is used in a many-to-one expression.\n\n E.g.::\n\n u1 = s.query(User).get(5)\n\n expr = Address.user == u1\n\n Above, the SQL should be \"address.user_id = 5\". 
The callable\n returned by this method produces the value \"5\" based on the identity\n of ``u1``.\n\n \"\"\"\n prop = mapper.get_property_by_column(column)\n state._track_last_known_value(prop.key)\n lkv_fixed = state._last_known_values\n\n def _go() ->Any:\n assert lkv_fixed is not None\n last_known = to_return = lkv_fixed[prop.key]\n existing_is_available = (last_known is not LoaderCallableStatus\n .NO_VALUE)\n current_value = mapper._get_state_attr_by_column(state, dict_,\n column, passive=PassiveFlag.PASSIVE_OFF if state.persistent\n else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK)\n if current_value is LoaderCallableStatus.NEVER_SET:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; no value has been set for this column\"\n % (column, state_str(state)))\n elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; the object is detached and the value was expired\"\n % (column, state_str(state)))\n else:\n to_return = current_value\n if to_return is None:\n util.warn(\n 'Got None for value of column %s; this is unsupported for a relationship comparison and will not currently produce an IS comparison (but may in a future release)'\n % column)\n return to_return\n return _go\n\n def _lazy_none_clause(self, reverse_direction: bool=False, adapt_source:\n Optional[_CoreAdapterProto]=None) ->ColumnElement[bool]:\n if not reverse_direction:\n criterion, bind_to_col = (self._lazy_strategy._lazywhere, self.\n _lazy_strategy._bind_to_col)\n else:\n criterion, bind_to_col = (self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col)\n criterion = adapt_criterion_to_null(criterion, bind_to_col)\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def __str__(self) ->str:\n return str(self.parent.class_.__name__) + '.' 
+ self.key\n\n def merge(self, session: Session, source_state: InstanceState[Any],\n source_dict: _InstanceDict, dest_state: InstanceState[Any],\n dest_dict: _InstanceDict, load: bool, _recursive: Dict[Any, object],\n _resolve_conflict_map: Dict[_IdentityKeyType[Any], object]) ->None:\n if load:\n for r in self._reverse_property:\n if (source_state, r) in _recursive:\n return\n if 'merge' not in self._cascade:\n return\n if self.key not in source_dict:\n return\n if self.uselist:\n impl = source_state.get_impl(self.key)\n assert is_has_collection_adapter(impl)\n instances_iterable = impl.get_collection(source_state, source_dict)\n assert not instances_iterable.empty if impl.collection else True\n if load:\n dest_state.get_impl(self.key).get(dest_state, dest_dict,\n passive=PassiveFlag.PASSIVE_MERGE)\n dest_list = []\n for current in instances_iterable:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n if obj is not None:\n dest_list.append(obj)\n if not load:\n coll = attributes.init_state_collection(dest_state,\n dest_dict, self.key)\n for c in dest_list:\n coll.append_without_event(c)\n else:\n dest_impl = dest_state.get_impl(self.key)\n assert is_has_collection_adapter(dest_impl)\n dest_impl.set(dest_state, dest_dict, dest_list, _adapt=\n False, passive=PassiveFlag.PASSIVE_MERGE)\n else:\n current = source_dict[self.key]\n if current is not None:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n else:\n obj = None\n if not load:\n dest_dict[self.key] = obj\n else:\n dest_state.get_impl(self.key).set(dest_state, dest_dict,\n obj, None)\n\n def _value_as_iterable(self, state: InstanceState[_O], dict_:\n _InstanceDict, key: str, passive: PassiveFlag=PassiveFlag.PASSIVE_OFF\n ) ->Sequence[Tuple[InstanceState[_O], _O]]:\n \"\"\"Return a list of tuples (state, obj) for the given\n key.\n\n returns an empty list if the value is None/empty/PASSIVE_NO_RESULT\n \"\"\"\n impl = state.manager[key].impl\n x = impl.get(state, dict_, passive=passive)\n if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:\n return []\n elif is_has_collection_adapter(impl):\n return [(attributes.instance_state(o), o) for o in impl.\n get_collection(state, dict_, x, passive=passive)]\n else:\n return [(attributes.instance_state(x), x)]\n\n def cascade_iterator(self, type_: str, state: InstanceState[Any], dict_:\n _InstanceDict, visited_states: Set[InstanceState[Any]], halt_on:\n Optional[Callable[[InstanceState[Any]], bool]]=None) ->Iterator[Tuple\n [Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:\n if type_ != 'delete' or self.passive_deletes:\n passive = PassiveFlag.PASSIVE_NO_INITIALIZE\n else:\n passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE\n if type_ == 'save-update':\n tuples = state.manager[self.key].impl.get_all_pending(state, dict_)\n else:\n tuples = self._value_as_iterable(state, dict_, self.key,\n passive=passive)\n skip_pending = (type_ == 'refresh-expire' and 'delete-orphan' not in\n self._cascade)\n for instance_state, c in tuples:\n if instance_state in visited_states:\n continue\n if c is None:\n continue\n assert 
instance_state is not None\n instance_dict = attributes.instance_dict(c)\n if halt_on and halt_on(instance_state):\n continue\n if skip_pending and not instance_state.key:\n continue\n instance_mapper = instance_state.manager.mapper\n if not instance_mapper.isa(self.mapper.class_manager.mapper):\n raise AssertionError(\n \"Attribute '%s' on class '%s' doesn't handle objects of type '%s'\"\n % (self.key, self.parent.class_, c.__class__))\n visited_states.add(instance_state)\n yield c, instance_mapper, instance_state, instance_dict\n\n @property\n def _effective_sync_backref(self) ->bool:\n if self.viewonly:\n return False\n else:\n return self.sync_backref is not False\n\n @staticmethod\n def _check_sync_backref(rel_a: RelationshipProperty[Any], rel_b:\n RelationshipProperty[Any]) ->None:\n if rel_a.viewonly and rel_b.sync_backref:\n raise sa_exc.InvalidRequestError(\n 'Relationship %s cannot specify sync_backref=True since %s includes viewonly=True.'\n % (rel_b, rel_a))\n if (rel_a.viewonly and not rel_b.viewonly and rel_b.sync_backref is not\n False):\n rel_b.sync_backref = False\n\n def _add_reverse_property(self, key: str) ->None:\n other = self.mapper.get_property(key, _configure_mappers=False)\n if not isinstance(other, RelationshipProperty):\n raise sa_exc.InvalidRequestError(\n \"back_populates on relationship '%s' refers to attribute '%s' that is not a relationship. The back_populates parameter should refer to the name of a relationship on the target class.\"\n % (self, other))\n self._check_sync_backref(self, other)\n self._check_sync_backref(other, self)\n self._reverse_property.add(other)\n other._reverse_property.add(self)\n other._setup_entity()\n if not other.mapper.common_parent(self.parent):\n raise sa_exc.ArgumentError(\n 'reverse_property %r on relationship %s references relationship %s, which does not reference mapper %s'\n % (key, self, other, self.parent))\n if other._configure_started and self.direction in (ONETOMANY, MANYTOONE\n ) and self.direction == other.direction:\n raise sa_exc.ArgumentError(\n '%s and back-reference %s are both of the same direction %r. 
Did you mean to set remote_side on the many-to-one side ?'\n % (other, self, self.direction))\n\n @util.memoized_property\n def entity(self) ->_InternalEntityType[_T]:\n \"\"\"Return the target mapped entity, which is an inspect() of the\n class or aliased class that is referred towards.\n\n \"\"\"\n self.parent._check_configure()\n return self.entity\n\n @util.memoized_property\n def mapper(self) ->Mapper[_T]:\n \"\"\"Return the targeted :class:`_orm.Mapper` for this\n :class:`.RelationshipProperty`.\n\n \"\"\"\n return self.entity.mapper\n\n def do_init(self) ->None:\n self._check_conflicts()\n self._process_dependent_arguments()\n self._setup_entity()\n self._setup_registry_dependencies()\n self._setup_join_conditions()\n self._check_cascade_settings(self._cascade)\n self._post_init()\n self._generate_backref()\n self._join_condition._warn_for_conflicting_sync_targets()\n super().do_init()\n self._lazy_strategy = cast('LazyLoader', self._get_strategy(((\n 'lazy', 'select'),)))\n\n def _setup_registry_dependencies(self) ->None:\n self.parent.mapper.registry._set_depends_on(self.entity.mapper.registry\n )\n\n def _process_dependent_arguments(self) ->None:\n \"\"\"Convert incoming configuration arguments to their\n proper form.\n\n Callables are resolved, ORM annotations removed.\n\n \"\"\"\n init_args = self._init_args\n for attr in ('order_by', 'primaryjoin', 'secondaryjoin',\n 'secondary', 'foreign_keys', 'remote_side'):\n rel_arg = getattr(init_args, attr)\n rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])\n for attr in ('primaryjoin', 'secondaryjoin'):\n rel_arg = getattr(init_args, attr)\n val = rel_arg.resolved\n if val is not None:\n rel_arg.resolved = _orm_deannotate(coercions.expect(roles.\n ColumnArgumentRole, val, argname=attr))\n secondary = init_args.secondary.resolved\n if secondary is not None and _is_mapped_class(secondary):\n raise sa_exc.ArgumentError(\n \"secondary argument %s passed to to relationship() %s must be a Table object or other FROM clause; can't send a mapped class directly as rows in 'secondary' are persisted independently of a class that is mapped to that same table.\"\n % (secondary, self))\n if (init_args.order_by.resolved is not False and init_args.order_by\n .resolved is not None):\n self.order_by = tuple(coercions.expect(roles.ColumnArgumentRole,\n x, argname='order_by') for x in util.to_list(init_args.\n order_by.resolved))\n else:\n self.order_by = False\n self._user_defined_foreign_keys = util.column_set(coercions.expect(\n roles.ColumnArgumentRole, x, argname='foreign_keys') for x in\n util.to_column_set(init_args.foreign_keys.resolved))\n self.remote_side = util.column_set(coercions.expect(roles.\n ColumnArgumentRole, x, argname='remote_side') for x in util.\n to_column_set(init_args.remote_side.resolved))\n\n def declarative_scan(self, decl_scan: _ClassScanMapperConfig, registry:\n _RegistryType, cls: Type[Any], originating_module: Optional[str],\n key: str, mapped_container: Optional[Type[Mapped[Any]]], annotation:\n Optional[_AnnotationScanType], extracted_mapped_annotation:\n Optional[_AnnotationScanType], is_dataclass_field: bool) ->None:\n argument = extracted_mapped_annotation\n if extracted_mapped_annotation is None:\n if self.argument is None:\n self._raise_for_required(key, cls)\n else:\n return\n argument = extracted_mapped_annotation\n assert originating_module is not None\n is_write_only = mapped_container is not None and issubclass(\n mapped_container, WriteOnlyMapped)\n if is_write_only:\n self.lazy = 
'write_only'\n self.strategy_key = ('lazy', self.lazy),\n is_dynamic = mapped_container is not None and issubclass(\n mapped_container, DynamicMapped)\n if is_dynamic:\n self.lazy = 'dynamic'\n self.strategy_key = ('lazy', self.lazy),\n argument = de_optionalize_union_types(argument)\n if hasattr(argument, '__origin__'):\n arg_origin = argument.__origin__\n if isinstance(arg_origin, type) and issubclass(arg_origin, abc.\n Collection):\n if self.collection_class is None:\n if _py_inspect.isabstract(arg_origin):\n raise sa_exc.ArgumentError(\n f\"Collection annotation type {arg_origin} cannot be instantiated; please provide an explicit 'collection_class' parameter (e.g. list, set, etc.) to the relationship() function to accompany this annotation\"\n )\n self.collection_class = arg_origin\n elif not is_write_only and not is_dynamic:\n self.uselist = False\n if argument.__args__:\n if isinstance(arg_origin, type) and issubclass(arg_origin,\n typing.Mapping):\n type_arg = argument.__args__[-1]\n else:\n type_arg = argument.__args__[0]\n if hasattr(type_arg, '__forward_arg__'):\n str_argument = type_arg.__forward_arg__\n argument = resolve_name_to_real_class_name(str_argument,\n originating_module)\n else:\n argument = type_arg\n else:\n raise sa_exc.ArgumentError(\n f'Generic alias {argument} requires an argument')\n elif hasattr(argument, '__forward_arg__'):\n argument = argument.__forward_arg__\n argument = resolve_name_to_real_class_name(argument,\n originating_module)\n if (self.collection_class is None and not is_write_only and not\n is_dynamic):\n self.uselist = False\n if self.argument is None:\n self.argument = cast('_RelationshipArgumentType[_T]', argument)\n\n @util.preload_module('sqlalchemy.orm.mapper')\n def _setup_entity(self, __argument: Any=None) ->None:\n if 'entity' in self.__dict__:\n return\n mapperlib = util.preloaded.orm_mapper\n if __argument:\n argument = __argument\n else:\n argument = self.argument\n resolved_argument: _ExternalEntityType[Any]\n if isinstance(argument, str):\n resolved_argument = cast('_ExternalEntityType[Any]', self.\n _clsregistry_resolve_name(argument)())\n elif callable(argument) and not isinstance(argument, (type,\n mapperlib.Mapper)):\n resolved_argument = argument()\n else:\n resolved_argument = argument\n entity: _InternalEntityType[Any]\n if isinstance(resolved_argument, type):\n entity = class_mapper(resolved_argument, configure=False)\n else:\n try:\n entity = inspect(resolved_argument)\n except sa_exc.NoInspectionAvailable:\n entity = None\n if not hasattr(entity, 'mapper'):\n raise sa_exc.ArgumentError(\n \"relationship '%s' expects a class or a mapper argument (received: %s)\"\n % (self.key, type(resolved_argument)))\n self.entity = entity\n self.target = self.entity.persist_selectable\n\n def _setup_join_conditions(self) ->None:\n self._join_condition = jc = JoinCondition(parent_persist_selectable\n =self.parent.persist_selectable, child_persist_selectable=self.\n entity.persist_selectable, parent_local_selectable=self.parent.\n local_table, child_local_selectable=self.entity.local_table,\n primaryjoin=self._init_args.primaryjoin.resolved, secondary=\n self._init_args.secondary.resolved, secondaryjoin=self.\n _init_args.secondaryjoin.resolved, parent_equivalents=self.\n parent._equivalent_columns, child_equivalents=self.mapper.\n _equivalent_columns, consider_as_foreign_keys=self.\n _user_defined_foreign_keys, local_remote_pairs=self.\n local_remote_pairs, remote_side=self.remote_side,\n self_referential=self._is_self_referential, 
prop=self,\n support_sync=not self.viewonly, can_be_synced_fn=self.\n _columns_are_mapped)\n self.primaryjoin = jc.primaryjoin\n self.secondaryjoin = jc.secondaryjoin\n self.secondary = jc.secondary\n self.direction = jc.direction\n self.local_remote_pairs = jc.local_remote_pairs\n self.remote_side = jc.remote_columns\n self.local_columns = jc.local_columns\n self.synchronize_pairs = jc.synchronize_pairs\n self._calculated_foreign_keys = jc.foreign_key_columns\n self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs\n\n @property\n def _clsregistry_resolve_arg(self) ->Callable[[str, bool], _class_resolver\n ]:\n return self._clsregistry_resolvers[1]\n\n @property\n def _clsregistry_resolve_name(self) ->Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]]:\n return self._clsregistry_resolvers[0]\n\n @util.memoized_property\n @util.preload_module('sqlalchemy.orm.clsregistry')\n def _clsregistry_resolvers(self) ->Tuple[Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]], Callable[[str, bool],\n _class_resolver]]:\n _resolver = util.preloaded.orm_clsregistry._resolver\n return _resolver(self.parent.class_, self)\n\n def _check_conflicts(self) ->None:\n \"\"\"Test that this relationship is legal, warn about\n inheritance conflicts.\"\"\"\n if self.parent.non_primary and not class_mapper(self.parent.class_,\n configure=False).has_property(self.key):\n raise sa_exc.ArgumentError(\n \"Attempting to assign a new relationship '%s' to a non-primary mapper on class '%s'. New relationships can only be added to the primary mapper, i.e. the very first mapper created for class '%s' \"\n % (self.key, self.parent.class_.__name__, self.parent.\n class_.__name__))\n\n @property\n def cascade(self) ->CascadeOptions:\n \"\"\"Return the current cascade setting for this\n :class:`.RelationshipProperty`.\n \"\"\"\n return self._cascade\n\n @cascade.setter\n def cascade(self, cascade: Union[str, CascadeOptions]) ->None:\n self._set_cascade(cascade)\n\n def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) ->None:\n cascade = CascadeOptions(cascade_arg)\n if self.viewonly:\n cascade = CascadeOptions(cascade.intersection(CascadeOptions.\n _viewonly_cascades))\n if 'mapper' in self.__dict__:\n self._check_cascade_settings(cascade)\n self._cascade = cascade\n if self._dependency_processor:\n self._dependency_processor.cascade = cascade\n\n def _check_cascade_settings(self, cascade: CascadeOptions) ->None:\n if cascade.delete_orphan and not self.single_parent and (self.\n direction is MANYTOMANY or self.direction is MANYTOONE):\n raise sa_exc.ArgumentError(\n 'For %(direction)s relationship %(rel)s, delete-orphan cascade is normally configured only on the \"one\" side of a one-to-many relationship, and not on the \"many\" side of a many-to-one or many-to-many relationship. 
To force this relationship to allow a particular \"%(relatedcls)s\" object to be referred towards by only a single \"%(clsname)s\" object at a time via the %(rel)s relationship, which would allow delete-orphan cascade to take place in this direction, set the single_parent=True flag.'\n % {'rel': self, 'direction': 'many-to-one' if self.\n direction is MANYTOONE else 'many-to-many', 'clsname': self\n .parent.class_.__name__, 'relatedcls': self.mapper.class_.\n __name__}, code='bbf0')\n if self.passive_deletes == 'all' and ('delete' in cascade or \n 'delete-orphan' in cascade):\n raise sa_exc.ArgumentError(\n \"On %s, can't set passive_deletes='all' in conjunction with 'delete' or 'delete-orphan' cascade\"\n % self)\n if cascade.delete_orphan:\n self.mapper.primary_mapper()._delete_orphans.append((self.key,\n self.parent.class_))\n\n def _persists_for(self, mapper: Mapper[Any]) ->bool:\n \"\"\"Return True if this property will persist values on behalf\n of the given mapper.\n\n \"\"\"\n return self.key in mapper.relationships and mapper.relationships[self\n .key] is self\n\n def _columns_are_mapped(self, *cols: ColumnElement[Any]) ->bool:\n \"\"\"Return True if all columns in the given collection are\n mapped by the tables referenced by this :class:`.RelationshipProperty`.\n\n \"\"\"\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(c\n ) and not self.target.c.contains_column(c):\n return False\n return True\n\n def _generate_backref(self) ->None:\n \"\"\"Interpret the 'backref' instruction to create a\n :func:`_orm.relationship` complementary to this one.\"\"\"\n if self.parent.non_primary:\n return\n if self.backref is not None and not self.back_populates:\n kwargs: Dict[str, Any]\n if isinstance(self.backref, str):\n backref_key, kwargs = self.backref, {}\n else:\n backref_key, kwargs = self.backref\n mapper = self.mapper.primary_mapper()\n if not mapper.concrete:\n check = set(mapper.iterate_to_root()).union(mapper.\n self_and_descendants)\n for m in check:\n if m.has_property(backref_key) and not m.concrete:\n raise sa_exc.ArgumentError(\n \"Error creating backref '%s' on relationship '%s': property of that name exists on mapper '%s'\"\n % (backref_key, self, m))\n if self.secondary is not None:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n secondaryjoin_minus_local)\n sj = kwargs.pop('secondaryjoin', self._join_condition.\n primaryjoin_minus_local)\n else:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n primaryjoin_reverse_remote)\n sj = kwargs.pop('secondaryjoin', None)\n if sj:\n raise sa_exc.InvalidRequestError(\n \"Can't assign 'secondaryjoin' on a backref against a non-secondary relationship.\"\n )\n foreign_keys = kwargs.pop('foreign_keys', self.\n _user_defined_foreign_keys)\n parent = self.parent.primary_mapper()\n kwargs.setdefault('viewonly', self.viewonly)\n kwargs.setdefault('post_update', self.post_update)\n kwargs.setdefault('passive_updates', self.passive_updates)\n kwargs.setdefault('sync_backref', self.sync_backref)\n self.back_populates = backref_key\n relationship = RelationshipProperty(parent, self.secondary,\n primaryjoin=pj, secondaryjoin=sj, foreign_keys=foreign_keys,\n back_populates=self.key, **kwargs)\n mapper._configure_property(backref_key, relationship,\n warn_for_existing=True)\n if self.back_populates:\n self._add_reverse_property(self.back_populates)\n\n 
@util.preload_module('sqlalchemy.orm.dependency')\n def _post_init(self) ->None:\n dependency = util.preloaded.orm_dependency\n if self.uselist is None:\n self.uselist = self.direction is not MANYTOONE\n if not self.viewonly:\n self._dependency_processor = (dependency.DependencyProcessor.\n from_relationship(self))\n\n @util.memoized_property\n def _use_get(self) ->bool:\n \"\"\"memoize the 'use_get' attribute of this RelationshipLoader's\n lazyloader.\"\"\"\n strategy = self._lazy_strategy\n return strategy.use_get\n\n @util.memoized_property\n def _is_self_referential(self) ->bool:\n return self.mapper.common_parent(self.parent)\n\n def _create_joins(self, source_polymorphic: bool=False,\n source_selectable: Optional[FromClause]=None, dest_selectable:\n Optional[FromClause]=None, of_type_entity: Optional[\n _InternalEntityType[Any]]=None, alias_secondary: bool=False,\n extra_criteria: Tuple[ColumnElement[bool], ...]=()) ->Tuple[\n ColumnElement[bool], Optional[ColumnElement[bool]], FromClause,\n FromClause, Optional[FromClause], Optional[ClauseAdapter]]:\n aliased = False\n if alias_secondary and self.secondary is not None:\n aliased = True\n if source_selectable is None:\n if source_polymorphic and self.parent.with_polymorphic:\n source_selectable = self.parent._with_polymorphic_selectable\n if of_type_entity:\n dest_mapper = of_type_entity.mapper\n if dest_selectable is None:\n dest_selectable = of_type_entity.selectable\n aliased = True\n else:\n dest_mapper = self.mapper\n if dest_selectable is None:\n dest_selectable = self.entity.selectable\n if self.mapper.with_polymorphic:\n aliased = True\n if self._is_self_referential and source_selectable is None:\n dest_selectable = dest_selectable._anonymous_fromclause()\n aliased = True\n elif dest_selectable is not self.mapper._with_polymorphic_selectable or self.mapper.with_polymorphic:\n aliased = True\n single_crit = dest_mapper._single_table_criterion\n aliased = aliased or source_selectable is not None and (\n source_selectable is not self.parent.\n _with_polymorphic_selectable or source_selectable._is_subquery)\n (primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable\n ) = (self._join_condition.join_targets(source_selectable,\n dest_selectable, aliased, single_crit, extra_criteria))\n if source_selectable is None:\n source_selectable = self.parent.local_table\n if dest_selectable is None:\n dest_selectable = self.entity.local_table\n return (primaryjoin, secondaryjoin, source_selectable,\n dest_selectable, secondary, target_adapter)\n\n\n<mask token>\n\n\nclass JoinCondition:\n primaryjoin_initial: Optional[ColumnElement[bool]]\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n prop: RelationshipProperty[Any]\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: _ColumnPairs\n direction: RelationshipDirection\n parent_persist_selectable: FromClause\n child_persist_selectable: FromClause\n parent_local_selectable: FromClause\n child_local_selectable: FromClause\n _local_remote_pairs: Optional[_ColumnPairs]\n\n def __init__(self, parent_persist_selectable: FromClause,\n child_persist_selectable: FromClause, parent_local_selectable:\n FromClause, child_local_selectable: FromClause, *, primaryjoin:\n Optional[ColumnElement[bool]]=None, secondary: Optional[FromClause]\n =None, secondaryjoin: Optional[ColumnElement[bool]]=None,\n parent_equivalents: Optional[_EquivalentColumnMap]=None,\n child_equivalents: Optional[_EquivalentColumnMap]=None,\n 
consider_as_foreign_keys: Any=None, local_remote_pairs: Optional[\n _ColumnPairs]=None, remote_side: Any=None, self_referential: Any=\n False, prop: RelationshipProperty[Any], support_sync: bool=True,\n can_be_synced_fn: Callable[..., bool]=lambda *c: True):\n self.parent_persist_selectable = parent_persist_selectable\n self.parent_local_selectable = parent_local_selectable\n self.child_persist_selectable = child_persist_selectable\n self.child_local_selectable = child_local_selectable\n self.parent_equivalents = parent_equivalents\n self.child_equivalents = child_equivalents\n self.primaryjoin_initial = primaryjoin\n self.secondaryjoin = secondaryjoin\n self.secondary = secondary\n self.consider_as_foreign_keys = consider_as_foreign_keys\n self._local_remote_pairs = local_remote_pairs\n self._remote_side = remote_side\n self.prop = prop\n self.self_referential = self_referential\n self.support_sync = support_sync\n self.can_be_synced_fn = can_be_synced_fn\n self._determine_joins()\n assert self.primaryjoin is not None\n self._sanitize_joins()\n self._annotate_fks()\n self._annotate_remote()\n self._annotate_local()\n self._annotate_parentmapper()\n self._setup_pairs()\n self._check_foreign_cols(self.primaryjoin, True)\n if self.secondaryjoin is not None:\n self._check_foreign_cols(self.secondaryjoin, False)\n self._determine_direction()\n self._check_remote_side()\n self._log_joins()\n\n def _log_joins(self) ->None:\n log = self.prop.logger\n log.info('%s setup primary join %s', self.prop, self.primaryjoin)\n log.info('%s setup secondary join %s', self.prop, self.secondaryjoin)\n log.info('%s synchronize pairs [%s]', self.prop, ','.join(\n '(%s => %s)' % (l, r) for l, r in self.synchronize_pairs))\n log.info('%s secondary synchronize pairs [%s]', self.prop, ','.join\n ('(%s => %s)' % (l, r) for l, r in self.\n secondary_synchronize_pairs or []))\n log.info('%s local/remote pairs [%s]', self.prop, ','.join(\n '(%s / %s)' % (l, r) for l, r in self.local_remote_pairs))\n log.info('%s remote columns [%s]', self.prop, ','.join('%s' % col for\n col in self.remote_columns))\n log.info('%s local columns [%s]', self.prop, ','.join('%s' % col for\n col in self.local_columns))\n log.info('%s relationship direction %s', self.prop, self.direction)\n\n def _sanitize_joins(self) ->None:\n \"\"\"remove the parententity annotation from our join conditions which\n can leak in here based on some declarative patterns and maybe others.\n\n \"parentmapper\" is relied upon both by the ORM evaluator as well as\n the use case in _join_fixture_inh_selfref_w_entity\n that relies upon it being present, see :ticket:`3364`.\n\n \"\"\"\n self.primaryjoin = _deep_deannotate(self.primaryjoin, values=(\n 'parententity', 'proxy_key'))\n if self.secondaryjoin is not None:\n self.secondaryjoin = _deep_deannotate(self.secondaryjoin,\n values=('parententity', 'proxy_key'))\n\n def _determine_joins(self) ->None:\n \"\"\"Determine the 'primaryjoin' and 'secondaryjoin' attributes,\n if not passed to the constructor already.\n\n This is based on analysis of the foreign key relationships\n between the parent and target mapped selectables.\n\n \"\"\"\n if self.secondaryjoin is not None and self.secondary is None:\n raise sa_exc.ArgumentError(\n 'Property %s specified with secondary join condition but no secondary argument'\n % self.prop)\n try:\n consider_as_foreign_keys = self.consider_as_foreign_keys or None\n if self.secondary is not None:\n if self.secondaryjoin is None:\n self.secondaryjoin = join_condition(self.\n 
child_persist_selectable, self.secondary, a_subset=\n self.child_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.secondary, a_subset\n =self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n elif self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.\n child_persist_selectable, a_subset=self.\n parent_local_selectable, consider_as_foreign_keys=\n consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n except sa_exc.NoForeignKeysError as nfe:\n if self.secondary is not None:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables via secondary table '%s'. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify 'primaryjoin' and 'secondaryjoin' expressions.\"\n % (self.prop, self.secondary)) from nfe\n else:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify a 'primaryjoin' expression.\"\n % self.prop) from nfe\n except sa_exc.AmbiguousForeignKeysError as afe:\n if self.secondary is not None:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables via secondary table '%s'. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference from the secondary table to each of the parent and child tables.\"\n % (self.prop, self.secondary)) from afe\n else:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference to the parent table.\"\n % self.prop) from afe\n\n @property\n def primaryjoin_minus_local(self) ->ColumnElement[bool]:\n return _deep_deannotate(self.primaryjoin, values=('local', 'remote'))\n\n @property\n def secondaryjoin_minus_local(self) ->ColumnElement[bool]:\n assert self.secondaryjoin is not None\n return _deep_deannotate(self.secondaryjoin, values=('local', 'remote'))\n\n @util.memoized_property\n def primaryjoin_reverse_remote(self) ->ColumnElement[bool]:\n \"\"\"Return the primaryjoin condition suitable for the\n \"reverse\" direction.\n\n If the primaryjoin was delivered here with pre-existing\n \"remote\" annotations, the local/remote annotations\n are reversed. 
Otherwise, the local/remote annotations\n are removed.\n\n \"\"\"\n if self._has_remote_annotations:\n\n def replace(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n v = dict(element._annotations)\n del v['remote']\n v['local'] = True\n return element._with_annotations(v)\n elif 'local' in element._annotations:\n v = dict(element._annotations)\n del v['local']\n v['remote'] = True\n return element._with_annotations(v)\n return None\n return visitors.replacement_traverse(self.primaryjoin, {}, replace)\n elif self._has_foreign_annotations:\n return _deep_deannotate(self.primaryjoin, values=('local',\n 'remote'))\n else:\n return _deep_deannotate(self.primaryjoin)\n\n def _has_annotation(self, clause: ClauseElement, annotation: str) ->bool:\n for col in visitors.iterate(clause, {}):\n if annotation in col._annotations:\n return True\n else:\n return False\n\n @util.memoized_property\n def _has_foreign_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'foreign')\n\n @util.memoized_property\n def _has_remote_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'remote')\n\n def _annotate_fks(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'foreign' annotations marking columns\n considered as foreign.\n\n \"\"\"\n if self._has_foreign_annotations:\n return\n if self.consider_as_foreign_keys:\n self._annotate_from_fk_list()\n else:\n self._annotate_present_fks()\n\n def _annotate_from_fk_list(self) ->None:\n\n def check_fk(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in self.consider_as_foreign_keys:\n return element._annotate({'foreign': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, check_fk)\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, check_fk)\n\n def _annotate_present_fks(self) ->None:\n if self.secondary is not None:\n secondarycols = util.column_set(self.secondary.c)\n else:\n secondarycols = set()\n\n def is_foreign(a: ColumnElement[Any], b: ColumnElement[Any]\n ) ->Optional[ColumnElement[Any]]:\n if isinstance(a, schema.Column) and isinstance(b, schema.Column):\n if a.references(b):\n return a\n elif b.references(a):\n return b\n if secondarycols:\n if a in secondarycols and b not in secondarycols:\n return a\n elif b in secondarycols and a not in secondarycols:\n return b\n return None\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n if not isinstance(binary.left, sql.ColumnElement\n ) or not isinstance(binary.right, sql.ColumnElement):\n return\n if ('foreign' not in binary.left._annotations and 'foreign' not in\n binary.right._annotations):\n col = is_foreign(binary.left, binary.right)\n if col is not None:\n if col.compare(binary.left):\n binary.left = binary.left._annotate({'foreign': True})\n elif col.compare(binary.right):\n binary.right = binary.right._annotate({'foreign': True}\n )\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.cloned_traverse(self.\n secondaryjoin, {}, {'binary': visit_binary})\n\n def _refers_to_parent_table(self) ->bool:\n \"\"\"Return True if the join condition contains column\n comparisons where both columns are in both tables.\n\n \"\"\"\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) 
->None:\n nonlocal result\n c, f = binary.left, binary.right\n if isinstance(c, expression.ColumnClause) and isinstance(f,\n expression.ColumnClause) and pt.is_derived_from(c.table\n ) and pt.is_derived_from(f.table) and mt.is_derived_from(c.\n table) and mt.is_derived_from(f.table):\n result = True\n visitors.traverse(self.primaryjoin, {}, {'binary': visit_binary})\n return result\n\n def _tables_overlap(self) ->bool:\n \"\"\"Return True if parent/child tables have some overlap.\"\"\"\n return selectables_overlap(self.parent_persist_selectable, self.\n child_persist_selectable)\n\n def _annotate_remote(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'remote' annotations marking columns\n considered as part of the 'remote' side.\n\n \"\"\"\n if self._has_remote_annotations:\n return\n if self.secondary is not None:\n self._annotate_remote_secondary()\n elif self._local_remote_pairs or self._remote_side:\n self._annotate_remote_from_args()\n elif self._refers_to_parent_table():\n self._annotate_selfref(lambda col: 'foreign' in col.\n _annotations, False)\n elif self._tables_overlap():\n self._annotate_remote_with_overlap()\n else:\n self._annotate_remote_distinct_selectables()\n\n def _annotate_remote_secondary(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when 'secondary' is present.\n\n \"\"\"\n assert self.secondary is not None\n fixed_secondary = self.secondary\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if fixed_secondary.c.contains_column(element):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n assert self.secondaryjoin is not None\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, repl)\n\n def _annotate_selfref(self, fn: Callable[[ColumnElement[Any]], bool],\n remote_side_given: bool) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the relationship is detected as self-referential.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n equated = binary.left.compare(binary.right)\n if isinstance(binary.left, expression.ColumnClause) and isinstance(\n binary.right, expression.ColumnClause):\n if fn(binary.left):\n binary.left = binary.left._annotate({'remote': True})\n if fn(binary.right) and not equated:\n binary.right = binary.right._annotate({'remote': True})\n elif not remote_side_given:\n self._warn_non_column_elements()\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_from_args(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the 'remote_side' or '_local_remote_pairs'\n arguments are used.\n\n \"\"\"\n if self._local_remote_pairs:\n if self._remote_side:\n raise sa_exc.ArgumentError(\n 'remote_side argument is redundant against more detailed _local_remote_side argument.'\n )\n remote_side = [r for l, r in self._local_remote_pairs]\n else:\n remote_side = self._remote_side\n if self._refers_to_parent_table():\n self._annotate_selfref(lambda col: col in remote_side, True)\n else:\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in set(remote_side):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.\n primaryjoin, {}, repl)\n\n def _annotate_remote_with_overlap(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables have 
some set of\n tables in common, though is not a fully self-referential\n relationship.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n binary.left, binary.right = proc_left_right(binary.left, binary\n .right)\n binary.right, binary.left = proc_left_right(binary.right,\n binary.left)\n check_entities = (self.prop is not None and self.prop.mapper is not\n self.prop.parent)\n\n def proc_left_right(left: ColumnElement[Any], right: ColumnElement[Any]\n ) ->Tuple[ColumnElement[Any], ColumnElement[Any]]:\n if isinstance(left, expression.ColumnClause) and isinstance(right,\n expression.ColumnClause):\n if self.child_persist_selectable.c.contains_column(right\n ) and self.parent_persist_selectable.c.contains_column(left\n ):\n right = right._annotate({'remote': True})\n elif check_entities and right._annotations.get('parentmapper'\n ) is self.prop.mapper:\n right = right._annotate({'remote': True})\n elif check_entities and left._annotations.get('parentmapper'\n ) is self.prop.mapper:\n left = left._annotate({'remote': True})\n else:\n self._warn_non_column_elements()\n return left, right\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_distinct_selectables(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables are entirely\n separate.\n\n \"\"\"\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if self.child_persist_selectable.c.contains_column(element) and (\n not self.parent_local_selectable.c.contains_column(element) or\n self.child_local_selectable.c.contains_column(element)):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n\n def _warn_non_column_elements(self) ->None:\n util.warn(\n 'Non-simple column elements in primary join condition for property %s - consider using remote() annotations to mark the remote side.'\n % self.prop)\n\n def _annotate_local(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'local' annotations.\n\n This annotates all column elements found\n simultaneously in the parent table\n and the join condition that don't have a\n 'remote' annotation set up from\n _annotate_remote() or user-defined.\n\n \"\"\"\n if self._has_annotation(self.primaryjoin, 'local'):\n return\n if self._local_remote_pairs:\n local_side = util.column_set([l for l, r in self.\n _local_remote_pairs])\n else:\n local_side = util.column_set(self.parent_persist_selectable.c)\n\n def locals_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' not in element._annotations and element in local_side:\n return element._annotate({'local': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, locals_)\n\n def _annotate_parentmapper(self) ->None:\n\n def parentmappers_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n return element._annotate({'parentmapper': self.prop.mapper})\n elif 'local' in element._annotations:\n return element._annotate({'parentmapper': self.prop.parent})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, parentmappers_)\n\n def _check_remote_side(self) ->None:\n if not self.local_remote_pairs:\n raise sa_exc.ArgumentError(\n 'Relationship %s could not determine any unambiguous local/remote column pairs based on join condition and remote_side arguments. 
Consider using the remote() annotation to accurately mark those elements of the join condition that are on the remote side of the relationship.'\n % (self.prop,))\n else:\n not_target = util.column_set(self.parent_persist_selectable.c\n ).difference(self.child_persist_selectable.c)\n for _, rmt in self.local_remote_pairs:\n if rmt in not_target:\n util.warn(\n \"Expression %s is marked as 'remote', but these column(s) are local to the local side. The remote() annotation is needed only for a self-referential relationship where both sides of the relationship refer to the same tables.\"\n % (rmt,))\n\n def _check_foreign_cols(self, join_condition: ColumnElement[bool],\n primary: bool) ->None:\n \"\"\"Check the foreign key columns collected and emit error\n messages.\"\"\"\n can_sync = False\n foreign_cols = self._gather_columns_with_annotation(join_condition,\n 'foreign')\n has_foreign = bool(foreign_cols)\n if primary:\n can_sync = bool(self.synchronize_pairs)\n else:\n can_sync = bool(self.secondary_synchronize_pairs)\n if (self.support_sync and can_sync or not self.support_sync and\n has_foreign):\n return\n if self.support_sync and has_foreign and not can_sync:\n err = (\n \"Could not locate any simple equality expressions involving locally mapped foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n \" Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation. To allow comparison operators other than '==', the relationship can be marked as viewonly=True.\"\n )\n raise sa_exc.ArgumentError(err)\n else:\n err = (\n \"Could not locate any relevant foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n ' Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation.'\n )\n raise sa_exc.ArgumentError(err)\n\n def _determine_direction(self) ->None:\n \"\"\"Determine if this relationship is one to many, many to one,\n many to many.\n\n \"\"\"\n if self.secondaryjoin is not None:\n self.direction = MANYTOMANY\n else:\n parentcols = util.column_set(self.parent_persist_selectable.c)\n targetcols = util.column_set(self.child_persist_selectable.c)\n onetomany_fk = targetcols.intersection(self.foreign_key_columns)\n manytoone_fk = parentcols.intersection(self.foreign_key_columns)\n if onetomany_fk and manytoone_fk:\n onetomany_local = self._gather_columns_with_annotation(self\n .primaryjoin, 'remote', 'foreign')\n manytoone_local = {c for c in self.\n _gather_columns_with_annotation(self.primaryjoin,\n 'foreign') if 'remote' not in c._annotations}\n if onetomany_local and manytoone_local:\n self_equated = self.remote_columns.intersection(self.\n local_columns)\n onetomany_local = onetomany_local.difference(self_equated)\n manytoone_local = manytoone_local.difference(self_equated)\n if onetomany_local and not manytoone_local:\n self.direction = ONETOMANY\n elif manytoone_local and not onetomany_local:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns within the join condition are present in both the parent and the child's mapped tables. 
Ensure that only those columns referring to a parent column are marked as foreign, either via the foreign() annotation or via the foreign_keys argument.\"\n % self.prop)\n elif onetomany_fk:\n self.direction = ONETOMANY\n elif manytoone_fk:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns are present in neither the parent nor the child's mapped tables\"\n % self.prop)\n\n def _deannotate_pairs(self, collection: _ColumnPairIterable\n ) ->_MutableColumnPairs:\n \"\"\"provide deannotation for the various lists of\n pairs, so that using them in hashes doesn't incur\n high-overhead __eq__() comparisons against\n original columns mapped.\n\n \"\"\"\n return [(x._deannotate(), y._deannotate()) for x, y in collection]\n\n def _setup_pairs(self) ->None:\n sync_pairs: _MutableColumnPairs = []\n lrp: util.OrderedSet[Tuple[ColumnElement[Any], ColumnElement[Any]]\n ] = util.OrderedSet([])\n secondary_sync_pairs: _MutableColumnPairs = []\n\n def go(joincond: ColumnElement[bool], collection: _MutableColumnPairs\n ) ->None:\n\n def visit_binary(binary: BinaryExpression[Any], left:\n ColumnElement[Any], right: ColumnElement[Any]) ->None:\n if ('remote' in right._annotations and 'remote' not in left\n ._annotations and self.can_be_synced_fn(left)):\n lrp.add((left, right))\n elif 'remote' in left._annotations and 'remote' not in right._annotations and self.can_be_synced_fn(\n right):\n lrp.add((right, left))\n if binary.operator is operators.eq and self.can_be_synced_fn(\n left, right):\n if 'foreign' in right._annotations:\n collection.append((left, right))\n elif 'foreign' in left._annotations:\n collection.append((right, left))\n visit_binary_product(visit_binary, joincond)\n for joincond, collection in [(self.primaryjoin, sync_pairs), (self.\n secondaryjoin, secondary_sync_pairs)]:\n if joincond is None:\n continue\n go(joincond, collection)\n self.local_remote_pairs = self._deannotate_pairs(lrp)\n self.synchronize_pairs = self._deannotate_pairs(sync_pairs)\n self.secondary_synchronize_pairs = self._deannotate_pairs(\n secondary_sync_pairs)\n _track_overlapping_sync_targets: weakref.WeakKeyDictionary[\n ColumnElement[Any], weakref.WeakKeyDictionary[RelationshipProperty[\n Any], ColumnElement[Any]]] = weakref.WeakKeyDictionary()\n\n def _warn_for_conflicting_sync_targets(self) ->None:\n if not self.support_sync:\n return\n for from_, to_ in ([(from_, to_) for from_, to_ in self.\n synchronize_pairs] + [(from_, to_) for from_, to_ in self.\n secondary_synchronize_pairs]):\n if to_ not in self._track_overlapping_sync_targets:\n self._track_overlapping_sync_targets[to_\n ] = weakref.WeakKeyDictionary({self.prop: from_})\n else:\n other_props = []\n prop_to_from = self._track_overlapping_sync_targets[to_]\n for pr, fr_ in prop_to_from.items():\n if (not pr.mapper._dispose_called and pr not in self.\n prop._reverse_property and pr.key not in self.prop.\n _overlaps and self.prop.key not in pr._overlaps and\n '__*' not in self.prop._overlaps and '__*' not in\n pr._overlaps and not self.prop.parent.is_sibling(pr\n .parent) and not self.prop.mapper.is_sibling(pr.\n mapper) and not self.prop.parent.is_sibling(pr.\n mapper) and not self.prop.mapper.is_sibling(pr.\n parent) and (self.prop.key != pr.key or not self.\n prop.parent.common_parent(pr.parent))):\n other_props.append((pr, fr_))\n if other_props:\n util.warn(\n 'relationship \\'%s\\' will copy column %s to column %s, which conflicts with relationship(s): 
%s. If this is not the intention, consider if these relationships should be linked with back_populates, or if viewonly=True should be applied to one or more if they are read-only. For the less common case that foreign key constraints are partially overlapping, the orm.foreign() annotation can be used to isolate the columns that should be written towards. To silence this warning, add the parameter \\'overlaps=\"%s\"\\' to the \\'%s\\' relationship.'\n % (self.prop, from_, to_, ', '.join(sorted(\n \"'%s' (copies %s to %s)\" % (pr, fr_, to_) for pr,\n fr_ in other_props)), ','.join(sorted(pr.key for pr,\n fr in other_props)), self.prop), code='qzyx')\n self._track_overlapping_sync_targets[to_][self.prop] = from_\n\n @util.memoized_property\n def remote_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('remote')\n\n @util.memoized_property\n def local_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('local')\n\n @util.memoized_property\n def foreign_key_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('foreign')\n\n def _gather_join_annotations(self, annotation: str) ->Set[ColumnElement\n [Any]]:\n s = set(self._gather_columns_with_annotation(self.primaryjoin,\n annotation))\n if self.secondaryjoin is not None:\n s.update(self._gather_columns_with_annotation(self.\n secondaryjoin, annotation))\n return {x._deannotate() for x in s}\n\n def _gather_columns_with_annotation(self, clause: ColumnElement[Any], *\n annotation: Iterable[str]) ->Set[ColumnElement[Any]]:\n annotation_set = set(annotation)\n return {cast(ColumnElement[Any], col) for col in visitors.iterate(\n clause, {}) if annotation_set.issubset(col._annotations)}\n\n def join_targets(self, source_selectable: Optional[FromClause],\n dest_selectable: FromClause, aliased: bool, single_crit: Optional[\n ColumnElement[bool]]=None, extra_criteria: Tuple[ColumnElement[bool\n ], ...]=()) ->Tuple[ColumnElement[bool], Optional[ColumnElement[\n bool]], Optional[FromClause], Optional[ClauseAdapter], FromClause]:\n \"\"\"Given a source and destination selectable, create a\n join between them.\n\n This takes into account aliasing the join clause\n to reference the appropriate corresponding columns\n in the target objects, as well as the extra child\n criterion, equivalent column sets, etc.\n\n \"\"\"\n dest_selectable = _shallow_annotate(dest_selectable, {\n 'no_replacement_traverse': True})\n primaryjoin, secondaryjoin, secondary = (self.primaryjoin, self.\n secondaryjoin, self.secondary)\n if single_crit is not None:\n if secondaryjoin is not None:\n secondaryjoin = secondaryjoin & single_crit\n else:\n primaryjoin = primaryjoin & single_crit\n if extra_criteria:\n\n def mark_unrelated_columns_as_ok_to_adapt(elem:\n SupportsAnnotations, annotations: _AnnotationDict\n ) ->SupportsAnnotations:\n \"\"\"note unrelated columns in the \"extra criteria\" as OK\n to adapt, even though they are not part of our \"local\"\n or \"remote\" side.\n\n see #9779 for this case\n\n \"\"\"\n parentmapper_for_element = elem._annotations.get('parentmapper'\n , None)\n if (parentmapper_for_element is not self.prop.parent and \n parentmapper_for_element is not self.prop.mapper):\n return _safe_annotate(elem, annotations)\n else:\n return elem\n extra_criteria = tuple(_deep_annotate(elem, {\n 'ok_to_adapt_in_join_condition': True}, annotate_callable=\n mark_unrelated_columns_as_ok_to_adapt) for elem in\n extra_criteria)\n if secondaryjoin is not None:\n secondaryjoin = 
secondaryjoin & sql.and_(*extra_criteria)\n else:\n primaryjoin = primaryjoin & sql.and_(*extra_criteria)\n if aliased:\n if secondary is not None:\n secondary = secondary._anonymous_fromclause(flat=True)\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn=\n _ColInAnnotations('local'))\n secondary_aliasizer = ClauseAdapter(dest_selectable,\n equivalents=self.child_equivalents).chain(primary_aliasizer\n )\n if source_selectable is not None:\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn\n =_ColInAnnotations('local')).chain(ClauseAdapter(\n source_selectable, equivalents=self.parent_equivalents)\n )\n secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)\n else:\n primary_aliasizer = ClauseAdapter(dest_selectable,\n exclude_fn=_ColInAnnotations('local'), equivalents=self\n .child_equivalents)\n if source_selectable is not None:\n primary_aliasizer.chain(ClauseAdapter(source_selectable,\n exclude_fn=_ColInAnnotations('remote'), equivalents\n =self.parent_equivalents))\n secondary_aliasizer = None\n primaryjoin = primary_aliasizer.traverse(primaryjoin)\n target_adapter = secondary_aliasizer or primary_aliasizer\n target_adapter.exclude_fn = None\n else:\n target_adapter = None\n return (primaryjoin, secondaryjoin, secondary, target_adapter,\n dest_selectable)\n\n def create_lazy_clause(self, reverse_direction: bool=False) ->Tuple[\n ColumnElement[bool], Dict[str, ColumnElement[Any]], Dict[\n ColumnElement[Any], ColumnElement[Any]]]:\n binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}\n equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}\n has_secondary = self.secondaryjoin is not None\n if has_secondary:\n lookup = collections.defaultdict(list)\n for l, r in self.local_remote_pairs:\n lookup[l].append((l, r))\n equated_columns[r] = l\n elif not reverse_direction:\n for l, r in self.local_remote_pairs:\n equated_columns[r] = l\n else:\n for l, r in self.local_remote_pairs:\n equated_columns[l] = r\n\n def col_to_bind(element: ColumnElement[Any], **kw: Any) ->Optional[\n BindParameter[Any]]:\n if (not reverse_direction and 'local' in element._annotations or\n reverse_direction and (has_secondary and element in lookup or\n not has_secondary and 'remote' in element._annotations)):\n if element not in binds:\n binds[element] = sql.bindparam(None, None, type_=\n element.type, unique=True)\n return binds[element]\n return None\n lazywhere = self.primaryjoin\n if self.secondaryjoin is None or not reverse_direction:\n lazywhere = visitors.replacement_traverse(lazywhere, {},\n col_to_bind)\n if self.secondaryjoin is not None:\n secondaryjoin = self.secondaryjoin\n if reverse_direction:\n secondaryjoin = visitors.replacement_traverse(secondaryjoin,\n {}, col_to_bind)\n lazywhere = sql.and_(lazywhere, secondaryjoin)\n bind_to_col = {binds[col].key: col for col in binds}\n return lazywhere, bind_to_col, equated_columns\n\n\nclass _ColInAnnotations:\n \"\"\"Serializable object that tests for a name in c._annotations.\"\"\"\n __slots__ = 'name',\n\n def __init__(self, name: str):\n self.name = name\n\n def __call__(self, c: ClauseElement) ->bool:\n return (self.name in c._annotations or \n 'ok_to_adapt_in_join_condition' in c._annotations)\n\n\nclass Relationship(RelationshipProperty[_T], _DeclarativeMapped[_T],\n WriteOnlyMapped[_T], DynamicMapped[_T]):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. 
seealso::\n\n :ref:`relationship_config_toplevel`\n\n .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative\n compatible subclass for :class:`_orm.RelationshipProperty`.\n\n \"\"\"\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n",
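# Illustrative sketch only (hypothetical Node model): the foreign()/remote() annotations
# consumed by JoinCondition above spell out, for a self-referential join where both
# columns live in the same table, which side is "remote"; remote_side=[Node.id] would
# express the same thing.
from typing import Optional

from sqlalchemy import ForeignKey
from sqlalchemy.orm import DeclarativeBase, Mapped, foreign, mapped_column, relationship, remote


class Base(DeclarativeBase):
    pass


class Node(Base):
    __tablename__ = "node"
    id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[Optional[int]] = mapped_column(ForeignKey("node.id"))

    # Without the remote() marker the self-referential join would default to the
    # one-to-many direction; annotating Node.id as remote resolves it to many-to-one.
    parent: Mapped[Optional["Node"]] = relationship(
        primaryjoin=lambda: remote(Node.id) == foreign(Node.parent_id),
    )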
"step-4": "<mask token>\nfrom __future__ import annotations\nimport collections\nfrom collections import abc\nimport dataclasses\nimport inspect as _py_inspect\nimport re\nimport typing\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import Collection\nfrom typing import Dict\nfrom typing import Generic\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import NoReturn\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import TypeVar\nfrom typing import Union\nimport weakref\nfrom . import attributes\nfrom . import strategy_options\nfrom ._typing import insp_is_aliased_class\nfrom ._typing import is_has_collection_adapter\nfrom .base import _DeclarativeMapped\nfrom .base import _is_mapped_class\nfrom .base import class_mapper\nfrom .base import DynamicMapped\nfrom .base import LoaderCallableStatus\nfrom .base import PassiveFlag\nfrom .base import state_str\nfrom .base import WriteOnlyMapped\nfrom .interfaces import _AttributeOptions\nfrom .interfaces import _IntrospectsAnnotations\nfrom .interfaces import MANYTOMANY\nfrom .interfaces import MANYTOONE\nfrom .interfaces import ONETOMANY\nfrom .interfaces import PropComparator\nfrom .interfaces import RelationshipDirection\nfrom .interfaces import StrategizedProperty\nfrom .util import _orm_annotate\nfrom .util import _orm_deannotate\nfrom .util import CascadeOptions\nfrom .. import exc as sa_exc\nfrom .. import Exists\nfrom .. import log\nfrom .. import schema\nfrom .. import sql\nfrom .. import util\nfrom ..inspection import inspect\nfrom ..sql import coercions\nfrom ..sql import expression\nfrom ..sql import operators\nfrom ..sql import roles\nfrom ..sql import visitors\nfrom ..sql._typing import _ColumnExpressionArgument\nfrom ..sql._typing import _HasClauseElement\nfrom ..sql.annotation import _safe_annotate\nfrom ..sql.elements import ColumnClause\nfrom ..sql.elements import ColumnElement\nfrom ..sql.util import _deep_annotate\nfrom ..sql.util import _deep_deannotate\nfrom ..sql.util import _shallow_annotate\nfrom ..sql.util import adapt_criterion_to_null\nfrom ..sql.util import ClauseAdapter\nfrom ..sql.util import join_condition\nfrom ..sql.util import selectables_overlap\nfrom ..sql.util import visit_binary_product\nfrom ..util.typing import de_optionalize_union_types\nfrom ..util.typing import Literal\nfrom ..util.typing import resolve_name_to_real_class_name\nif typing.TYPE_CHECKING:\n from ._typing import _EntityType\n from ._typing import _ExternalEntityType\n from ._typing import _IdentityKeyType\n from ._typing import _InstanceDict\n from ._typing import _InternalEntityType\n from ._typing import _O\n from ._typing import _RegistryType\n from .base import Mapped\n from .clsregistry import _class_resolver\n from .clsregistry import _ModNS\n from .decl_base import _ClassScanMapperConfig\n from .dependency import DependencyProcessor\n from .mapper import Mapper\n from .query import Query\n from .session import Session\n from .state import InstanceState\n from .strategies import LazyLoader\n from .util import AliasedClass\n from .util import AliasedInsp\n from ..sql._typing import _CoreAdapterProto\n from ..sql._typing import _EquivalentColumnMap\n from ..sql._typing import _InfoType\n from ..sql.annotation import _AnnotationDict\n from ..sql.annotation import SupportsAnnotations\n from ..sql.elements import 
BinaryExpression\n from ..sql.elements import BindParameter\n from ..sql.elements import ClauseElement\n from ..sql.schema import Table\n from ..sql.selectable import FromClause\n from ..util.typing import _AnnotationScanType\n from ..util.typing import RODescriptorReference\n_T = TypeVar('_T', bound=Any)\n_T1 = TypeVar('_T1', bound=Any)\n_T2 = TypeVar('_T2', bound=Any)\n_PT = TypeVar('_PT', bound=Any)\n_PT2 = TypeVar('_PT2', bound=Any)\n_RelationshipArgumentType = Union[str, Type[_T], Callable[[], Type[_T]],\n 'Mapper[_T]', 'AliasedClass[_T]', Callable[[], 'Mapper[_T]'], Callable[\n [], 'AliasedClass[_T]']]\n_LazyLoadArgumentType = Literal['select', 'joined', 'selectin', 'subquery',\n 'raise', 'raise_on_sql', 'noload', 'immediate', 'write_only', 'dynamic',\n True, False, None]\n_RelationshipJoinConditionArgument = Union[str, _ColumnExpressionArgument[bool]\n ]\n_RelationshipSecondaryArgument = Union['FromClause', str, Callable[[],\n 'FromClause']]\n_ORMOrderByArgument = Union[Literal[False], str, _ColumnExpressionArgument[\n Any], Callable[[], _ColumnExpressionArgument[Any]], Callable[[],\n Iterable[_ColumnExpressionArgument[Any]]], Iterable[Union[str,\n _ColumnExpressionArgument[Any]]]]\nORMBackrefArgument = Union[str, Tuple[str, Dict[str, Any]]]\n_ORMColCollectionElement = Union[ColumnClause[Any], _HasClauseElement,\n roles.DMLColumnRole, 'Mapped[Any]']\n_ORMColCollectionArgument = Union[str, Sequence[_ORMColCollectionElement],\n Callable[[], Sequence[_ORMColCollectionElement]], Callable[[],\n _ORMColCollectionElement], _ORMColCollectionElement]\n_CEA = TypeVar('_CEA', bound=_ColumnExpressionArgument[Any])\n_CE = TypeVar('_CE', bound='ColumnElement[Any]')\n_ColumnPairIterable = Iterable[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n_ColumnPairs = Sequence[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n_MutableColumnPairs = List[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n\n\ndef remote(expr: _CEA) ->_CEA:\n \"\"\"Annotate a portion of a primaryjoin expression\n with a 'remote' annotation.\n\n See the section :ref:`relationship_custom_foreign` for a\n description of use.\n\n .. seealso::\n\n :ref:`relationship_custom_foreign`\n\n :func:`.foreign`\n\n \"\"\"\n return _annotate_columns(coercions.expect(roles.ColumnArgumentRole,\n expr), {'remote': True})\n\n\ndef foreign(expr: _CEA) ->_CEA:\n \"\"\"Annotate a portion of a primaryjoin expression\n with a 'foreign' annotation.\n\n See the section :ref:`relationship_custom_foreign` for a\n description of use.\n\n .. 
seealso::\n\n :ref:`relationship_custom_foreign`\n\n :func:`.remote`\n\n \"\"\"\n return _annotate_columns(coercions.expect(roles.ColumnArgumentRole,\n expr), {'foreign': True})\n\n\[email protected]\nclass _RelationshipArg(Generic[_T1, _T2]):\n \"\"\"stores a user-defined parameter value that must be resolved and\n parsed later at mapper configuration time.\n\n \"\"\"\n __slots__ = 'name', 'argument', 'resolved'\n name: str\n argument: _T1\n resolved: Optional[_T2]\n\n def _is_populated(self) ->bool:\n return self.argument is not None\n\n def _resolve_against_registry(self, clsregistry_resolver: Callable[[str,\n bool], _class_resolver]) ->None:\n attr_value = self.argument\n if isinstance(attr_value, str):\n self.resolved = clsregistry_resolver(attr_value, self.name ==\n 'secondary')()\n elif callable(attr_value) and not _is_mapped_class(attr_value):\n self.resolved = attr_value()\n else:\n self.resolved = attr_value\n\n\nclass _RelationshipArgs(NamedTuple):\n \"\"\"stores user-passed parameters that are resolved at mapper configuration\n time.\n\n \"\"\"\n secondary: _RelationshipArg[Optional[_RelationshipSecondaryArgument],\n Optional[FromClause]]\n primaryjoin: _RelationshipArg[Optional[\n _RelationshipJoinConditionArgument], Optional[ColumnElement[Any]]]\n secondaryjoin: _RelationshipArg[Optional[\n _RelationshipJoinConditionArgument], Optional[ColumnElement[Any]]]\n order_by: _RelationshipArg[_ORMOrderByArgument, Union[Literal[None, \n False], Tuple[ColumnElement[Any], ...]]]\n foreign_keys: _RelationshipArg[Optional[_ORMColCollectionArgument], Set\n [ColumnElement[Any]]]\n remote_side: _RelationshipArg[Optional[_ORMColCollectionArgument], Set[\n ColumnElement[Any]]]\n\n\[email protected]_logger\nclass RelationshipProperty(_IntrospectsAnnotations, StrategizedProperty[_T],\n log.Identified):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. 
seealso::\n\n :ref:`relationship_config_toplevel`\n\n \"\"\"\n strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n _links_to_entity = True\n _is_relationship = True\n _overlaps: Sequence[str]\n _lazy_strategy: LazyLoader\n _persistence_only = dict(passive_deletes=False, passive_updates=True,\n enable_typechecks=True, active_history=False, cascade_backrefs=False)\n _dependency_processor: Optional[DependencyProcessor] = None\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n _join_condition: JoinCondition\n order_by: Union[Literal[False], Tuple[ColumnElement[Any], ...]]\n _user_defined_foreign_keys: Set[ColumnElement[Any]]\n _calculated_foreign_keys: Set[ColumnElement[Any]]\n remote_side: Set[ColumnElement[Any]]\n local_columns: Set[ColumnElement[Any]]\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: Optional[_ColumnPairs]\n local_remote_pairs: Optional[_ColumnPairs]\n direction: RelationshipDirection\n _init_args: _RelationshipArgs\n\n def __init__(self, argument: Optional[_RelationshipArgumentType[_T]]=\n None, secondary: Optional[_RelationshipSecondaryArgument]=None, *,\n uselist: Optional[bool]=None, collection_class: Optional[Union[Type\n [Collection[Any]], Callable[[], Collection[Any]]]]=None,\n primaryjoin: Optional[_RelationshipJoinConditionArgument]=None,\n secondaryjoin: Optional[_RelationshipJoinConditionArgument]=None,\n back_populates: Optional[str]=None, order_by: _ORMOrderByArgument=\n False, backref: Optional[ORMBackrefArgument]=None, overlaps:\n Optional[str]=None, post_update: bool=False, cascade: str=\n 'save-update, merge', viewonly: bool=False, attribute_options:\n Optional[_AttributeOptions]=None, lazy: _LazyLoadArgumentType=\n 'select', passive_deletes: Union[Literal['all'], bool]=False,\n passive_updates: bool=True, active_history: bool=False,\n enable_typechecks: bool=True, foreign_keys: Optional[\n _ORMColCollectionArgument]=None, remote_side: Optional[\n _ORMColCollectionArgument]=None, join_depth: Optional[int]=None,\n comparator_factory: Optional[Type[RelationshipProperty.Comparator[\n Any]]]=None, single_parent: bool=False, innerjoin: bool=False,\n distinct_target_key: Optional[bool]=None, load_on_pending: bool=\n False, query_class: Optional[Type[Query[Any]]]=None, info: Optional\n [_InfoType]=None, omit_join: Literal[None, False]=None,\n sync_backref: Optional[bool]=None, doc: Optional[str]=None,\n bake_queries: Literal[True]=True, cascade_backrefs: Literal[False]=\n False, _local_remote_pairs: Optional[_ColumnPairs]=None,\n _legacy_inactive_history_style: bool=False):\n super().__init__(attribute_options=attribute_options)\n self.uselist = uselist\n self.argument = argument\n self._init_args = _RelationshipArgs(_RelationshipArg('secondary',\n secondary, None), _RelationshipArg('primaryjoin', primaryjoin,\n None), _RelationshipArg('secondaryjoin', secondaryjoin, None),\n _RelationshipArg('order_by', order_by, None), _RelationshipArg(\n 'foreign_keys', foreign_keys, None), _RelationshipArg(\n 'remote_side', remote_side, None))\n self.post_update = post_update\n self.viewonly = viewonly\n if viewonly:\n self._warn_for_persistence_only_flags(passive_deletes=\n passive_deletes, passive_updates=passive_updates,\n enable_typechecks=enable_typechecks, active_history=\n active_history, cascade_backrefs=cascade_backrefs)\n if viewonly and sync_backref:\n raise sa_exc.ArgumentError(\n 'sync_backref and viewonly cannot both be 
True')\n self.sync_backref = sync_backref\n self.lazy = lazy\n self.single_parent = single_parent\n self.collection_class = collection_class\n self.passive_deletes = passive_deletes\n if cascade_backrefs:\n raise sa_exc.ArgumentError(\n \"The 'cascade_backrefs' parameter passed to relationship() may only be set to False.\"\n )\n self.passive_updates = passive_updates\n self.enable_typechecks = enable_typechecks\n self.query_class = query_class\n self.innerjoin = innerjoin\n self.distinct_target_key = distinct_target_key\n self.doc = doc\n self.active_history = active_history\n self._legacy_inactive_history_style = _legacy_inactive_history_style\n self.join_depth = join_depth\n if omit_join:\n util.warn(\n 'setting omit_join to True is not supported; selectin loading of this relationship may not work correctly if this flag is set explicitly. omit_join optimization is automatically detected for conditions under which it is supported.'\n )\n self.omit_join = omit_join\n self.local_remote_pairs = _local_remote_pairs\n self.load_on_pending = load_on_pending\n self.comparator_factory = (comparator_factory or\n RelationshipProperty.Comparator)\n util.set_creation_order(self)\n if info is not None:\n self.info.update(info)\n self.strategy_key = ('lazy', self.lazy),\n self._reverse_property: Set[RelationshipProperty[Any]] = set()\n if overlaps:\n self._overlaps = set(re.split('\\\\s*,\\\\s*', overlaps))\n else:\n self._overlaps = ()\n self.cascade = cascade\n self.back_populates = back_populates\n if self.back_populates:\n if backref:\n raise sa_exc.ArgumentError(\n 'backref and back_populates keyword arguments are mutually exclusive'\n )\n self.backref = None\n else:\n self.backref = backref\n\n def _warn_for_persistence_only_flags(self, **kw: Any) ->None:\n for k, v in kw.items():\n if v != self._persistence_only[k]:\n util.warn(\n 'Setting %s on relationship() while also setting viewonly=True does not make sense, as a viewonly=True relationship does not perform persistence operations. This configuration may raise an error in a future release.'\n % (k,))\n\n def instrument_class(self, mapper: Mapper[Any]) ->None:\n attributes.register_descriptor(mapper.class_, self.key, comparator=\n self.comparator_factory(self, mapper), parententity=mapper, doc\n =self.doc)\n\n\n class Comparator(util.MemoizedSlots, PropComparator[_PT]):\n \"\"\"Produce boolean, comparison, and other operators for\n :class:`.RelationshipProperty` attributes.\n\n See the documentation for :class:`.PropComparator` for a brief\n overview of ORM level operator definition.\n\n .. 
seealso::\n\n :class:`.PropComparator`\n\n :class:`.ColumnProperty.Comparator`\n\n :class:`.ColumnOperators`\n\n :ref:`types_operators`\n\n :attr:`.TypeEngine.comparator_factory`\n\n \"\"\"\n __slots__ = ('entity', 'mapper', 'property', '_of_type',\n '_extra_criteria')\n prop: RODescriptorReference[RelationshipProperty[_PT]]\n _of_type: Optional[_EntityType[_PT]]\n\n def __init__(self, prop: RelationshipProperty[_PT], parentmapper:\n _InternalEntityType[Any], adapt_to_entity: Optional[AliasedInsp\n [Any]]=None, of_type: Optional[_EntityType[_PT]]=None,\n extra_criteria: Tuple[ColumnElement[bool], ...]=()):\n \"\"\"Construction of :class:`.RelationshipProperty.Comparator`\n is internal to the ORM's attribute mechanics.\n\n \"\"\"\n self.prop = prop\n self._parententity = parentmapper\n self._adapt_to_entity = adapt_to_entity\n if of_type:\n self._of_type = of_type\n else:\n self._of_type = None\n self._extra_criteria = extra_criteria\n\n def adapt_to_entity(self, adapt_to_entity: AliasedInsp[Any]\n ) ->RelationshipProperty.Comparator[Any]:\n return self.__class__(self.prop, self._parententity,\n adapt_to_entity=adapt_to_entity, of_type=self._of_type)\n entity: _InternalEntityType[_PT]\n \"\"\"The target entity referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`\n object.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n mapper: Mapper[_PT]\n \"\"\"The target :class:`_orm.Mapper` referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n\n def _memoized_attr_entity(self) ->_InternalEntityType[_PT]:\n if self._of_type:\n return inspect(self._of_type)\n else:\n return self.prop.entity\n\n def _memoized_attr_mapper(self) ->Mapper[_PT]:\n return self.entity.mapper\n\n def _source_selectable(self) ->FromClause:\n if self._adapt_to_entity:\n return self._adapt_to_entity.selectable\n else:\n return self.property.parent._with_polymorphic_selectable\n\n def __clause_element__(self) ->ColumnElement[bool]:\n adapt_from = self._source_selectable()\n if self._of_type:\n of_type_entity = inspect(self._of_type)\n else:\n of_type_entity = None\n pj, sj, source, dest, secondary, target_adapter = (self.prop.\n _create_joins(source_selectable=adapt_from,\n source_polymorphic=True, of_type_entity=of_type_entity,\n alias_secondary=True, extra_criteria=self._extra_criteria))\n if sj is not None:\n return pj & sj\n else:\n return pj\n\n def of_type(self, class_: _EntityType[Any]) ->PropComparator[_PT]:\n \"\"\"Redefine this object in terms of a polymorphic subclass.\n\n See :meth:`.PropComparator.of_type` for an example.\n\n\n \"\"\"\n return RelationshipProperty.Comparator(self.prop, self.\n _parententity, adapt_to_entity=self._adapt_to_entity,\n of_type=class_, extra_criteria=self._extra_criteria)\n\n def and_(self, *criteria: _ColumnExpressionArgument[bool]\n ) ->PropComparator[Any]:\n \"\"\"Add AND criteria.\n\n See :meth:`.PropComparator.and_` for an example.\n\n .. 
versionadded:: 1.4\n\n \"\"\"\n exprs = tuple(coercions.expect(roles.WhereHavingRole, clause) for\n clause in util.coerce_generator_arg(criteria))\n return RelationshipProperty.Comparator(self.prop, self.\n _parententity, adapt_to_entity=self._adapt_to_entity,\n of_type=self._of_type, extra_criteria=self._extra_criteria +\n exprs)\n\n def in_(self, other: Any) ->NoReturn:\n \"\"\"Produce an IN clause - this is not implemented\n for :func:`_orm.relationship`-based attributes at this time.\n\n \"\"\"\n raise NotImplementedError(\n 'in_() not yet supported for relationships. For a simple many-to-one, use in_() against the set of foreign key values.'\n )\n __hash__ = None\n\n def __eq__(self, other: Any) ->ColumnElement[bool]:\n \"\"\"Implement the ``==`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop == <some object>\n\n this will typically produce a\n clause such as::\n\n mytable.related_id == <some id>\n\n Where ``<some id>`` is the primary key of the given\n object.\n\n The ``==`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use :meth:`~.Relationship.Comparator.contains`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce a NOT EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction in [ONETOMANY, MANYTOMANY]:\n return ~self._criterion_exists()\n else:\n return _orm_annotate(self.property._optimized_compare(\n None, adapt_source=self.adapter))\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.property._optimized_compare(other,\n adapt_source=self.adapter))\n\n def _criterion_exists(self, criterion: Optional[\n _ColumnExpressionArgument[bool]]=None, **kwargs: Any) ->Exists:\n where_criteria = coercions.expect(roles.WhereHavingRole, criterion\n ) if criterion is not None else None\n if getattr(self, '_of_type', None):\n info: Optional[_InternalEntityType[Any]] = inspect(self.\n _of_type)\n assert info is not None\n target_mapper, to_selectable, is_aliased_class = (info.\n mapper, info.selectable, info.is_aliased_class)\n if self.property._is_self_referential and not is_aliased_class:\n to_selectable = to_selectable._anonymous_fromclause()\n single_crit = target_mapper._single_table_criterion\n if single_crit is not None:\n if where_criteria is not None:\n where_criteria = single_crit & where_criteria\n else:\n where_criteria = single_crit\n else:\n is_aliased_class = False\n to_selectable = None\n if self.adapter:\n source_selectable = self._source_selectable()\n else:\n source_selectable = None\n pj, sj, source, dest, secondary, target_adapter = (self.\n property._create_joins(dest_selectable=to_selectable,\n source_selectable=source_selectable))\n for k in kwargs:\n crit = 
getattr(self.property.mapper.class_, k) == kwargs[k]\n if where_criteria is None:\n where_criteria = crit\n else:\n where_criteria = where_criteria & crit\n if sj is not None:\n j = _orm_annotate(pj) & sj\n else:\n j = _orm_annotate(pj, exclude=self.property.remote_side)\n if (where_criteria is not None and target_adapter and not\n is_aliased_class):\n where_criteria = target_adapter.traverse(where_criteria)\n if where_criteria is not None:\n where_criteria = where_criteria._annotate({\n 'no_replacement_traverse': True})\n crit = j & sql.True_._ifnone(where_criteria)\n if secondary is not None:\n ex = sql.exists(1).where(crit).select_from(dest, secondary\n ).correlate_except(dest, secondary)\n else:\n ex = sql.exists(1).where(crit).select_from(dest\n ).correlate_except(dest)\n return ex\n\n def any(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a collection against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.any(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id\n AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.any` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.any` is particularly\n useful for testing for empty collections::\n\n session.query(MyClass).filter(\n ~MyClass.somereference.any()\n )\n\n will produce::\n\n SELECT * FROM my_table WHERE\n NOT (EXISTS (SELECT 1 FROM related WHERE\n related.my_id=my_table.id))\n\n :meth:`~.Relationship.Comparator.any` is only\n valid for collections, i.e. a :func:`_orm.relationship`\n that has ``uselist=True``. For scalar references,\n use :meth:`~.Relationship.Comparator.has`.\n\n \"\"\"\n if not self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'any()' not implemented for scalar attributes. Use has().\"\n )\n return self._criterion_exists(criterion, **kwargs)\n\n def has(self, criterion: Optional[_ColumnExpressionArgument[bool]]=\n None, **kwargs: Any) ->ColumnElement[bool]:\n \"\"\"Produce an expression that tests a scalar reference against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.has(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE\n related.id==my_table.related_id AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.has` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.has` is only\n valid for scalar references, i.e. a :func:`_orm.relationship`\n that has ``uselist=False``. For collection references,\n use :meth:`~.Relationship.Comparator.any`.\n\n \"\"\"\n if self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'has()' not implemented for collections. Use any().\")\n return self._criterion_exists(criterion, **kwargs)\n\n def contains(self, other: _ColumnExpressionArgument[Any], **kwargs: Any\n ) ->ColumnElement[bool]:\n \"\"\"Return a simple expression that tests a collection for\n containment of a particular item.\n\n :meth:`~.Relationship.Comparator.contains` is\n only valid for a collection, i.e. 
a\n :func:`_orm.relationship` that implements\n one-to-many or many-to-many with ``uselist=True``.\n\n When used in a simple one-to-many context, an\n expression like::\n\n MyClass.contains(other)\n\n Produces a clause like::\n\n mytable.id == <some id>\n\n Where ``<some id>`` is the value of the foreign key\n attribute on ``other`` which refers to the primary\n key of its parent object. From this it follows that\n :meth:`~.Relationship.Comparator.contains` is\n very useful when used with simple one-to-many\n operations.\n\n For many-to-many operations, the behavior of\n :meth:`~.Relationship.Comparator.contains`\n has more caveats. The association table will be\n rendered in the statement, producing an \"implicit\"\n join, that is, includes multiple tables in the FROM\n clause which are equated in the WHERE clause::\n\n query(MyClass).filter(MyClass.contains(other))\n\n Produces a query like::\n\n SELECT * FROM my_table, my_association_table AS\n my_association_table_1 WHERE\n my_table.id = my_association_table_1.parent_id\n AND my_association_table_1.child_id = <some id>\n\n Where ``<some id>`` would be the primary key of\n ``other``. From the above, it is clear that\n :meth:`~.Relationship.Comparator.contains`\n will **not** work with many-to-many collections when\n used in queries that move beyond simple AND\n conjunctions, such as multiple\n :meth:`~.Relationship.Comparator.contains`\n expressions joined by OR. In such cases subqueries or\n explicit \"outer joins\" will need to be used instead.\n See :meth:`~.Relationship.Comparator.any` for\n a less-performant alternative using EXISTS, or refer\n to :meth:`_query.Query.outerjoin`\n as well as :ref:`orm_queryguide_joins`\n for more details on constructing outer joins.\n\n kwargs may be ignored by this operator but are required for API\n conformance.\n \"\"\"\n if not self.prop.uselist:\n raise sa_exc.InvalidRequestError(\n \"'contains' not implemented for scalar attributes. 
Use ==\"\n )\n clause = self.prop._optimized_compare(other, adapt_source=self.\n adapter)\n if self.prop.secondaryjoin is not None:\n clause.negation_clause = self.__negated_contains_or_equals(\n other)\n return clause\n\n def __negated_contains_or_equals(self, other: Any) ->ColumnElement[bool\n ]:\n if self.prop.direction == MANYTOONE:\n state = attributes.instance_state(other)\n\n def state_bindparam(local_col: ColumnElement[Any], state:\n InstanceState[Any], remote_col: ColumnElement[Any]\n ) ->BindParameter[Any]:\n dict_ = state.dict\n return sql.bindparam(local_col.key, type_=local_col.\n type, unique=True, callable_=self.prop.\n _get_attr_w_warn_on_none(self.prop.mapper, state,\n dict_, remote_col))\n\n def adapt(col: _CE) ->_CE:\n if self.adapter:\n return self.adapter(col)\n else:\n return col\n if self.property._use_get:\n return sql.and_(*[sql.or_(adapt(x) != state_bindparam(\n adapt(x), state, y), adapt(x) == None) for x, y in\n self.property.local_remote_pairs])\n criterion = sql.and_(*[(x == y) for x, y in zip(self.property.\n mapper.primary_key, self.property.mapper.\n primary_key_from_instance(other))])\n return ~self._criterion_exists(criterion)\n\n def __ne__(self, other: Any) ->ColumnElement[bool]:\n \"\"\"Implement the ``!=`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop != <some object>\n\n This will typically produce a clause such as::\n\n mytable.related_id != <some id>\n\n Where ``<some id>`` is the primary key of the\n given object.\n\n The ``!=`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use\n :meth:`~.Relationship.Comparator.contains`\n in conjunction with :func:`_expression.not_`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. 
Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` in\n conjunction with :func:`_expression.not_` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce an EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction == MANYTOONE:\n return _orm_annotate(~self.property._optimized_compare(\n None, adapt_source=self.adapter))\n else:\n return self._criterion_exists()\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.__negated_contains_or_equals(other))\n\n def _memoized_attr_property(self) ->RelationshipProperty[_PT]:\n self.prop.parent._check_configure()\n return self.prop\n\n def _with_parent(self, instance: object, alias_secondary: bool=True,\n from_entity: Optional[_EntityType[Any]]=None) ->ColumnElement[bool]:\n assert instance is not None\n adapt_source: Optional[_CoreAdapterProto] = None\n if from_entity is not None:\n insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)\n assert insp is not None\n if insp_is_aliased_class(insp):\n adapt_source = insp._adapter.adapt_clause\n return self._optimized_compare(instance, value_is_parent=True,\n adapt_source=adapt_source, alias_secondary=alias_secondary)\n\n def _optimized_compare(self, state: Any, value_is_parent: bool=False,\n adapt_source: Optional[_CoreAdapterProto]=None, alias_secondary:\n bool=True) ->ColumnElement[bool]:\n if state is not None:\n try:\n state = inspect(state)\n except sa_exc.NoInspectionAvailable:\n state = None\n if state is None or not getattr(state, 'is_instance', False):\n raise sa_exc.ArgumentError(\n 'Mapped instance expected for relationship comparison to object. Classes, queries and other SQL elements are not accepted in this context; for comparison with a subquery, use %s.has(**criteria).'\n % self)\n reverse_direction = not value_is_parent\n if state is None:\n return self._lazy_none_clause(reverse_direction, adapt_source=\n adapt_source)\n if not reverse_direction:\n criterion, bind_to_col = (self._lazy_strategy._lazywhere, self.\n _lazy_strategy._bind_to_col)\n else:\n criterion, bind_to_col = (self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col)\n if reverse_direction:\n mapper = self.mapper\n else:\n mapper = self.parent\n dict_ = attributes.instance_dict(state.obj())\n\n def visit_bindparam(bindparam: BindParameter[Any]) ->None:\n if bindparam._identifying_key in bind_to_col:\n bindparam.callable = self._get_attr_w_warn_on_none(mapper,\n state, dict_, bind_to_col[bindparam._identifying_key])\n if self.secondary is not None and alias_secondary:\n criterion = ClauseAdapter(self.secondary._anonymous_fromclause()\n ).traverse(criterion)\n criterion = visitors.cloned_traverse(criterion, {}, {'bindparam':\n visit_bindparam})\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def _get_attr_w_warn_on_none(self, mapper: Mapper[Any], state:\n InstanceState[Any], dict_: _InstanceDict, column: ColumnElement[Any]\n ) ->Callable[[], Any]:\n \"\"\"Create the callable that is used in a many-to-one expression.\n\n E.g.::\n\n u1 = s.query(User).get(5)\n\n expr = Address.user == u1\n\n Above, the SQL should be \"address.user_id = 5\". 
The callable\n returned by this method produces the value \"5\" based on the identity\n of ``u1``.\n\n \"\"\"\n prop = mapper.get_property_by_column(column)\n state._track_last_known_value(prop.key)\n lkv_fixed = state._last_known_values\n\n def _go() ->Any:\n assert lkv_fixed is not None\n last_known = to_return = lkv_fixed[prop.key]\n existing_is_available = (last_known is not LoaderCallableStatus\n .NO_VALUE)\n current_value = mapper._get_state_attr_by_column(state, dict_,\n column, passive=PassiveFlag.PASSIVE_OFF if state.persistent\n else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK)\n if current_value is LoaderCallableStatus.NEVER_SET:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; no value has been set for this column\"\n % (column, state_str(state)))\n elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object %s; the object is detached and the value was expired\"\n % (column, state_str(state)))\n else:\n to_return = current_value\n if to_return is None:\n util.warn(\n 'Got None for value of column %s; this is unsupported for a relationship comparison and will not currently produce an IS comparison (but may in a future release)'\n % column)\n return to_return\n return _go\n\n def _lazy_none_clause(self, reverse_direction: bool=False, adapt_source:\n Optional[_CoreAdapterProto]=None) ->ColumnElement[bool]:\n if not reverse_direction:\n criterion, bind_to_col = (self._lazy_strategy._lazywhere, self.\n _lazy_strategy._bind_to_col)\n else:\n criterion, bind_to_col = (self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col)\n criterion = adapt_criterion_to_null(criterion, bind_to_col)\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def __str__(self) ->str:\n return str(self.parent.class_.__name__) + '.' 
+ self.key\n\n def merge(self, session: Session, source_state: InstanceState[Any],\n source_dict: _InstanceDict, dest_state: InstanceState[Any],\n dest_dict: _InstanceDict, load: bool, _recursive: Dict[Any, object],\n _resolve_conflict_map: Dict[_IdentityKeyType[Any], object]) ->None:\n if load:\n for r in self._reverse_property:\n if (source_state, r) in _recursive:\n return\n if 'merge' not in self._cascade:\n return\n if self.key not in source_dict:\n return\n if self.uselist:\n impl = source_state.get_impl(self.key)\n assert is_has_collection_adapter(impl)\n instances_iterable = impl.get_collection(source_state, source_dict)\n assert not instances_iterable.empty if impl.collection else True\n if load:\n dest_state.get_impl(self.key).get(dest_state, dest_dict,\n passive=PassiveFlag.PASSIVE_MERGE)\n dest_list = []\n for current in instances_iterable:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n if obj is not None:\n dest_list.append(obj)\n if not load:\n coll = attributes.init_state_collection(dest_state,\n dest_dict, self.key)\n for c in dest_list:\n coll.append_without_event(c)\n else:\n dest_impl = dest_state.get_impl(self.key)\n assert is_has_collection_adapter(dest_impl)\n dest_impl.set(dest_state, dest_dict, dest_list, _adapt=\n False, passive=PassiveFlag.PASSIVE_MERGE)\n else:\n current = source_dict[self.key]\n if current is not None:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[current_state, self] = True\n obj = session._merge(current_state, current_dict, load=load,\n _recursive=_recursive, _resolve_conflict_map=\n _resolve_conflict_map)\n else:\n obj = None\n if not load:\n dest_dict[self.key] = obj\n else:\n dest_state.get_impl(self.key).set(dest_state, dest_dict,\n obj, None)\n\n def _value_as_iterable(self, state: InstanceState[_O], dict_:\n _InstanceDict, key: str, passive: PassiveFlag=PassiveFlag.PASSIVE_OFF\n ) ->Sequence[Tuple[InstanceState[_O], _O]]:\n \"\"\"Return a list of tuples (state, obj) for the given\n key.\n\n returns an empty list if the value is None/empty/PASSIVE_NO_RESULT\n \"\"\"\n impl = state.manager[key].impl\n x = impl.get(state, dict_, passive=passive)\n if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:\n return []\n elif is_has_collection_adapter(impl):\n return [(attributes.instance_state(o), o) for o in impl.\n get_collection(state, dict_, x, passive=passive)]\n else:\n return [(attributes.instance_state(x), x)]\n\n def cascade_iterator(self, type_: str, state: InstanceState[Any], dict_:\n _InstanceDict, visited_states: Set[InstanceState[Any]], halt_on:\n Optional[Callable[[InstanceState[Any]], bool]]=None) ->Iterator[Tuple\n [Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:\n if type_ != 'delete' or self.passive_deletes:\n passive = PassiveFlag.PASSIVE_NO_INITIALIZE\n else:\n passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE\n if type_ == 'save-update':\n tuples = state.manager[self.key].impl.get_all_pending(state, dict_)\n else:\n tuples = self._value_as_iterable(state, dict_, self.key,\n passive=passive)\n skip_pending = (type_ == 'refresh-expire' and 'delete-orphan' not in\n self._cascade)\n for instance_state, c in tuples:\n if instance_state in visited_states:\n continue\n if c is None:\n continue\n assert 
instance_state is not None\n instance_dict = attributes.instance_dict(c)\n if halt_on and halt_on(instance_state):\n continue\n if skip_pending and not instance_state.key:\n continue\n instance_mapper = instance_state.manager.mapper\n if not instance_mapper.isa(self.mapper.class_manager.mapper):\n raise AssertionError(\n \"Attribute '%s' on class '%s' doesn't handle objects of type '%s'\"\n % (self.key, self.parent.class_, c.__class__))\n visited_states.add(instance_state)\n yield c, instance_mapper, instance_state, instance_dict\n\n @property\n def _effective_sync_backref(self) ->bool:\n if self.viewonly:\n return False\n else:\n return self.sync_backref is not False\n\n @staticmethod\n def _check_sync_backref(rel_a: RelationshipProperty[Any], rel_b:\n RelationshipProperty[Any]) ->None:\n if rel_a.viewonly and rel_b.sync_backref:\n raise sa_exc.InvalidRequestError(\n 'Relationship %s cannot specify sync_backref=True since %s includes viewonly=True.'\n % (rel_b, rel_a))\n if (rel_a.viewonly and not rel_b.viewonly and rel_b.sync_backref is not\n False):\n rel_b.sync_backref = False\n\n def _add_reverse_property(self, key: str) ->None:\n other = self.mapper.get_property(key, _configure_mappers=False)\n if not isinstance(other, RelationshipProperty):\n raise sa_exc.InvalidRequestError(\n \"back_populates on relationship '%s' refers to attribute '%s' that is not a relationship. The back_populates parameter should refer to the name of a relationship on the target class.\"\n % (self, other))\n self._check_sync_backref(self, other)\n self._check_sync_backref(other, self)\n self._reverse_property.add(other)\n other._reverse_property.add(self)\n other._setup_entity()\n if not other.mapper.common_parent(self.parent):\n raise sa_exc.ArgumentError(\n 'reverse_property %r on relationship %s references relationship %s, which does not reference mapper %s'\n % (key, self, other, self.parent))\n if other._configure_started and self.direction in (ONETOMANY, MANYTOONE\n ) and self.direction == other.direction:\n raise sa_exc.ArgumentError(\n '%s and back-reference %s are both of the same direction %r. 
Did you mean to set remote_side on the many-to-one side ?'\n % (other, self, self.direction))\n\n @util.memoized_property\n def entity(self) ->_InternalEntityType[_T]:\n \"\"\"Return the target mapped entity, which is an inspect() of the\n class or aliased class that is referred towards.\n\n \"\"\"\n self.parent._check_configure()\n return self.entity\n\n @util.memoized_property\n def mapper(self) ->Mapper[_T]:\n \"\"\"Return the targeted :class:`_orm.Mapper` for this\n :class:`.RelationshipProperty`.\n\n \"\"\"\n return self.entity.mapper\n\n def do_init(self) ->None:\n self._check_conflicts()\n self._process_dependent_arguments()\n self._setup_entity()\n self._setup_registry_dependencies()\n self._setup_join_conditions()\n self._check_cascade_settings(self._cascade)\n self._post_init()\n self._generate_backref()\n self._join_condition._warn_for_conflicting_sync_targets()\n super().do_init()\n self._lazy_strategy = cast('LazyLoader', self._get_strategy(((\n 'lazy', 'select'),)))\n\n def _setup_registry_dependencies(self) ->None:\n self.parent.mapper.registry._set_depends_on(self.entity.mapper.registry\n )\n\n def _process_dependent_arguments(self) ->None:\n \"\"\"Convert incoming configuration arguments to their\n proper form.\n\n Callables are resolved, ORM annotations removed.\n\n \"\"\"\n init_args = self._init_args\n for attr in ('order_by', 'primaryjoin', 'secondaryjoin',\n 'secondary', 'foreign_keys', 'remote_side'):\n rel_arg = getattr(init_args, attr)\n rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])\n for attr in ('primaryjoin', 'secondaryjoin'):\n rel_arg = getattr(init_args, attr)\n val = rel_arg.resolved\n if val is not None:\n rel_arg.resolved = _orm_deannotate(coercions.expect(roles.\n ColumnArgumentRole, val, argname=attr))\n secondary = init_args.secondary.resolved\n if secondary is not None and _is_mapped_class(secondary):\n raise sa_exc.ArgumentError(\n \"secondary argument %s passed to to relationship() %s must be a Table object or other FROM clause; can't send a mapped class directly as rows in 'secondary' are persisted independently of a class that is mapped to that same table.\"\n % (secondary, self))\n if (init_args.order_by.resolved is not False and init_args.order_by\n .resolved is not None):\n self.order_by = tuple(coercions.expect(roles.ColumnArgumentRole,\n x, argname='order_by') for x in util.to_list(init_args.\n order_by.resolved))\n else:\n self.order_by = False\n self._user_defined_foreign_keys = util.column_set(coercions.expect(\n roles.ColumnArgumentRole, x, argname='foreign_keys') for x in\n util.to_column_set(init_args.foreign_keys.resolved))\n self.remote_side = util.column_set(coercions.expect(roles.\n ColumnArgumentRole, x, argname='remote_side') for x in util.\n to_column_set(init_args.remote_side.resolved))\n\n def declarative_scan(self, decl_scan: _ClassScanMapperConfig, registry:\n _RegistryType, cls: Type[Any], originating_module: Optional[str],\n key: str, mapped_container: Optional[Type[Mapped[Any]]], annotation:\n Optional[_AnnotationScanType], extracted_mapped_annotation:\n Optional[_AnnotationScanType], is_dataclass_field: bool) ->None:\n argument = extracted_mapped_annotation\n if extracted_mapped_annotation is None:\n if self.argument is None:\n self._raise_for_required(key, cls)\n else:\n return\n argument = extracted_mapped_annotation\n assert originating_module is not None\n is_write_only = mapped_container is not None and issubclass(\n mapped_container, WriteOnlyMapped)\n if is_write_only:\n self.lazy = 
'write_only'\n self.strategy_key = ('lazy', self.lazy),\n is_dynamic = mapped_container is not None and issubclass(\n mapped_container, DynamicMapped)\n if is_dynamic:\n self.lazy = 'dynamic'\n self.strategy_key = ('lazy', self.lazy),\n argument = de_optionalize_union_types(argument)\n if hasattr(argument, '__origin__'):\n arg_origin = argument.__origin__\n if isinstance(arg_origin, type) and issubclass(arg_origin, abc.\n Collection):\n if self.collection_class is None:\n if _py_inspect.isabstract(arg_origin):\n raise sa_exc.ArgumentError(\n f\"Collection annotation type {arg_origin} cannot be instantiated; please provide an explicit 'collection_class' parameter (e.g. list, set, etc.) to the relationship() function to accompany this annotation\"\n )\n self.collection_class = arg_origin\n elif not is_write_only and not is_dynamic:\n self.uselist = False\n if argument.__args__:\n if isinstance(arg_origin, type) and issubclass(arg_origin,\n typing.Mapping):\n type_arg = argument.__args__[-1]\n else:\n type_arg = argument.__args__[0]\n if hasattr(type_arg, '__forward_arg__'):\n str_argument = type_arg.__forward_arg__\n argument = resolve_name_to_real_class_name(str_argument,\n originating_module)\n else:\n argument = type_arg\n else:\n raise sa_exc.ArgumentError(\n f'Generic alias {argument} requires an argument')\n elif hasattr(argument, '__forward_arg__'):\n argument = argument.__forward_arg__\n argument = resolve_name_to_real_class_name(argument,\n originating_module)\n if (self.collection_class is None and not is_write_only and not\n is_dynamic):\n self.uselist = False\n if self.argument is None:\n self.argument = cast('_RelationshipArgumentType[_T]', argument)\n\n @util.preload_module('sqlalchemy.orm.mapper')\n def _setup_entity(self, __argument: Any=None) ->None:\n if 'entity' in self.__dict__:\n return\n mapperlib = util.preloaded.orm_mapper\n if __argument:\n argument = __argument\n else:\n argument = self.argument\n resolved_argument: _ExternalEntityType[Any]\n if isinstance(argument, str):\n resolved_argument = cast('_ExternalEntityType[Any]', self.\n _clsregistry_resolve_name(argument)())\n elif callable(argument) and not isinstance(argument, (type,\n mapperlib.Mapper)):\n resolved_argument = argument()\n else:\n resolved_argument = argument\n entity: _InternalEntityType[Any]\n if isinstance(resolved_argument, type):\n entity = class_mapper(resolved_argument, configure=False)\n else:\n try:\n entity = inspect(resolved_argument)\n except sa_exc.NoInspectionAvailable:\n entity = None\n if not hasattr(entity, 'mapper'):\n raise sa_exc.ArgumentError(\n \"relationship '%s' expects a class or a mapper argument (received: %s)\"\n % (self.key, type(resolved_argument)))\n self.entity = entity\n self.target = self.entity.persist_selectable\n\n def _setup_join_conditions(self) ->None:\n self._join_condition = jc = JoinCondition(parent_persist_selectable\n =self.parent.persist_selectable, child_persist_selectable=self.\n entity.persist_selectable, parent_local_selectable=self.parent.\n local_table, child_local_selectable=self.entity.local_table,\n primaryjoin=self._init_args.primaryjoin.resolved, secondary=\n self._init_args.secondary.resolved, secondaryjoin=self.\n _init_args.secondaryjoin.resolved, parent_equivalents=self.\n parent._equivalent_columns, child_equivalents=self.mapper.\n _equivalent_columns, consider_as_foreign_keys=self.\n _user_defined_foreign_keys, local_remote_pairs=self.\n local_remote_pairs, remote_side=self.remote_side,\n self_referential=self._is_self_referential, 
prop=self,\n support_sync=not self.viewonly, can_be_synced_fn=self.\n _columns_are_mapped)\n self.primaryjoin = jc.primaryjoin\n self.secondaryjoin = jc.secondaryjoin\n self.secondary = jc.secondary\n self.direction = jc.direction\n self.local_remote_pairs = jc.local_remote_pairs\n self.remote_side = jc.remote_columns\n self.local_columns = jc.local_columns\n self.synchronize_pairs = jc.synchronize_pairs\n self._calculated_foreign_keys = jc.foreign_key_columns\n self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs\n\n @property\n def _clsregistry_resolve_arg(self) ->Callable[[str, bool], _class_resolver\n ]:\n return self._clsregistry_resolvers[1]\n\n @property\n def _clsregistry_resolve_name(self) ->Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]]:\n return self._clsregistry_resolvers[0]\n\n @util.memoized_property\n @util.preload_module('sqlalchemy.orm.clsregistry')\n def _clsregistry_resolvers(self) ->Tuple[Callable[[str], Callable[[],\n Union[Type[Any], Table, _ModNS]]], Callable[[str, bool],\n _class_resolver]]:\n _resolver = util.preloaded.orm_clsregistry._resolver\n return _resolver(self.parent.class_, self)\n\n def _check_conflicts(self) ->None:\n \"\"\"Test that this relationship is legal, warn about\n inheritance conflicts.\"\"\"\n if self.parent.non_primary and not class_mapper(self.parent.class_,\n configure=False).has_property(self.key):\n raise sa_exc.ArgumentError(\n \"Attempting to assign a new relationship '%s' to a non-primary mapper on class '%s'. New relationships can only be added to the primary mapper, i.e. the very first mapper created for class '%s' \"\n % (self.key, self.parent.class_.__name__, self.parent.\n class_.__name__))\n\n @property\n def cascade(self) ->CascadeOptions:\n \"\"\"Return the current cascade setting for this\n :class:`.RelationshipProperty`.\n \"\"\"\n return self._cascade\n\n @cascade.setter\n def cascade(self, cascade: Union[str, CascadeOptions]) ->None:\n self._set_cascade(cascade)\n\n def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) ->None:\n cascade = CascadeOptions(cascade_arg)\n if self.viewonly:\n cascade = CascadeOptions(cascade.intersection(CascadeOptions.\n _viewonly_cascades))\n if 'mapper' in self.__dict__:\n self._check_cascade_settings(cascade)\n self._cascade = cascade\n if self._dependency_processor:\n self._dependency_processor.cascade = cascade\n\n def _check_cascade_settings(self, cascade: CascadeOptions) ->None:\n if cascade.delete_orphan and not self.single_parent and (self.\n direction is MANYTOMANY or self.direction is MANYTOONE):\n raise sa_exc.ArgumentError(\n 'For %(direction)s relationship %(rel)s, delete-orphan cascade is normally configured only on the \"one\" side of a one-to-many relationship, and not on the \"many\" side of a many-to-one or many-to-many relationship. 
To force this relationship to allow a particular \"%(relatedcls)s\" object to be referred towards by only a single \"%(clsname)s\" object at a time via the %(rel)s relationship, which would allow delete-orphan cascade to take place in this direction, set the single_parent=True flag.'\n % {'rel': self, 'direction': 'many-to-one' if self.\n direction is MANYTOONE else 'many-to-many', 'clsname': self\n .parent.class_.__name__, 'relatedcls': self.mapper.class_.\n __name__}, code='bbf0')\n if self.passive_deletes == 'all' and ('delete' in cascade or \n 'delete-orphan' in cascade):\n raise sa_exc.ArgumentError(\n \"On %s, can't set passive_deletes='all' in conjunction with 'delete' or 'delete-orphan' cascade\"\n % self)\n if cascade.delete_orphan:\n self.mapper.primary_mapper()._delete_orphans.append((self.key,\n self.parent.class_))\n\n def _persists_for(self, mapper: Mapper[Any]) ->bool:\n \"\"\"Return True if this property will persist values on behalf\n of the given mapper.\n\n \"\"\"\n return self.key in mapper.relationships and mapper.relationships[self\n .key] is self\n\n def _columns_are_mapped(self, *cols: ColumnElement[Any]) ->bool:\n \"\"\"Return True if all columns in the given collection are\n mapped by the tables referenced by this :class:`.RelationshipProperty`.\n\n \"\"\"\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(c\n ) and not self.target.c.contains_column(c):\n return False\n return True\n\n def _generate_backref(self) ->None:\n \"\"\"Interpret the 'backref' instruction to create a\n :func:`_orm.relationship` complementary to this one.\"\"\"\n if self.parent.non_primary:\n return\n if self.backref is not None and not self.back_populates:\n kwargs: Dict[str, Any]\n if isinstance(self.backref, str):\n backref_key, kwargs = self.backref, {}\n else:\n backref_key, kwargs = self.backref\n mapper = self.mapper.primary_mapper()\n if not mapper.concrete:\n check = set(mapper.iterate_to_root()).union(mapper.\n self_and_descendants)\n for m in check:\n if m.has_property(backref_key) and not m.concrete:\n raise sa_exc.ArgumentError(\n \"Error creating backref '%s' on relationship '%s': property of that name exists on mapper '%s'\"\n % (backref_key, self, m))\n if self.secondary is not None:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n secondaryjoin_minus_local)\n sj = kwargs.pop('secondaryjoin', self._join_condition.\n primaryjoin_minus_local)\n else:\n pj = kwargs.pop('primaryjoin', self._join_condition.\n primaryjoin_reverse_remote)\n sj = kwargs.pop('secondaryjoin', None)\n if sj:\n raise sa_exc.InvalidRequestError(\n \"Can't assign 'secondaryjoin' on a backref against a non-secondary relationship.\"\n )\n foreign_keys = kwargs.pop('foreign_keys', self.\n _user_defined_foreign_keys)\n parent = self.parent.primary_mapper()\n kwargs.setdefault('viewonly', self.viewonly)\n kwargs.setdefault('post_update', self.post_update)\n kwargs.setdefault('passive_updates', self.passive_updates)\n kwargs.setdefault('sync_backref', self.sync_backref)\n self.back_populates = backref_key\n relationship = RelationshipProperty(parent, self.secondary,\n primaryjoin=pj, secondaryjoin=sj, foreign_keys=foreign_keys,\n back_populates=self.key, **kwargs)\n mapper._configure_property(backref_key, relationship,\n warn_for_existing=True)\n if self.back_populates:\n self._add_reverse_property(self.back_populates)\n\n 
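# --- Illustrative usage sketch (not part of this module) --------------------
# _generate_backref() above interprets the `backref` argument given to
# relationship() and builds the complementary RelationshipProperty on the
# target mapper.  This is a minimal sketch of that configuration style using
# the public SQLAlchemy 2.0 declarative API; the Parent / Child classes and
# the "children" / "parent" attribute names are illustrative only.
from typing import List

from sqlalchemy import ForeignKey
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    configure_mappers,
    mapped_column,
    relationship,
)


class Base(DeclarativeBase):
    pass


class Parent(Base):
    __tablename__ = "parent"
    id: Mapped[int] = mapped_column(primary_key=True)
    # backref="parent" asks the ORM to generate the reverse "parent"
    # attribute on Child automatically (the code path shown above); the
    # explicit alternative is a pair of relationships linked with
    # back_populates on both classes.
    children: Mapped[List["Child"]] = relationship(backref="parent")


class Child(Base):
    __tablename__ = "child"
    id: Mapped[int] = mapped_column(primary_key=True)
    parent_id: Mapped[int] = mapped_column(ForeignKey("parent.id"))


# Trigger mapper configuration; the backref-generated attribute now exists.
configure_mappers()
assert hasattr(Child, "parent")
# -----------------------------------------------------------------------------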
@util.preload_module('sqlalchemy.orm.dependency')\n def _post_init(self) ->None:\n dependency = util.preloaded.orm_dependency\n if self.uselist is None:\n self.uselist = self.direction is not MANYTOONE\n if not self.viewonly:\n self._dependency_processor = (dependency.DependencyProcessor.\n from_relationship(self))\n\n @util.memoized_property\n def _use_get(self) ->bool:\n \"\"\"memoize the 'use_get' attribute of this RelationshipLoader's\n lazyloader.\"\"\"\n strategy = self._lazy_strategy\n return strategy.use_get\n\n @util.memoized_property\n def _is_self_referential(self) ->bool:\n return self.mapper.common_parent(self.parent)\n\n def _create_joins(self, source_polymorphic: bool=False,\n source_selectable: Optional[FromClause]=None, dest_selectable:\n Optional[FromClause]=None, of_type_entity: Optional[\n _InternalEntityType[Any]]=None, alias_secondary: bool=False,\n extra_criteria: Tuple[ColumnElement[bool], ...]=()) ->Tuple[\n ColumnElement[bool], Optional[ColumnElement[bool]], FromClause,\n FromClause, Optional[FromClause], Optional[ClauseAdapter]]:\n aliased = False\n if alias_secondary and self.secondary is not None:\n aliased = True\n if source_selectable is None:\n if source_polymorphic and self.parent.with_polymorphic:\n source_selectable = self.parent._with_polymorphic_selectable\n if of_type_entity:\n dest_mapper = of_type_entity.mapper\n if dest_selectable is None:\n dest_selectable = of_type_entity.selectable\n aliased = True\n else:\n dest_mapper = self.mapper\n if dest_selectable is None:\n dest_selectable = self.entity.selectable\n if self.mapper.with_polymorphic:\n aliased = True\n if self._is_self_referential and source_selectable is None:\n dest_selectable = dest_selectable._anonymous_fromclause()\n aliased = True\n elif dest_selectable is not self.mapper._with_polymorphic_selectable or self.mapper.with_polymorphic:\n aliased = True\n single_crit = dest_mapper._single_table_criterion\n aliased = aliased or source_selectable is not None and (\n source_selectable is not self.parent.\n _with_polymorphic_selectable or source_selectable._is_subquery)\n (primaryjoin, secondaryjoin, secondary, target_adapter, dest_selectable\n ) = (self._join_condition.join_targets(source_selectable,\n dest_selectable, aliased, single_crit, extra_criteria))\n if source_selectable is None:\n source_selectable = self.parent.local_table\n if dest_selectable is None:\n dest_selectable = self.entity.local_table\n return (primaryjoin, secondaryjoin, source_selectable,\n dest_selectable, secondary, target_adapter)\n\n\ndef _annotate_columns(element: _CE, annotations: _AnnotationDict) ->_CE:\n\n def clone(elem: _CE) ->_CE:\n if isinstance(elem, expression.ColumnClause):\n elem = elem._annotate(annotations.copy())\n elem._copy_internals(clone=clone)\n return elem\n if element is not None:\n element = clone(element)\n clone = None\n return element\n\n\nclass JoinCondition:\n primaryjoin_initial: Optional[ColumnElement[bool]]\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n prop: RelationshipProperty[Any]\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: _ColumnPairs\n direction: RelationshipDirection\n parent_persist_selectable: FromClause\n child_persist_selectable: FromClause\n parent_local_selectable: FromClause\n child_local_selectable: FromClause\n _local_remote_pairs: Optional[_ColumnPairs]\n\n def __init__(self, parent_persist_selectable: FromClause,\n child_persist_selectable: FromClause, 
parent_local_selectable:\n FromClause, child_local_selectable: FromClause, *, primaryjoin:\n Optional[ColumnElement[bool]]=None, secondary: Optional[FromClause]\n =None, secondaryjoin: Optional[ColumnElement[bool]]=None,\n parent_equivalents: Optional[_EquivalentColumnMap]=None,\n child_equivalents: Optional[_EquivalentColumnMap]=None,\n consider_as_foreign_keys: Any=None, local_remote_pairs: Optional[\n _ColumnPairs]=None, remote_side: Any=None, self_referential: Any=\n False, prop: RelationshipProperty[Any], support_sync: bool=True,\n can_be_synced_fn: Callable[..., bool]=lambda *c: True):\n self.parent_persist_selectable = parent_persist_selectable\n self.parent_local_selectable = parent_local_selectable\n self.child_persist_selectable = child_persist_selectable\n self.child_local_selectable = child_local_selectable\n self.parent_equivalents = parent_equivalents\n self.child_equivalents = child_equivalents\n self.primaryjoin_initial = primaryjoin\n self.secondaryjoin = secondaryjoin\n self.secondary = secondary\n self.consider_as_foreign_keys = consider_as_foreign_keys\n self._local_remote_pairs = local_remote_pairs\n self._remote_side = remote_side\n self.prop = prop\n self.self_referential = self_referential\n self.support_sync = support_sync\n self.can_be_synced_fn = can_be_synced_fn\n self._determine_joins()\n assert self.primaryjoin is not None\n self._sanitize_joins()\n self._annotate_fks()\n self._annotate_remote()\n self._annotate_local()\n self._annotate_parentmapper()\n self._setup_pairs()\n self._check_foreign_cols(self.primaryjoin, True)\n if self.secondaryjoin is not None:\n self._check_foreign_cols(self.secondaryjoin, False)\n self._determine_direction()\n self._check_remote_side()\n self._log_joins()\n\n def _log_joins(self) ->None:\n log = self.prop.logger\n log.info('%s setup primary join %s', self.prop, self.primaryjoin)\n log.info('%s setup secondary join %s', self.prop, self.secondaryjoin)\n log.info('%s synchronize pairs [%s]', self.prop, ','.join(\n '(%s => %s)' % (l, r) for l, r in self.synchronize_pairs))\n log.info('%s secondary synchronize pairs [%s]', self.prop, ','.join\n ('(%s => %s)' % (l, r) for l, r in self.\n secondary_synchronize_pairs or []))\n log.info('%s local/remote pairs [%s]', self.prop, ','.join(\n '(%s / %s)' % (l, r) for l, r in self.local_remote_pairs))\n log.info('%s remote columns [%s]', self.prop, ','.join('%s' % col for\n col in self.remote_columns))\n log.info('%s local columns [%s]', self.prop, ','.join('%s' % col for\n col in self.local_columns))\n log.info('%s relationship direction %s', self.prop, self.direction)\n\n def _sanitize_joins(self) ->None:\n \"\"\"remove the parententity annotation from our join conditions which\n can leak in here based on some declarative patterns and maybe others.\n\n \"parentmapper\" is relied upon both by the ORM evaluator as well as\n the use case in _join_fixture_inh_selfref_w_entity\n that relies upon it being present, see :ticket:`3364`.\n\n \"\"\"\n self.primaryjoin = _deep_deannotate(self.primaryjoin, values=(\n 'parententity', 'proxy_key'))\n if self.secondaryjoin is not None:\n self.secondaryjoin = _deep_deannotate(self.secondaryjoin,\n values=('parententity', 'proxy_key'))\n\n def _determine_joins(self) ->None:\n \"\"\"Determine the 'primaryjoin' and 'secondaryjoin' attributes,\n if not passed to the constructor already.\n\n This is based on analysis of the foreign key relationships\n between the parent and target mapped selectables.\n\n \"\"\"\n if self.secondaryjoin is not None and 
self.secondary is None:\n raise sa_exc.ArgumentError(\n 'Property %s specified with secondary join condition but no secondary argument'\n % self.prop)\n try:\n consider_as_foreign_keys = self.consider_as_foreign_keys or None\n if self.secondary is not None:\n if self.secondaryjoin is None:\n self.secondaryjoin = join_condition(self.\n child_persist_selectable, self.secondary, a_subset=\n self.child_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.secondary, a_subset\n =self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n elif self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(self.\n parent_persist_selectable, self.\n child_persist_selectable, a_subset=self.\n parent_local_selectable, consider_as_foreign_keys=\n consider_as_foreign_keys)\n else:\n self.primaryjoin = self.primaryjoin_initial\n except sa_exc.NoForeignKeysError as nfe:\n if self.secondary is not None:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables via secondary table '%s'. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify 'primaryjoin' and 'secondaryjoin' expressions.\"\n % (self.prop, self.secondary)) from nfe\n else:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are no foreign keys linking these tables. Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or specify a 'primaryjoin' expression.\"\n % self.prop) from nfe\n except sa_exc.AmbiguousForeignKeysError as afe:\n if self.secondary is not None:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables via secondary table '%s'. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference from the secondary table to each of the parent and child tables.\"\n % (self.prop, self.secondary)) from afe\n else:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join condition between parent/child tables on relationship %s - there are multiple foreign key paths linking the tables. Specify the 'foreign_keys' argument, providing a list of those columns which should be counted as containing a foreign key reference to the parent table.\"\n % self.prop) from afe\n\n @property\n def primaryjoin_minus_local(self) ->ColumnElement[bool]:\n return _deep_deannotate(self.primaryjoin, values=('local', 'remote'))\n\n @property\n def secondaryjoin_minus_local(self) ->ColumnElement[bool]:\n assert self.secondaryjoin is not None\n return _deep_deannotate(self.secondaryjoin, values=('local', 'remote'))\n\n @util.memoized_property\n def primaryjoin_reverse_remote(self) ->ColumnElement[bool]:\n \"\"\"Return the primaryjoin condition suitable for the\n \"reverse\" direction.\n\n If the primaryjoin was delivered here with pre-existing\n \"remote\" annotations, the local/remote annotations\n are reversed. 
Otherwise, the local/remote annotations\n are removed.\n\n \"\"\"\n if self._has_remote_annotations:\n\n def replace(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n v = dict(element._annotations)\n del v['remote']\n v['local'] = True\n return element._with_annotations(v)\n elif 'local' in element._annotations:\n v = dict(element._annotations)\n del v['local']\n v['remote'] = True\n return element._with_annotations(v)\n return None\n return visitors.replacement_traverse(self.primaryjoin, {}, replace)\n elif self._has_foreign_annotations:\n return _deep_deannotate(self.primaryjoin, values=('local',\n 'remote'))\n else:\n return _deep_deannotate(self.primaryjoin)\n\n def _has_annotation(self, clause: ClauseElement, annotation: str) ->bool:\n for col in visitors.iterate(clause, {}):\n if annotation in col._annotations:\n return True\n else:\n return False\n\n @util.memoized_property\n def _has_foreign_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'foreign')\n\n @util.memoized_property\n def _has_remote_annotations(self) ->bool:\n return self._has_annotation(self.primaryjoin, 'remote')\n\n def _annotate_fks(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'foreign' annotations marking columns\n considered as foreign.\n\n \"\"\"\n if self._has_foreign_annotations:\n return\n if self.consider_as_foreign_keys:\n self._annotate_from_fk_list()\n else:\n self._annotate_present_fks()\n\n def _annotate_from_fk_list(self) ->None:\n\n def check_fk(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in self.consider_as_foreign_keys:\n return element._annotate({'foreign': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, check_fk)\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, check_fk)\n\n def _annotate_present_fks(self) ->None:\n if self.secondary is not None:\n secondarycols = util.column_set(self.secondary.c)\n else:\n secondarycols = set()\n\n def is_foreign(a: ColumnElement[Any], b: ColumnElement[Any]\n ) ->Optional[ColumnElement[Any]]:\n if isinstance(a, schema.Column) and isinstance(b, schema.Column):\n if a.references(b):\n return a\n elif b.references(a):\n return b\n if secondarycols:\n if a in secondarycols and b not in secondarycols:\n return a\n elif b in secondarycols and a not in secondarycols:\n return b\n return None\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n if not isinstance(binary.left, sql.ColumnElement\n ) or not isinstance(binary.right, sql.ColumnElement):\n return\n if ('foreign' not in binary.left._annotations and 'foreign' not in\n binary.right._annotations):\n col = is_foreign(binary.left, binary.right)\n if col is not None:\n if col.compare(binary.left):\n binary.left = binary.left._annotate({'foreign': True})\n elif col.compare(binary.right):\n binary.right = binary.right._annotate({'foreign': True}\n )\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.cloned_traverse(self.\n secondaryjoin, {}, {'binary': visit_binary})\n\n def _refers_to_parent_table(self) ->bool:\n \"\"\"Return True if the join condition contains column\n comparisons where both columns are in both tables.\n\n \"\"\"\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) 
->None:\n nonlocal result\n c, f = binary.left, binary.right\n if isinstance(c, expression.ColumnClause) and isinstance(f,\n expression.ColumnClause) and pt.is_derived_from(c.table\n ) and pt.is_derived_from(f.table) and mt.is_derived_from(c.\n table) and mt.is_derived_from(f.table):\n result = True\n visitors.traverse(self.primaryjoin, {}, {'binary': visit_binary})\n return result\n\n def _tables_overlap(self) ->bool:\n \"\"\"Return True if parent/child tables have some overlap.\"\"\"\n return selectables_overlap(self.parent_persist_selectable, self.\n child_persist_selectable)\n\n def _annotate_remote(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'remote' annotations marking columns\n considered as part of the 'remote' side.\n\n \"\"\"\n if self._has_remote_annotations:\n return\n if self.secondary is not None:\n self._annotate_remote_secondary()\n elif self._local_remote_pairs or self._remote_side:\n self._annotate_remote_from_args()\n elif self._refers_to_parent_table():\n self._annotate_selfref(lambda col: 'foreign' in col.\n _annotations, False)\n elif self._tables_overlap():\n self._annotate_remote_with_overlap()\n else:\n self._annotate_remote_distinct_selectables()\n\n def _annotate_remote_secondary(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when 'secondary' is present.\n\n \"\"\"\n assert self.secondary is not None\n fixed_secondary = self.secondary\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if fixed_secondary.c.contains_column(element):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n assert self.secondaryjoin is not None\n self.secondaryjoin = visitors.replacement_traverse(self.\n secondaryjoin, {}, repl)\n\n def _annotate_selfref(self, fn: Callable[[ColumnElement[Any]], bool],\n remote_side_given: bool) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the relationship is detected as self-referential.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n equated = binary.left.compare(binary.right)\n if isinstance(binary.left, expression.ColumnClause) and isinstance(\n binary.right, expression.ColumnClause):\n if fn(binary.left):\n binary.left = binary.left._annotate({'remote': True})\n if fn(binary.right) and not equated:\n binary.right = binary.right._annotate({'remote': True})\n elif not remote_side_given:\n self._warn_non_column_elements()\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_from_args(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the 'remote_side' or '_local_remote_pairs'\n arguments are used.\n\n \"\"\"\n if self._local_remote_pairs:\n if self._remote_side:\n raise sa_exc.ArgumentError(\n 'remote_side argument is redundant against more detailed _local_remote_side argument.'\n )\n remote_side = [r for l, r in self._local_remote_pairs]\n else:\n remote_side = self._remote_side\n if self._refers_to_parent_table():\n self._annotate_selfref(lambda col: col in remote_side, True)\n else:\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if element in set(remote_side):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.\n primaryjoin, {}, repl)\n\n def _annotate_remote_with_overlap(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables have 
some set of\n tables in common, though is not a fully self-referential\n relationship.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) ->None:\n binary.left, binary.right = proc_left_right(binary.left, binary\n .right)\n binary.right, binary.left = proc_left_right(binary.right,\n binary.left)\n check_entities = (self.prop is not None and self.prop.mapper is not\n self.prop.parent)\n\n def proc_left_right(left: ColumnElement[Any], right: ColumnElement[Any]\n ) ->Tuple[ColumnElement[Any], ColumnElement[Any]]:\n if isinstance(left, expression.ColumnClause) and isinstance(right,\n expression.ColumnClause):\n if self.child_persist_selectable.c.contains_column(right\n ) and self.parent_persist_selectable.c.contains_column(left\n ):\n right = right._annotate({'remote': True})\n elif check_entities and right._annotations.get('parentmapper'\n ) is self.prop.mapper:\n right = right._annotate({'remote': True})\n elif check_entities and left._annotations.get('parentmapper'\n ) is self.prop.mapper:\n left = left._annotate({'remote': True})\n else:\n self._warn_non_column_elements()\n return left, right\n self.primaryjoin = visitors.cloned_traverse(self.primaryjoin, {}, {\n 'binary': visit_binary})\n\n def _annotate_remote_distinct_selectables(self) ->None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables are entirely\n separate.\n\n \"\"\"\n\n def repl(element: _CE, **kw: Any) ->Optional[_CE]:\n if self.child_persist_selectable.c.contains_column(element) and (\n not self.parent_local_selectable.c.contains_column(element) or\n self.child_local_selectable.c.contains_column(element)):\n return element._annotate({'remote': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, repl)\n\n def _warn_non_column_elements(self) ->None:\n util.warn(\n 'Non-simple column elements in primary join condition for property %s - consider using remote() annotations to mark the remote side.'\n % self.prop)\n\n def _annotate_local(self) ->None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'local' annotations.\n\n This annotates all column elements found\n simultaneously in the parent table\n and the join condition that don't have a\n 'remote' annotation set up from\n _annotate_remote() or user-defined.\n\n \"\"\"\n if self._has_annotation(self.primaryjoin, 'local'):\n return\n if self._local_remote_pairs:\n local_side = util.column_set([l for l, r in self.\n _local_remote_pairs])\n else:\n local_side = util.column_set(self.parent_persist_selectable.c)\n\n def locals_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' not in element._annotations and element in local_side:\n return element._annotate({'local': True})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, locals_)\n\n def _annotate_parentmapper(self) ->None:\n\n def parentmappers_(element: _CE, **kw: Any) ->Optional[_CE]:\n if 'remote' in element._annotations:\n return element._annotate({'parentmapper': self.prop.mapper})\n elif 'local' in element._annotations:\n return element._annotate({'parentmapper': self.prop.parent})\n return None\n self.primaryjoin = visitors.replacement_traverse(self.primaryjoin,\n {}, parentmappers_)\n\n def _check_remote_side(self) ->None:\n if not self.local_remote_pairs:\n raise sa_exc.ArgumentError(\n 'Relationship %s could not determine any unambiguous local/remote column pairs based on join condition and remote_side arguments. 
Consider using the remote() annotation to accurately mark those elements of the join condition that are on the remote side of the relationship.'\n % (self.prop,))\n else:\n not_target = util.column_set(self.parent_persist_selectable.c\n ).difference(self.child_persist_selectable.c)\n for _, rmt in self.local_remote_pairs:\n if rmt in not_target:\n util.warn(\n \"Expression %s is marked as 'remote', but these column(s) are local to the local side. The remote() annotation is needed only for a self-referential relationship where both sides of the relationship refer to the same tables.\"\n % (rmt,))\n\n def _check_foreign_cols(self, join_condition: ColumnElement[bool],\n primary: bool) ->None:\n \"\"\"Check the foreign key columns collected and emit error\n messages.\"\"\"\n can_sync = False\n foreign_cols = self._gather_columns_with_annotation(join_condition,\n 'foreign')\n has_foreign = bool(foreign_cols)\n if primary:\n can_sync = bool(self.synchronize_pairs)\n else:\n can_sync = bool(self.secondary_synchronize_pairs)\n if (self.support_sync and can_sync or not self.support_sync and\n has_foreign):\n return\n if self.support_sync and has_foreign and not can_sync:\n err = (\n \"Could not locate any simple equality expressions involving locally mapped foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n \" Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation. To allow comparison operators other than '==', the relationship can be marked as viewonly=True.\"\n )\n raise sa_exc.ArgumentError(err)\n else:\n err = (\n \"Could not locate any relevant foreign key columns for %s join condition '%s' on relationship %s.\"\n % (primary and 'primary' or 'secondary', join_condition,\n self.prop))\n err += (\n ' Ensure that referencing columns are associated with a ForeignKey or ForeignKeyConstraint, or are annotated in the join condition with the foreign() annotation.'\n )\n raise sa_exc.ArgumentError(err)\n\n def _determine_direction(self) ->None:\n \"\"\"Determine if this relationship is one to many, many to one,\n many to many.\n\n \"\"\"\n if self.secondaryjoin is not None:\n self.direction = MANYTOMANY\n else:\n parentcols = util.column_set(self.parent_persist_selectable.c)\n targetcols = util.column_set(self.child_persist_selectable.c)\n onetomany_fk = targetcols.intersection(self.foreign_key_columns)\n manytoone_fk = parentcols.intersection(self.foreign_key_columns)\n if onetomany_fk and manytoone_fk:\n onetomany_local = self._gather_columns_with_annotation(self\n .primaryjoin, 'remote', 'foreign')\n manytoone_local = {c for c in self.\n _gather_columns_with_annotation(self.primaryjoin,\n 'foreign') if 'remote' not in c._annotations}\n if onetomany_local and manytoone_local:\n self_equated = self.remote_columns.intersection(self.\n local_columns)\n onetomany_local = onetomany_local.difference(self_equated)\n manytoone_local = manytoone_local.difference(self_equated)\n if onetomany_local and not manytoone_local:\n self.direction = ONETOMANY\n elif manytoone_local and not onetomany_local:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns within the join condition are present in both the parent and the child's mapped tables. 
Ensure that only those columns referring to a parent column are marked as foreign, either via the foreign() annotation or via the foreign_keys argument.\"\n % self.prop)\n elif onetomany_fk:\n self.direction = ONETOMANY\n elif manytoone_fk:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship direction for relationship '%s' - foreign key columns are present in neither the parent nor the child's mapped tables\"\n % self.prop)\n\n def _deannotate_pairs(self, collection: _ColumnPairIterable\n ) ->_MutableColumnPairs:\n \"\"\"provide deannotation for the various lists of\n pairs, so that using them in hashes doesn't incur\n high-overhead __eq__() comparisons against\n original columns mapped.\n\n \"\"\"\n return [(x._deannotate(), y._deannotate()) for x, y in collection]\n\n def _setup_pairs(self) ->None:\n sync_pairs: _MutableColumnPairs = []\n lrp: util.OrderedSet[Tuple[ColumnElement[Any], ColumnElement[Any]]\n ] = util.OrderedSet([])\n secondary_sync_pairs: _MutableColumnPairs = []\n\n def go(joincond: ColumnElement[bool], collection: _MutableColumnPairs\n ) ->None:\n\n def visit_binary(binary: BinaryExpression[Any], left:\n ColumnElement[Any], right: ColumnElement[Any]) ->None:\n if ('remote' in right._annotations and 'remote' not in left\n ._annotations and self.can_be_synced_fn(left)):\n lrp.add((left, right))\n elif 'remote' in left._annotations and 'remote' not in right._annotations and self.can_be_synced_fn(\n right):\n lrp.add((right, left))\n if binary.operator is operators.eq and self.can_be_synced_fn(\n left, right):\n if 'foreign' in right._annotations:\n collection.append((left, right))\n elif 'foreign' in left._annotations:\n collection.append((right, left))\n visit_binary_product(visit_binary, joincond)\n for joincond, collection in [(self.primaryjoin, sync_pairs), (self.\n secondaryjoin, secondary_sync_pairs)]:\n if joincond is None:\n continue\n go(joincond, collection)\n self.local_remote_pairs = self._deannotate_pairs(lrp)\n self.synchronize_pairs = self._deannotate_pairs(sync_pairs)\n self.secondary_synchronize_pairs = self._deannotate_pairs(\n secondary_sync_pairs)\n _track_overlapping_sync_targets: weakref.WeakKeyDictionary[\n ColumnElement[Any], weakref.WeakKeyDictionary[RelationshipProperty[\n Any], ColumnElement[Any]]] = weakref.WeakKeyDictionary()\n\n def _warn_for_conflicting_sync_targets(self) ->None:\n if not self.support_sync:\n return\n for from_, to_ in ([(from_, to_) for from_, to_ in self.\n synchronize_pairs] + [(from_, to_) for from_, to_ in self.\n secondary_synchronize_pairs]):\n if to_ not in self._track_overlapping_sync_targets:\n self._track_overlapping_sync_targets[to_\n ] = weakref.WeakKeyDictionary({self.prop: from_})\n else:\n other_props = []\n prop_to_from = self._track_overlapping_sync_targets[to_]\n for pr, fr_ in prop_to_from.items():\n if (not pr.mapper._dispose_called and pr not in self.\n prop._reverse_property and pr.key not in self.prop.\n _overlaps and self.prop.key not in pr._overlaps and\n '__*' not in self.prop._overlaps and '__*' not in\n pr._overlaps and not self.prop.parent.is_sibling(pr\n .parent) and not self.prop.mapper.is_sibling(pr.\n mapper) and not self.prop.parent.is_sibling(pr.\n mapper) and not self.prop.mapper.is_sibling(pr.\n parent) and (self.prop.key != pr.key or not self.\n prop.parent.common_parent(pr.parent))):\n other_props.append((pr, fr_))\n if other_props:\n util.warn(\n 'relationship \\'%s\\' will copy column %s to column %s, which conflicts with relationship(s): 
%s. If this is not the intention, consider if these relationships should be linked with back_populates, or if viewonly=True should be applied to one or more if they are read-only. For the less common case that foreign key constraints are partially overlapping, the orm.foreign() annotation can be used to isolate the columns that should be written towards. To silence this warning, add the parameter \\'overlaps=\"%s\"\\' to the \\'%s\\' relationship.'\n % (self.prop, from_, to_, ', '.join(sorted(\n \"'%s' (copies %s to %s)\" % (pr, fr_, to_) for pr,\n fr_ in other_props)), ','.join(sorted(pr.key for pr,\n fr in other_props)), self.prop), code='qzyx')\n self._track_overlapping_sync_targets[to_][self.prop] = from_\n\n @util.memoized_property\n def remote_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('remote')\n\n @util.memoized_property\n def local_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('local')\n\n @util.memoized_property\n def foreign_key_columns(self) ->Set[ColumnElement[Any]]:\n return self._gather_join_annotations('foreign')\n\n def _gather_join_annotations(self, annotation: str) ->Set[ColumnElement\n [Any]]:\n s = set(self._gather_columns_with_annotation(self.primaryjoin,\n annotation))\n if self.secondaryjoin is not None:\n s.update(self._gather_columns_with_annotation(self.\n secondaryjoin, annotation))\n return {x._deannotate() for x in s}\n\n def _gather_columns_with_annotation(self, clause: ColumnElement[Any], *\n annotation: Iterable[str]) ->Set[ColumnElement[Any]]:\n annotation_set = set(annotation)\n return {cast(ColumnElement[Any], col) for col in visitors.iterate(\n clause, {}) if annotation_set.issubset(col._annotations)}\n\n def join_targets(self, source_selectable: Optional[FromClause],\n dest_selectable: FromClause, aliased: bool, single_crit: Optional[\n ColumnElement[bool]]=None, extra_criteria: Tuple[ColumnElement[bool\n ], ...]=()) ->Tuple[ColumnElement[bool], Optional[ColumnElement[\n bool]], Optional[FromClause], Optional[ClauseAdapter], FromClause]:\n \"\"\"Given a source and destination selectable, create a\n join between them.\n\n This takes into account aliasing the join clause\n to reference the appropriate corresponding columns\n in the target objects, as well as the extra child\n criterion, equivalent column sets, etc.\n\n \"\"\"\n dest_selectable = _shallow_annotate(dest_selectable, {\n 'no_replacement_traverse': True})\n primaryjoin, secondaryjoin, secondary = (self.primaryjoin, self.\n secondaryjoin, self.secondary)\n if single_crit is not None:\n if secondaryjoin is not None:\n secondaryjoin = secondaryjoin & single_crit\n else:\n primaryjoin = primaryjoin & single_crit\n if extra_criteria:\n\n def mark_unrelated_columns_as_ok_to_adapt(elem:\n SupportsAnnotations, annotations: _AnnotationDict\n ) ->SupportsAnnotations:\n \"\"\"note unrelated columns in the \"extra criteria\" as OK\n to adapt, even though they are not part of our \"local\"\n or \"remote\" side.\n\n see #9779 for this case\n\n \"\"\"\n parentmapper_for_element = elem._annotations.get('parentmapper'\n , None)\n if (parentmapper_for_element is not self.prop.parent and \n parentmapper_for_element is not self.prop.mapper):\n return _safe_annotate(elem, annotations)\n else:\n return elem\n extra_criteria = tuple(_deep_annotate(elem, {\n 'ok_to_adapt_in_join_condition': True}, annotate_callable=\n mark_unrelated_columns_as_ok_to_adapt) for elem in\n extra_criteria)\n if secondaryjoin is not None:\n secondaryjoin = 
secondaryjoin & sql.and_(*extra_criteria)\n else:\n primaryjoin = primaryjoin & sql.and_(*extra_criteria)\n if aliased:\n if secondary is not None:\n secondary = secondary._anonymous_fromclause(flat=True)\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn=\n _ColInAnnotations('local'))\n secondary_aliasizer = ClauseAdapter(dest_selectable,\n equivalents=self.child_equivalents).chain(primary_aliasizer\n )\n if source_selectable is not None:\n primary_aliasizer = ClauseAdapter(secondary, exclude_fn\n =_ColInAnnotations('local')).chain(ClauseAdapter(\n source_selectable, equivalents=self.parent_equivalents)\n )\n secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)\n else:\n primary_aliasizer = ClauseAdapter(dest_selectable,\n exclude_fn=_ColInAnnotations('local'), equivalents=self\n .child_equivalents)\n if source_selectable is not None:\n primary_aliasizer.chain(ClauseAdapter(source_selectable,\n exclude_fn=_ColInAnnotations('remote'), equivalents\n =self.parent_equivalents))\n secondary_aliasizer = None\n primaryjoin = primary_aliasizer.traverse(primaryjoin)\n target_adapter = secondary_aliasizer or primary_aliasizer\n target_adapter.exclude_fn = None\n else:\n target_adapter = None\n return (primaryjoin, secondaryjoin, secondary, target_adapter,\n dest_selectable)\n\n def create_lazy_clause(self, reverse_direction: bool=False) ->Tuple[\n ColumnElement[bool], Dict[str, ColumnElement[Any]], Dict[\n ColumnElement[Any], ColumnElement[Any]]]:\n binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}\n equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}\n has_secondary = self.secondaryjoin is not None\n if has_secondary:\n lookup = collections.defaultdict(list)\n for l, r in self.local_remote_pairs:\n lookup[l].append((l, r))\n equated_columns[r] = l\n elif not reverse_direction:\n for l, r in self.local_remote_pairs:\n equated_columns[r] = l\n else:\n for l, r in self.local_remote_pairs:\n equated_columns[l] = r\n\n def col_to_bind(element: ColumnElement[Any], **kw: Any) ->Optional[\n BindParameter[Any]]:\n if (not reverse_direction and 'local' in element._annotations or\n reverse_direction and (has_secondary and element in lookup or\n not has_secondary and 'remote' in element._annotations)):\n if element not in binds:\n binds[element] = sql.bindparam(None, None, type_=\n element.type, unique=True)\n return binds[element]\n return None\n lazywhere = self.primaryjoin\n if self.secondaryjoin is None or not reverse_direction:\n lazywhere = visitors.replacement_traverse(lazywhere, {},\n col_to_bind)\n if self.secondaryjoin is not None:\n secondaryjoin = self.secondaryjoin\n if reverse_direction:\n secondaryjoin = visitors.replacement_traverse(secondaryjoin,\n {}, col_to_bind)\n lazywhere = sql.and_(lazywhere, secondaryjoin)\n bind_to_col = {binds[col].key: col for col in binds}\n return lazywhere, bind_to_col, equated_columns\n\n\nclass _ColInAnnotations:\n \"\"\"Serializable object that tests for a name in c._annotations.\"\"\"\n __slots__ = 'name',\n\n def __init__(self, name: str):\n self.name = name\n\n def __call__(self, c: ClauseElement) ->bool:\n return (self.name in c._annotations or \n 'ok_to_adapt_in_join_condition' in c._annotations)\n\n\nclass Relationship(RelationshipProperty[_T], _DeclarativeMapped[_T],\n WriteOnlyMapped[_T], DynamicMapped[_T]):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. 
seealso::\n\n :ref:`relationship_config_toplevel`\n\n .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative\n compatible subclass for :class:`_orm.RelationshipProperty`.\n\n \"\"\"\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n",
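The escaped "step-5" value that follows (like the step that ends above) reproduces SQLAlchemy's orm/relationships.py, in which JoinCondition reads the foreign() and remote() column annotations to derive the relationship direction, the local/remote pairs, and the synchronize pairs. As a minimal sketch of how those annotations are supplied from user code, the example below declares a self-referential relationship with an explicit string primaryjoin. It is not part of this dataset record; it assumes SQLAlchemy 2.x is installed, and the Node model, table name, and in-memory SQLite URL are illustrative choices only.

# Minimal sketch (assumes SQLAlchemy 2.x). The Node model and table name are
# hypothetical; the point is the foreign()/remote() annotations that
# JoinCondition consumes when deriving direction and column pairs.
from typing import Optional

from sqlalchemy import create_engine
from sqlalchemy.orm import (
    DeclarativeBase,
    Mapped,
    Session,
    mapped_column,
    relationship,
)


class Base(DeclarativeBase):
    pass


class Node(Base):
    __tablename__ = "node"

    id: Mapped[int] = mapped_column(primary_key=True)
    # Deliberately no ForeignKey constraint: without one, relationship()
    # cannot infer the join on its own, so the primaryjoin below marks
    # parent_id as the "foreign" (written-to) column and Node.id as the
    # "remote" side of the self-reference.
    parent_id: Mapped[Optional[int]] = mapped_column()

    # String primaryjoin expressions are evaluated with foreign() and
    # remote() available; the heuristics resolve this as many-to-one.
    parent: Mapped[Optional["Node"]] = relationship(
        primaryjoin="remote(Node.id) == foreign(Node.parent_id)",
    )


if __name__ == "__main__":
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        root = Node()
        child = Node(parent=root)
        session.add_all([root, child])
        session.commit()
        # parent_id was populated from the synchronize pair at flush time.
        assert child.parent_id == root.id

Because parent_id carries no ForeignKey constraint, the foreign() marker is what lets _setup_pairs() record (id, parent_id) as a synchronize pair, while the remote() marker supplies the remote side that the direction heuristics use to resolve the self-reference as many-to-one rather than one-to-many.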
"step-5": "# orm/relationships.py\n# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors\n# <see AUTHORS file>\n#\n# This module is part of SQLAlchemy and is released under\n# the MIT License: https://www.opensource.org/licenses/mit-license.php\n\n\"\"\"Heuristics related to join conditions as used in\n:func:`_orm.relationship`.\n\nProvides the :class:`.JoinCondition` object, which encapsulates\nSQL annotation and aliasing behavior focused on the `primaryjoin`\nand `secondaryjoin` aspects of :func:`_orm.relationship`.\n\n\"\"\"\nfrom __future__ import annotations\n\nimport collections\nfrom collections import abc\nimport dataclasses\nimport inspect as _py_inspect\nimport re\nimport typing\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import Collection\nfrom typing import Dict\nfrom typing import Generic\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import NoReturn\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import TypeVar\nfrom typing import Union\nimport weakref\n\nfrom . import attributes\nfrom . import strategy_options\nfrom ._typing import insp_is_aliased_class\nfrom ._typing import is_has_collection_adapter\nfrom .base import _DeclarativeMapped\nfrom .base import _is_mapped_class\nfrom .base import class_mapper\nfrom .base import DynamicMapped\nfrom .base import LoaderCallableStatus\nfrom .base import PassiveFlag\nfrom .base import state_str\nfrom .base import WriteOnlyMapped\nfrom .interfaces import _AttributeOptions\nfrom .interfaces import _IntrospectsAnnotations\nfrom .interfaces import MANYTOMANY\nfrom .interfaces import MANYTOONE\nfrom .interfaces import ONETOMANY\nfrom .interfaces import PropComparator\nfrom .interfaces import RelationshipDirection\nfrom .interfaces import StrategizedProperty\nfrom .util import _orm_annotate\nfrom .util import _orm_deannotate\nfrom .util import CascadeOptions\nfrom .. import exc as sa_exc\nfrom .. import Exists\nfrom .. import log\nfrom .. import schema\nfrom .. import sql\nfrom .. 
import util\nfrom ..inspection import inspect\nfrom ..sql import coercions\nfrom ..sql import expression\nfrom ..sql import operators\nfrom ..sql import roles\nfrom ..sql import visitors\nfrom ..sql._typing import _ColumnExpressionArgument\nfrom ..sql._typing import _HasClauseElement\nfrom ..sql.annotation import _safe_annotate\nfrom ..sql.elements import ColumnClause\nfrom ..sql.elements import ColumnElement\nfrom ..sql.util import _deep_annotate\nfrom ..sql.util import _deep_deannotate\nfrom ..sql.util import _shallow_annotate\nfrom ..sql.util import adapt_criterion_to_null\nfrom ..sql.util import ClauseAdapter\nfrom ..sql.util import join_condition\nfrom ..sql.util import selectables_overlap\nfrom ..sql.util import visit_binary_product\nfrom ..util.typing import de_optionalize_union_types\nfrom ..util.typing import Literal\nfrom ..util.typing import resolve_name_to_real_class_name\n\nif typing.TYPE_CHECKING:\n from ._typing import _EntityType\n from ._typing import _ExternalEntityType\n from ._typing import _IdentityKeyType\n from ._typing import _InstanceDict\n from ._typing import _InternalEntityType\n from ._typing import _O\n from ._typing import _RegistryType\n from .base import Mapped\n from .clsregistry import _class_resolver\n from .clsregistry import _ModNS\n from .decl_base import _ClassScanMapperConfig\n from .dependency import DependencyProcessor\n from .mapper import Mapper\n from .query import Query\n from .session import Session\n from .state import InstanceState\n from .strategies import LazyLoader\n from .util import AliasedClass\n from .util import AliasedInsp\n from ..sql._typing import _CoreAdapterProto\n from ..sql._typing import _EquivalentColumnMap\n from ..sql._typing import _InfoType\n from ..sql.annotation import _AnnotationDict\n from ..sql.annotation import SupportsAnnotations\n from ..sql.elements import BinaryExpression\n from ..sql.elements import BindParameter\n from ..sql.elements import ClauseElement\n from ..sql.schema import Table\n from ..sql.selectable import FromClause\n from ..util.typing import _AnnotationScanType\n from ..util.typing import RODescriptorReference\n\n_T = TypeVar(\"_T\", bound=Any)\n_T1 = TypeVar(\"_T1\", bound=Any)\n_T2 = TypeVar(\"_T2\", bound=Any)\n\n_PT = TypeVar(\"_PT\", bound=Any)\n\n_PT2 = TypeVar(\"_PT2\", bound=Any)\n\n\n_RelationshipArgumentType = Union[\n str,\n Type[_T],\n Callable[[], Type[_T]],\n \"Mapper[_T]\",\n \"AliasedClass[_T]\",\n Callable[[], \"Mapper[_T]\"],\n Callable[[], \"AliasedClass[_T]\"],\n]\n\n_LazyLoadArgumentType = Literal[\n \"select\",\n \"joined\",\n \"selectin\",\n \"subquery\",\n \"raise\",\n \"raise_on_sql\",\n \"noload\",\n \"immediate\",\n \"write_only\",\n \"dynamic\",\n True,\n False,\n None,\n]\n\n\n_RelationshipJoinConditionArgument = Union[\n str, _ColumnExpressionArgument[bool]\n]\n_RelationshipSecondaryArgument = Union[\n \"FromClause\", str, Callable[[], \"FromClause\"]\n]\n_ORMOrderByArgument = Union[\n Literal[False],\n str,\n _ColumnExpressionArgument[Any],\n Callable[[], _ColumnExpressionArgument[Any]],\n Callable[[], Iterable[_ColumnExpressionArgument[Any]]],\n Iterable[Union[str, _ColumnExpressionArgument[Any]]],\n]\nORMBackrefArgument = Union[str, Tuple[str, Dict[str, Any]]]\n\n_ORMColCollectionElement = Union[\n ColumnClause[Any], _HasClauseElement, roles.DMLColumnRole, \"Mapped[Any]\"\n]\n_ORMColCollectionArgument = Union[\n str,\n Sequence[_ORMColCollectionElement],\n Callable[[], Sequence[_ORMColCollectionElement]],\n Callable[[], _ORMColCollectionElement],\n 
_ORMColCollectionElement,\n]\n\n\n_CEA = TypeVar(\"_CEA\", bound=_ColumnExpressionArgument[Any])\n\n_CE = TypeVar(\"_CE\", bound=\"ColumnElement[Any]\")\n\n\n_ColumnPairIterable = Iterable[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n\n_ColumnPairs = Sequence[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n\n_MutableColumnPairs = List[Tuple[ColumnElement[Any], ColumnElement[Any]]]\n\n\ndef remote(expr: _CEA) -> _CEA:\n \"\"\"Annotate a portion of a primaryjoin expression\n with a 'remote' annotation.\n\n See the section :ref:`relationship_custom_foreign` for a\n description of use.\n\n .. seealso::\n\n :ref:`relationship_custom_foreign`\n\n :func:`.foreign`\n\n \"\"\"\n return _annotate_columns( # type: ignore\n coercions.expect(roles.ColumnArgumentRole, expr), {\"remote\": True}\n )\n\n\ndef foreign(expr: _CEA) -> _CEA:\n \"\"\"Annotate a portion of a primaryjoin expression\n with a 'foreign' annotation.\n\n See the section :ref:`relationship_custom_foreign` for a\n description of use.\n\n .. seealso::\n\n :ref:`relationship_custom_foreign`\n\n :func:`.remote`\n\n \"\"\"\n\n return _annotate_columns( # type: ignore\n coercions.expect(roles.ColumnArgumentRole, expr), {\"foreign\": True}\n )\n\n\[email protected]\nclass _RelationshipArg(Generic[_T1, _T2]):\n \"\"\"stores a user-defined parameter value that must be resolved and\n parsed later at mapper configuration time.\n\n \"\"\"\n\n __slots__ = \"name\", \"argument\", \"resolved\"\n name: str\n argument: _T1\n resolved: Optional[_T2]\n\n def _is_populated(self) -> bool:\n return self.argument is not None\n\n def _resolve_against_registry(\n self, clsregistry_resolver: Callable[[str, bool], _class_resolver]\n ) -> None:\n attr_value = self.argument\n\n if isinstance(attr_value, str):\n self.resolved = clsregistry_resolver(\n attr_value, self.name == \"secondary\"\n )()\n elif callable(attr_value) and not _is_mapped_class(attr_value):\n self.resolved = attr_value()\n else:\n self.resolved = attr_value\n\n\nclass _RelationshipArgs(NamedTuple):\n \"\"\"stores user-passed parameters that are resolved at mapper configuration\n time.\n\n \"\"\"\n\n secondary: _RelationshipArg[\n Optional[_RelationshipSecondaryArgument],\n Optional[FromClause],\n ]\n primaryjoin: _RelationshipArg[\n Optional[_RelationshipJoinConditionArgument],\n Optional[ColumnElement[Any]],\n ]\n secondaryjoin: _RelationshipArg[\n Optional[_RelationshipJoinConditionArgument],\n Optional[ColumnElement[Any]],\n ]\n order_by: _RelationshipArg[\n _ORMOrderByArgument,\n Union[Literal[None, False], Tuple[ColumnElement[Any], ...]],\n ]\n foreign_keys: _RelationshipArg[\n Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]\n ]\n remote_side: _RelationshipArg[\n Optional[_ORMColCollectionArgument], Set[ColumnElement[Any]]\n ]\n\n\[email protected]_logger\nclass RelationshipProperty(\n _IntrospectsAnnotations, StrategizedProperty[_T], log.Identified\n):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. 
seealso::\n\n :ref:`relationship_config_toplevel`\n\n \"\"\"\n\n strategy_wildcard_key = strategy_options._RELATIONSHIP_TOKEN\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n\n _links_to_entity = True\n _is_relationship = True\n\n _overlaps: Sequence[str]\n\n _lazy_strategy: LazyLoader\n\n _persistence_only = dict(\n passive_deletes=False,\n passive_updates=True,\n enable_typechecks=True,\n active_history=False,\n cascade_backrefs=False,\n )\n\n _dependency_processor: Optional[DependencyProcessor] = None\n\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n _join_condition: JoinCondition\n order_by: Union[Literal[False], Tuple[ColumnElement[Any], ...]]\n\n _user_defined_foreign_keys: Set[ColumnElement[Any]]\n _calculated_foreign_keys: Set[ColumnElement[Any]]\n\n remote_side: Set[ColumnElement[Any]]\n local_columns: Set[ColumnElement[Any]]\n\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: Optional[_ColumnPairs]\n\n local_remote_pairs: Optional[_ColumnPairs]\n\n direction: RelationshipDirection\n\n _init_args: _RelationshipArgs\n\n def __init__(\n self,\n argument: Optional[_RelationshipArgumentType[_T]] = None,\n secondary: Optional[_RelationshipSecondaryArgument] = None,\n *,\n uselist: Optional[bool] = None,\n collection_class: Optional[\n Union[Type[Collection[Any]], Callable[[], Collection[Any]]]\n ] = None,\n primaryjoin: Optional[_RelationshipJoinConditionArgument] = None,\n secondaryjoin: Optional[_RelationshipJoinConditionArgument] = None,\n back_populates: Optional[str] = None,\n order_by: _ORMOrderByArgument = False,\n backref: Optional[ORMBackrefArgument] = None,\n overlaps: Optional[str] = None,\n post_update: bool = False,\n cascade: str = \"save-update, merge\",\n viewonly: bool = False,\n attribute_options: Optional[_AttributeOptions] = None,\n lazy: _LazyLoadArgumentType = \"select\",\n passive_deletes: Union[Literal[\"all\"], bool] = False,\n passive_updates: bool = True,\n active_history: bool = False,\n enable_typechecks: bool = True,\n foreign_keys: Optional[_ORMColCollectionArgument] = None,\n remote_side: Optional[_ORMColCollectionArgument] = None,\n join_depth: Optional[int] = None,\n comparator_factory: Optional[\n Type[RelationshipProperty.Comparator[Any]]\n ] = None,\n single_parent: bool = False,\n innerjoin: bool = False,\n distinct_target_key: Optional[bool] = None,\n load_on_pending: bool = False,\n query_class: Optional[Type[Query[Any]]] = None,\n info: Optional[_InfoType] = None,\n omit_join: Literal[None, False] = None,\n sync_backref: Optional[bool] = None,\n doc: Optional[str] = None,\n bake_queries: Literal[True] = True,\n cascade_backrefs: Literal[False] = False,\n _local_remote_pairs: Optional[_ColumnPairs] = None,\n _legacy_inactive_history_style: bool = False,\n ):\n super().__init__(attribute_options=attribute_options)\n\n self.uselist = uselist\n self.argument = argument\n\n self._init_args = _RelationshipArgs(\n _RelationshipArg(\"secondary\", secondary, None),\n _RelationshipArg(\"primaryjoin\", primaryjoin, None),\n _RelationshipArg(\"secondaryjoin\", secondaryjoin, None),\n _RelationshipArg(\"order_by\", order_by, None),\n _RelationshipArg(\"foreign_keys\", foreign_keys, None),\n _RelationshipArg(\"remote_side\", remote_side, None),\n )\n\n self.post_update = post_update\n self.viewonly = viewonly\n if viewonly:\n self._warn_for_persistence_only_flags(\n passive_deletes=passive_deletes,\n passive_updates=passive_updates,\n 
enable_typechecks=enable_typechecks,\n active_history=active_history,\n cascade_backrefs=cascade_backrefs,\n )\n if viewonly and sync_backref:\n raise sa_exc.ArgumentError(\n \"sync_backref and viewonly cannot both be True\"\n )\n self.sync_backref = sync_backref\n self.lazy = lazy\n self.single_parent = single_parent\n self.collection_class = collection_class\n self.passive_deletes = passive_deletes\n\n if cascade_backrefs:\n raise sa_exc.ArgumentError(\n \"The 'cascade_backrefs' parameter passed to \"\n \"relationship() may only be set to False.\"\n )\n\n self.passive_updates = passive_updates\n self.enable_typechecks = enable_typechecks\n self.query_class = query_class\n self.innerjoin = innerjoin\n self.distinct_target_key = distinct_target_key\n self.doc = doc\n self.active_history = active_history\n self._legacy_inactive_history_style = _legacy_inactive_history_style\n\n self.join_depth = join_depth\n if omit_join:\n util.warn(\n \"setting omit_join to True is not supported; selectin \"\n \"loading of this relationship may not work correctly if this \"\n \"flag is set explicitly. omit_join optimization is \"\n \"automatically detected for conditions under which it is \"\n \"supported.\"\n )\n\n self.omit_join = omit_join\n self.local_remote_pairs = _local_remote_pairs\n self.load_on_pending = load_on_pending\n self.comparator_factory = (\n comparator_factory or RelationshipProperty.Comparator\n )\n util.set_creation_order(self)\n\n if info is not None:\n self.info.update(info)\n\n self.strategy_key = ((\"lazy\", self.lazy),)\n\n self._reverse_property: Set[RelationshipProperty[Any]] = set()\n\n if overlaps:\n self._overlaps = set(re.split(r\"\\s*,\\s*\", overlaps)) # type: ignore # noqa: E501\n else:\n self._overlaps = ()\n\n # mypy ignoring the @property setter\n self.cascade = cascade # type: ignore\n\n self.back_populates = back_populates\n\n if self.back_populates:\n if backref:\n raise sa_exc.ArgumentError(\n \"backref and back_populates keyword arguments \"\n \"are mutually exclusive\"\n )\n self.backref = None\n else:\n self.backref = backref\n\n def _warn_for_persistence_only_flags(self, **kw: Any) -> None:\n for k, v in kw.items():\n if v != self._persistence_only[k]:\n # we are warning here rather than warn deprecated as this is a\n # configuration mistake, and Python shows regular warnings more\n # aggressively than deprecation warnings by default. Unlike the\n # case of setting viewonly with cascade, the settings being\n # warned about here are not actively doing the wrong thing\n # against viewonly=True, so it is not as urgent to have these\n # raise an error.\n util.warn(\n \"Setting %s on relationship() while also \"\n \"setting viewonly=True does not make sense, as a \"\n \"viewonly=True relationship does not perform persistence \"\n \"operations. This configuration may raise an error \"\n \"in a future release.\" % (k,)\n )\n\n def instrument_class(self, mapper: Mapper[Any]) -> None:\n attributes.register_descriptor(\n mapper.class_,\n self.key,\n comparator=self.comparator_factory(self, mapper),\n parententity=mapper,\n doc=self.doc,\n )\n\n class Comparator(util.MemoizedSlots, PropComparator[_PT]):\n \"\"\"Produce boolean, comparison, and other operators for\n :class:`.RelationshipProperty` attributes.\n\n See the documentation for :class:`.PropComparator` for a brief\n overview of ORM level operator definition.\n\n .. 
seealso::\n\n :class:`.PropComparator`\n\n :class:`.ColumnProperty.Comparator`\n\n :class:`.ColumnOperators`\n\n :ref:`types_operators`\n\n :attr:`.TypeEngine.comparator_factory`\n\n \"\"\"\n\n __slots__ = (\n \"entity\",\n \"mapper\",\n \"property\",\n \"_of_type\",\n \"_extra_criteria\",\n )\n\n prop: RODescriptorReference[RelationshipProperty[_PT]]\n _of_type: Optional[_EntityType[_PT]]\n\n def __init__(\n self,\n prop: RelationshipProperty[_PT],\n parentmapper: _InternalEntityType[Any],\n adapt_to_entity: Optional[AliasedInsp[Any]] = None,\n of_type: Optional[_EntityType[_PT]] = None,\n extra_criteria: Tuple[ColumnElement[bool], ...] = (),\n ):\n \"\"\"Construction of :class:`.RelationshipProperty.Comparator`\n is internal to the ORM's attribute mechanics.\n\n \"\"\"\n self.prop = prop\n self._parententity = parentmapper\n self._adapt_to_entity = adapt_to_entity\n if of_type:\n self._of_type = of_type\n else:\n self._of_type = None\n self._extra_criteria = extra_criteria\n\n def adapt_to_entity(\n self, adapt_to_entity: AliasedInsp[Any]\n ) -> RelationshipProperty.Comparator[Any]:\n return self.__class__(\n self.prop,\n self._parententity,\n adapt_to_entity=adapt_to_entity,\n of_type=self._of_type,\n )\n\n entity: _InternalEntityType[_PT]\n \"\"\"The target entity referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is either a :class:`_orm.Mapper` or :class:`.AliasedInsp`\n object.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n\n mapper: Mapper[_PT]\n \"\"\"The target :class:`_orm.Mapper` referred to by this\n :class:`.RelationshipProperty.Comparator`.\n\n This is the \"target\" or \"remote\" side of the\n :func:`_orm.relationship`.\n\n \"\"\"\n\n def _memoized_attr_entity(self) -> _InternalEntityType[_PT]:\n if self._of_type:\n return inspect(self._of_type) # type: ignore\n else:\n return self.prop.entity\n\n def _memoized_attr_mapper(self) -> Mapper[_PT]:\n return self.entity.mapper\n\n def _source_selectable(self) -> FromClause:\n if self._adapt_to_entity:\n return self._adapt_to_entity.selectable\n else:\n return self.property.parent._with_polymorphic_selectable\n\n def __clause_element__(self) -> ColumnElement[bool]:\n adapt_from = self._source_selectable()\n if self._of_type:\n of_type_entity = inspect(self._of_type)\n else:\n of_type_entity = None\n\n (\n pj,\n sj,\n source,\n dest,\n secondary,\n target_adapter,\n ) = self.prop._create_joins(\n source_selectable=adapt_from,\n source_polymorphic=True,\n of_type_entity=of_type_entity,\n alias_secondary=True,\n extra_criteria=self._extra_criteria,\n )\n if sj is not None:\n return pj & sj\n else:\n return pj\n\n def of_type(self, class_: _EntityType[Any]) -> PropComparator[_PT]:\n r\"\"\"Redefine this object in terms of a polymorphic subclass.\n\n See :meth:`.PropComparator.of_type` for an example.\n\n\n \"\"\"\n return RelationshipProperty.Comparator(\n self.prop,\n self._parententity,\n adapt_to_entity=self._adapt_to_entity,\n of_type=class_,\n extra_criteria=self._extra_criteria,\n )\n\n def and_(\n self, *criteria: _ColumnExpressionArgument[bool]\n ) -> PropComparator[Any]:\n \"\"\"Add AND criteria.\n\n See :meth:`.PropComparator.and_` for an example.\n\n .. 
versionadded:: 1.4\n\n \"\"\"\n exprs = tuple(\n coercions.expect(roles.WhereHavingRole, clause)\n for clause in util.coerce_generator_arg(criteria)\n )\n\n return RelationshipProperty.Comparator(\n self.prop,\n self._parententity,\n adapt_to_entity=self._adapt_to_entity,\n of_type=self._of_type,\n extra_criteria=self._extra_criteria + exprs,\n )\n\n def in_(self, other: Any) -> NoReturn:\n \"\"\"Produce an IN clause - this is not implemented\n for :func:`_orm.relationship`-based attributes at this time.\n\n \"\"\"\n raise NotImplementedError(\n \"in_() not yet supported for \"\n \"relationships. For a simple \"\n \"many-to-one, use in_() against \"\n \"the set of foreign key values.\"\n )\n\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501\n \"\"\"Implement the ``==`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop == <some object>\n\n this will typically produce a\n clause such as::\n\n mytable.related_id == <some id>\n\n Where ``<some id>`` is the primary key of the given\n object.\n\n The ``==`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use :meth:`~.Relationship.Comparator.contains`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce a NOT EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction in [ONETOMANY, MANYTOMANY]:\n return ~self._criterion_exists()\n else:\n return _orm_annotate(\n self.property._optimized_compare(\n None, adapt_source=self.adapter\n )\n )\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection to an object or collection; \"\n \"use contains() to test for membership.\"\n )\n else:\n return _orm_annotate(\n self.property._optimized_compare(\n other, adapt_source=self.adapter\n )\n )\n\n def _criterion_exists(\n self,\n criterion: Optional[_ColumnExpressionArgument[bool]] = None,\n **kwargs: Any,\n ) -> Exists:\n where_criteria = (\n coercions.expect(roles.WhereHavingRole, criterion)\n if criterion is not None\n else None\n )\n\n if getattr(self, \"_of_type\", None):\n info: Optional[_InternalEntityType[Any]] = inspect(\n self._of_type\n )\n assert info is not None\n target_mapper, to_selectable, is_aliased_class = (\n info.mapper,\n info.selectable,\n info.is_aliased_class,\n )\n if self.property._is_self_referential and not is_aliased_class:\n to_selectable = to_selectable._anonymous_fromclause()\n\n single_crit = target_mapper._single_table_criterion\n if single_crit is not None:\n if where_criteria is not None:\n where_criteria = single_crit & where_criteria\n else:\n where_criteria = single_crit\n else:\n is_aliased_class = False\n to_selectable = None\n\n if self.adapter:\n source_selectable = self._source_selectable()\n else:\n 
source_selectable = None\n\n (\n pj,\n sj,\n source,\n dest,\n secondary,\n target_adapter,\n ) = self.property._create_joins(\n dest_selectable=to_selectable,\n source_selectable=source_selectable,\n )\n\n for k in kwargs:\n crit = getattr(self.property.mapper.class_, k) == kwargs[k]\n if where_criteria is None:\n where_criteria = crit\n else:\n where_criteria = where_criteria & crit\n\n # annotate the *local* side of the join condition, in the case\n # of pj + sj this is the full primaryjoin, in the case of just\n # pj its the local side of the primaryjoin.\n if sj is not None:\n j = _orm_annotate(pj) & sj\n else:\n j = _orm_annotate(pj, exclude=self.property.remote_side)\n\n if (\n where_criteria is not None\n and target_adapter\n and not is_aliased_class\n ):\n # limit this adapter to annotated only?\n where_criteria = target_adapter.traverse(where_criteria)\n\n # only have the \"joined left side\" of what we\n # return be subject to Query adaption. The right\n # side of it is used for an exists() subquery and\n # should not correlate or otherwise reach out\n # to anything in the enclosing query.\n if where_criteria is not None:\n where_criteria = where_criteria._annotate(\n {\"no_replacement_traverse\": True}\n )\n\n crit = j & sql.True_._ifnone(where_criteria)\n\n if secondary is not None:\n ex = (\n sql.exists(1)\n .where(crit)\n .select_from(dest, secondary)\n .correlate_except(dest, secondary)\n )\n else:\n ex = (\n sql.exists(1)\n .where(crit)\n .select_from(dest)\n .correlate_except(dest)\n )\n return ex\n\n def any(\n self,\n criterion: Optional[_ColumnExpressionArgument[bool]] = None,\n **kwargs: Any,\n ) -> ColumnElement[bool]:\n \"\"\"Produce an expression that tests a collection against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.any(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id\n AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.any` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.any` is particularly\n useful for testing for empty collections::\n\n session.query(MyClass).filter(\n ~MyClass.somereference.any()\n )\n\n will produce::\n\n SELECT * FROM my_table WHERE\n NOT (EXISTS (SELECT 1 FROM related WHERE\n related.my_id=my_table.id))\n\n :meth:`~.Relationship.Comparator.any` is only\n valid for collections, i.e. a :func:`_orm.relationship`\n that has ``uselist=True``. For scalar references,\n use :meth:`~.Relationship.Comparator.has`.\n\n \"\"\"\n if not self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'any()' not implemented for scalar \"\n \"attributes. 
Use has().\"\n )\n\n return self._criterion_exists(criterion, **kwargs)\n\n def has(\n self,\n criterion: Optional[_ColumnExpressionArgument[bool]] = None,\n **kwargs: Any,\n ) -> ColumnElement[bool]:\n \"\"\"Produce an expression that tests a scalar reference against\n particular criterion, using EXISTS.\n\n An expression like::\n\n session.query(MyClass).filter(\n MyClass.somereference.has(SomeRelated.x==2)\n )\n\n\n Will produce a query like::\n\n SELECT * FROM my_table WHERE\n EXISTS (SELECT 1 FROM related WHERE\n related.id==my_table.related_id AND related.x=2)\n\n Because :meth:`~.Relationship.Comparator.has` uses\n a correlated subquery, its performance is not nearly as\n good when compared against large target tables as that of\n using a join.\n\n :meth:`~.Relationship.Comparator.has` is only\n valid for scalar references, i.e. a :func:`_orm.relationship`\n that has ``uselist=False``. For collection references,\n use :meth:`~.Relationship.Comparator.any`.\n\n \"\"\"\n if self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"'has()' not implemented for collections. \" \"Use any().\"\n )\n return self._criterion_exists(criterion, **kwargs)\n\n def contains(\n self, other: _ColumnExpressionArgument[Any], **kwargs: Any\n ) -> ColumnElement[bool]:\n \"\"\"Return a simple expression that tests a collection for\n containment of a particular item.\n\n :meth:`~.Relationship.Comparator.contains` is\n only valid for a collection, i.e. a\n :func:`_orm.relationship` that implements\n one-to-many or many-to-many with ``uselist=True``.\n\n When used in a simple one-to-many context, an\n expression like::\n\n MyClass.contains(other)\n\n Produces a clause like::\n\n mytable.id == <some id>\n\n Where ``<some id>`` is the value of the foreign key\n attribute on ``other`` which refers to the primary\n key of its parent object. From this it follows that\n :meth:`~.Relationship.Comparator.contains` is\n very useful when used with simple one-to-many\n operations.\n\n For many-to-many operations, the behavior of\n :meth:`~.Relationship.Comparator.contains`\n has more caveats. The association table will be\n rendered in the statement, producing an \"implicit\"\n join, that is, includes multiple tables in the FROM\n clause which are equated in the WHERE clause::\n\n query(MyClass).filter(MyClass.contains(other))\n\n Produces a query like::\n\n SELECT * FROM my_table, my_association_table AS\n my_association_table_1 WHERE\n my_table.id = my_association_table_1.parent_id\n AND my_association_table_1.child_id = <some id>\n\n Where ``<some id>`` would be the primary key of\n ``other``. From the above, it is clear that\n :meth:`~.Relationship.Comparator.contains`\n will **not** work with many-to-many collections when\n used in queries that move beyond simple AND\n conjunctions, such as multiple\n :meth:`~.Relationship.Comparator.contains`\n expressions joined by OR. In such cases subqueries or\n explicit \"outer joins\" will need to be used instead.\n See :meth:`~.Relationship.Comparator.any` for\n a less-performant alternative using EXISTS, or refer\n to :meth:`_query.Query.outerjoin`\n as well as :ref:`orm_queryguide_joins`\n for more details on constructing outer joins.\n\n kwargs may be ignored by this operator but are required for API\n conformance.\n \"\"\"\n if not self.prop.uselist:\n raise sa_exc.InvalidRequestError(\n \"'contains' not implemented for scalar \"\n \"attributes. 
Use ==\"\n )\n\n clause = self.prop._optimized_compare(\n other, adapt_source=self.adapter\n )\n\n if self.prop.secondaryjoin is not None:\n clause.negation_clause = self.__negated_contains_or_equals(\n other\n )\n\n return clause\n\n def __negated_contains_or_equals(\n self, other: Any\n ) -> ColumnElement[bool]:\n if self.prop.direction == MANYTOONE:\n state = attributes.instance_state(other)\n\n def state_bindparam(\n local_col: ColumnElement[Any],\n state: InstanceState[Any],\n remote_col: ColumnElement[Any],\n ) -> BindParameter[Any]:\n dict_ = state.dict\n return sql.bindparam(\n local_col.key,\n type_=local_col.type,\n unique=True,\n callable_=self.prop._get_attr_w_warn_on_none(\n self.prop.mapper, state, dict_, remote_col\n ),\n )\n\n def adapt(col: _CE) -> _CE:\n if self.adapter:\n return self.adapter(col)\n else:\n return col\n\n if self.property._use_get:\n return sql.and_(\n *[\n sql.or_(\n adapt(x)\n != state_bindparam(adapt(x), state, y),\n adapt(x) == None,\n )\n for (x, y) in self.property.local_remote_pairs\n ]\n )\n\n criterion = sql.and_(\n *[\n x == y\n for (x, y) in zip(\n self.property.mapper.primary_key,\n self.property.mapper.primary_key_from_instance(other),\n )\n ]\n )\n\n return ~self._criterion_exists(criterion)\n\n def __ne__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501\n \"\"\"Implement the ``!=`` operator.\n\n In a many-to-one context, such as::\n\n MyClass.some_prop != <some object>\n\n This will typically produce a clause such as::\n\n mytable.related_id != <some id>\n\n Where ``<some id>`` is the primary key of the\n given object.\n\n The ``!=`` operator provides partial functionality for non-\n many-to-one comparisons:\n\n * Comparisons against collections are not supported.\n Use\n :meth:`~.Relationship.Comparator.contains`\n in conjunction with :func:`_expression.not_`.\n * Compared to a scalar one-to-many, will produce a\n clause that compares the target columns in the parent to\n the given target.\n * Compared to a scalar many-to-many, an alias\n of the association table will be rendered as\n well, forming a natural join that is part of the\n main body of the query. This will not work for\n queries that go beyond simple AND conjunctions of\n comparisons, such as those which use OR. 
Use\n explicit joins, outerjoins, or\n :meth:`~.Relationship.Comparator.has` in\n conjunction with :func:`_expression.not_` for\n more comprehensive non-many-to-one scalar\n membership tests.\n * Comparisons against ``None`` given in a one-to-many\n or many-to-many context produce an EXISTS clause.\n\n \"\"\"\n if other is None or isinstance(other, expression.Null):\n if self.property.direction == MANYTOONE:\n return _orm_annotate(\n ~self.property._optimized_compare(\n None, adapt_source=self.adapter\n )\n )\n\n else:\n return self._criterion_exists()\n elif self.property.uselist:\n raise sa_exc.InvalidRequestError(\n \"Can't compare a collection\"\n \" to an object or collection; use \"\n \"contains() to test for membership.\"\n )\n else:\n return _orm_annotate(self.__negated_contains_or_equals(other))\n\n def _memoized_attr_property(self) -> RelationshipProperty[_PT]:\n self.prop.parent._check_configure()\n return self.prop\n\n def _with_parent(\n self,\n instance: object,\n alias_secondary: bool = True,\n from_entity: Optional[_EntityType[Any]] = None,\n ) -> ColumnElement[bool]:\n assert instance is not None\n adapt_source: Optional[_CoreAdapterProto] = None\n if from_entity is not None:\n insp: Optional[_InternalEntityType[Any]] = inspect(from_entity)\n assert insp is not None\n if insp_is_aliased_class(insp):\n adapt_source = insp._adapter.adapt_clause\n return self._optimized_compare(\n instance,\n value_is_parent=True,\n adapt_source=adapt_source,\n alias_secondary=alias_secondary,\n )\n\n def _optimized_compare(\n self,\n state: Any,\n value_is_parent: bool = False,\n adapt_source: Optional[_CoreAdapterProto] = None,\n alias_secondary: bool = True,\n ) -> ColumnElement[bool]:\n if state is not None:\n try:\n state = inspect(state)\n except sa_exc.NoInspectionAvailable:\n state = None\n\n if state is None or not getattr(state, \"is_instance\", False):\n raise sa_exc.ArgumentError(\n \"Mapped instance expected for relationship \"\n \"comparison to object. 
Classes, queries and other \"\n \"SQL elements are not accepted in this context; for \"\n \"comparison with a subquery, \"\n \"use %s.has(**criteria).\" % self\n )\n reverse_direction = not value_is_parent\n\n if state is None:\n return self._lazy_none_clause(\n reverse_direction, adapt_source=adapt_source\n )\n\n if not reverse_direction:\n criterion, bind_to_col = (\n self._lazy_strategy._lazywhere,\n self._lazy_strategy._bind_to_col,\n )\n else:\n criterion, bind_to_col = (\n self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col,\n )\n\n if reverse_direction:\n mapper = self.mapper\n else:\n mapper = self.parent\n\n dict_ = attributes.instance_dict(state.obj())\n\n def visit_bindparam(bindparam: BindParameter[Any]) -> None:\n if bindparam._identifying_key in bind_to_col:\n bindparam.callable = self._get_attr_w_warn_on_none(\n mapper,\n state,\n dict_,\n bind_to_col[bindparam._identifying_key],\n )\n\n if self.secondary is not None and alias_secondary:\n criterion = ClauseAdapter(\n self.secondary._anonymous_fromclause()\n ).traverse(criterion)\n\n criterion = visitors.cloned_traverse(\n criterion, {}, {\"bindparam\": visit_bindparam}\n )\n\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def _get_attr_w_warn_on_none(\n self,\n mapper: Mapper[Any],\n state: InstanceState[Any],\n dict_: _InstanceDict,\n column: ColumnElement[Any],\n ) -> Callable[[], Any]:\n \"\"\"Create the callable that is used in a many-to-one expression.\n\n E.g.::\n\n u1 = s.query(User).get(5)\n\n expr = Address.user == u1\n\n Above, the SQL should be \"address.user_id = 5\". The callable\n returned by this method produces the value \"5\" based on the identity\n of ``u1``.\n\n \"\"\"\n\n # in this callable, we're trying to thread the needle through\n # a wide variety of scenarios, including:\n #\n # * the object hasn't been flushed yet and there's no value for\n # the attribute as of yet\n #\n # * the object hasn't been flushed yet but it has a user-defined\n # value\n #\n # * the object has a value but it's expired and not locally present\n #\n # * the object has a value but it's expired and not locally present,\n # and the object is also detached\n #\n # * The object hadn't been flushed yet, there was no value, but\n # later, the object has been expired and detached, and *now*\n # they're trying to evaluate it\n #\n # * the object had a value, but it was changed to a new value, and\n # then expired\n #\n # * the object had a value, but it was changed to a new value, and\n # then expired, then the object was detached\n #\n # * the object has a user-set value, but it's None and we don't do\n # the comparison correctly for that so warn\n #\n\n prop = mapper.get_property_by_column(column)\n\n # by invoking this method, InstanceState will track the last known\n # value for this key each time the attribute is to be expired.\n # this feature was added explicitly for use in this method.\n state._track_last_known_value(prop.key)\n\n lkv_fixed = state._last_known_values\n\n def _go() -> Any:\n assert lkv_fixed is not None\n last_known = to_return = lkv_fixed[prop.key]\n existing_is_available = (\n last_known is not LoaderCallableStatus.NO_VALUE\n )\n\n # we support that the value may have changed. 
so here we\n # try to get the most recent value including re-fetching.\n # only if we can't get a value now due to detachment do we return\n # the last known value\n current_value = mapper._get_state_attr_by_column(\n state,\n dict_,\n column,\n passive=PassiveFlag.PASSIVE_OFF\n if state.persistent\n else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK,\n )\n\n if current_value is LoaderCallableStatus.NEVER_SET:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object \"\n \"%s; no value has been set for this column\"\n % (column, state_str(state))\n )\n elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object \"\n \"%s; the object is detached and the value was \"\n \"expired\" % (column, state_str(state))\n )\n else:\n to_return = current_value\n if to_return is None:\n util.warn(\n \"Got None for value of column %s; this is unsupported \"\n \"for a relationship comparison and will not \"\n \"currently produce an IS comparison \"\n \"(but may in a future release)\" % column\n )\n return to_return\n\n return _go\n\n def _lazy_none_clause(\n self,\n reverse_direction: bool = False,\n adapt_source: Optional[_CoreAdapterProto] = None,\n ) -> ColumnElement[bool]:\n if not reverse_direction:\n criterion, bind_to_col = (\n self._lazy_strategy._lazywhere,\n self._lazy_strategy._bind_to_col,\n )\n else:\n criterion, bind_to_col = (\n self._lazy_strategy._rev_lazywhere,\n self._lazy_strategy._rev_bind_to_col,\n )\n\n criterion = adapt_criterion_to_null(criterion, bind_to_col)\n\n if adapt_source:\n criterion = adapt_source(criterion)\n return criterion\n\n def __str__(self) -> str:\n return str(self.parent.class_.__name__) + \".\" + self.key\n\n def merge(\n self,\n session: Session,\n source_state: InstanceState[Any],\n source_dict: _InstanceDict,\n dest_state: InstanceState[Any],\n dest_dict: _InstanceDict,\n load: bool,\n _recursive: Dict[Any, object],\n _resolve_conflict_map: Dict[_IdentityKeyType[Any], object],\n ) -> None:\n if load:\n for r in self._reverse_property:\n if (source_state, r) in _recursive:\n return\n\n if \"merge\" not in self._cascade:\n return\n\n if self.key not in source_dict:\n return\n\n if self.uselist:\n impl = source_state.get_impl(self.key)\n\n assert is_has_collection_adapter(impl)\n instances_iterable = impl.get_collection(source_state, source_dict)\n\n # if this is a CollectionAttributeImpl, then empty should\n # be False, otherwise \"self.key in source_dict\" should not be\n # True\n assert not instances_iterable.empty if impl.collection else True\n\n if load:\n # for a full merge, pre-load the destination collection,\n # so that individual _merge of each item pulls from identity\n # map for those already present.\n # also assumes CollectionAttributeImpl behavior of loading\n # \"old\" list in any case\n dest_state.get_impl(self.key).get(\n dest_state, dest_dict, passive=PassiveFlag.PASSIVE_MERGE\n )\n\n dest_list = []\n for current in instances_iterable:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[(current_state, self)] = True\n obj = session._merge(\n current_state,\n current_dict,\n load=load,\n _recursive=_recursive,\n _resolve_conflict_map=_resolve_conflict_map,\n )\n if obj is not None:\n dest_list.append(obj)\n\n if not load:\n coll = attributes.init_state_collection(\n dest_state, dest_dict, self.key\n 
)\n for c in dest_list:\n coll.append_without_event(c)\n else:\n dest_impl = dest_state.get_impl(self.key)\n assert is_has_collection_adapter(dest_impl)\n dest_impl.set(\n dest_state,\n dest_dict,\n dest_list,\n _adapt=False,\n passive=PassiveFlag.PASSIVE_MERGE,\n )\n else:\n current = source_dict[self.key]\n if current is not None:\n current_state = attributes.instance_state(current)\n current_dict = attributes.instance_dict(current)\n _recursive[(current_state, self)] = True\n obj = session._merge(\n current_state,\n current_dict,\n load=load,\n _recursive=_recursive,\n _resolve_conflict_map=_resolve_conflict_map,\n )\n else:\n obj = None\n\n if not load:\n dest_dict[self.key] = obj\n else:\n dest_state.get_impl(self.key).set(\n dest_state, dest_dict, obj, None\n )\n\n def _value_as_iterable(\n self,\n state: InstanceState[_O],\n dict_: _InstanceDict,\n key: str,\n passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,\n ) -> Sequence[Tuple[InstanceState[_O], _O]]:\n \"\"\"Return a list of tuples (state, obj) for the given\n key.\n\n returns an empty list if the value is None/empty/PASSIVE_NO_RESULT\n \"\"\"\n\n impl = state.manager[key].impl\n x = impl.get(state, dict_, passive=passive)\n if x is LoaderCallableStatus.PASSIVE_NO_RESULT or x is None:\n return []\n elif is_has_collection_adapter(impl):\n return [\n (attributes.instance_state(o), o)\n for o in impl.get_collection(state, dict_, x, passive=passive)\n ]\n else:\n return [(attributes.instance_state(x), x)]\n\n def cascade_iterator(\n self,\n type_: str,\n state: InstanceState[Any],\n dict_: _InstanceDict,\n visited_states: Set[InstanceState[Any]],\n halt_on: Optional[Callable[[InstanceState[Any]], bool]] = None,\n ) -> Iterator[Tuple[Any, Mapper[Any], InstanceState[Any], _InstanceDict]]:\n # assert type_ in self._cascade\n\n # only actively lazy load on the 'delete' cascade\n if type_ != \"delete\" or self.passive_deletes:\n passive = PassiveFlag.PASSIVE_NO_INITIALIZE\n else:\n passive = PassiveFlag.PASSIVE_OFF | PassiveFlag.NO_RAISE\n\n if type_ == \"save-update\":\n tuples = state.manager[self.key].impl.get_all_pending(state, dict_)\n else:\n tuples = self._value_as_iterable(\n state, dict_, self.key, passive=passive\n )\n\n skip_pending = (\n type_ == \"refresh-expire\" and \"delete-orphan\" not in self._cascade\n )\n\n for instance_state, c in tuples:\n if instance_state in visited_states:\n continue\n\n if c is None:\n # would like to emit a warning here, but\n # would not be consistent with collection.append(None)\n # current behavior of silently skipping.\n # see [ticket:2229]\n continue\n\n assert instance_state is not None\n instance_dict = attributes.instance_dict(c)\n\n if halt_on and halt_on(instance_state):\n continue\n\n if skip_pending and not instance_state.key:\n continue\n\n instance_mapper = instance_state.manager.mapper\n\n if not instance_mapper.isa(self.mapper.class_manager.mapper):\n raise AssertionError(\n \"Attribute '%s' on class '%s' \"\n \"doesn't handle objects \"\n \"of type '%s'\"\n % (self.key, self.parent.class_, c.__class__)\n )\n\n visited_states.add(instance_state)\n\n yield c, instance_mapper, instance_state, instance_dict\n\n @property\n def _effective_sync_backref(self) -> bool:\n if self.viewonly:\n return False\n else:\n return self.sync_backref is not False\n\n @staticmethod\n def _check_sync_backref(\n rel_a: RelationshipProperty[Any], rel_b: RelationshipProperty[Any]\n ) -> None:\n if rel_a.viewonly and rel_b.sync_backref:\n raise sa_exc.InvalidRequestError(\n \"Relationship %s cannot 
specify sync_backref=True since %s \"\n \"includes viewonly=True.\" % (rel_b, rel_a)\n )\n if (\n rel_a.viewonly\n and not rel_b.viewonly\n and rel_b.sync_backref is not False\n ):\n rel_b.sync_backref = False\n\n def _add_reverse_property(self, key: str) -> None:\n other = self.mapper.get_property(key, _configure_mappers=False)\n if not isinstance(other, RelationshipProperty):\n raise sa_exc.InvalidRequestError(\n \"back_populates on relationship '%s' refers to attribute '%s' \"\n \"that is not a relationship. The back_populates parameter \"\n \"should refer to the name of a relationship on the target \"\n \"class.\" % (self, other)\n )\n # viewonly and sync_backref cases\n # 1. self.viewonly==True and other.sync_backref==True -> error\n # 2. self.viewonly==True and other.viewonly==False and\n # other.sync_backref==None -> warn sync_backref=False, set to False\n self._check_sync_backref(self, other)\n # 3. other.viewonly==True and self.sync_backref==True -> error\n # 4. other.viewonly==True and self.viewonly==False and\n # self.sync_backref==None -> warn sync_backref=False, set to False\n self._check_sync_backref(other, self)\n\n self._reverse_property.add(other)\n other._reverse_property.add(self)\n\n other._setup_entity()\n\n if not other.mapper.common_parent(self.parent):\n raise sa_exc.ArgumentError(\n \"reverse_property %r on \"\n \"relationship %s references relationship %s, which \"\n \"does not reference mapper %s\"\n % (key, self, other, self.parent)\n )\n\n if (\n other._configure_started\n and self.direction in (ONETOMANY, MANYTOONE)\n and self.direction == other.direction\n ):\n raise sa_exc.ArgumentError(\n \"%s and back-reference %s are \"\n \"both of the same direction %r. Did you mean to \"\n \"set remote_side on the many-to-one side ?\"\n % (other, self, self.direction)\n )\n\n @util.memoized_property\n def entity(self) -> _InternalEntityType[_T]:\n \"\"\"Return the target mapped entity, which is an inspect() of the\n class or aliased class that is referred towards.\n\n \"\"\"\n self.parent._check_configure()\n return self.entity\n\n @util.memoized_property\n def mapper(self) -> Mapper[_T]:\n \"\"\"Return the targeted :class:`_orm.Mapper` for this\n :class:`.RelationshipProperty`.\n\n \"\"\"\n return self.entity.mapper\n\n def do_init(self) -> None:\n self._check_conflicts()\n self._process_dependent_arguments()\n self._setup_entity()\n self._setup_registry_dependencies()\n self._setup_join_conditions()\n self._check_cascade_settings(self._cascade)\n self._post_init()\n self._generate_backref()\n self._join_condition._warn_for_conflicting_sync_targets()\n super().do_init()\n self._lazy_strategy = cast(\n \"LazyLoader\", self._get_strategy(((\"lazy\", \"select\"),))\n )\n\n def _setup_registry_dependencies(self) -> None:\n self.parent.mapper.registry._set_depends_on(\n self.entity.mapper.registry\n )\n\n def _process_dependent_arguments(self) -> None:\n \"\"\"Convert incoming configuration arguments to their\n proper form.\n\n Callables are resolved, ORM annotations removed.\n\n \"\"\"\n\n # accept callables for other attributes which may require\n # deferred initialization. 
This technique is used\n # by declarative \"string configs\" and some recipes.\n init_args = self._init_args\n\n for attr in (\n \"order_by\",\n \"primaryjoin\",\n \"secondaryjoin\",\n \"secondary\",\n \"foreign_keys\",\n \"remote_side\",\n ):\n rel_arg = getattr(init_args, attr)\n\n rel_arg._resolve_against_registry(self._clsregistry_resolvers[1])\n\n # remove \"annotations\" which are present if mapped class\n # descriptors are used to create the join expression.\n for attr in \"primaryjoin\", \"secondaryjoin\":\n rel_arg = getattr(init_args, attr)\n val = rel_arg.resolved\n if val is not None:\n rel_arg.resolved = _orm_deannotate(\n coercions.expect(\n roles.ColumnArgumentRole, val, argname=attr\n )\n )\n\n secondary = init_args.secondary.resolved\n if secondary is not None and _is_mapped_class(secondary):\n raise sa_exc.ArgumentError(\n \"secondary argument %s passed to to relationship() %s must \"\n \"be a Table object or other FROM clause; can't send a mapped \"\n \"class directly as rows in 'secondary' are persisted \"\n \"independently of a class that is mapped \"\n \"to that same table.\" % (secondary, self)\n )\n\n # ensure expressions in self.order_by, foreign_keys,\n # remote_side are all columns, not strings.\n if (\n init_args.order_by.resolved is not False\n and init_args.order_by.resolved is not None\n ):\n self.order_by = tuple(\n coercions.expect(\n roles.ColumnArgumentRole, x, argname=\"order_by\"\n )\n for x in util.to_list(init_args.order_by.resolved)\n )\n else:\n self.order_by = False\n\n self._user_defined_foreign_keys = util.column_set(\n coercions.expect(\n roles.ColumnArgumentRole, x, argname=\"foreign_keys\"\n )\n for x in util.to_column_set(init_args.foreign_keys.resolved)\n )\n\n self.remote_side = util.column_set(\n coercions.expect(\n roles.ColumnArgumentRole, x, argname=\"remote_side\"\n )\n for x in util.to_column_set(init_args.remote_side.resolved)\n )\n\n def declarative_scan(\n self,\n decl_scan: _ClassScanMapperConfig,\n registry: _RegistryType,\n cls: Type[Any],\n originating_module: Optional[str],\n key: str,\n mapped_container: Optional[Type[Mapped[Any]]],\n annotation: Optional[_AnnotationScanType],\n extracted_mapped_annotation: Optional[_AnnotationScanType],\n is_dataclass_field: bool,\n ) -> None:\n argument = extracted_mapped_annotation\n\n if extracted_mapped_annotation is None:\n if self.argument is None:\n self._raise_for_required(key, cls)\n else:\n return\n\n argument = extracted_mapped_annotation\n assert originating_module is not None\n\n is_write_only = mapped_container is not None and issubclass(\n mapped_container, WriteOnlyMapped\n )\n if is_write_only:\n self.lazy = \"write_only\"\n self.strategy_key = ((\"lazy\", self.lazy),)\n\n is_dynamic = mapped_container is not None and issubclass(\n mapped_container, DynamicMapped\n )\n if is_dynamic:\n self.lazy = \"dynamic\"\n self.strategy_key = ((\"lazy\", self.lazy),)\n\n argument = de_optionalize_union_types(argument)\n\n if hasattr(argument, \"__origin__\"):\n arg_origin = argument.__origin__ # type: ignore\n if isinstance(arg_origin, type) and issubclass(\n arg_origin, abc.Collection\n ):\n if self.collection_class is None:\n if _py_inspect.isabstract(arg_origin):\n raise sa_exc.ArgumentError(\n f\"Collection annotation type {arg_origin} cannot \"\n \"be instantiated; please provide an explicit \"\n \"'collection_class' parameter \"\n \"(e.g. list, set, etc.) 
to the \"\n \"relationship() function to accompany this \"\n \"annotation\"\n )\n\n self.collection_class = arg_origin\n\n elif not is_write_only and not is_dynamic:\n self.uselist = False\n\n if argument.__args__: # type: ignore\n if isinstance(arg_origin, type) and issubclass(\n arg_origin, typing.Mapping # type: ignore\n ):\n type_arg = argument.__args__[-1] # type: ignore\n else:\n type_arg = argument.__args__[0] # type: ignore\n if hasattr(type_arg, \"__forward_arg__\"):\n str_argument = type_arg.__forward_arg__\n\n argument = resolve_name_to_real_class_name(\n str_argument, originating_module\n )\n else:\n argument = type_arg\n else:\n raise sa_exc.ArgumentError(\n f\"Generic alias {argument} requires an argument\"\n )\n elif hasattr(argument, \"__forward_arg__\"):\n argument = argument.__forward_arg__ # type: ignore\n\n argument = resolve_name_to_real_class_name(\n argument, originating_module\n )\n\n # we don't allow the collection class to be a\n # __forward_arg__ right now, so if we see a forward arg here,\n # we know there was no collection class either\n if (\n self.collection_class is None\n and not is_write_only\n and not is_dynamic\n ):\n self.uselist = False\n\n # ticket #8759\n # if a lead argument was given to relationship(), like\n # `relationship(\"B\")`, use that, don't replace it with class we\n # found in the annotation. The declarative_scan() method call here is\n # still useful, as we continue to derive collection type and do\n # checking of the annotation in any case.\n if self.argument is None:\n self.argument = cast(\"_RelationshipArgumentType[_T]\", argument)\n\n @util.preload_module(\"sqlalchemy.orm.mapper\")\n def _setup_entity(self, __argument: Any = None) -> None:\n if \"entity\" in self.__dict__:\n return\n\n mapperlib = util.preloaded.orm_mapper\n\n if __argument:\n argument = __argument\n else:\n argument = self.argument\n\n resolved_argument: _ExternalEntityType[Any]\n\n if isinstance(argument, str):\n # we might want to cleanup clsregistry API to make this\n # more straightforward\n resolved_argument = cast(\n \"_ExternalEntityType[Any]\",\n self._clsregistry_resolve_name(argument)(),\n )\n elif callable(argument) and not isinstance(\n argument, (type, mapperlib.Mapper)\n ):\n resolved_argument = argument()\n else:\n resolved_argument = argument\n\n entity: _InternalEntityType[Any]\n\n if isinstance(resolved_argument, type):\n entity = class_mapper(resolved_argument, configure=False)\n else:\n try:\n entity = inspect(resolved_argument)\n except sa_exc.NoInspectionAvailable:\n entity = None # type: ignore\n\n if not hasattr(entity, \"mapper\"):\n raise sa_exc.ArgumentError(\n \"relationship '%s' expects \"\n \"a class or a mapper argument (received: %s)\"\n % (self.key, type(resolved_argument))\n )\n\n self.entity = entity # type: ignore\n self.target = self.entity.persist_selectable\n\n def _setup_join_conditions(self) -> None:\n self._join_condition = jc = JoinCondition(\n parent_persist_selectable=self.parent.persist_selectable,\n child_persist_selectable=self.entity.persist_selectable,\n parent_local_selectable=self.parent.local_table,\n child_local_selectable=self.entity.local_table,\n primaryjoin=self._init_args.primaryjoin.resolved,\n secondary=self._init_args.secondary.resolved,\n secondaryjoin=self._init_args.secondaryjoin.resolved,\n parent_equivalents=self.parent._equivalent_columns,\n child_equivalents=self.mapper._equivalent_columns,\n consider_as_foreign_keys=self._user_defined_foreign_keys,\n local_remote_pairs=self.local_remote_pairs,\n 
remote_side=self.remote_side,\n self_referential=self._is_self_referential,\n prop=self,\n support_sync=not self.viewonly,\n can_be_synced_fn=self._columns_are_mapped,\n )\n self.primaryjoin = jc.primaryjoin\n self.secondaryjoin = jc.secondaryjoin\n self.secondary = jc.secondary\n self.direction = jc.direction\n self.local_remote_pairs = jc.local_remote_pairs\n self.remote_side = jc.remote_columns\n self.local_columns = jc.local_columns\n self.synchronize_pairs = jc.synchronize_pairs\n self._calculated_foreign_keys = jc.foreign_key_columns\n self.secondary_synchronize_pairs = jc.secondary_synchronize_pairs\n\n @property\n def _clsregistry_resolve_arg(\n self,\n ) -> Callable[[str, bool], _class_resolver]:\n return self._clsregistry_resolvers[1]\n\n @property\n def _clsregistry_resolve_name(\n self,\n ) -> Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]]:\n return self._clsregistry_resolvers[0]\n\n @util.memoized_property\n @util.preload_module(\"sqlalchemy.orm.clsregistry\")\n def _clsregistry_resolvers(\n self,\n ) -> Tuple[\n Callable[[str], Callable[[], Union[Type[Any], Table, _ModNS]]],\n Callable[[str, bool], _class_resolver],\n ]:\n _resolver = util.preloaded.orm_clsregistry._resolver\n\n return _resolver(self.parent.class_, self)\n\n def _check_conflicts(self) -> None:\n \"\"\"Test that this relationship is legal, warn about\n inheritance conflicts.\"\"\"\n if self.parent.non_primary and not class_mapper(\n self.parent.class_, configure=False\n ).has_property(self.key):\n raise sa_exc.ArgumentError(\n \"Attempting to assign a new \"\n \"relationship '%s' to a non-primary mapper on \"\n \"class '%s'. New relationships can only be added \"\n \"to the primary mapper, i.e. the very first mapper \"\n \"created for class '%s' \"\n % (\n self.key,\n self.parent.class_.__name__,\n self.parent.class_.__name__,\n )\n )\n\n @property\n def cascade(self) -> CascadeOptions:\n \"\"\"Return the current cascade setting for this\n :class:`.RelationshipProperty`.\n \"\"\"\n return self._cascade\n\n @cascade.setter\n def cascade(self, cascade: Union[str, CascadeOptions]) -> None:\n self._set_cascade(cascade)\n\n def _set_cascade(self, cascade_arg: Union[str, CascadeOptions]) -> None:\n cascade = CascadeOptions(cascade_arg)\n\n if self.viewonly:\n cascade = CascadeOptions(\n cascade.intersection(CascadeOptions._viewonly_cascades)\n )\n\n if \"mapper\" in self.__dict__:\n self._check_cascade_settings(cascade)\n self._cascade = cascade\n\n if self._dependency_processor:\n self._dependency_processor.cascade = cascade\n\n def _check_cascade_settings(self, cascade: CascadeOptions) -> None:\n if (\n cascade.delete_orphan\n and not self.single_parent\n and (self.direction is MANYTOMANY or self.direction is MANYTOONE)\n ):\n raise sa_exc.ArgumentError(\n \"For %(direction)s relationship %(rel)s, delete-orphan \"\n \"cascade is normally \"\n 'configured only on the \"one\" side of a one-to-many '\n \"relationship, \"\n 'and not on the \"many\" side of a many-to-one or many-to-many '\n \"relationship. 
\"\n \"To force this relationship to allow a particular \"\n '\"%(relatedcls)s\" object to be referred towards by only '\n 'a single \"%(clsname)s\" object at a time via the '\n \"%(rel)s relationship, which \"\n \"would allow \"\n \"delete-orphan cascade to take place in this direction, set \"\n \"the single_parent=True flag.\"\n % {\n \"rel\": self,\n \"direction\": \"many-to-one\"\n if self.direction is MANYTOONE\n else \"many-to-many\",\n \"clsname\": self.parent.class_.__name__,\n \"relatedcls\": self.mapper.class_.__name__,\n },\n code=\"bbf0\",\n )\n\n if self.passive_deletes == \"all\" and (\n \"delete\" in cascade or \"delete-orphan\" in cascade\n ):\n raise sa_exc.ArgumentError(\n \"On %s, can't set passive_deletes='all' in conjunction \"\n \"with 'delete' or 'delete-orphan' cascade\" % self\n )\n\n if cascade.delete_orphan:\n self.mapper.primary_mapper()._delete_orphans.append(\n (self.key, self.parent.class_)\n )\n\n def _persists_for(self, mapper: Mapper[Any]) -> bool:\n \"\"\"Return True if this property will persist values on behalf\n of the given mapper.\n\n \"\"\"\n\n return (\n self.key in mapper.relationships\n and mapper.relationships[self.key] is self\n )\n\n def _columns_are_mapped(self, *cols: ColumnElement[Any]) -> bool:\n \"\"\"Return True if all columns in the given collection are\n mapped by the tables referenced by this :class:`.RelationshipProperty`.\n\n \"\"\"\n\n secondary = self._init_args.secondary.resolved\n for c in cols:\n if secondary is not None and secondary.c.contains_column(c):\n continue\n if not self.parent.persist_selectable.c.contains_column(\n c\n ) and not self.target.c.contains_column(c):\n return False\n return True\n\n def _generate_backref(self) -> None:\n \"\"\"Interpret the 'backref' instruction to create a\n :func:`_orm.relationship` complementary to this one.\"\"\"\n\n if self.parent.non_primary:\n return\n if self.backref is not None and not self.back_populates:\n kwargs: Dict[str, Any]\n if isinstance(self.backref, str):\n backref_key, kwargs = self.backref, {}\n else:\n backref_key, kwargs = self.backref\n mapper = self.mapper.primary_mapper()\n\n if not mapper.concrete:\n check = set(mapper.iterate_to_root()).union(\n mapper.self_and_descendants\n )\n for m in check:\n if m.has_property(backref_key) and not m.concrete:\n raise sa_exc.ArgumentError(\n \"Error creating backref \"\n \"'%s' on relationship '%s': property of that \"\n \"name exists on mapper '%s'\"\n % (backref_key, self, m)\n )\n\n # determine primaryjoin/secondaryjoin for the\n # backref. Use the one we had, so that\n # a custom join doesn't have to be specified in\n # both directions.\n if self.secondary is not None:\n # for many to many, just switch primaryjoin/\n # secondaryjoin. 
use the annotated\n # pj/sj on the _join_condition.\n pj = kwargs.pop(\n \"primaryjoin\",\n self._join_condition.secondaryjoin_minus_local,\n )\n sj = kwargs.pop(\n \"secondaryjoin\",\n self._join_condition.primaryjoin_minus_local,\n )\n else:\n pj = kwargs.pop(\n \"primaryjoin\",\n self._join_condition.primaryjoin_reverse_remote,\n )\n sj = kwargs.pop(\"secondaryjoin\", None)\n if sj:\n raise sa_exc.InvalidRequestError(\n \"Can't assign 'secondaryjoin' on a backref \"\n \"against a non-secondary relationship.\"\n )\n\n foreign_keys = kwargs.pop(\n \"foreign_keys\", self._user_defined_foreign_keys\n )\n parent = self.parent.primary_mapper()\n kwargs.setdefault(\"viewonly\", self.viewonly)\n kwargs.setdefault(\"post_update\", self.post_update)\n kwargs.setdefault(\"passive_updates\", self.passive_updates)\n kwargs.setdefault(\"sync_backref\", self.sync_backref)\n self.back_populates = backref_key\n relationship = RelationshipProperty(\n parent,\n self.secondary,\n primaryjoin=pj,\n secondaryjoin=sj,\n foreign_keys=foreign_keys,\n back_populates=self.key,\n **kwargs,\n )\n mapper._configure_property(\n backref_key, relationship, warn_for_existing=True\n )\n\n if self.back_populates:\n self._add_reverse_property(self.back_populates)\n\n @util.preload_module(\"sqlalchemy.orm.dependency\")\n def _post_init(self) -> None:\n dependency = util.preloaded.orm_dependency\n\n if self.uselist is None:\n self.uselist = self.direction is not MANYTOONE\n if not self.viewonly:\n self._dependency_processor = ( # type: ignore\n dependency.DependencyProcessor.from_relationship\n )(self)\n\n @util.memoized_property\n def _use_get(self) -> bool:\n \"\"\"memoize the 'use_get' attribute of this RelationshipLoader's\n lazyloader.\"\"\"\n\n strategy = self._lazy_strategy\n return strategy.use_get\n\n @util.memoized_property\n def _is_self_referential(self) -> bool:\n return self.mapper.common_parent(self.parent)\n\n def _create_joins(\n self,\n source_polymorphic: bool = False,\n source_selectable: Optional[FromClause] = None,\n dest_selectable: Optional[FromClause] = None,\n of_type_entity: Optional[_InternalEntityType[Any]] = None,\n alias_secondary: bool = False,\n extra_criteria: Tuple[ColumnElement[bool], ...] 
= (),\n ) -> Tuple[\n ColumnElement[bool],\n Optional[ColumnElement[bool]],\n FromClause,\n FromClause,\n Optional[FromClause],\n Optional[ClauseAdapter],\n ]:\n aliased = False\n\n if alias_secondary and self.secondary is not None:\n aliased = True\n\n if source_selectable is None:\n if source_polymorphic and self.parent.with_polymorphic:\n source_selectable = self.parent._with_polymorphic_selectable\n\n if of_type_entity:\n dest_mapper = of_type_entity.mapper\n if dest_selectable is None:\n dest_selectable = of_type_entity.selectable\n aliased = True\n else:\n dest_mapper = self.mapper\n\n if dest_selectable is None:\n dest_selectable = self.entity.selectable\n if self.mapper.with_polymorphic:\n aliased = True\n\n if self._is_self_referential and source_selectable is None:\n dest_selectable = dest_selectable._anonymous_fromclause()\n aliased = True\n elif (\n dest_selectable is not self.mapper._with_polymorphic_selectable\n or self.mapper.with_polymorphic\n ):\n aliased = True\n\n single_crit = dest_mapper._single_table_criterion\n aliased = aliased or (\n source_selectable is not None\n and (\n source_selectable\n is not self.parent._with_polymorphic_selectable\n or source_selectable._is_subquery\n )\n )\n\n (\n primaryjoin,\n secondaryjoin,\n secondary,\n target_adapter,\n dest_selectable,\n ) = self._join_condition.join_targets(\n source_selectable,\n dest_selectable,\n aliased,\n single_crit,\n extra_criteria,\n )\n if source_selectable is None:\n source_selectable = self.parent.local_table\n if dest_selectable is None:\n dest_selectable = self.entity.local_table\n return (\n primaryjoin,\n secondaryjoin,\n source_selectable,\n dest_selectable,\n secondary,\n target_adapter,\n )\n\n\ndef _annotate_columns(element: _CE, annotations: _AnnotationDict) -> _CE:\n def clone(elem: _CE) -> _CE:\n if isinstance(elem, expression.ColumnClause):\n elem = elem._annotate(annotations.copy()) # type: ignore\n elem._copy_internals(clone=clone)\n return elem\n\n if element is not None:\n element = clone(element)\n clone = None # type: ignore # remove gc cycles\n return element\n\n\nclass JoinCondition:\n primaryjoin_initial: Optional[ColumnElement[bool]]\n primaryjoin: ColumnElement[bool]\n secondaryjoin: Optional[ColumnElement[bool]]\n secondary: Optional[FromClause]\n prop: RelationshipProperty[Any]\n\n synchronize_pairs: _ColumnPairs\n secondary_synchronize_pairs: _ColumnPairs\n direction: RelationshipDirection\n\n parent_persist_selectable: FromClause\n child_persist_selectable: FromClause\n parent_local_selectable: FromClause\n child_local_selectable: FromClause\n\n _local_remote_pairs: Optional[_ColumnPairs]\n\n def __init__(\n self,\n parent_persist_selectable: FromClause,\n child_persist_selectable: FromClause,\n parent_local_selectable: FromClause,\n child_local_selectable: FromClause,\n *,\n primaryjoin: Optional[ColumnElement[bool]] = None,\n secondary: Optional[FromClause] = None,\n secondaryjoin: Optional[ColumnElement[bool]] = None,\n parent_equivalents: Optional[_EquivalentColumnMap] = None,\n child_equivalents: Optional[_EquivalentColumnMap] = None,\n consider_as_foreign_keys: Any = None,\n local_remote_pairs: Optional[_ColumnPairs] = None,\n remote_side: Any = None,\n self_referential: Any = False,\n prop: RelationshipProperty[Any],\n support_sync: bool = True,\n can_be_synced_fn: Callable[..., bool] = lambda *c: True,\n ):\n self.parent_persist_selectable = parent_persist_selectable\n self.parent_local_selectable = parent_local_selectable\n self.child_persist_selectable = 
child_persist_selectable\n self.child_local_selectable = child_local_selectable\n self.parent_equivalents = parent_equivalents\n self.child_equivalents = child_equivalents\n self.primaryjoin_initial = primaryjoin\n self.secondaryjoin = secondaryjoin\n self.secondary = secondary\n self.consider_as_foreign_keys = consider_as_foreign_keys\n self._local_remote_pairs = local_remote_pairs\n self._remote_side = remote_side\n self.prop = prop\n self.self_referential = self_referential\n self.support_sync = support_sync\n self.can_be_synced_fn = can_be_synced_fn\n\n self._determine_joins()\n assert self.primaryjoin is not None\n\n self._sanitize_joins()\n self._annotate_fks()\n self._annotate_remote()\n self._annotate_local()\n self._annotate_parentmapper()\n self._setup_pairs()\n self._check_foreign_cols(self.primaryjoin, True)\n if self.secondaryjoin is not None:\n self._check_foreign_cols(self.secondaryjoin, False)\n self._determine_direction()\n self._check_remote_side()\n self._log_joins()\n\n def _log_joins(self) -> None:\n log = self.prop.logger\n log.info(\"%s setup primary join %s\", self.prop, self.primaryjoin)\n log.info(\"%s setup secondary join %s\", self.prop, self.secondaryjoin)\n log.info(\n \"%s synchronize pairs [%s]\",\n self.prop,\n \",\".join(\n \"(%s => %s)\" % (l, r) for (l, r) in self.synchronize_pairs\n ),\n )\n log.info(\n \"%s secondary synchronize pairs [%s]\",\n self.prop,\n \",\".join(\n \"(%s => %s)\" % (l, r)\n for (l, r) in self.secondary_synchronize_pairs or []\n ),\n )\n log.info(\n \"%s local/remote pairs [%s]\",\n self.prop,\n \",\".join(\n \"(%s / %s)\" % (l, r) for (l, r) in self.local_remote_pairs\n ),\n )\n log.info(\n \"%s remote columns [%s]\",\n self.prop,\n \",\".join(\"%s\" % col for col in self.remote_columns),\n )\n log.info(\n \"%s local columns [%s]\",\n self.prop,\n \",\".join(\"%s\" % col for col in self.local_columns),\n )\n log.info(\"%s relationship direction %s\", self.prop, self.direction)\n\n def _sanitize_joins(self) -> None:\n \"\"\"remove the parententity annotation from our join conditions which\n can leak in here based on some declarative patterns and maybe others.\n\n \"parentmapper\" is relied upon both by the ORM evaluator as well as\n the use case in _join_fixture_inh_selfref_w_entity\n that relies upon it being present, see :ticket:`3364`.\n\n \"\"\"\n\n self.primaryjoin = _deep_deannotate(\n self.primaryjoin, values=(\"parententity\", \"proxy_key\")\n )\n if self.secondaryjoin is not None:\n self.secondaryjoin = _deep_deannotate(\n self.secondaryjoin, values=(\"parententity\", \"proxy_key\")\n )\n\n def _determine_joins(self) -> None:\n \"\"\"Determine the 'primaryjoin' and 'secondaryjoin' attributes,\n if not passed to the constructor already.\n\n This is based on analysis of the foreign key relationships\n between the parent and target mapped selectables.\n\n \"\"\"\n if self.secondaryjoin is not None and self.secondary is None:\n raise sa_exc.ArgumentError(\n \"Property %s specified with secondary \"\n \"join condition but \"\n \"no secondary argument\" % self.prop\n )\n\n # find a join between the given mapper's mapped table and\n # the given table. 
will try the mapper's local table first\n # for more specificity, then if not found will try the more\n # general mapped table, which in the case of inheritance is\n # a join.\n try:\n consider_as_foreign_keys = self.consider_as_foreign_keys or None\n if self.secondary is not None:\n if self.secondaryjoin is None:\n self.secondaryjoin = join_condition(\n self.child_persist_selectable,\n self.secondary,\n a_subset=self.child_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(\n self.parent_persist_selectable,\n self.secondary,\n a_subset=self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n else:\n self.primaryjoin = self.primaryjoin_initial\n else:\n if self.primaryjoin_initial is None:\n self.primaryjoin = join_condition(\n self.parent_persist_selectable,\n self.child_persist_selectable,\n a_subset=self.parent_local_selectable,\n consider_as_foreign_keys=consider_as_foreign_keys,\n )\n else:\n self.primaryjoin = self.primaryjoin_initial\n except sa_exc.NoForeignKeysError as nfe:\n if self.secondary is not None:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are no foreign keys \"\n \"linking these tables via secondary table '%s'. \"\n \"Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or \"\n \"specify 'primaryjoin' and 'secondaryjoin' \"\n \"expressions.\" % (self.prop, self.secondary)\n ) from nfe\n else:\n raise sa_exc.NoForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are no foreign keys \"\n \"linking these tables. \"\n \"Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or \"\n \"specify a 'primaryjoin' expression.\" % self.prop\n ) from nfe\n except sa_exc.AmbiguousForeignKeysError as afe:\n if self.secondary is not None:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are multiple foreign key \"\n \"paths linking the tables via secondary table '%s'. \"\n \"Specify the 'foreign_keys' \"\n \"argument, providing a list of those columns which \"\n \"should be counted as containing a foreign key \"\n \"reference from the secondary table to each of the \"\n \"parent and child tables.\" % (self.prop, self.secondary)\n ) from afe\n else:\n raise sa_exc.AmbiguousForeignKeysError(\n \"Could not determine join \"\n \"condition between parent/child tables on \"\n \"relationship %s - there are multiple foreign key \"\n \"paths linking the tables. 
Specify the \"\n \"'foreign_keys' argument, providing a list of those \"\n \"columns which should be counted as containing a \"\n \"foreign key reference to the parent table.\" % self.prop\n ) from afe\n\n @property\n def primaryjoin_minus_local(self) -> ColumnElement[bool]:\n return _deep_deannotate(self.primaryjoin, values=(\"local\", \"remote\"))\n\n @property\n def secondaryjoin_minus_local(self) -> ColumnElement[bool]:\n assert self.secondaryjoin is not None\n return _deep_deannotate(self.secondaryjoin, values=(\"local\", \"remote\"))\n\n @util.memoized_property\n def primaryjoin_reverse_remote(self) -> ColumnElement[bool]:\n \"\"\"Return the primaryjoin condition suitable for the\n \"reverse\" direction.\n\n If the primaryjoin was delivered here with pre-existing\n \"remote\" annotations, the local/remote annotations\n are reversed. Otherwise, the local/remote annotations\n are removed.\n\n \"\"\"\n if self._has_remote_annotations:\n\n def replace(element: _CE, **kw: Any) -> Optional[_CE]:\n if \"remote\" in element._annotations:\n v = dict(element._annotations)\n del v[\"remote\"]\n v[\"local\"] = True\n return element._with_annotations(v)\n elif \"local\" in element._annotations:\n v = dict(element._annotations)\n del v[\"local\"]\n v[\"remote\"] = True\n return element._with_annotations(v)\n\n return None\n\n return visitors.replacement_traverse(self.primaryjoin, {}, replace)\n else:\n if self._has_foreign_annotations:\n # TODO: coverage\n return _deep_deannotate(\n self.primaryjoin, values=(\"local\", \"remote\")\n )\n else:\n return _deep_deannotate(self.primaryjoin)\n\n def _has_annotation(self, clause: ClauseElement, annotation: str) -> bool:\n for col in visitors.iterate(clause, {}):\n if annotation in col._annotations:\n return True\n else:\n return False\n\n @util.memoized_property\n def _has_foreign_annotations(self) -> bool:\n return self._has_annotation(self.primaryjoin, \"foreign\")\n\n @util.memoized_property\n def _has_remote_annotations(self) -> bool:\n return self._has_annotation(self.primaryjoin, \"remote\")\n\n def _annotate_fks(self) -> None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'foreign' annotations marking columns\n considered as foreign.\n\n \"\"\"\n if self._has_foreign_annotations:\n return\n\n if self.consider_as_foreign_keys:\n self._annotate_from_fk_list()\n else:\n self._annotate_present_fks()\n\n def _annotate_from_fk_list(self) -> None:\n def check_fk(element: _CE, **kw: Any) -> Optional[_CE]:\n if element in self.consider_as_foreign_keys:\n return element._annotate({\"foreign\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, check_fk\n )\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.replacement_traverse(\n self.secondaryjoin, {}, check_fk\n )\n\n def _annotate_present_fks(self) -> None:\n if self.secondary is not None:\n secondarycols = util.column_set(self.secondary.c)\n else:\n secondarycols = set()\n\n def is_foreign(\n a: ColumnElement[Any], b: ColumnElement[Any]\n ) -> Optional[ColumnElement[Any]]:\n if isinstance(a, schema.Column) and isinstance(b, schema.Column):\n if a.references(b):\n return a\n elif b.references(a):\n return b\n\n if secondarycols:\n if a in secondarycols and b not in secondarycols:\n return a\n elif b in secondarycols and a not in secondarycols:\n return b\n\n return None\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n if not isinstance(\n binary.left, sql.ColumnElement\n ) or not 
isinstance(binary.right, sql.ColumnElement):\n return\n\n if (\n \"foreign\" not in binary.left._annotations\n and \"foreign\" not in binary.right._annotations\n ):\n col = is_foreign(binary.left, binary.right)\n if col is not None:\n if col.compare(binary.left):\n binary.left = binary.left._annotate({\"foreign\": True})\n elif col.compare(binary.right):\n binary.right = binary.right._annotate(\n {\"foreign\": True}\n )\n\n self.primaryjoin = visitors.cloned_traverse(\n self.primaryjoin, {}, {\"binary\": visit_binary}\n )\n if self.secondaryjoin is not None:\n self.secondaryjoin = visitors.cloned_traverse(\n self.secondaryjoin, {}, {\"binary\": visit_binary}\n )\n\n def _refers_to_parent_table(self) -> bool:\n \"\"\"Return True if the join condition contains column\n comparisons where both columns are in both tables.\n\n \"\"\"\n pt = self.parent_persist_selectable\n mt = self.child_persist_selectable\n result = False\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n nonlocal result\n c, f = binary.left, binary.right\n if (\n isinstance(c, expression.ColumnClause)\n and isinstance(f, expression.ColumnClause)\n and pt.is_derived_from(c.table)\n and pt.is_derived_from(f.table)\n and mt.is_derived_from(c.table)\n and mt.is_derived_from(f.table)\n ):\n result = True\n\n visitors.traverse(self.primaryjoin, {}, {\"binary\": visit_binary})\n return result\n\n def _tables_overlap(self) -> bool:\n \"\"\"Return True if parent/child tables have some overlap.\"\"\"\n\n return selectables_overlap(\n self.parent_persist_selectable, self.child_persist_selectable\n )\n\n def _annotate_remote(self) -> None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'remote' annotations marking columns\n considered as part of the 'remote' side.\n\n \"\"\"\n if self._has_remote_annotations:\n return\n\n if self.secondary is not None:\n self._annotate_remote_secondary()\n elif self._local_remote_pairs or self._remote_side:\n self._annotate_remote_from_args()\n elif self._refers_to_parent_table():\n self._annotate_selfref(\n lambda col: \"foreign\" in col._annotations, False\n )\n elif self._tables_overlap():\n self._annotate_remote_with_overlap()\n else:\n self._annotate_remote_distinct_selectables()\n\n def _annotate_remote_secondary(self) -> None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when 'secondary' is present.\n\n \"\"\"\n\n assert self.secondary is not None\n fixed_secondary = self.secondary\n\n def repl(element: _CE, **kw: Any) -> Optional[_CE]:\n if fixed_secondary.c.contains_column(element):\n return element._annotate({\"remote\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, repl\n )\n\n assert self.secondaryjoin is not None\n self.secondaryjoin = visitors.replacement_traverse(\n self.secondaryjoin, {}, repl\n )\n\n def _annotate_selfref(\n self, fn: Callable[[ColumnElement[Any]], bool], remote_side_given: bool\n ) -> None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the relationship is detected as self-referential.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n equated = binary.left.compare(binary.right)\n if isinstance(binary.left, expression.ColumnClause) and isinstance(\n binary.right, expression.ColumnClause\n ):\n # assume one to many - FKs are \"remote\"\n if fn(binary.left):\n binary.left = binary.left._annotate({\"remote\": True})\n if fn(binary.right) and not equated:\n binary.right = binary.right._annotate({\"remote\": True})\n elif not 
remote_side_given:\n self._warn_non_column_elements()\n\n self.primaryjoin = visitors.cloned_traverse(\n self.primaryjoin, {}, {\"binary\": visit_binary}\n )\n\n def _annotate_remote_from_args(self) -> None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the 'remote_side' or '_local_remote_pairs'\n arguments are used.\n\n \"\"\"\n if self._local_remote_pairs:\n if self._remote_side:\n raise sa_exc.ArgumentError(\n \"remote_side argument is redundant \"\n \"against more detailed _local_remote_side \"\n \"argument.\"\n )\n\n remote_side = [r for (l, r) in self._local_remote_pairs]\n else:\n remote_side = self._remote_side\n\n if self._refers_to_parent_table():\n self._annotate_selfref(lambda col: col in remote_side, True)\n else:\n\n def repl(element: _CE, **kw: Any) -> Optional[_CE]:\n # use set() to avoid generating ``__eq__()`` expressions\n # against each element\n if element in set(remote_side):\n return element._annotate({\"remote\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, repl\n )\n\n def _annotate_remote_with_overlap(self) -> None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables have some set of\n tables in common, though is not a fully self-referential\n relationship.\n\n \"\"\"\n\n def visit_binary(binary: BinaryExpression[Any]) -> None:\n binary.left, binary.right = proc_left_right(\n binary.left, binary.right\n )\n binary.right, binary.left = proc_left_right(\n binary.right, binary.left\n )\n\n check_entities = (\n self.prop is not None and self.prop.mapper is not self.prop.parent\n )\n\n def proc_left_right(\n left: ColumnElement[Any], right: ColumnElement[Any]\n ) -> Tuple[ColumnElement[Any], ColumnElement[Any]]:\n if isinstance(left, expression.ColumnClause) and isinstance(\n right, expression.ColumnClause\n ):\n if self.child_persist_selectable.c.contains_column(\n right\n ) and self.parent_persist_selectable.c.contains_column(left):\n right = right._annotate({\"remote\": True})\n elif (\n check_entities\n and right._annotations.get(\"parentmapper\") is self.prop.mapper\n ):\n right = right._annotate({\"remote\": True})\n elif (\n check_entities\n and left._annotations.get(\"parentmapper\") is self.prop.mapper\n ):\n left = left._annotate({\"remote\": True})\n else:\n self._warn_non_column_elements()\n\n return left, right\n\n self.primaryjoin = visitors.cloned_traverse(\n self.primaryjoin, {}, {\"binary\": visit_binary}\n )\n\n def _annotate_remote_distinct_selectables(self) -> None:\n \"\"\"annotate 'remote' in primaryjoin, secondaryjoin\n when the parent/child tables are entirely\n separate.\n\n \"\"\"\n\n def repl(element: _CE, **kw: Any) -> Optional[_CE]:\n if self.child_persist_selectable.c.contains_column(element) and (\n not self.parent_local_selectable.c.contains_column(element)\n or self.child_local_selectable.c.contains_column(element)\n ):\n return element._annotate({\"remote\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, repl\n )\n\n def _warn_non_column_elements(self) -> None:\n util.warn(\n \"Non-simple column elements in primary \"\n \"join condition for property %s - consider using \"\n \"remote() annotations to mark the remote side.\" % self.prop\n )\n\n def _annotate_local(self) -> None:\n \"\"\"Annotate the primaryjoin and secondaryjoin\n structures with 'local' annotations.\n\n This annotates all column elements found\n simultaneously in the parent table\n and the join condition that don't 
have a\n 'remote' annotation set up from\n _annotate_remote() or user-defined.\n\n \"\"\"\n if self._has_annotation(self.primaryjoin, \"local\"):\n return\n\n if self._local_remote_pairs:\n local_side = util.column_set(\n [l for (l, r) in self._local_remote_pairs]\n )\n else:\n local_side = util.column_set(self.parent_persist_selectable.c)\n\n def locals_(element: _CE, **kw: Any) -> Optional[_CE]:\n if \"remote\" not in element._annotations and element in local_side:\n return element._annotate({\"local\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, locals_\n )\n\n def _annotate_parentmapper(self) -> None:\n def parentmappers_(element: _CE, **kw: Any) -> Optional[_CE]:\n if \"remote\" in element._annotations:\n return element._annotate({\"parentmapper\": self.prop.mapper})\n elif \"local\" in element._annotations:\n return element._annotate({\"parentmapper\": self.prop.parent})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, parentmappers_\n )\n\n def _check_remote_side(self) -> None:\n if not self.local_remote_pairs:\n raise sa_exc.ArgumentError(\n \"Relationship %s could \"\n \"not determine any unambiguous local/remote column \"\n \"pairs based on join condition and remote_side \"\n \"arguments. \"\n \"Consider using the remote() annotation to \"\n \"accurately mark those elements of the join \"\n \"condition that are on the remote side of \"\n \"the relationship.\" % (self.prop,)\n )\n else:\n not_target = util.column_set(\n self.parent_persist_selectable.c\n ).difference(self.child_persist_selectable.c)\n\n for _, rmt in self.local_remote_pairs:\n if rmt in not_target:\n util.warn(\n \"Expression %s is marked as 'remote', but these \"\n \"column(s) are local to the local side. The \"\n \"remote() annotation is needed only for a \"\n \"self-referential relationship where both sides \"\n \"of the relationship refer to the same tables.\"\n % (rmt,)\n )\n\n def _check_foreign_cols(\n self, join_condition: ColumnElement[bool], primary: bool\n ) -> None:\n \"\"\"Check the foreign key columns collected and emit error\n messages.\"\"\"\n\n can_sync = False\n\n foreign_cols = self._gather_columns_with_annotation(\n join_condition, \"foreign\"\n )\n\n has_foreign = bool(foreign_cols)\n\n if primary:\n can_sync = bool(self.synchronize_pairs)\n else:\n can_sync = bool(self.secondary_synchronize_pairs)\n\n if (\n self.support_sync\n and can_sync\n or (not self.support_sync and has_foreign)\n ):\n return\n\n # from here below is just determining the best error message\n # to report. Check for a join condition using any operator\n # (not just ==), perhaps they need to turn on \"viewonly=True\".\n if self.support_sync and has_foreign and not can_sync:\n err = (\n \"Could not locate any simple equality expressions \"\n \"involving locally mapped foreign key columns for \"\n \"%s join condition \"\n \"'%s' on relationship %s.\"\n % (\n primary and \"primary\" or \"secondary\",\n join_condition,\n self.prop,\n )\n )\n err += (\n \" Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or are \"\n \"annotated in the join condition with the foreign() \"\n \"annotation. 
To allow comparison operators other than \"\n \"'==', the relationship can be marked as viewonly=True.\"\n )\n\n raise sa_exc.ArgumentError(err)\n else:\n err = (\n \"Could not locate any relevant foreign key columns \"\n \"for %s join condition '%s' on relationship %s.\"\n % (\n primary and \"primary\" or \"secondary\",\n join_condition,\n self.prop,\n )\n )\n err += (\n \" Ensure that referencing columns are associated \"\n \"with a ForeignKey or ForeignKeyConstraint, or are \"\n \"annotated in the join condition with the foreign() \"\n \"annotation.\"\n )\n raise sa_exc.ArgumentError(err)\n\n def _determine_direction(self) -> None:\n \"\"\"Determine if this relationship is one to many, many to one,\n many to many.\n\n \"\"\"\n if self.secondaryjoin is not None:\n self.direction = MANYTOMANY\n else:\n parentcols = util.column_set(self.parent_persist_selectable.c)\n targetcols = util.column_set(self.child_persist_selectable.c)\n\n # fk collection which suggests ONETOMANY.\n onetomany_fk = targetcols.intersection(self.foreign_key_columns)\n\n # fk collection which suggests MANYTOONE.\n\n manytoone_fk = parentcols.intersection(self.foreign_key_columns)\n\n if onetomany_fk and manytoone_fk:\n # fks on both sides. test for overlap of local/remote\n # with foreign key.\n # we will gather columns directly from their annotations\n # without deannotating, so that we can distinguish on a column\n # that refers to itself.\n\n # 1. columns that are both remote and FK suggest\n # onetomany.\n onetomany_local = self._gather_columns_with_annotation(\n self.primaryjoin, \"remote\", \"foreign\"\n )\n\n # 2. columns that are FK but are not remote (e.g. local)\n # suggest manytoone.\n manytoone_local = {\n c\n for c in self._gather_columns_with_annotation(\n self.primaryjoin, \"foreign\"\n )\n if \"remote\" not in c._annotations\n }\n\n # 3. if both collections are present, remove columns that\n # refer to themselves. This is for the case of\n # and_(Me.id == Me.remote_id, Me.version == Me.version)\n if onetomany_local and manytoone_local:\n self_equated = self.remote_columns.intersection(\n self.local_columns\n )\n onetomany_local = onetomany_local.difference(self_equated)\n manytoone_local = manytoone_local.difference(self_equated)\n\n # at this point, if only one or the other collection is\n # present, we know the direction, otherwise it's still\n # ambiguous.\n\n if onetomany_local and not manytoone_local:\n self.direction = ONETOMANY\n elif manytoone_local and not onetomany_local:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship\"\n \" direction for relationship '%s' - foreign \"\n \"key columns within the join condition are present \"\n \"in both the parent and the child's mapped tables. 
\"\n \"Ensure that only those columns referring \"\n \"to a parent column are marked as foreign, \"\n \"either via the foreign() annotation or \"\n \"via the foreign_keys argument.\" % self.prop\n )\n elif onetomany_fk:\n self.direction = ONETOMANY\n elif manytoone_fk:\n self.direction = MANYTOONE\n else:\n raise sa_exc.ArgumentError(\n \"Can't determine relationship \"\n \"direction for relationship '%s' - foreign \"\n \"key columns are present in neither the parent \"\n \"nor the child's mapped tables\" % self.prop\n )\n\n def _deannotate_pairs(\n self, collection: _ColumnPairIterable\n ) -> _MutableColumnPairs:\n \"\"\"provide deannotation for the various lists of\n pairs, so that using them in hashes doesn't incur\n high-overhead __eq__() comparisons against\n original columns mapped.\n\n \"\"\"\n return [(x._deannotate(), y._deannotate()) for x, y in collection]\n\n def _setup_pairs(self) -> None:\n sync_pairs: _MutableColumnPairs = []\n lrp: util.OrderedSet[\n Tuple[ColumnElement[Any], ColumnElement[Any]]\n ] = util.OrderedSet([])\n secondary_sync_pairs: _MutableColumnPairs = []\n\n def go(\n joincond: ColumnElement[bool],\n collection: _MutableColumnPairs,\n ) -> None:\n def visit_binary(\n binary: BinaryExpression[Any],\n left: ColumnElement[Any],\n right: ColumnElement[Any],\n ) -> None:\n if (\n \"remote\" in right._annotations\n and \"remote\" not in left._annotations\n and self.can_be_synced_fn(left)\n ):\n lrp.add((left, right))\n elif (\n \"remote\" in left._annotations\n and \"remote\" not in right._annotations\n and self.can_be_synced_fn(right)\n ):\n lrp.add((right, left))\n if binary.operator is operators.eq and self.can_be_synced_fn(\n left, right\n ):\n if \"foreign\" in right._annotations:\n collection.append((left, right))\n elif \"foreign\" in left._annotations:\n collection.append((right, left))\n\n visit_binary_product(visit_binary, joincond)\n\n for joincond, collection in [\n (self.primaryjoin, sync_pairs),\n (self.secondaryjoin, secondary_sync_pairs),\n ]:\n if joincond is None:\n continue\n go(joincond, collection)\n\n self.local_remote_pairs = self._deannotate_pairs(lrp)\n self.synchronize_pairs = self._deannotate_pairs(sync_pairs)\n self.secondary_synchronize_pairs = self._deannotate_pairs(\n secondary_sync_pairs\n )\n\n _track_overlapping_sync_targets: weakref.WeakKeyDictionary[\n ColumnElement[Any],\n weakref.WeakKeyDictionary[\n RelationshipProperty[Any], ColumnElement[Any]\n ],\n ] = weakref.WeakKeyDictionary()\n\n def _warn_for_conflicting_sync_targets(self) -> None:\n if not self.support_sync:\n return\n\n # we would like to detect if we are synchronizing any column\n # pairs in conflict with another relationship that wishes to sync\n # an entirely different column to the same target. This is a\n # very rare edge case so we will try to minimize the memory/overhead\n # impact of this check\n for from_, to_ in [\n (from_, to_) for (from_, to_) in self.synchronize_pairs\n ] + [\n (from_, to_) for (from_, to_) in self.secondary_synchronize_pairs\n ]:\n # save ourselves a ton of memory and overhead by only\n # considering columns that are subject to a overlapping\n # FK constraints at the core level. 
This condition can arise\n # if multiple relationships overlap foreign() directly, but\n # we're going to assume it's typically a ForeignKeyConstraint-\n # level configuration that benefits from this warning.\n\n if to_ not in self._track_overlapping_sync_targets:\n self._track_overlapping_sync_targets[\n to_\n ] = weakref.WeakKeyDictionary({self.prop: from_})\n else:\n other_props = []\n prop_to_from = self._track_overlapping_sync_targets[to_]\n\n for pr, fr_ in prop_to_from.items():\n if (\n not pr.mapper._dispose_called\n and pr not in self.prop._reverse_property\n and pr.key not in self.prop._overlaps\n and self.prop.key not in pr._overlaps\n # note: the \"__*\" symbol is used internally by\n # SQLAlchemy as a general means of suppressing the\n # overlaps warning for some extension cases, however\n # this is not currently\n # a publicly supported symbol and may change at\n # any time.\n and \"__*\" not in self.prop._overlaps\n and \"__*\" not in pr._overlaps\n and not self.prop.parent.is_sibling(pr.parent)\n and not self.prop.mapper.is_sibling(pr.mapper)\n and not self.prop.parent.is_sibling(pr.mapper)\n and not self.prop.mapper.is_sibling(pr.parent)\n and (\n self.prop.key != pr.key\n or not self.prop.parent.common_parent(pr.parent)\n )\n ):\n other_props.append((pr, fr_))\n\n if other_props:\n util.warn(\n \"relationship '%s' will copy column %s to column %s, \"\n \"which conflicts with relationship(s): %s. \"\n \"If this is not the intention, consider if these \"\n \"relationships should be linked with \"\n \"back_populates, or if viewonly=True should be \"\n \"applied to one or more if they are read-only. \"\n \"For the less common case that foreign key \"\n \"constraints are partially overlapping, the \"\n \"orm.foreign() \"\n \"annotation can be used to isolate the columns that \"\n \"should be written towards. To silence this \"\n \"warning, add the parameter 'overlaps=\\\"%s\\\"' to the \"\n \"'%s' relationship.\"\n % (\n self.prop,\n from_,\n to_,\n \", \".join(\n sorted(\n \"'%s' (copies %s to %s)\" % (pr, fr_, to_)\n for (pr, fr_) in other_props\n )\n ),\n \",\".join(sorted(pr.key for pr, fr in other_props)),\n self.prop,\n ),\n code=\"qzyx\",\n )\n self._track_overlapping_sync_targets[to_][self.prop] = from_\n\n @util.memoized_property\n def remote_columns(self) -> Set[ColumnElement[Any]]:\n return self._gather_join_annotations(\"remote\")\n\n @util.memoized_property\n def local_columns(self) -> Set[ColumnElement[Any]]:\n return self._gather_join_annotations(\"local\")\n\n @util.memoized_property\n def foreign_key_columns(self) -> Set[ColumnElement[Any]]:\n return self._gather_join_annotations(\"foreign\")\n\n def _gather_join_annotations(\n self, annotation: str\n ) -> Set[ColumnElement[Any]]:\n s = set(\n self._gather_columns_with_annotation(self.primaryjoin, annotation)\n )\n if self.secondaryjoin is not None:\n s.update(\n self._gather_columns_with_annotation(\n self.secondaryjoin, annotation\n )\n )\n return {x._deannotate() for x in s}\n\n def _gather_columns_with_annotation(\n self, clause: ColumnElement[Any], *annotation: Iterable[str]\n ) -> Set[ColumnElement[Any]]:\n annotation_set = set(annotation)\n return {\n cast(ColumnElement[Any], col)\n for col in visitors.iterate(clause, {})\n if annotation_set.issubset(col._annotations)\n }\n\n def join_targets(\n self,\n source_selectable: Optional[FromClause],\n dest_selectable: FromClause,\n aliased: bool,\n single_crit: Optional[ColumnElement[bool]] = None,\n extra_criteria: Tuple[ColumnElement[bool], ...] 
= (),\n ) -> Tuple[\n ColumnElement[bool],\n Optional[ColumnElement[bool]],\n Optional[FromClause],\n Optional[ClauseAdapter],\n FromClause,\n ]:\n \"\"\"Given a source and destination selectable, create a\n join between them.\n\n This takes into account aliasing the join clause\n to reference the appropriate corresponding columns\n in the target objects, as well as the extra child\n criterion, equivalent column sets, etc.\n\n \"\"\"\n # place a barrier on the destination such that\n # replacement traversals won't ever dig into it.\n # its internal structure remains fixed\n # regardless of context.\n dest_selectable = _shallow_annotate(\n dest_selectable, {\"no_replacement_traverse\": True}\n )\n\n primaryjoin, secondaryjoin, secondary = (\n self.primaryjoin,\n self.secondaryjoin,\n self.secondary,\n )\n\n # adjust the join condition for single table inheritance,\n # in the case that the join is to a subclass\n # this is analogous to the\n # \"_adjust_for_single_table_inheritance()\" method in Query.\n\n if single_crit is not None:\n if secondaryjoin is not None:\n secondaryjoin = secondaryjoin & single_crit\n else:\n primaryjoin = primaryjoin & single_crit\n\n if extra_criteria:\n\n def mark_unrelated_columns_as_ok_to_adapt(\n elem: SupportsAnnotations, annotations: _AnnotationDict\n ) -> SupportsAnnotations:\n \"\"\"note unrelated columns in the \"extra criteria\" as OK\n to adapt, even though they are not part of our \"local\"\n or \"remote\" side.\n\n see #9779 for this case\n\n \"\"\"\n\n parentmapper_for_element = elem._annotations.get(\n \"parentmapper\", None\n )\n if (\n parentmapper_for_element is not self.prop.parent\n and parentmapper_for_element is not self.prop.mapper\n ):\n return _safe_annotate(elem, annotations)\n else:\n return elem\n\n extra_criteria = tuple(\n _deep_annotate(\n elem,\n {\"ok_to_adapt_in_join_condition\": True},\n annotate_callable=mark_unrelated_columns_as_ok_to_adapt,\n )\n for elem in extra_criteria\n )\n\n if secondaryjoin is not None:\n secondaryjoin = secondaryjoin & sql.and_(*extra_criteria)\n else:\n primaryjoin = primaryjoin & sql.and_(*extra_criteria)\n\n if aliased:\n if secondary is not None:\n secondary = secondary._anonymous_fromclause(flat=True)\n primary_aliasizer = ClauseAdapter(\n secondary, exclude_fn=_ColInAnnotations(\"local\")\n )\n secondary_aliasizer = ClauseAdapter(\n dest_selectable, equivalents=self.child_equivalents\n ).chain(primary_aliasizer)\n if source_selectable is not None:\n primary_aliasizer = ClauseAdapter(\n secondary, exclude_fn=_ColInAnnotations(\"local\")\n ).chain(\n ClauseAdapter(\n source_selectable,\n equivalents=self.parent_equivalents,\n )\n )\n\n secondaryjoin = secondary_aliasizer.traverse(secondaryjoin)\n else:\n primary_aliasizer = ClauseAdapter(\n dest_selectable,\n exclude_fn=_ColInAnnotations(\"local\"),\n equivalents=self.child_equivalents,\n )\n if source_selectable is not None:\n primary_aliasizer.chain(\n ClauseAdapter(\n source_selectable,\n exclude_fn=_ColInAnnotations(\"remote\"),\n equivalents=self.parent_equivalents,\n )\n )\n secondary_aliasizer = None\n\n primaryjoin = primary_aliasizer.traverse(primaryjoin)\n target_adapter = secondary_aliasizer or primary_aliasizer\n target_adapter.exclude_fn = None\n else:\n target_adapter = None\n return (\n primaryjoin,\n secondaryjoin,\n secondary,\n target_adapter,\n dest_selectable,\n )\n\n def create_lazy_clause(\n self, reverse_direction: bool = False\n ) -> Tuple[\n ColumnElement[bool],\n Dict[str, ColumnElement[Any]],\n Dict[ColumnElement[Any], 
ColumnElement[Any]],\n ]:\n binds: Dict[ColumnElement[Any], BindParameter[Any]] = {}\n equated_columns: Dict[ColumnElement[Any], ColumnElement[Any]] = {}\n\n has_secondary = self.secondaryjoin is not None\n\n if has_secondary:\n lookup = collections.defaultdict(list)\n for l, r in self.local_remote_pairs:\n lookup[l].append((l, r))\n equated_columns[r] = l\n elif not reverse_direction:\n for l, r in self.local_remote_pairs:\n equated_columns[r] = l\n else:\n for l, r in self.local_remote_pairs:\n equated_columns[l] = r\n\n def col_to_bind(\n element: ColumnElement[Any], **kw: Any\n ) -> Optional[BindParameter[Any]]:\n if (\n (not reverse_direction and \"local\" in element._annotations)\n or reverse_direction\n and (\n (has_secondary and element in lookup)\n or (not has_secondary and \"remote\" in element._annotations)\n )\n ):\n if element not in binds:\n binds[element] = sql.bindparam(\n None, None, type_=element.type, unique=True\n )\n return binds[element]\n return None\n\n lazywhere = self.primaryjoin\n if self.secondaryjoin is None or not reverse_direction:\n lazywhere = visitors.replacement_traverse(\n lazywhere, {}, col_to_bind\n )\n\n if self.secondaryjoin is not None:\n secondaryjoin = self.secondaryjoin\n if reverse_direction:\n secondaryjoin = visitors.replacement_traverse(\n secondaryjoin, {}, col_to_bind\n )\n lazywhere = sql.and_(lazywhere, secondaryjoin)\n\n bind_to_col = {binds[col].key: col for col in binds}\n\n return lazywhere, bind_to_col, equated_columns\n\n\nclass _ColInAnnotations:\n \"\"\"Serializable object that tests for a name in c._annotations.\"\"\"\n\n __slots__ = (\"name\",)\n\n def __init__(self, name: str):\n self.name = name\n\n def __call__(self, c: ClauseElement) -> bool:\n return (\n self.name in c._annotations\n or \"ok_to_adapt_in_join_condition\" in c._annotations\n )\n\n\nclass Relationship( # type: ignore\n RelationshipProperty[_T],\n _DeclarativeMapped[_T],\n WriteOnlyMapped[_T], # not compatible with Mapped[_T]\n DynamicMapped[_T], # not compatible with Mapped[_T]\n):\n \"\"\"Describes an object property that holds a single item or list\n of items that correspond to a related database table.\n\n Public constructor is the :func:`_orm.relationship` function.\n\n .. seealso::\n\n :ref:`relationship_config_toplevel`\n\n .. versionchanged:: 2.0 Added :class:`_orm.Relationship` as a Declarative\n compatible subclass for :class:`_orm.RelationshipProperty`.\n\n \"\"\"\n\n inherit_cache = True\n \"\"\":meta private:\"\"\"\n",
"step-ids": [
44,
79,
88,
99,
100
]
}
|
[
44,
79,
88,
99,
100
] |
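# Illustrative aside (not part of the record above): the JoinCondition /
# remote-side logic in that relationship code is what resolves patterns such as
# a self-referential relationship, where remote_side= marks which column sits
# on the "remote" side of the join. A minimal sketch, assuming SQLAlchemy 1.4+
# and hypothetical model names:
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import declarative_base, relationship

Base = declarative_base()


class Node(Base):
    __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('node.id'))

    # remote_side=[id] tells the ORM that Node.id is the remote column, so the
    # direction of this self-referential join resolves to many-to-one (parent).
    parent = relationship('Node', remote_side=[id])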
"""tables
Revision ID: 35f6815c3112
Revises: None
Create Date: 2013-07-28 21:15:38.385006
"""
# revision identifiers, used by Alembic.
revision = '35f6815c3112'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('firstname', sa.String(length=64), nullable=True),
sa.Column('lastname', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password', sa.String(length=64), nullable=True),
sa.Column('address', sa.String(length=120), nullable=True),
sa.Column('city', sa.String(length=64), nullable=True),
sa.Column('state', sa.String(length=64), nullable=True),
sa.Column('zipcode', sa.String(length=64), nullable=True),
sa.Column('country', sa.String(length=64), nullable=True),
sa.Column('role', sa.Integer(), nullable=True),
sa.Column('dob', sa.DateTime(), nullable=True),
sa.Column('gender', sa.String(length=64), nullable=True),
sa.Column('fitness', sa.Integer(), nullable=True),
sa.Column('experience', sa.Integer(), nullable=True),
sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('health_types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('issue', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users_health',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('health_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['health_id'], ['health_types.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('positions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('position_type', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
    ### end Alembic commands ###


def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
### end Alembic commands ###
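# Illustrative aside: a minimal sketch (not from the original project) of
# declarative models corresponding to the schema created by upgrade() above.
# Class names are assumptions; only the table names, columns, and foreign keys
# mirror the migration. Assumes SQLAlchemy 1.4+ for sqlalchemy.orm.declarative_base.
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    firstname = Column(String(64))
    lastname = Column(String(64))
    email = Column(String(120))
    # ...the remaining profile columns (password, address, ..., willing_teamLeader)
    # follow the same Column(...) pattern as the migration above.


class HealthType(Base):
    __tablename__ = 'health_types'
    id = Column(Integer, primary_key=True)
    issue = Column(String(64))


class UserHealth(Base):
    # association rows linking users to health_types (table 'users_health')
    __tablename__ = 'users_health'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    health_id = Column(Integer, ForeignKey('health_types.id'))


class Position(Base):
    __tablename__ = 'positions'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))
    position_type = Column(String(64))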
|
normal
|
{
"blob_id": "9989d31dfe13809d67f629cc283cd02ce354a74e",
"index": 115,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n",
"step-3": "<mask token>\nrevision = '35f6815c3112'\ndown_revision = None\n<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n",
"step-4": "<mask token>\nrevision = '35f6815c3112'\ndown_revision = None\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n",
"step-5": "\"\"\"tables\n\nRevision ID: 35f6815c3112\nRevises: None\nCreate Date: 2013-07-28 21:15:38.385006\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '35f6815c3112'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True),\n sa.Column('lastname', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password', sa.String(length=64), nullable=True),\n sa.Column('address', sa.String(length=120), nullable=True),\n sa.Column('city', sa.String(length=64), nullable=True),\n sa.Column('state', sa.String(length=64), nullable=True),\n sa.Column('zipcode', sa.String(length=64), nullable=True),\n sa.Column('country', sa.String(length=64), nullable=True),\n sa.Column('role', sa.Integer(), nullable=True),\n sa.Column('dob', sa.DateTime(), nullable=True),\n sa.Column('gender', sa.String(length=64), nullable=True),\n sa.Column('fitness', sa.Integer(), nullable=True),\n sa.Column('experience', sa.Integer(), nullable=True),\n sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('health_types',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('issue', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users_health',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('health_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['health_id'], ['health_types.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('positions',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('position_type', sa.String(length=64), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Manages the buffer and the policy
from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(
self,
env,
policy: torch.nn.modules.container.Sequential,
learning_rate: float,
        n_policy: int, # number of policy iterations
        n_episode: int, # episodes generated per policy iteration, used to update that policy
        max_timesteps: int # cap on steps per episode, so a strong policy cannot keep an episode from ever terminating
) -> None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
# self.buffer = buffer
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)
def get_acs(self, obs):
'''
obs is shape (batch_size, n_dim)
'''
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs # shape (batch_size,)
def get_ac(self, ob):
'''
ob is shape (n_dim,)
'''
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1,-1))
        # Sample the action from the categorical distribution instead of taking the argmax of the logits
# ac = torch.argmax(logits)
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render = False):
next_ob = self.env.reset().reshape(1,-1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1,-1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
            terminals.append(done)
            timesteps += 1  # count steps so the max_timesteps cap can actually trigger
            if done or timesteps > self.max_timesteps:
                break
# print(acs, type(acs), 'acs')
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)
def train(self):
'''
        for _ in number_of_policy_iterations:
            Episodes have different numbers of timesteps, so the batch size is 1 and
            each episode is processed on its own (which also removes the need for a buffer).
            Generate episodes with the current policy and collect their data,
            compute the surrogate objective J tilde from them,
            take its gradient,
            and update the policy.
'''
# print(self.policy.state_dict(), 'p1')
for i_policy in range(self.n_policy):
            J = 0 # J tilde, i.e. the surrogate loss
q = 0
for i_episode in range(self.n_episode):
                # roll out one episode with the current policy
obs, acs, next_obs, res, terminals = self.generate_episode()
# print(acs, acs.shape, 'acs')
assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))
r_tau = sum(res)
logits = self.policy(obs)
# print(logits, logits.shape, 'logits')
# print(acs, type(acs))
                # reduction must be 'sum': the policy-gradient term is R(tau) * sum_t(-log pi(a_t|s_t)),
                # which is exactly CrossEntropyLoss(reduction='sum') scaled by the episode return;
                # the default 'mean' rescales the objective and training makes no progress
                criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
negative_likelihoods = negative_likelihoods.sum()
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
# print(r_tau, 'r_tau')
J += negative_likelihoods*r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(f"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}") # 这里的loss估计不对,要用平均每次的
J.backward()
self.optimizer.step()
# print(self.policy.state_dict(), 'p2')
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
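

# Hypothetical usage sketch (editor's addition, not part of the original source). It assumes a
# classic-gym environment such as CartPole-v1 whose reset() returns an observation array, and
# the global_var module above providing torch_default_type / np_default_type; all names and
# hyperparameters below are illustrative.
if __name__ == "__main__":
    import gym

    env = gym.make("CartPole-v1")        # assumed environment name
    obs_dim = env.observation_space.shape[0]
    n_actions = env.action_space.n
    policy = nn.Sequential(              # small MLP producing action logits
        nn.Linear(obs_dim, 64),
        nn.Tanh(),
        nn.Linear(64, n_actions),
    )
    agent = PG_Agent(env, policy, learning_rate=1e-2,
                     n_policy=100, n_episode=10, max_timesteps=500)
    agent.train()
    agent.save_policy("cartpole_policy.pth")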
|
normal
|
{
"blob_id": "b2cfd397e48213a540608fc232db2eab282935bb",
"index": 1481,
"step-1": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n <mask token>\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-2": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-3": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-4": "from os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\nimport global_var as gv\ntorch.set_default_dtype(gv.torch_default_type)\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-5": "# 总管buffer和policy\n\n\n\nfrom os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\n\nimport global_var as gv\n\ntorch.set_default_dtype(gv.torch_default_type)\n\nclass PG_Agent(object):\n def __init__(\n self,\n env,\n policy: torch.nn.modules.container.Sequential, \n learning_rate: float,\n n_policy: int, # 迭代多少个策略\n n_episode: int, # 每个策略下输出多少个episode用来更新该策略\n max_timesteps: int # 最多一个episode多个步,免得一个很强的策略出来以后episode不终止了\n ) -> None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n # self.buffer = buffer\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)\n\n def get_acs(self, obs):\n '''\n obs is shape (batch_size, n_dim)\n '''\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs # shape (batch_size,)\n \n def get_ac(self, ob):\n '''\n ob is shape (n_dim,)\n '''\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1,-1))\n # 按照概率分布来获取ac,而不是直接取较大Logit者,这里dubug了好久,烦烦烦\n # ac = torch.argmax(logits)\n distri = distributions.Categorical(logits=logits)\n\n return distri.sample().item()\n\n def generate_episode(self, render = False):\n next_ob = self.env.reset().reshape(1,-1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1,-1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n # break\n if done or timesteps > self.max_timesteps:\n break\n # print(acs, type(acs), 'acs')\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)\n\n\n def train(self):\n '''\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n '''\n # print(self.policy.state_dict(), 'p1')\n for i_policy in range(self.n_policy):\n J = 0 # j tilda,也就是loss\n q = 0\n for i_episode in range(self.n_episode):\n # 生成\n obs, acs, next_obs, res, terminals = self.generate_episode()\n # print(acs, acs.shape, 'acs')\n assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))\n r_tau = sum(res)\n logits = self.policy(obs)\n\n # print(logits, logits.shape, 'logits')\n # print(acs, type(acs))\n\n criterion = nn.CrossEntropyLoss(reduction='sum') # 注意这里要选择sum才对,否则和policy gradient的公式并不一样,导致训练一直没有效果,难受啊,找了好久这个问题\n negative_likelihoods = criterion(logits, acs)\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n negative_likelihoods = negative_likelihoods.sum()\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n # print(r_tau, 'r_tau')\n J += negative_likelihoods*r_tau\n q += res.sum().item()\n \n J /= self.n_episode\n self.optimizer.zero_grad()\n print(f\"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}\") # 这里的loss估计不对,要用平均每次的\n J.backward()\n self.optimizer.step()\n\n # print(self.policy.state_dict(), 'p2')\n\n def save_policy(self, 
path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n\n\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.authtoken.models import Token
from .serilizer import UserSerializer
class RegistrationView(APIView):
    serializer_class = UserSerializer

    def post(self, request):
        serilizer = UserSerializer(data=request.data)
        if serilizer.is_valid():
            account = serilizer.save()
            user_name = serilizer.validated_data['user_name']
            data = {'response': "user with username " + str(user_name) + ' created'}
            data['key'] = get_object_or_404(Token, user=account).key
            return Response(data, status=status.HTTP_201_CREATED)
        else:
            return Response(serilizer.errors, status=status.HTTP_400_BAD_REQUEST)


class LogoutView(APIView):
    def get(self, request):
        logout(request)
        return Response({"response": "logged out"}, status=status.HTTP_200_OK)
|
normal
|
{
"blob_id": "6a5a6bdb0740d51426aa8b36dd3cc317103412b1",
"index": 641,
"step-1": "<mask token>\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-2": "<mask token>\n\n\nclass RegistrationView(APIView):\n <mask token>\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-3": "<mask token>\n\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-4": "from rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom django.contrib.auth import logout\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom .serilizer import UserSerializer\n\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-5": "from rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom django.contrib.auth import logout\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom .serilizer import UserSerializer\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self,request):\n serilizer = UserSerializer(data= request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = { 'response': \"user with username \" + str(user_name) + ' created'}\n data['key'] = get_object_or_404(Token,user = account).key\n return Response( data ,status = status.HTTP_201_CREATED )\n else :\n return Response(serilizer.errors,status = status.HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n def get(self,request):\n logout(request)\n return Response({\"response\" : \"logged out\"},status=status.HTTP_200_OK)",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import numpy as np
import mysql.connector
from mysql.connector import Error
import matplotlib.pyplot as plt
def readData():
connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')
sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
data = records[0]
# nama_pasien = data[1]
filename = data[2]
# dataSignal = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/files/"+filename,delimiter=',')
## READ TXT FILE
dataSignal = []
my_file = open("C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/" + filename, "r")
for line in my_file.readlines():
if line[-1:] == "\n":
dataSignal.append(line[:-1])
else:
dataSignal.append(line)
my_file.close()
# C:/xampp/htdocs/projectCAD/public/storage/upload/files/hasilproses
if (connection.is_connected()):
cursor.close()
connection.close()
return dataSignal, filename
def saveData(data,label,filename):
connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')
cursor = connection.cursor()
filename_hasil = 'hasilproses_'+filename
with open(r'C:\xampp\htdocs\projectCAD\public\storage\upload/files\hasilproses/' + filename_hasil, 'w') as f:
for row in data:
f.write(str(row) + '\n')
f.close()
#Select Pasien from database
sql_select = "SELECT id_pasien,nama,pathdata FROM datasets"
cursor.execute(sql_select)
records = cursor.fetchall()
data = records[0]
id_pasien = data[0]
print(label[0])
sql_update = "UPDATE pasien SET hasilproses = '" + filename_hasil + "',label = '"+str(label[0])+"' WHERE id = "+str(id_pasien)
cursor.execute(sql_update)
connection.commit()
if (connection.is_connected()):
cursor.close()
connection.close()
return print("sukses")
def getFiturEkstraksi():
connection = mysql.connector.connect(host='localhost',
database='cad_ultrasound',
user='root',
password='')
cursor = connection.cursor()
sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    # fetchall() returns a list of row tuples, not a string; take the stored filename from the
    # first row (same convention as readData above). Note the query still reads from datasets;
    # it may be intended to read the fitur_ekstraksis table instead.
    fiturname = records[0][2]
    fitur = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/" + fiturname, delimiter=',')
if (connection.is_connected()):
cursor.close()
connection.close()
return fitur
def saveFiturEkstraksi(fitur,label):
connection = mysql.connector.connect(host='localhost',
database='cad_ultrasound',
user='root',
password='')
cursor = connection.cursor()
# dbfitur = getFiturEkstraksi()
# dbfitur.append(fitur)
fiturname = 'fitur.txt'
rowfitur = open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/"+fiturname, "w")
    # np.savetxt cannot serialize the integer loop index; write the whole feature array at once
    np.savetxt(rowfitur, np.asarray(fitur))
rowfitur.close()
labelname = 'label.txt'
rowlabel = open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/"+labelname, "w")
    np.savetxt(rowlabel, np.asarray(label), fmt='%s')
rowlabel.close()
sql_update = "UPDATE fitur_ekstraksis SET fitur = '" + fiturname + "', label = '" + labelname + "' WHERE id = 1"
cursor.execute(sql_update)
connection.commit()
if (connection.is_connected()):
cursor.close()
connection.close()
return print("sukses")
|
normal
|
{
"blob_id": "4d7696c832f9255fbc68040b61fde12e057c06fa",
"index": 3899,
"step-1": "<mask token>\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-2": "<mask token>\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-3": "<mask token>\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-4": "import numpy as np\nimport mysql.connector\nfrom mysql.connector import Error\nimport matplotlib.pyplot as plt\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-5": "import numpy as np\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nimport matplotlib.pyplot as plt\r\n\r\ndef readData():\r\n connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')\r\n\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor = connection.cursor()\r\n cursor.execute(sql_select_Query)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n # nama_pasien = data[1]\r\n filename = data[2]\r\n # dataSignal = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/files/\"+filename,delimiter=',')\r\n\r\n ## READ TXT FILE\r\n dataSignal = []\r\n my_file = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/\" + filename, \"r\")\r\n for line in my_file.readlines():\r\n if line[-1:] == \"\\n\":\r\n dataSignal.append(line[:-1])\r\n else:\r\n dataSignal.append(line)\r\n my_file.close()\r\n\r\n # C:/xampp/htdocs/projectCAD/public/storage/upload/files/hasilproses\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n return dataSignal, filename\r\n\r\ndef saveData(data,label,filename):\r\n connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')\r\n cursor = connection.cursor()\r\n\r\n filename_hasil = 'hasilproses_'+filename\r\n with open(r'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/' + filename_hasil, 'w') as f:\r\n for row in data:\r\n f.write(str(row) + '\\n')\r\n f.close()\r\n\r\n #Select Pasien from database\r\n sql_select = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n id_pasien = data[0]\r\n print(label[0])\r\n\r\n sql_update = \"UPDATE pasien SET hasilproses = '\" + filename_hasil + \"',label = '\"+str(label[0])+\"' WHERE id = \"+str(id_pasien)\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")\r\n\r\ndef getFiturEkstraksi():\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select_Query)\r\n fiturname = cursor.fetchall()\r\n fitur = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/\" + fiturname, delimiter=',')\r\n\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return fitur\r\n\r\ndef saveFiturEkstraksi(fitur,label):\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n # dbfitur = getFiturEkstraksi()\r\n # dbfitur.append(fitur)\r\n fiturname = 'fitur.txt'\r\n rowfitur = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+fiturname, \"w\")\r\n for row in range(len(fitur)):\r\n np.savetxt(rowfitur, row)\r\n rowfitur.close()\r\n\r\n labelname = 'label.txt'\r\n rowlabel = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+labelname, \"w\")\r\n for row in range(len(label)):\r\n np.savetxt(rowlabel,row)\r\n rowlabel.close()\r\n\r\n sql_update = \"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname + \"', label = '\" + labelname + \"' WHERE id = 1\"\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if 
(connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
#
# RPi.Spark KeyButton Demo
#
# Author: Kunpeng Zhang
# 2018.6.6
#
# See LICENSE for details.
from time import sleep
import RPi.GPIO as GPIO
from JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL
########################################################################
# Key buttons include Joystick buttons and Action buttons,
# use BCM mode, there are keyboard layout:
#
# [JOY UP]
# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]
# [JOY DOWN]
#
class CONFIG_KEY:
# Action Buttons BCM_IO_NUM
BUTTON_ACT_A = 22
BUTTON_ACT_B = 23
# Joy Buttons BCM_IO_NUM
BUTTON_JOY_LEFT = 26
BUTTON_JOY_RIGHT = 27
BUTTON_JOY_UP = 5
BUTTON_JOY_DOWN = 6
BUTTON_JOY_OK = 24
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return "BUTTON_A"
if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return "BUTTON_B"
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return "JOY_UP"
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return "JOY_DOWN"
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return "JOY_RIGHT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return "JOY_LEFT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return "JOY_CENTER"
return "UNKNOW"
def onKeyButtonDown(self, channel):
print("DOWN:\t{}".format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print("UP:\t{}\n".format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
Key button interrupt event callback function
Inherit this method to implement your want
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode = "INT"):
"""!
Init all key buttons interrupt events or query mode.
Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == "INT":
try:
self._myKey.configKeyButtons(
enableButtons = [
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":self._callbackKeyButton}
],
bounceTime = DEF_BOUNCE_TIME_SHORT_MON )
except:
pass
if mode.upper() == "QUERY":
self._myKey.configKeyButtons([
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":None},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":None}
])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([
CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B,
CONFIG_KEY.BUTTON_JOY_UP,
CONFIG_KEY.BUTTON_JOY_DOWN,
CONFIG_KEY.BUTTON_JOY_LEFT,
CONFIG_KEY.BUTTON_JOY_RIGHT,
CONFIG_KEY.BUTTON_JOY_OK
])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton( keyBtn ) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton( keyBtn ) else 1
return 0
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print("\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n")
self.initKeyButtons("INT")
while True:
if self.readExitButtonStatus(): break
pass
self.releaseKeyButtons()
GPIO.cleanup()
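

# Hypothetical polling variant (editor's addition, not in the original demo): the same demo
# class can also be driven in "QUERY" mode, polling readKeyButton() instead of relying on
# GPIO interrupts. The poll interval and the choice of BUTTON_ACT_B are illustrative.
def run_query_demo():
    d = demo()
    d.initKeyButtons("QUERY")
    while not d.readExitButtonStatus():
        if d.readKeyButton(CONFIG_KEY.BUTTON_ACT_B):
            print("BUTTON_B pressed")
        sleep(0.05)
    GPIO.cleanup()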
if __name__ == "__main__":
demo().run()
print("Key buttons demo is end.")
|
normal
|
{
"blob_id": "50c274e0365f2556a46eb58edcd1f0a7301e89db",
"index": 8716,
"step-1": "<mask token>\n\n\nclass demo:\n <mask token>\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n 
GPIO.cleanup()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n 
< JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-4": "from time import sleep\nimport RPi.GPIO as GPIO\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy 
UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# RPi.Spark KeyButton Demo\n#\n# Author: Kunpeng Zhang\n# 2018.6.6\n#\n# See LICENSE for details.\n\nfrom time import sleep\nimport RPi.GPIO as GPIO\n\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n########################################################################\n# Key buttons include Joystick buttons and Action buttons, \n# use BCM mode, there are keyboard layout:\n# \n# [JOY UP] \n# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]\n# [JOY DOWN] \n#\nclass CONFIG_KEY:\n # Action Buttons BCM_IO_NUM\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n \n # Joy Buttons BCM_IO_NUM\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return \"BUTTON_A\"\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return \"BUTTON_B\"\n \n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return \"JOY_UP\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return \"JOY_DOWN\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return \"JOY_RIGHT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return \"JOY_LEFT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return \"JOY_CENTER\"\n return \"UNKNOW\"\n\n def onKeyButtonDown(self, channel):\n print(\"DOWN:\\t{}\".format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print(\"UP:\\t{}\\n\".format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode = \"INT\"):\n \"\"\"!\n Init all key buttons interrupt events or query mode. 
\n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == \"INT\":\n try:\n self._myKey.configKeyButtons(\n enableButtons = [\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":self._callbackKeyButton}\n ],\n bounceTime = DEF_BOUNCE_TIME_SHORT_MON )\n except:\n pass\n\n if mode.upper() == \"QUERY\":\n self._myKey.configKeyButtons([\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":None}\n ])\n \n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])\n \n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton( keyBtn ) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton( keyBtn ) else 1\n return 0\n \n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\"\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n\")\n self.initKeyButtons(\"INT\")\n\n while True:\n if self.readExitButtonStatus(): break\n pass\n\n self.releaseKeyButtons()\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n demo().run()\n print(\"Key buttons demo is end.\")",
"step-ids": [
6,
12,
15,
16,
17
]
}
|
[
6,
12,
15,
16,
17
] |
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import QCoreApplication
import pymysql
import requests
from twisted.internet import reactor, defer
from scrapy.crawler import CrawlerRunner, CrawlerProcess
from scrapy.utils.project import get_project_settings
from spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport
from scrapy.settings import Settings
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from multiprocessing import Process
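# Open a pymysql connection to the local 'heroes' database (credentials are hard-coded for this demo)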
def db_handle():
con = pymysql.connect(
host='localhost',
user='web',
passwd='web',
charset='utf8',
database='heroes'
)
return con
class Example(QWidget):
class A(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
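        # Build the query UI: a name field, a search button, three text panes and the match table, arranged on a grid layout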
#QToolTip.setFont(QFont('SanSerif', 10))
#self.setToolTip('This is a <b>QWidget</b> widget')
#textEdit = QTextEdit()
#self.setCentralWidget(textEdit)
self.qle = QLineEdit("蔽月八云")
self.user = self.qle.text()
self.para = "user={}".format(self.user)
print(self.user, '1')
btn = QPushButton('查询', self)
#btn.setToolTip('This is a <b>QPushButton</b> widget')
btn.resize(btn.sizeHint())
btn.clicked.connect(self.search)
self.txt = QTextEdit()
#self.txt.textChanged.connect(self.adjustSize)
self.battle = QTextEdit()
self.player_status = QTextEdit()
self.create_table()
        # Note: the action name cannot be 'Quit' or 'Exit'; with those names it does not show up (reason unknown)
exitAction = QAction('Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('application')
exitAction.triggered.connect(qApp.quit)
#self.statusBar()
#menubar = QMainWindow.menuBar()
        # The menu bar behaves differently on macOS
#menubar.setNativeMenuBar(False)
#fileMenu = menubar.addMenu('&File')
#fileMenu.addAction(exitAction)
#toolbar = self.addToolBar('Exit')
#toolbar.addAction(exitAction)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.qle, 1, 0)
grid.addWidget(btn, 2, 0)
grid.addWidget(self.txt, 3, 0)
grid.addWidget(self.battle, 1, 1, 3, 1)
grid.addWidget(self.player_status, 4, 0, 2, 2)
grid.addWidget(self.battle_table, 6, 0, 2, 2)
self.setLayout(grid)
self.setGeometry(600, 600, 800, 600)
self.center()
self.setWindowTitle("战绩查询")
self.show()
def create_table(self):
        # Set up the results table
        self.battle_table = QTableWidget()
        # Column count; the row count is set later, when the data is loaded
        self.battle_table.setColumnCount(8)
        # Header labels
        self.battle_table.setHorizontalHeaderLabels(
            ['match_id', 'head', 'date', 'time', 'kill_count', 'death', 'support', 'score'])
        # Alternate row colors for readability
        self.battle_table.setAlternatingRowColors(True)
        # Select whole rows
        self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)
        # Resize columns to fit their contents
        # self.battle_table.resizeColumnsToContents()
        # Resize rows to fit their contents
        self.battle_table.resizeRowsToContents()
        # Double-click handler
self.battle_table.doubleClicked.connect(self.on_click)
@pyqtSlot()
def on_click(self):
currentQTableWidgetItem = self.battle_table.selectedItems()[0]
        # Match id carried by the clicked row
#match_id = self.battle_table.item(currentQTableWidgetItem.row(), 0).text()
match_id = currentQTableWidgetItem.text()
print(match_id)
self.showDialog(match_id)
def showDialog(self, match_id):
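        # Fetch the raw match record from the Jumpw report API and open the placeholder detail window (class A)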
data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'.format(match_id))
a = self.A()
        ## Launch the crawler to fetch every player's data for this match
#runner = CrawlerRunner(get_project_settings())
#runner.crawl('JumpReport')
#d = runner.join()
#d.addBoth(lambda _: reactor.stop())
        #reactor.run() # blocks while the crawler runs
#
#text, ok = QInputDialog.getText(self, 'Input Dialog',
# 'Enter your name:')
def searchd(self):
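        # Unused variant that would run the work in a separate process (note: self.a is not defined on this class)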
if __name__ == '__main__':
#print(user, '2')
p = Process(target=self.a)
p.start()
p.join()
def search(self):
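        # Run the 'JumpReport' spider for the entered user, then load the crawled results from MySQL and fill the text panes and the table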
print(self.user)
print(__name__)
#print(user, '3')
#process = CrawlerProcess(get_project_settings())
#process.crawl('JumpReport')
#process.start()
#process.stop()
#process.put()
        # Run the crawler from within this script
runner = CrawlerRunner(get_project_settings())
#def search(runner, keyword):
# return runner.crawl(JumpReport, keyword)
#runner = CrawlerProcess()
#dfs = set()
print('a')
runner.crawl('JumpReport', user=self.user)
print(self.user)
d = runner.join()
#dfs.add(d)
#defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())
d.addBoth(lambda _: reactor.stop())
#search(runner, "abcd")
#search(runner, "beat")
#runner.start()
        reactor.run() # blocks here until the crawl finishes
print("complete")
# runner = CrawlerRunner(get_project_settings())
# dfs = set()
# for domain in range(2):
# d = runner.crawl('JumpReport')
# dfs.add(d)
#
# defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())
# reactor.run() # the script will block here until all crawling jobs are finished
# runner = CrawlerRunner(get_project_settings())
#
# @defer.inlineCallbacks
# def crawl():
# for domain in range(2):
# yield runner.crawl('JumpReport')
# reactor.stop()
#
# crawl()
# reactor.run() # the script will block here until the last crawl call is finished
# settings = Settings({'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})
# runner = CrawlerRunner(settings)
#
# d = runner.crawl(JumpReport)
# d.addBoth(lambda _: reactor.stop())
# reactor.run() # the script will block here until the crawling is finished
# runner = CrawlerProcess(get_project_settings())
# runner.crawl(JumpReport)
# runner.start()
name = self.qle.text()
db = db_handle()
with db as con:
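            # Note: this assumes an older pymysql release in which the connection's context manager yields a cursor (hence con.execute()/fetchone() below)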
sql = "select * from player where name = '{}' order by update_time".format(name)
con.execute(sql)
player = con.fetchone()
if player:
id, name, win, match_count, strength, level, update_time, rank = player
text = "角色名: {}\n胜场: {}\n总场数: {}\n团分: {}\n团分排行: {}\n等级: {}\n更新时间: {}".format(
name, win, match_count, strength, rank, level, update_time)
self.txt.setText(text)
sql = "select * from player_data where name = '{}' order by date".format(name)
con.execute(sql)
player_data = con.fetchall()
a = ""
for data in player_data:
a += str(data)
a += "\n"
self.battle.setText(str(a))
sql = "select * from game_data order by match_id desc"
con.execute(sql)
game_data = con.fetchall()
a = ""
l = 0
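            # l tracks the current row index while battle_table is filled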
self.battle_table.setRowCount(len(game_data))
for data in game_data:
a += str(data[1:])
print(type(data))
for i in range(self.battle_table.columnCount()):
item = QTableWidgetItem(str(data[i + 1]))
                    # Center the cell content horizontally and vertically
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.battle_table.setItem(l, i, item)
a += "\n"
self.player_status.setText(str(a))
l += 1
#for i in range(len(list(a))):
# self.battle_table.setLayout(str(a))
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', "Quit?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class BatterReport(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.txt = QTextEdit()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "889d465ceeac57a600b2fa3bd26632edcd90a655",
"index": 2911,
"step-1": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n <mask token>\n <mask token>\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n <mask token>\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n <mask token>\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n <mask token>\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n <mask token>\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n <mask token>\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n <mask token>\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = (\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n <mask token>\n\n def 
closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.\n Yes | QMessageBox.No, QMessageBox.Yes)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\n<mask token>\n",
"step-4": "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QCoreApplication\nimport pymysql\nimport requests\nfrom twisted.internet import reactor, defer\nfrom scrapy.crawler import CrawlerRunner, CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport\nfrom scrapy.settings import Settings\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom multiprocessing import Process\n\n\ndef db_handle():\n con = pymysql.connect(host='localhost', user='web', passwd='web',\n charset='utf8', database='heroes')\n return con\n\n\nclass Example(QWidget):\n\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n self.show()\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.qle = QLineEdit('蔽月八云')\n self.user = self.qle.text()\n self.para = 'user={}'.format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n self.txt = QTextEdit()\n self.battle = QTextEdit()\n self.player_status = QTextEdit()\n self.create_table()\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n grid = QGridLayout()\n grid.setSpacing(10)\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n self.setLayout(grid)\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle('战绩查询')\n self.show()\n\n def create_table(self):\n self.battle_table = QTableWidget()\n self.battle_table.setColumnCount(8)\n self.battle_table.setHorizontalHeaderLabels(['match_id', 'head',\n 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n self.battle_table.setAlternatingRowColors(True)\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.battle_table.resizeRowsToContents()\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'\n .format(match_id))\n a = self.A()\n\n def searchd(self):\n if __name__ == '__main__':\n p = Process(target=self.a)\n p.start()\n p.join()\n\n def search(self):\n print(self.user)\n print(__name__)\n runner = CrawlerRunner(get_project_settings())\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n print('complete')\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = (\n \"select * from player where name = '{}' order by update_time\"\n .format(name))\n con.execute(sql)\n player = con.fetchone()\n if player:\n (id, name, win, match_count, strength, level, update_time, rank\n ) = player\n text = (\n '角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}'\n .format(name, win, match_count, strength, rank, level,\n update_time))\n self.txt.setText(text)\n sql = 
(\"select * from player_data where name = '{}' order by date\"\n .format(name))\n con.execute(sql)\n player_data = con.fetchall()\n a = ''\n for data in player_data:\n a += str(data)\n a += '\\n'\n self.battle.setText(str(a))\n sql = 'select * from game_data order by match_id desc'\n con.execute(sql)\n game_data = con.fetchall()\n a = ''\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n for i in range(self.battle_table.columnCount()):\n item = QTableWidgetItem(str(data[i + 1]))\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n a += '\\n'\n self.player_status.setText(str(a))\n l += 1\n\n def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, 'Message', 'Quit?', QMessageBox.\n Yes | QMessageBox.No, QMessageBox.Yes)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n",
"step-5": "import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import QIcon, QFont\nfrom PyQt5.QtCore import QCoreApplication\n\nimport pymysql\nimport requests\n\nfrom twisted.internet import reactor, defer\nfrom scrapy.crawler import CrawlerRunner, CrawlerProcess\nfrom scrapy.utils.project import get_project_settings\nfrom spider.jump_300heroes.jump_300heroes.spiders.my_report import JumpReport\nfrom scrapy.settings import Settings\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\n\nfrom multiprocessing import Process\n\n\n\n\ndef db_handle():\n\n con = pymysql.connect(\n host='localhost',\n user='web',\n passwd='web',\n charset='utf8',\n database='heroes'\n )\n return con\n\nclass Example(QWidget):\n\n class A(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 220)\n self.setWindowTitle('Icon')\n self.setWindowIcon(QIcon('web.png'))\n\n self.show()\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n #QToolTip.setFont(QFont('SanSerif', 10))\n\n #self.setToolTip('This is a <b>QWidget</b> widget')\n\n #textEdit = QTextEdit()\n #self.setCentralWidget(textEdit)\n\n self.qle = QLineEdit(\"蔽月八云\")\n self.user = self.qle.text()\n self.para = \"user={}\".format(self.user)\n print(self.user, '1')\n btn = QPushButton('查询', self)\n #btn.setToolTip('This is a <b>QPushButton</b> widget')\n btn.resize(btn.sizeHint())\n btn.clicked.connect(self.search)\n\n self.txt = QTextEdit()\n #self.txt.textChanged.connect(self.adjustSize)\n\n self.battle = QTextEdit()\n\n self.player_status = QTextEdit()\n\n self.create_table()\n\n\n\n # 名称不能用Quit、Exit,用了就无法显示,原因不明\n exitAction = QAction('Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('application')\n exitAction.triggered.connect(qApp.quit)\n\n #self.statusBar()\n\n #menubar = QMainWindow.menuBar()\n\n # Mac OS的状态栏显示不一样\n #menubar.setNativeMenuBar(False)\n\n #fileMenu = menubar.addMenu('&File')\n #fileMenu.addAction(exitAction)\n\n #toolbar = self.addToolBar('Exit')\n #toolbar.addAction(exitAction)\n\n grid = QGridLayout()\n grid.setSpacing(10)\n\n grid.addWidget(self.qle, 1, 0)\n grid.addWidget(btn, 2, 0)\n grid.addWidget(self.txt, 3, 0)\n grid.addWidget(self.battle, 1, 1, 3, 1)\n grid.addWidget(self.player_status, 4, 0, 2, 2)\n grid.addWidget(self.battle_table, 6, 0, 2, 2)\n\n self.setLayout(grid)\n\n self.setGeometry(600, 600, 800, 600)\n self.center()\n self.setWindowTitle(\"战绩查询\")\n\n self.show()\n\n def create_table(self):\n # 设置表\n self.battle_table = QTableWidget()\n # 表列数,行数在下方读取数据时,根据数据量建立\n self.battle_table.setColumnCount(8)\n # 设置表头\n self.battle_table.setHorizontalHeaderLabels(\n ['match_id', 'head', 'date', 'time', 'kill_count', 'death', 'support', 'score'])\n # 隔行变色\n self.battle_table.setAlternatingRowColors(True)\n # 整行选中\n self.battle_table.setSelectionBehavior(QAbstractItemView.SelectRows)\n # 将列调整到跟内容大小相匹配\n # self.battle_table.resizeColumnsToContents()\n # #将行大小调整到跟内容的大小相匹配\n self.battle_table.resizeRowsToContents()\n # 点击事件\n self.battle_table.doubleClicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n currentQTableWidgetItem = self.battle_table.selectedItems()[0]\n # 点击的行包含的比赛id\n #match_id = self.battle_table.item(currentQTableWidgetItem.row(), 0).text()\n match_id = currentQTableWidgetItem.text()\n print(match_id)\n self.showDialog(match_id)\n\n def showDialog(self, match_id):\n\n data = requests.get('http://300report.jumpw.com/api/getmatch?id={}'.format(match_id))\n 
a = self.A()\n\n ## 启动爬虫,获取该场比赛所有人的数据\n #runner = CrawlerRunner(get_project_settings())\n #runner.crawl('JumpReport')\n #d = runner.join()\n #d.addBoth(lambda _: reactor.stop())\n #reactor.run() # 阻塞运行爬虫\n #\n #text, ok = QInputDialog.getText(self, 'Input Dialog',\n # 'Enter your name:')\n\n\n\n def searchd(self):\n if __name__ == '__main__':\n #print(user, '2')\n p = Process(target=self.a)\n p.start()\n p.join()\n\n def search(self):\n print(self.user)\n print(__name__)\n #print(user, '3')\n\n\n #process = CrawlerProcess(get_project_settings())\n #process.crawl('JumpReport')\n #process.start()\n #process.stop()\n #process.put()\n # 脚本执行爬虫代码\n runner = CrawlerRunner(get_project_settings())\n\n #def search(runner, keyword):\n # return runner.crawl(JumpReport, keyword)\n\n #runner = CrawlerProcess()\n #dfs = set()\n print('a')\n runner.crawl('JumpReport', user=self.user)\n print(self.user)\n d = runner.join()\n #dfs.add(d)\n #defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())\n d.addBoth(lambda _: reactor.stop())\n #search(runner, \"abcd\")\n #search(runner, \"beat\")\n #runner.start()\n reactor.run() # 阻塞运行爬虫\n\n print(\"complete\")\n\n\n # runner = CrawlerRunner(get_project_settings())\n # dfs = set()\n # for domain in range(2):\n # d = runner.crawl('JumpReport')\n # dfs.add(d)\n #\n # defer.DeferredList(dfs).addBoth(lambda _: reactor.stop())\n # reactor.run() # the script will block here until all crawling jobs are finished\n\n # runner = CrawlerRunner(get_project_settings())\n #\n # @defer.inlineCallbacks\n # def crawl():\n # for domain in range(2):\n # yield runner.crawl('JumpReport')\n # reactor.stop()\n #\n # crawl()\n # reactor.run() # the script will block here until the last crawl call is finished\n\n # settings = Settings({'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'})\n # runner = CrawlerRunner(settings)\n # \n # d = runner.crawl(JumpReport)\n # d.addBoth(lambda _: reactor.stop())\n # reactor.run() # the script will block here until the crawling is finished\n\n\n # runner = CrawlerProcess(get_project_settings())\n # runner.crawl(JumpReport)\n # runner.start()\n\n name = self.qle.text()\n db = db_handle()\n with db as con:\n sql = \"select * from player where name = '{}' order by update_time\".format(name)\n con.execute(sql)\n player = con.fetchone()\n if player:\n id, name, win, match_count, strength, level, update_time, rank = player\n text = \"角色名: {}\\n胜场: {}\\n总场数: {}\\n团分: {}\\n团分排行: {}\\n等级: {}\\n更新时间: {}\".format(\n name, win, match_count, strength, rank, level, update_time)\n \n self.txt.setText(text)\n \n sql = \"select * from player_data where name = '{}' order by date\".format(name)\n con.execute(sql)\n player_data = con.fetchall()\n a = \"\"\n for data in player_data:\n a += str(data)\n a += \"\\n\"\n self.battle.setText(str(a))\n\n sql = \"select * from game_data order by match_id desc\"\n con.execute(sql)\n game_data = con.fetchall()\n a = \"\"\n l = 0\n self.battle_table.setRowCount(len(game_data))\n for data in game_data:\n a += str(data[1:])\n print(type(data))\n\n for i in range(self.battle_table.columnCount()):\n\n item = QTableWidgetItem(str(data[i + 1]))\n # 设置填入数据的排列位置(左右居中| 上下居中)\n item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n self.battle_table.setItem(l, i, item)\n\n a += \"\\n\"\n self.player_status.setText(str(a))\n l += 1\n #for i in range(len(list(a))):\n # self.battle_table.setLayout(str(a))\n\n def center(self):\n\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n 
qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def closeEvent(self, event):\n\n reply = QMessageBox.question(self, 'Message', \"Quit?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\nclass BatterReport(QWidget):\n\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.txt = QTextEdit()\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n ex = Example()\n\n sys.exit(app.exec_())\n",
"step-ids": [
7,
8,
11,
16,
17
]
}
|
[
7,
8,
11,
16,
17
] |
from django import forms
class PasswordChangeForm(forms.Form):
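    # Single-field form for entering a new password (minimum length 8), rendered with the Bootstrap 'form-control' class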
password = forms.CharField(min_length=8,
label="New Password*",
strip=False,
widget=forms.PasswordInput(
attrs={'autocomplete': 'current-password', 'class': 'form-control'}),
)
|
normal
|
{
"blob_id": "85fff1f6e1f69dd0e2e9b5acc90db31d27329c7c",
"index": 3352,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PasswordChangeForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8, label='New Password*', strip=\n False, widget=forms.PasswordInput(attrs={'autocomplete':\n 'current-password', 'class': 'form-control'}))\n",
"step-4": "from django import forms\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8, label='New Password*', strip=\n False, widget=forms.PasswordInput(attrs={'autocomplete':\n 'current-password', 'class': 'form-control'}))\n",
"step-5": "from django import forms\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8,\n label=\"New Password*\",\n strip=False,\n widget=forms.PasswordInput(\n attrs={'autocomplete': 'current-password', 'class': 'form-control'}),\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('location', '0005_auto_20170303_1625'),
]
operations = [
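        # Drop the legacy free-text location fields and relax sub_district_type to a nullable ForeignKey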
migrations.RemoveField(
model_name='location',
name='block',
),
migrations.RemoveField(
model_name='location',
name='mandapam',
),
migrations.RemoveField(
model_name='location',
name='others',
),
migrations.RemoveField(
model_name='location',
name='sub_district',
),
migrations.RemoveField(
model_name='location',
name='taluka',
),
migrations.RemoveField(
model_name='location',
name='tehsil',
),
migrations.AlterField(
model_name='location',
name='sub_district_type',
field=models.ForeignKey(related_name='location', blank=True, to='location.SubDistrictType', null=True, on_delete=django.db.models.deletion.CASCADE),
),
]
|
normal
|
{
"blob_id": "ca7b3b5df860d3c3fb0953857ad950affdcc671d",
"index": 9311,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('location', '0005_auto_20170303_1625')]\n operations = [migrations.RemoveField(model_name='location', name=\n 'block'), migrations.RemoveField(model_name='location', name=\n 'mandapam'), migrations.RemoveField(model_name='location', name=\n 'others'), migrations.RemoveField(model_name='location', name=\n 'sub_district'), migrations.RemoveField(model_name='location', name\n ='taluka'), migrations.RemoveField(model_name='location', name=\n 'tehsil'), migrations.AlterField(model_name='location', name=\n 'sub_district_type', field=models.ForeignKey(related_name=\n 'location', blank=True, to='location.SubDistrictType', null=True,\n on_delete=django.db.models.deletion.CASCADE))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('location', '0005_auto_20170303_1625')]\n operations = [migrations.RemoveField(model_name='location', name=\n 'block'), migrations.RemoveField(model_name='location', name=\n 'mandapam'), migrations.RemoveField(model_name='location', name=\n 'others'), migrations.RemoveField(model_name='location', name=\n 'sub_district'), migrations.RemoveField(model_name='location', name\n ='taluka'), migrations.RemoveField(model_name='location', name=\n 'tehsil'), migrations.AlterField(model_name='location', name=\n 'sub_district_type', field=models.ForeignKey(related_name=\n 'location', blank=True, to='location.SubDistrictType', null=True,\n on_delete=django.db.models.deletion.CASCADE))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('location', '0005_auto_20170303_1625'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='location',\n name='block',\n ),\n migrations.RemoveField(\n model_name='location',\n name='mandapam',\n ),\n migrations.RemoveField(\n model_name='location',\n name='others',\n ),\n migrations.RemoveField(\n model_name='location',\n name='sub_district',\n ),\n migrations.RemoveField(\n model_name='location',\n name='taluka',\n ),\n migrations.RemoveField(\n model_name='location',\n name='tehsil',\n ),\n migrations.AlterField(\n model_name='location',\n name='sub_district_type',\n field=models.ForeignKey(related_name='location', blank=True, to='location.SubDistrictType', null=True, on_delete=django.db.models.deletion.CASCADE),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.core.cache import cache
from rest_framework import serializers
from thenewboston.constants.crawl import (
CRAWL_COMMAND_START,
CRAWL_COMMAND_STOP,
CRAWL_STATUS_CRAWLING,
CRAWL_STATUS_NOT_CRAWLING,
CRAWL_STATUS_STOP_REQUESTED
)
from v1.cache_tools.cache_keys import CRAWL_CACHE_LOCK_KEY, CRAWL_STATUS
from v1.tasks.crawl import start_crawl
class CrawlSerializer(serializers.Serializer):
crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START, CRAWL_COMMAND_STOP])
default_error_messages = {
**serializers.Serializer.default_error_messages,
'cant_start_crawl': 'Can not start new crawl when already crawling',
'cant_stop_crawl': 'Can not stop crawl if not crawling',
}
def create(self, validated_data):
"""Start a network crawl"""
crawl = validated_data['crawl']
if crawl == CRAWL_COMMAND_START:
cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)
start_crawl.delay()
if crawl == CRAWL_COMMAND_STOP:
cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)
return validated_data
def is_valid(self, raise_exception=False):
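        # Take the crawl lock so that concurrent start/stop requests validate against a consistent crawl status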
with cache.lock(CRAWL_CACHE_LOCK_KEY):
return super().is_valid(raise_exception)
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate_crawl(self, crawl):
"""
Validate the correct crawl command is given
- can not start new crawl when already crawling
- can not stop crawl if not crawling
"""
crawl_status = cache.get(CRAWL_STATUS)
if crawl == CRAWL_COMMAND_START and crawl_status in (CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages['cant_start_crawl'])
if crawl == CRAWL_COMMAND_STOP and crawl_status in (CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):
raise serializers.ValidationError(self.error_messages['cant_stop_crawl'])
return crawl
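# Illustrative usage (names assumed, e.g. from inside a DRF view; not part of the original module):
# serializer = CrawlSerializer(data={'crawl': CRAWL_COMMAND_START})
# if serializer.is_valid(raise_exception=True):
#     serializer.save()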
|
normal
|
{
"blob_id": "cb32aa6a1c42e7bb417999f3f6f74ec22209c5a0",
"index": 1230,
"step-1": "<mask token>\n\n\nclass CrawlSerializer(serializers.Serializer):\n <mask token>\n <mask token>\n\n def create(self, validated_data):\n \"\"\"Start a network crawl\"\"\"\n crawl = validated_data['crawl']\n if crawl == CRAWL_COMMAND_START:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)\n start_crawl.delay()\n if crawl == CRAWL_COMMAND_STOP:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)\n return validated_data\n <mask token>\n\n def update(self, instance, validated_data):\n raise RuntimeError('Method unavailable')\n\n def validate_crawl(self, crawl):\n \"\"\"\n Validate the correct crawl command is given\n\n - can not start new crawl when already crawling\n - can not stop crawl if not crawling\n \"\"\"\n crawl_status = cache.get(CRAWL_STATUS)\n if crawl == CRAWL_COMMAND_START and crawl_status in (\n CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_start_crawl'])\n if crawl == CRAWL_COMMAND_STOP and crawl_status in (\n CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_stop_crawl'])\n return crawl\n",
"step-2": "<mask token>\n\n\nclass CrawlSerializer(serializers.Serializer):\n <mask token>\n <mask token>\n\n def create(self, validated_data):\n \"\"\"Start a network crawl\"\"\"\n crawl = validated_data['crawl']\n if crawl == CRAWL_COMMAND_START:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)\n start_crawl.delay()\n if crawl == CRAWL_COMMAND_STOP:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)\n return validated_data\n\n def is_valid(self, raise_exception=False):\n with cache.lock(CRAWL_CACHE_LOCK_KEY):\n return super().is_valid(raise_exception)\n\n def update(self, instance, validated_data):\n raise RuntimeError('Method unavailable')\n\n def validate_crawl(self, crawl):\n \"\"\"\n Validate the correct crawl command is given\n\n - can not start new crawl when already crawling\n - can not stop crawl if not crawling\n \"\"\"\n crawl_status = cache.get(CRAWL_STATUS)\n if crawl == CRAWL_COMMAND_START and crawl_status in (\n CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_start_crawl'])\n if crawl == CRAWL_COMMAND_STOP and crawl_status in (\n CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_stop_crawl'])\n return crawl\n",
"step-3": "<mask token>\n\n\nclass CrawlSerializer(serializers.Serializer):\n crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START,\n CRAWL_COMMAND_STOP])\n default_error_messages = {**serializers.Serializer.\n default_error_messages, 'cant_start_crawl':\n 'Can not start new crawl when already crawling', 'cant_stop_crawl':\n 'Can not stop crawl if not crawling'}\n\n def create(self, validated_data):\n \"\"\"Start a network crawl\"\"\"\n crawl = validated_data['crawl']\n if crawl == CRAWL_COMMAND_START:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)\n start_crawl.delay()\n if crawl == CRAWL_COMMAND_STOP:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)\n return validated_data\n\n def is_valid(self, raise_exception=False):\n with cache.lock(CRAWL_CACHE_LOCK_KEY):\n return super().is_valid(raise_exception)\n\n def update(self, instance, validated_data):\n raise RuntimeError('Method unavailable')\n\n def validate_crawl(self, crawl):\n \"\"\"\n Validate the correct crawl command is given\n\n - can not start new crawl when already crawling\n - can not stop crawl if not crawling\n \"\"\"\n crawl_status = cache.get(CRAWL_STATUS)\n if crawl == CRAWL_COMMAND_START and crawl_status in (\n CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_start_crawl'])\n if crawl == CRAWL_COMMAND_STOP and crawl_status in (\n CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_stop_crawl'])\n return crawl\n",
"step-4": "from django.core.cache import cache\nfrom rest_framework import serializers\nfrom thenewboston.constants.crawl import CRAWL_COMMAND_START, CRAWL_COMMAND_STOP, CRAWL_STATUS_CRAWLING, CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED\nfrom v1.cache_tools.cache_keys import CRAWL_CACHE_LOCK_KEY, CRAWL_STATUS\nfrom v1.tasks.crawl import start_crawl\n\n\nclass CrawlSerializer(serializers.Serializer):\n crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START,\n CRAWL_COMMAND_STOP])\n default_error_messages = {**serializers.Serializer.\n default_error_messages, 'cant_start_crawl':\n 'Can not start new crawl when already crawling', 'cant_stop_crawl':\n 'Can not stop crawl if not crawling'}\n\n def create(self, validated_data):\n \"\"\"Start a network crawl\"\"\"\n crawl = validated_data['crawl']\n if crawl == CRAWL_COMMAND_START:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)\n start_crawl.delay()\n if crawl == CRAWL_COMMAND_STOP:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)\n return validated_data\n\n def is_valid(self, raise_exception=False):\n with cache.lock(CRAWL_CACHE_LOCK_KEY):\n return super().is_valid(raise_exception)\n\n def update(self, instance, validated_data):\n raise RuntimeError('Method unavailable')\n\n def validate_crawl(self, crawl):\n \"\"\"\n Validate the correct crawl command is given\n\n - can not start new crawl when already crawling\n - can not stop crawl if not crawling\n \"\"\"\n crawl_status = cache.get(CRAWL_STATUS)\n if crawl == CRAWL_COMMAND_START and crawl_status in (\n CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_start_crawl'])\n if crawl == CRAWL_COMMAND_STOP and crawl_status in (\n CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages[\n 'cant_stop_crawl'])\n return crawl\n",
"step-5": "from django.core.cache import cache\nfrom rest_framework import serializers\nfrom thenewboston.constants.crawl import (\n CRAWL_COMMAND_START,\n CRAWL_COMMAND_STOP,\n CRAWL_STATUS_CRAWLING,\n CRAWL_STATUS_NOT_CRAWLING,\n CRAWL_STATUS_STOP_REQUESTED\n)\n\nfrom v1.cache_tools.cache_keys import CRAWL_CACHE_LOCK_KEY, CRAWL_STATUS\nfrom v1.tasks.crawl import start_crawl\n\n\nclass CrawlSerializer(serializers.Serializer):\n crawl = serializers.ChoiceField(choices=[CRAWL_COMMAND_START, CRAWL_COMMAND_STOP])\n\n default_error_messages = {\n **serializers.Serializer.default_error_messages,\n 'cant_start_crawl': 'Can not start new crawl when already crawling',\n 'cant_stop_crawl': 'Can not stop crawl if not crawling',\n }\n\n def create(self, validated_data):\n \"\"\"Start a network crawl\"\"\"\n crawl = validated_data['crawl']\n\n if crawl == CRAWL_COMMAND_START:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_CRAWLING, None)\n start_crawl.delay()\n\n if crawl == CRAWL_COMMAND_STOP:\n cache.set(CRAWL_STATUS, CRAWL_STATUS_STOP_REQUESTED, None)\n\n return validated_data\n\n def is_valid(self, raise_exception=False):\n with cache.lock(CRAWL_CACHE_LOCK_KEY):\n return super().is_valid(raise_exception)\n\n def update(self, instance, validated_data):\n raise RuntimeError('Method unavailable')\n\n def validate_crawl(self, crawl):\n \"\"\"\n Validate the correct crawl command is given\n\n - can not start new crawl when already crawling\n - can not stop crawl if not crawling\n \"\"\"\n crawl_status = cache.get(CRAWL_STATUS)\n\n if crawl == CRAWL_COMMAND_START and crawl_status in (CRAWL_STATUS_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages['cant_start_crawl'])\n\n if crawl == CRAWL_COMMAND_STOP and crawl_status in (CRAWL_STATUS_NOT_CRAWLING, CRAWL_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages['cant_stop_crawl'])\n\n return crawl\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from torch import nn
class MNIST3dModel(nn.Module):
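    # A small 3D CNN: two blocks of (Conv3d, Conv3d, BatchNorm3d, MaxPool3d) followed by a two-layer classifier head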
def __init__(self, input_c=3, num_filters=8, num_classes=10):
super().__init__()
self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=
num_filters, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=
num_filters * 2, kernel_size=3, stride=1, padding=1)
self.batchnorm1 = nn.BatchNorm3d(16)
self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=
num_filters * 4, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=
num_filters * 8, kernel_size=3, stride=1, padding=1)
self.batchnorm2 = nn.BatchNorm3d(64)
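        # Note: the BatchNorm3d sizes (16 and 64) and linear1's 4096 input features assume the default num_filters=8 and an input volume that pools down to 4x4x4 (e.g. a 16x16x16 input)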
self.pool = nn.MaxPool3d(2)
self.dropout1 = nn.Dropout(0.25)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(4096, 1024)
self.dropout2 = nn.Dropout(0.5)
self.linear2 = nn.Linear(1024, num_classes)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.relu(x)
x = self.batchnorm1(x)
x = self.pool(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.relu(x)
x = self.batchnorm2(x)
x = self.pool(x)
x = self.dropout1(x)
x = x.view(x.size()[0], -1)
x = self.linear1(x)
x = self.relu(x)
x = self.dropout2(x)
x = self.linear2(x)
return x
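# Illustrative usage (input shape assumed, not specified by the original code):
# import torch
# model = MNIST3dModel()
# logits = model(torch.randn(2, 3, 16, 16, 16))  # -> tensor of shape (2, 10)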
|
normal
|
{
"blob_id": "f6838906c961a9ca7d91d2ab02fd2af72797b880",
"index": 4628,
"step-1": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-3": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-4": "from torch import nn\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from vts.utils.python.archive import archive_parser
class ArchiveParserTest(unittest.TestCase):
"""Unit tests for archive_parser of vts.utils.python.archive.
"""
def testReadHeaderPass(self):
"""Tests that archive is read when header is correct.
Parses archive content containing only the signature.
"""
try:
archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)
archive.Parse()
except ValueError:
self.fail('Archive reader read improperly.')
def testReadHeaderFail(self):
"""Tests that parser throws error when header is invalid.
Parses archive content lacking the correct signature.
"""
archive = archive_parser.Archive('Fail.')
self.assertRaises(ValueError, archive.Parse)
def testReadFile(self):
"""Tests that file is read correctly.
Tests that correctly formatted file in archive is read correctly.
"""
content = archive_parser.Archive.GLOBAL_SIG
file_name = 'test_file'
content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -
len(file_name))
content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH
content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH
content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH
content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH
message = 'test file contents'
message_size = str(len(message))
content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -
len(message_size))
content += archive_parser.Archive.END_TAG
content += message
archive = archive_parser.Archive(content)
archive.Parse()
self.assertIn(file_name, archive.files)
        self.assertEqual(archive.files[file_name], message)
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "2ea335dd8d879731aad7713499440db6d1f60d36",
"index": 2427,
"step-1": "<mask token>\n\n\nclass ArchiveParserTest(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def testReadFile(self):\n \"\"\"Tests that file is read correctly.\n\n Tests that correctly formatted file in archive is read correctly.\n \"\"\"\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.\n CONTENT_SIZE_LENGTH - len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArchiveParserTest(unittest.TestCase):\n <mask token>\n\n def testReadHeaderPass(self):\n \"\"\"Tests that archive is read when header is correct.\n\n Parses archive content containing only the signature.\n \"\"\"\n try:\n archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)\n archive.Parse()\n except ValueError:\n self.fail('Archive reader read improperly.')\n <mask token>\n\n def testReadFile(self):\n \"\"\"Tests that file is read correctly.\n\n Tests that correctly formatted file in archive is read correctly.\n \"\"\"\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.\n CONTENT_SIZE_LENGTH - len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ArchiveParserTest(unittest.TestCase):\n \"\"\"Unit tests for archive_parser of vts.utils.python.archive.\n \"\"\"\n\n def testReadHeaderPass(self):\n \"\"\"Tests that archive is read when header is correct.\n\n Parses archive content containing only the signature.\n \"\"\"\n try:\n archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)\n archive.Parse()\n except ValueError:\n self.fail('Archive reader read improperly.')\n\n def testReadHeaderFail(self):\n \"\"\"Tests that parser throws error when header is invalid.\n\n Parses archive content lacking the correct signature.\n \"\"\"\n archive = archive_parser.Archive('Fail.')\n self.assertRaises(ValueError, archive.Parse)\n\n def testReadFile(self):\n \"\"\"Tests that file is read correctly.\n\n Tests that correctly formatted file in archive is read correctly.\n \"\"\"\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.\n CONTENT_SIZE_LENGTH - len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import os\nimport unittest\nfrom vts.utils.python.archive import archive_parser\n\n\nclass ArchiveParserTest(unittest.TestCase):\n \"\"\"Unit tests for archive_parser of vts.utils.python.archive.\n \"\"\"\n\n def testReadHeaderPass(self):\n \"\"\"Tests that archive is read when header is correct.\n\n Parses archive content containing only the signature.\n \"\"\"\n try:\n archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)\n archive.Parse()\n except ValueError:\n self.fail('Archive reader read improperly.')\n\n def testReadHeaderFail(self):\n \"\"\"Tests that parser throws error when header is invalid.\n\n Parses archive content lacking the correct signature.\n \"\"\"\n archive = archive_parser.Archive('Fail.')\n self.assertRaises(ValueError, archive.Parse)\n\n def testReadFile(self):\n \"\"\"Tests that file is read correctly.\n\n Tests that correctly formatted file in archive is read correctly.\n \"\"\"\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.\n CONTENT_SIZE_LENGTH - len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python\n#\n# Copyright (C) 2016 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport unittest\n\nfrom vts.utils.python.archive import archive_parser\n\n\nclass ArchiveParserTest(unittest.TestCase):\n \"\"\"Unit tests for archive_parser of vts.utils.python.archive.\n \"\"\"\n\n def testReadHeaderPass(self):\n \"\"\"Tests that archive is read when header is correct.\n\n Parses archive content containing only the signature.\n \"\"\"\n try:\n archive = archive_parser.Archive(archive_parser.Archive.GLOBAL_SIG)\n archive.Parse()\n except ValueError:\n self.fail('Archive reader read improperly.')\n\n def testReadHeaderFail(self):\n \"\"\"Tests that parser throws error when header is invalid.\n\n Parses archive content lacking the correct signature.\n \"\"\"\n archive = archive_parser.Archive('Fail.')\n self.assertRaises(ValueError, archive.Parse)\n\n def testReadFile(self):\n \"\"\"Tests that file is read correctly.\n\n Tests that correctly formatted file in archive is read correctly.\n \"\"\"\n content = archive_parser.Archive.GLOBAL_SIG\n file_name = 'test_file'\n content += file_name + ' ' * (archive_parser.Archive.FILE_ID_LENGTH -\n len(file_name))\n content += ' ' * archive_parser.Archive.FILE_TIMESTAMP_LENGTH\n content += ' ' * archive_parser.Archive.OWNER_ID_LENGTH\n content += ' ' * archive_parser.Archive.GROUP_ID_LENGTH\n content += ' ' * archive_parser.Archive.FILE_MODE_LENGTH\n\n message = 'test file contents'\n message_size = str(len(message))\n content += message_size + ' ' * (archive_parser.Archive.CONTENT_SIZE_LENGTH -\n len(message_size))\n content += archive_parser.Archive.END_TAG\n content += message\n archive = archive_parser.Archive(content)\n archive.Parse()\n self.assertIn(file_name, archive.files)\n self.assertEquals(archive.files[file_name], message)\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
2,
3,
6,
7,
8
]
}
|
[
2,
3,
6,
7,
8
] |
from django.db import models
from skills.models import skill
from offres.models import Offer
# Create your models here.
class OfferRequirement(models.Model):
skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING ,default="")
offer = models.ForeignKey(Offer , on_delete=models.CASCADE, default="")
|
normal
|
{
"blob_id": "3640f1df412b43b42fb4e856604508f698a208ad",
"index": 6385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass OfferRequirement(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')\n offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')\n",
"step-4": "from django.db import models\nfrom skills.models import skill\nfrom offres.models import Offer\n\n\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING, default='')\n offer = models.ForeignKey(Offer, on_delete=models.CASCADE, default='')\n",
"step-5": "from django.db import models\nfrom skills.models import skill\nfrom offres.models import Offer \n\n# Create your models here.\nclass OfferRequirement(models.Model):\n skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING ,default=\"\")\n offer = models.ForeignKey(Offer , on_delete=models.CASCADE, default=\"\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from AStar import astar
def main():
grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
start = (0, 0)
end = (8, 9)
path = astar(grid, start, end)
print(path)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "ba483c7eaf2f2ced7f70a14b53c781f190585024",
"index": 1257,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0, \n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n start = 0, 0\n end = 8, 9\n path = astar(grid, start, end)\n print(path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0, \n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n start = 0, 0\n end = 8, 9\n path = astar(grid, start, end)\n print(path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from AStar import astar\n\n\ndef main():\n grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0, 0, 0, 0, 0], [0,\n 0, 0, 0, 1, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 1, 0, 0, 0], [0, 0, 0, \n 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n start = 0, 0\n end = 8, 9\n path = astar(grid, start, end)\n print(path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from AStar import astar\n\n\ndef main():\n grid = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n\n start = (0, 0)\n end = (8, 9)\n\n path = astar(grid, start, end)\n print(path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
https://docs.djangoproject.com/zh-hans/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#example:
# python3.0
from django.contrib import admin
# why use path here?
from django.urls import path, include
from django.conf.urls import url
from . import view
# how to include URL patterns from other app modules
#
urlpatterns = [
# path('hello/', view.hello),
    # path('hello/<int:year>/', view.hello), # hello() must take a matching parameter
# path('ifor/', view.ifor),
path('admin/', admin.site.urls),
# path('blog/', blog.views.goodbye),
    # path('', include('blog.urls.py', namespace='blog')), # wrong
path('', include('blog.urls', namespace='blog')),
# url(r'^hello/$', view.hello),
url(r'^hello/([0-9]{4})/$', view.hello),
url(r'^ifor/', view.ifor),
# url(r'^blog/', 'blog.views.goodbye')
#
]
"""
# python 2.7
from django.conf.urls import url
from . import view
urlpatterns = [
url(r'^$', view.hello),
]
"""
|
normal
|
{
"blob_id": "a470aad80e47b244811e4d9aed4a630ba36a8daf",
"index": 4112,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('', include(\n 'blog.urls', namespace='blog')), url('^hello/([0-9]{4})/$', view.hello),\n url('^ifor/', view.ifor)]\n<mask token>\n",
"step-3": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import url\nfrom . import view\nurlpatterns = [path('admin/', admin.site.urls), path('', include(\n 'blog.urls', namespace='blog')), url('^hello/([0-9]{4})/$', view.hello),\n url('^ifor/', view.ifor)]\n<mask token>\n",
"step-4": "\"\"\"helloworld URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\n https://docs.djangoproject.com/zh-hans/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\n\"\"\"\n#example:\n\n# python3.0\nfrom django.contrib import admin\n\n# 为何要用 path呢\nfrom django.urls import path, include\n\nfrom django.conf.urls import url\n\nfrom . import view\n\n# 如何链接其他文件模块下的路径呢\n# \nurlpatterns = [\n\t# path('hello/', view.hello),\n \n # path('hello/<int:year>/', view.hello), # hello()中要有对应的参数\n\t# path('ifor/', view.ifor),\n path('admin/', admin.site.urls),\n # path('blog/', blog.views.goodbye),\n # path('', include('blog.urls.py', namespace='blog')), # 错误\n path('', include('blog.urls', namespace='blog')),\n # url(r'^hello/$', view.hello),\n url(r'^hello/([0-9]{4})/$', view.hello),\n url(r'^ifor/', view.ifor),\n # url(r'^blog/', 'blog.views.goodbye')\n # \n \n\n]\n\n\"\"\"\n# python 2.7\nfrom django.conf.urls import url\n \nfrom . import view\n \nurlpatterns = [\n url(r'^$', view.hello),\n]\n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'tcaruso'
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import fnmatch
import os
import sys
import warnings
from shutil import rmtree
from setuptools import find_packages, setup, Command
from collections import namedtuple
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
except Exception:
from pip import __version__ as __pip_version__
msg = """Sorry, could not install due to a pip import error. Please open an issue on the repo
with this message and the error so it can be addressed.
pip version: {}
python version: {}
""".format(__pip_version__, '.'.join(sys.version_info))
raise EnvironmentError(msg)
here = os.path.abspath(os.path.dirname(__file__))
# ------------------------------------------------
# Package meta-data.
# PACKAGE_NAME is the name of the package directory and the import path. If you use my_package then when installed, you
# will import the package like `import my_package`.
PACKAGE_NAME = 'socket_wait'
DESCRIPTION = 'Listen on a port until a connection is received.'
URL = 'https://github.com/tomplex/socket_wait'
EMAIL = '[email protected]'
AUTHOR = 'Tom Caruso'
# The minimum Python version required
REQUIRES_PYTHON = (2, 7, 0)
# PYPI_NAME is the name of the package on pypi. We'll default to pbvt_{PACKAGE_NAME} so we avoid name collisions
# with PyPI. You'll use this name to install the package.
PYPI_NAME = '{}'.format(PACKAGE_NAME)
# Specify the name of the requirements file we should use. If there is none, then just leave it as is. We'll detect
# ------------------------------------------------
# Check Python version we're installing against. Bail if it's not correct. This will blow up both when we build the
# package and when someone tries to install it.
if sys.version_info < REQUIRES_PYTHON:
# Raise if we're trying to install on an unsupported Python version
raise Exception("Package {} requires python >= {}.".format(PYPI_NAME, '.'.join(map(str, REQUIRES_PYTHON))))
REQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))
# ------------------------------------------------
# Requirements gathering.
about = {}
from socket_wait import __version__
about['__version__'] = __version__
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status("Installing required build packages...")
os.system('{0} -m pip install wheel twine'.format(sys.executable))
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to pypi via Twine…')
os.system('{0} -m twine upload dist/* '.format(sys.executable))
sys.exit()
setup(
name=PYPI_NAME,
version=about['__version__'],
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=URL,
py_modules=['socket_wait'],
include_package_data=True,
# If your package has a CLI component, specify it in entry_points.
# for example, if you want it to be called like "mycli" from the command line, and the command line entry
# point lives in the somepackage/cli.py file, in the function main, you'd construct it like this:
entry_points={
'console_scripts': ['socket_wait=socket_wait:cli'],
},
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
],
# setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
normal
|
{
"blob_id": "58438a1fb0b9e620717ba262c25a43bfbf6b8824",
"index": 8100,
"step-1": "<mask token>\n\n\nclass UploadCommand(Command):\n <mask token>\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\n<mask token>\n",
"step-3": "__author__ = 'tcaruso'\n<mask token>\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n msg = (\n \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\"\n .format(__pip_version__, '.'.join(sys.version_info)))\n raise EnvironmentError(msg)\nhere = os.path.abspath(os.path.dirname(__file__))\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = '[email protected]'\nAUTHOR = 'Tom Caruso'\nREQUIRES_PYTHON = 2, 7, 0\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\nif sys.version_info < REQUIRES_PYTHON:\n raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,\n '.'.join(map(str, REQUIRES_PYTHON))))\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\nabout = {}\n<mask token>\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\nsetup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,\n author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],\n include_package_data=True, entry_points={'console_scripts': [\n 'socket_wait=socket_wait:cli']}, classifiers=[\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython'],\n cmdclass={'upload': UploadCommand})\n",
"step-4": "__author__ = 'tcaruso'\nimport glob\nimport fnmatch\nimport os\nimport sys\nimport warnings\nfrom shutil import rmtree\nfrom setuptools import find_packages, setup, Command\nfrom collections import namedtuple\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n msg = (\n \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\"\n .format(__pip_version__, '.'.join(sys.version_info)))\n raise EnvironmentError(msg)\nhere = os.path.abspath(os.path.dirname(__file__))\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = '[email protected]'\nAUTHOR = 'Tom Caruso'\nREQUIRES_PYTHON = 2, 7, 0\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\nif sys.version_info < REQUIRES_PYTHON:\n raise Exception('Package {} requires python >= {}.'.format(PYPI_NAME,\n '.'.join(map(str, REQUIRES_PYTHON))))\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\nabout = {}\nfrom socket_wait import __version__\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\x1b[1m{0}\\x1b[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n self.status('Installing required build packages...')\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.\n executable))\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n sys.exit()\n\n\nsetup(name=PYPI_NAME, version=about['__version__'], description=DESCRIPTION,\n author=AUTHOR, author_email=EMAIL, url=URL, py_modules=['socket_wait'],\n include_package_data=True, entry_points={'console_scripts': [\n 'socket_wait=socket_wait:cli']}, classifiers=[\n 'Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython'],\n cmdclass={'upload': UploadCommand})\n",
"step-5": "__author__ = 'tcaruso'\n\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport glob\nimport fnmatch\nimport os\nimport sys\nimport warnings\nfrom shutil import rmtree\nfrom setuptools import find_packages, setup, Command\nfrom collections import namedtuple\n\ntry:\n from pip._internal.req import parse_requirements\nexcept ImportError:\n from pip.req import parse_requirements\nexcept Exception:\n from pip import __version__ as __pip_version__\n\n msg = \"\"\"Sorry, could not install due to a pip import error. Please open an issue on the repo \n with this message and the error so it can be addressed.\n\n pip version: {}\n python version: {}\n\n \"\"\".format(__pip_version__, '.'.join(sys.version_info))\n raise EnvironmentError(msg)\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# ------------------------------------------------\n\n# Package meta-data.\n# PACKAGE_NAME is the name of the package directory and the import path. If you use my_package then when installed, you\n# will import the package like `import my_package`.\nPACKAGE_NAME = 'socket_wait'\nDESCRIPTION = 'Listen on a port until a connection is received.'\nURL = 'https://github.com/tomplex/socket_wait'\nEMAIL = '[email protected]'\nAUTHOR = 'Tom Caruso'\n# The minimum Python version required\nREQUIRES_PYTHON = (2, 7, 0)\n# PYPI_NAME is the name of the package on pypi. We'll default to pbvt_{PACKAGE_NAME} so we avoid name collisions\n# with PyPI. You'll use this name to install the package.\nPYPI_NAME = '{}'.format(PACKAGE_NAME)\n# Specify the name of the requirements file we should use. If there is none, then just leave it as is. We'll detect\n\n# ------------------------------------------------\n# Check Python version we're installing against. Bail if it's not correct. 
This will blow up both when we build the\n# package and when someone tries to install it.\n\nif sys.version_info < REQUIRES_PYTHON:\n # Raise if we're trying to install on an unsupported Python version\n raise Exception(\"Package {} requires python >= {}.\".format(PYPI_NAME, '.'.join(map(str, REQUIRES_PYTHON))))\n\nREQUIRES_PYTHON = '>=' + '.'.join(map(str, REQUIRES_PYTHON))\n\n\n# ------------------------------------------------\n# Requirements gathering.\n\n\nabout = {}\nfrom socket_wait import __version__\n\nabout['__version__'] = __version__\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = 'Build and publish the package.'\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def run(self):\n try:\n self.status('Removing previous builds…')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n\n self.status(\"Installing required build packages...\")\n os.system('{0} -m pip install wheel twine'.format(sys.executable))\n\n self.status('Building Source and Wheel (universal) distribution…')\n os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))\n\n self.status('Uploading the package to pypi via Twine…')\n os.system('{0} -m twine upload dist/* '.format(sys.executable))\n\n sys.exit()\n\n\nsetup(\n name=PYPI_NAME,\n version=about['__version__'],\n description=DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n py_modules=['socket_wait'],\n include_package_data=True,\n # If your package has a CLI component, specify it in entry_points.\n # for example, if you want it to be called like \"mycli\" from the command line, and the command line entry\n # point lives in the somepackage/cli.py file, in the function main, you'd construct it like this:\n entry_points={\n 'console_scripts': ['socket_wait=socket_wait:cli'],\n },\n\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython',\n ],\n # setup.py publish support.\n cmdclass={\n 'upload': UploadCommand,\n },\n)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
a = 2
while a == 1:
b = source()
c = function(b)
|
normal
|
{
"blob_id": "56cae7b7a0338bd4a405cdc3cdcd9945a9df8823",
"index": 5839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile a == 1:\n b = source()\n<mask token>\n",
"step-3": "a = 2\nwhile a == 1:\n b = source()\nc = function(b)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib import admin
from pages.blog.models import Blog
admin.site.register(Blog)
|
normal
|
{
"blob_id": "534aaf8371707089522af014a93f3ff6c4f913ff",
"index": 8510,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Blog)\n",
"step-3": "from django.contrib import admin\nfrom pages.blog.models import Blog\nadmin.site.register(Blog)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# @description Exporting outline (boundary faces) of zsoil results to vtu
# @input zsoil results
# @output vtu unstructured grid
# @author Matthias Preisig
# @date 2017/10/10
import numpy as np
from zsoil_tools import zsoil_results as zr
from zsoil_tools import vtktools
pathname = r'\\192.168.1.51\Mandats sur H RAID0\M1010_Tourbillon\stab_panneau'
prob = 'M1010_stabPann_m2_renfLat'
res = zr(pathname,prob)
res.read_rcf()
res.read_his()
tx = [67]
tsteps = []
for kt,step in enumerate(res.steps):
if step.conv_status in [-1]:
if step.time in tx:
tsteps.append(kt)
res.out_steps = tsteps
res.read_dat()
res.read_s00()
for lab in res.ele_group_labels:
if lab=='VOLUMICS':
res.read_s01() # volumics
## elif lab=='SHELLS':
## res.read_s02() # shells
## elif lab=='TRUSSES':
## res.read_s03() # trusses
## elif lab=='BEAMS':
## res.read_s04() # beams
## elif lab=='CONTACT':
## res.read_s07()
##vtktools.write_vtu(res,beams=True,verbose=False)
##vtktools.write_vtu(res,trusses=True,verbose=False)
vtktools.write_vtu(res,vol=True,verbose=False,outline=True)
##vtktools.write_vtu(res,shells=True,verbose=False)
|
normal
|
{
"blob_id": "fb6dd9ec7d8dc80eace90dadc2112c7c27125efd",
"index": 2055,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nres.read_rcf()\nres.read_his()\n<mask token>\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\n<mask token>\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n",
"step-3": "<mask token>\npathname = (\n '\\\\\\\\192.168.1.51\\\\Mandats sur H RAID0\\\\M1010_Tourbillon\\\\stab_panneau')\nprob = 'M1010_stabPann_m2_renfLat'\nres = zr(pathname, prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n",
"step-4": "import numpy as np\nfrom zsoil_tools import zsoil_results as zr\nfrom zsoil_tools import vtktools\npathname = (\n '\\\\\\\\192.168.1.51\\\\Mandats sur H RAID0\\\\M1010_Tourbillon\\\\stab_panneau')\nprob = 'M1010_stabPann_m2_renfLat'\nres = zr(pathname, prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt, step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab == 'VOLUMICS':\n res.read_s01()\nvtktools.write_vtu(res, vol=True, verbose=False, outline=True)\n",
"step-5": "# @description Exporting outline (boundary faces) of zsoil results to vtu\n# @input zsoil results\n# @output vtu unstructured grid\n# @author Matthias Preisig\n# @date 2017/10/10\n\nimport numpy as np\n\nfrom zsoil_tools import zsoil_results as zr\nfrom zsoil_tools import vtktools\n\n\npathname = r'\\\\192.168.1.51\\Mandats sur H RAID0\\M1010_Tourbillon\\stab_panneau'\nprob = 'M1010_stabPann_m2_renfLat'\n\nres = zr(pathname,prob)\nres.read_rcf()\nres.read_his()\ntx = [67]\ntsteps = []\nfor kt,step in enumerate(res.steps):\n if step.conv_status in [-1]:\n if step.time in tx:\n tsteps.append(kt)\nres.out_steps = tsteps\nres.read_dat()\nres.read_s00()\nfor lab in res.ele_group_labels:\n if lab=='VOLUMICS':\n res.read_s01() # volumics\n## elif lab=='SHELLS':\n## res.read_s02() # shells\n## elif lab=='TRUSSES':\n## res.read_s03() # trusses\n## elif lab=='BEAMS':\n## res.read_s04() # beams\n## elif lab=='CONTACT':\n## res.read_s07()\n\n\n##vtktools.write_vtu(res,beams=True,verbose=False)\n##vtktools.write_vtu(res,trusses=True,verbose=False)\nvtktools.write_vtu(res,vol=True,verbose=False,outline=True)\n##vtktools.write_vtu(res,shells=True,verbose=False)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Restaurant"""
def main():
"""Restaurant"""
moeny = int(input())
service = moeny*0.1
vat = moeny*0.07
print("Service Charge : %.2f Baht" %service)
print("VAT : %.2f Baht" %vat)
print("Total : %.2f Baht" %(moeny+vat+service))
main()
|
normal
|
{
"blob_id": "ae6cbb181e024b8c0b222d14120b910919f8cc81",
"index": 3811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny * 0.1\n vat = moeny * 0.07\n print('Service Charge : %.2f Baht' % service)\n print('VAT : %.2f Baht' % vat)\n print('Total : %.2f Baht' % (moeny + vat + service))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny * 0.1\n vat = moeny * 0.07\n print('Service Charge : %.2f Baht' % service)\n print('VAT : %.2f Baht' % vat)\n print('Total : %.2f Baht' % (moeny + vat + service))\n\n\nmain()\n",
"step-4": "\"\"\"Restaurant\"\"\"\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny*0.1\n vat = moeny*0.07\n print(\"Service Charge : %.2f Baht\" %service)\n print(\"VAT : %.2f Baht\" %vat)\n print(\"Total : %.2f Baht\" %(moeny+vat+service))\nmain()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
queries = []
for n in range(2, 51):
for k in range(n, n * n + 1):
queries.append((n, k))
print(len(queries))
for n, k in queries:
print(n, k)
|
normal
|
{
"blob_id": "798d5c68a0aa2057c28d7f333905f20fef965d70",
"index": 2850,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-3": "queries = []\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import sqlite3
import time
import shelve
import os
from constants import *
VEC_TYPES = [
'''
CREATE TABLE "{}"
(ID TEXT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
''',
'''
CREATE TABLE "{}"
(ID INT PRIMARY KEY NOT NULL,
num TEXT NOT NULL);
'''
]
class Vector():
def __init__(self, name, type, url_path):
self._name = name
self._conn = sqlite3.connect(url_path)
self._cur = self._conn.cursor()
# check if table exists, if not create TABLE
self._cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = self._cur.fetchall()
if name not in [val[0] for val in tables]:
self._conn.execute(VEC_TYPES[type].format(self._name.replace('"', '""')))
def __setitem__(self, index, edges):
try:
self._conn.execute(
"""
INSERT INTO "{}" (ID, num)
VALUES (?, ?);
""".format(self._name.replace('"', '""')), (index, edges)
)
except Exception as e:
print(e)
print("Update Failed")
def __getitem__(self, index):
self._cur.execute(
"""
SELECT * FROM "{}"
WHERE ID = ?;
""".format(self._name.replace('"', '""')), (index,)
)
try:
return self._cur.fetchall()[0][1]
except Exception as e:
print(e)
return None
def get_multiple(self, keys):
print(keys)
if len(keys) == 0:
return []
keys = [(key,) for key in keys]
print(keys)
self._cur.executemany(
"""
SELECT * FROM "{}"
WHERE ID = ?;
""".format(self._name.replace('"', '""')), keys
)
try:
a = [val[1] for val in self._cur.fetchall()]
print(a)
return a
except Exception as e:
print(e)
return []
def save(self):
self._conn.commit()
def close(self):
self._conn.close()
"""
vec = Vector("yoav_table", 0, EDGES_VECTOR_PATH)
print(vec[0])
vec[0] = "yo"
print(vec[0])
vec.save()
"""
|
normal
|
{
"blob_id": "0a6cb6d3fad09ab7f0e19b6c79965315c0e0d634",
"index": 4793,
"step-1": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n self._conn.commit()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n <mask token>\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-4": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\nfrom constants import *\nVEC_TYPES = [\n \"\"\"\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ,\n \"\"\"\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n \"\"\"\n ]\n\n\nclass Vector:\n\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace(\n '\"', '\"\"')))\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index, edges))\n except Exception as e:\n print(e)\n print('Update Failed')\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), (index,))\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\"\n .format(self._name.replace('\"', '\"\"')), keys)\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\n<mask token>\n",
"step-5": "import json\nimport sqlite3\nimport time\nimport shelve\nimport os\n\nfrom constants import *\n\n\nVEC_TYPES = [\n '''\n CREATE TABLE \"{}\"\n (ID TEXT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n ''',\n '''\n CREATE TABLE \"{}\"\n (ID INT PRIMARY KEY NOT NULL,\n num TEXT NOT NULL);\n '''\n]\n\n\nclass Vector():\n def __init__(self, name, type, url_path):\n self._name = name\n self._conn = sqlite3.connect(url_path)\n self._cur = self._conn.cursor()\n\n # check if table exists, if not create TABLE\n self._cur.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = self._cur.fetchall()\n if name not in [val[0] for val in tables]:\n self._conn.execute(VEC_TYPES[type].format(self._name.replace('\"', '\"\"')))\n\n\n def __setitem__(self, index, edges):\n try:\n self._conn.execute(\n \"\"\"\n INSERT INTO \"{}\" (ID, num) \n VALUES (?, ?);\n \"\"\".format(self._name.replace('\"', '\"\"')), (index, edges)\n )\n except Exception as e:\n print(e)\n print(\"Update Failed\")\n\n def __getitem__(self, index):\n self._cur.execute(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), (index,)\n )\n try:\n return self._cur.fetchall()[0][1]\n except Exception as e:\n print(e)\n return None\n\n def get_multiple(self, keys):\n print(keys)\n if len(keys) == 0:\n return []\n\n keys = [(key,) for key in keys]\n print(keys)\n self._cur.executemany(\n \"\"\"\n SELECT * FROM \"{}\"\n WHERE ID = ?;\n \"\"\".format(self._name.replace('\"', '\"\"')), keys\n )\n try:\n a = [val[1] for val in self._cur.fetchall()]\n print(a)\n return a\n except Exception as e:\n print(e)\n return []\n\n def save(self):\n self._conn.commit()\n\n def close(self):\n self._conn.close()\n\n\"\"\"\nvec = Vector(\"yoav_table\", 0, EDGES_VECTOR_PATH)\nprint(vec[0])\nvec[0] = \"yo\"\nprint(vec[0])\nvec.save()\n\n\"\"\"",
"step-ids": [
3,
6,
8,
9,
10
]
}
|
[
3,
6,
8,
9,
10
] |
#1. Create a greeting for your program.
print("Welcome to the Band Name Generator")
#2. Ask the user for the city that they grew up in.
city = input("Which city did you grew up in?\n")
#3. Ask the user for the name of a pet.
pet = input("What is the name of the pet?\n")
#4. Combine the name of their city and pet and show them their band name.
Band_name = city + " " + pet
#5. Make sure the input cursor shows on a new line:
print("Your band name could be ", Band_name)
|
normal
|
{
"blob_id": "19962e94afdd3edf298b28b9954f479fefa3bba8",
"index": 8656,
"step-1": "<mask token>\n",
"step-2": "print('Welcome to the Band Name Generator')\n<mask token>\nprint('Your band name could be ', Band_name)\n",
"step-3": "print('Welcome to the Band Name Generator')\ncity = input('Which city did you grew up in?\\n')\npet = input('What is the name of the pet?\\n')\nBand_name = city + ' ' + pet\nprint('Your band name could be ', Band_name)\n",
"step-4": "#1. Create a greeting for your program.\nprint(\"Welcome to the Band Name Generator\")\n\n#2. Ask the user for the city that they grew up in.\ncity = input(\"Which city did you grew up in?\\n\")\n\n#3. Ask the user for the name of a pet.\npet = input(\"What is the name of the pet?\\n\")\n\n#4. Combine the name of their city and pet and show them their band name.\nBand_name = city + \" \" + pet\n\n#5. Make sure the input cursor shows on a new line:\nprint(\"Your band name could be \", Band_name)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.conf.urls import url
from .import views
app_name='user'
# user sub-routes
urlpatterns = [
    # user home page
url(r'^$',views.index,name='index'),
    # user login
url('login/', views.login, name='login'),
    # user registration
url('regist/', views.regist, name='regist'),
    # check whether the user exists by id
url(r'^getuser\w*/(?P<id>\d*)', views.getUserById, name='getuser'),
    # endpoint for sending the SMS verification code
url(r'^sendmessage\w*/(?P<user_telephone>\d*)',views.sendMessage,name='sendmessage'),
    # look up certain user information by token postRight
url(r'gettoken', views.getUserbyToken, name='getUserbyToken'),
]
|
normal
|
{
"blob_id": "de7b5e44c5c213e4ab70b0f8c0c402edaf4926e0",
"index": 211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'user'\nurlpatterns = [url('^$', views.index, name='index'), url('login/', views.\n login, name='login'), url('regist/', views.regist, name='regist'), url(\n '^getuser\\\\w*/(?P<id>\\\\d*)', views.getUserById, name='getuser'), url(\n '^sendmessage\\\\w*/(?P<user_telephone>\\\\d*)', views.sendMessage, name=\n 'sendmessage'), url('gettoken', views.getUserbyToken, name=\n 'getUserbyToken')]\n",
"step-3": "from django.conf.urls import url\nfrom . import views\napp_name = 'user'\nurlpatterns = [url('^$', views.index, name='index'), url('login/', views.\n login, name='login'), url('regist/', views.regist, name='regist'), url(\n '^getuser\\\\w*/(?P<id>\\\\d*)', views.getUserById, name='getuser'), url(\n '^sendmessage\\\\w*/(?P<user_telephone>\\\\d*)', views.sendMessage, name=\n 'sendmessage'), url('gettoken', views.getUserbyToken, name=\n 'getUserbyToken')]\n",
"step-4": "\nfrom django.conf.urls import url\nfrom .import views\n\napp_name='user'\n# user子路由\nurlpatterns = [\n\n # user首页\n url(r'^$',views.index,name='index'),\n\n # 用户登录\n url('login/', views.login, name='login'),\n\n # 用户注册\n url('regist/', views.regist, name='regist'),\n\n # 根据id判断用户是否存在\n url(r'^getuser\\w*/(?P<id>\\d*)', views.getUserById, name='getuser'),\n\n # 获取短信验证接口\n url(r'^sendmessage\\w*/(?P<user_telephone>\\d*)',views.sendMessage,name='sendmessage'),\n\n # 根据token查询一定的用户信息 postRight\n url(r'gettoken', views.getUserbyToken, name='getUserbyToken'),\n\n\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import Blueprint, request, make_response
from untils import restful, cacheuntil
from untils.captcha import Captcha
from exts import smsapi
from .forms import SMSCaptchaForm
from io import BytesIO
bp = Blueprint('common', __name__, url_prefix='/c')
# @bp.route('/sms_captcha/', methods=['post'])
# def sms_captcha():
# telephone = request.form.get('telephone')
# if not telephone:
# return restful.params_error(message='请传入手机号码!')
# code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串
# resp = smsapi.send_sms(telephone=telephone, param=code)
# if resp:
# return restful.success(message='短信验证码发送成功!')
# else:
# return restful.params_error(message='短信验证码发送失败!')
# TODO: send the SMS verification code
@bp.route('/sms_captcha/', methods=['post'])
def sms_captcha():
form = SMSCaptchaForm(request.form)
if form.validate():
        telephone = form.telephone.data # TODO: get the phone number
        code = Captcha.gene_text(number=4) # TODO: generate a random 4-digit code string
resp = smsapi.send_sms(telephone=telephone, param=code)
if resp:
            cacheuntil.set(telephone, code) # TODO: store the SMS code in redis
return restful.success(message='短信验证码发送成功!')
else:
return restful.params_error(message='短信验证码发送失败!')
else:
return restful.params_error(message=form.get_random_error(), data=form.get_all_errors())
# TODO: image captcha view
@bp.route('/captcha/')
def CaptchaView():
text, image = Captcha.gene_graph_captcha()
    cacheuntil.set(text.lower(), text.lower()) # TODO: store the image captcha text in redis
out = BytesIO()
    # TODO: save the image into the BytesIO buffer in png format
image.save(out, 'png')
    # TODO: after saving, move the pointer back to the start
out.seek(0)
    # TODO: read the buffer and return it as an image/png response
resp = make_response(out.read())
resp.content_type = 'image/png'
return resp
|
normal
|
{
"blob_id": "856beaf3b9dad333d5b48c1be3a8ad917f8d020c",
"index": 3634,
"step-1": "<mask token>\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-2": "<mask token>\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-3": "<mask token>\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-4": "from flask import Blueprint, request, make_response\nfrom untils import restful, cacheuntil\nfrom untils.captcha import Captcha\nfrom exts import smsapi\nfrom .forms import SMSCaptchaForm\nfrom io import BytesIO\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n code = Captcha.gene_text(number=4)\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code)\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=\n form.get_all_errors())\n\n\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-5": "from flask import Blueprint, request, make_response\nfrom untils import restful, cacheuntil\nfrom untils.captcha import Captcha\nfrom exts import smsapi\nfrom .forms import SMSCaptchaForm\nfrom io import BytesIO\n\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\n# @bp.route('/sms_captcha/', methods=['post'])\n# def sms_captcha():\n# telephone = request.form.get('telephone')\n# if not telephone:\n# return restful.params_error(message='请传入手机号码!')\n# code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串\n# resp = smsapi.send_sms(telephone=telephone, param=code)\n# if resp:\n# return restful.success(message='短信验证码发送成功!')\n# else:\n# return restful.params_error(message='短信验证码发送失败!')\n\n\n# TODO: 发送短信验证码\[email protected]('/sms_captcha/', methods=['post'])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data # TODO: 获取手机号\n code = Captcha.gene_text(number=4) # TODO: 获取随机4位数字字符串\n resp = smsapi.send_sms(telephone=telephone, param=code)\n if resp:\n cacheuntil.set(telephone, code) # TODO: redis存储短信验证码\n return restful.success(message='短信验证码发送成功!')\n else:\n return restful.params_error(message='短信验证码发送失败!')\n else:\n return restful.params_error(message=form.get_random_error(), data=form.get_all_errors())\n\n\n# TODO: 图形验证码视图\[email protected]('/captcha/')\ndef CaptchaView():\n text, image = Captcha.gene_graph_captcha()\n cacheuntil.set(text.lower(), text.lower()) # TODO: redis存储图片验证码\n out = BytesIO()\n # TODO: 将图片保存到IO中格式png\n image.save(out, 'png')\n # TODO: 保存完毕后,移动指针到起始位置\n out.seek(0)\n # TODO: 将IO读取出来转为image/png响应\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#! /usr/bin/env python3
__all__ = [
'FrameCorners',
'CornerStorage',
'build',
'dump',
'load',
'draw',
'without_short_tracks'
]
import click
import cv2
import numpy as np
import pims
from _corners import FrameCorners, CornerStorage, StorageImpl
from _corners import dump, load, draw, without_short_tracks, create_cli
class _CornerStorageBuilder:
def __init__(self, progress_indicator=None):
self._progress_indicator = progress_indicator
self._corners = dict()
def set_corners_at_frame(self, frame, corners):
self._corners[frame] = corners
if self._progress_indicator is not None:
self._progress_indicator.update(1)
def build_corner_storage(self):
return StorageImpl(item[1] for item in sorted(self._corners.items()))
def to_uint8_image(img):
img = img * 255.0
img = np.round(img)
return img.astype(np.uint8)
class CornerTracker:
MAX_CORNERS = 1300
INITIAL_QUALITY_LEVEL = 0.03
QUALITY_LEVEL = 0.15
MIN_DISTANCE = 6
BLOCK_SIZE = 5
CIRCLE_SIZE = 14
MAX_LEVEL_LK = 2
TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
def __init__(self):
self.total_corners = 0
def get_circles_mask(self, shape, points):
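        # Build a mask that blocks a MIN_DISTANCE-radius circle around each existing corner
        # so newly detected corners keep their distance from the tracked ones.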
mask = np.full(shape, 255, dtype=np.uint8)
for x, y in points:
cv2.circle(mask,
center=(x, y),
radius=self.MIN_DISTANCE,
color=0,
thickness=-1)
return mask
def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):
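        # Detect up to num_corners Shi-Tomasi corners; every corner gets the same CIRCLE_SIZE radius.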
points = cv2.goodFeaturesToTrack(img,
mask=mask,
maxCorners=num_corners,
qualityLevel=quality_level,
minDistance=self.MIN_DISTANCE,
blockSize=self.BLOCK_SIZE)
if points is None:
return None, None
num_points = points.shape[0]
sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])
return points, sizes
    def get_corners(self, new_img, old_img=None, old_corners=None):
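        # First frame: detect a fresh set of corners. Later frames: track the previous corners
        # with optical flow, then top up with new detections outside the masked regions.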
if old_img is None:
points, sizes = self.find_new_corners(new_img)
ids = np.arange(len(points))
points = points.reshape((-1, 2))
self.total_corners = len(points)
return FrameCorners(ids, points, sizes)
else:
ids = old_corners.ids
points = old_corners.points
sizes = old_corners.sizes
nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),
to_uint8_image(new_img),
prevPts=points,
nextPts=None,
winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),
maxLevel=self.MAX_LEVEL_LK,
criteria=self.TERM_CRITERIA)
status = status.squeeze()
found = np.where(status == 1)
ids = ids[found]
points = nextPts[found]
sizes = sizes[found]
mask = self.get_circles_mask(new_img.shape, points)
if len(points) < self.MAX_CORNERS:
new_points, new_sizes = self.find_new_corners(new_img,
self.MAX_CORNERS - len(points),
mask,
self.QUALITY_LEVEL)
if new_points is not None:
new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))
new_ids = new_ids.reshape((-1, 1))
new_points = new_points.reshape((-1, 2))
new_sizes = new_sizes.reshape((-1, 1))
self.total_corners += len(new_points)
ids = np.concatenate([ids, new_ids])
points = np.concatenate([points, new_points])
sizes = np.concatenate([sizes, new_sizes])
points = points.reshape((-1, 2))
return FrameCorners(ids, points, sizes)
def _build_impl(frame_sequence: pims.FramesSequence,
builder: _CornerStorageBuilder) -> None:
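    # Detect corners on the first frame, then track them through the rest of the sequence.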
cornerTracker = CornerTracker()
image_0 = frame_sequence[0]
corners = cornerTracker.get_corners(image_0)
builder.set_corners_at_frame(0, corners)
for frame, image_1 in enumerate(frame_sequence[1:], 1):
corners = cornerTracker.get_corners(image_1, image_0, corners)
builder.set_corners_at_frame(frame, corners)
image_0 = image_1
def build(frame_sequence: pims.FramesSequence,
progress: bool = True) -> CornerStorage:
"""
Build corners for all frames of a frame sequence.
:param frame_sequence: grayscale float32 frame sequence.
:param progress: enable/disable building progress bar.
:return: corners for all frames of given sequence.
"""
if progress:
with click.progressbar(length=len(frame_sequence),
label='Calculating corners') as progress_bar:
builder = _CornerStorageBuilder(progress_bar)
_build_impl(frame_sequence, builder)
else:
builder = _CornerStorageBuilder()
_build_impl(frame_sequence, builder)
corner_storage = builder.build_corner_storage()
final_storage = without_short_tracks(corner_storage, min_len=20)
return final_storage
if __name__ == '__main__':
create_cli(build)() # pylint:disable=no-value-for-parameter
|
normal
|
{
"blob_id": "0b5fb649dc421187820677ce75f3cd0e804c18a3",
"index": 7055,
"step-1": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\n<mask token>\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\n<mask token>\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n 
_build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param 
frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-4": "__all__ = ['FrameCorners', 'CornerStorage', 'build', 'dump', 'load', 'draw',\n 'without_short_tracks']\n<mask token>\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask, center=(x, y), radius=self.MIN_DISTANCE, color\n =0, thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None,\n quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img, mask=mask, maxCorners=\n num_corners, qualityLevel=quality_level, minDistance=self.\n MIN_DISTANCE, blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img=None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(\n old_img), to_uint8_image(new_img), prevPts=points, nextPts=\n None, winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK, criteria=self.TERM_CRITERIA)\n status = status.squeeze()\n found = np.where(status == 1)\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img, self\n .MAX_CORNERS - len(points), mask, self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.\n total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence, builder:\n _CornerStorageBuilder) ->None:\n cornerTracker = CornerTracker()\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, corners)\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence, progress: 
bool=True\n ) ->CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence), label=\n 'Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)()\n",
"step-5": "#! /usr/bin/env python3\n\n__all__ = [\n 'FrameCorners',\n 'CornerStorage',\n 'build',\n 'dump',\n 'load',\n 'draw',\n 'without_short_tracks'\n]\n\nimport click\nimport cv2\nimport numpy as np\nimport pims\n\nfrom _corners import FrameCorners, CornerStorage, StorageImpl\nfrom _corners import dump, load, draw, without_short_tracks, create_cli\n\n\nclass _CornerStorageBuilder:\n\n def __init__(self, progress_indicator=None):\n self._progress_indicator = progress_indicator\n self._corners = dict()\n\n def set_corners_at_frame(self, frame, corners):\n self._corners[frame] = corners\n if self._progress_indicator is not None:\n self._progress_indicator.update(1)\n\n def build_corner_storage(self):\n return StorageImpl(item[1] for item in sorted(self._corners.items()))\n\n\ndef to_uint8_image(img):\n img = img * 255.0\n img = np.round(img)\n return img.astype(np.uint8)\n\n\nclass CornerTracker:\n MAX_CORNERS = 1300\n INITIAL_QUALITY_LEVEL = 0.03\n QUALITY_LEVEL = 0.15\n MIN_DISTANCE = 6\n BLOCK_SIZE = 5\n CIRCLE_SIZE = 14\n MAX_LEVEL_LK = 2\n TERM_CRITERIA = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)\n\n def __init__(self):\n self.total_corners = 0\n\n def get_circles_mask(self, shape, points):\n mask = np.full(shape, 255, dtype=np.uint8)\n for x, y in points:\n cv2.circle(mask,\n center=(x, y),\n radius=self.MIN_DISTANCE,\n color=0,\n thickness=-1)\n return mask\n\n def find_new_corners(self, img, num_corners=MAX_CORNERS, mask=None, quality_level=INITIAL_QUALITY_LEVEL):\n points = cv2.goodFeaturesToTrack(img,\n mask=mask,\n maxCorners=num_corners,\n qualityLevel=quality_level,\n minDistance=self.MIN_DISTANCE,\n blockSize=self.BLOCK_SIZE)\n if points is None:\n return None, None\n\n num_points = points.shape[0]\n sizes = np.array([self.CIRCLE_SIZE for _ in range(num_points)])\n return points, sizes\n\n def get_corners(self, new_img, old_img = None, old_corners=None):\n if old_img is None:\n points, sizes = self.find_new_corners(new_img)\n ids = np.arange(len(points))\n points = points.reshape((-1, 2))\n self.total_corners = len(points)\n return FrameCorners(ids, points, sizes)\n else:\n ids = old_corners.ids\n points = old_corners.points\n sizes = old_corners.sizes\n\n nextPts, status, err = cv2.calcOpticalFlowPyrLK(to_uint8_image(old_img),\n to_uint8_image(new_img),\n prevPts=points,\n nextPts=None,\n winSize=(self.CIRCLE_SIZE, self.CIRCLE_SIZE),\n maxLevel=self.MAX_LEVEL_LK,\n criteria=self.TERM_CRITERIA)\n\n status = status.squeeze()\n found = np.where(status == 1)\n\n ids = ids[found]\n points = nextPts[found]\n sizes = sizes[found]\n\n mask = self.get_circles_mask(new_img.shape, points)\n if len(points) < self.MAX_CORNERS:\n new_points, new_sizes = self.find_new_corners(new_img,\n self.MAX_CORNERS - len(points),\n mask,\n self.QUALITY_LEVEL)\n if new_points is not None:\n new_ids = np.arange(self.total_corners, self.total_corners + len(new_points))\n new_ids = new_ids.reshape((-1, 1))\n new_points = new_points.reshape((-1, 2))\n new_sizes = new_sizes.reshape((-1, 1))\n self.total_corners += len(new_points)\n ids = np.concatenate([ids, new_ids])\n points = np.concatenate([points, new_points])\n sizes = np.concatenate([sizes, new_sizes])\n\n points = points.reshape((-1, 2))\n return FrameCorners(ids, points, sizes)\n\n\ndef _build_impl(frame_sequence: pims.FramesSequence,\n builder: _CornerStorageBuilder) -> None:\n cornerTracker = CornerTracker()\n\n image_0 = frame_sequence[0]\n corners = cornerTracker.get_corners(image_0)\n builder.set_corners_at_frame(0, 
corners)\n\n for frame, image_1 in enumerate(frame_sequence[1:], 1):\n corners = cornerTracker.get_corners(image_1, image_0, corners)\n builder.set_corners_at_frame(frame, corners)\n image_0 = image_1\n\n\ndef build(frame_sequence: pims.FramesSequence,\n progress: bool = True) -> CornerStorage:\n \"\"\"\n Build corners for all frames of a frame sequence.\n :param frame_sequence: grayscale float32 frame sequence.\n :param progress: enable/disable building progress bar.\n :return: corners for all frames of given sequence.\n \"\"\"\n if progress:\n with click.progressbar(length=len(frame_sequence),\n label='Calculating corners') as progress_bar:\n builder = _CornerStorageBuilder(progress_bar)\n _build_impl(frame_sequence, builder)\n else:\n builder = _CornerStorageBuilder()\n _build_impl(frame_sequence, builder)\n\n corner_storage = builder.build_corner_storage()\n final_storage = without_short_tracks(corner_storage, min_len=20)\n\n return final_storage\n\n\nif __name__ == '__main__':\n create_cli(build)() # pylint:disable=no-value-for-parameter",
"step-ids": [
10,
12,
14,
15,
17
]
}
|
[
10,
12,
14,
15,
17
] |
# Copyright (c) 2019 Jannika Lossner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
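    # Convention subclass of SimpleFreeFieldHRIR: requires exactly two receivers and listener
    # Up/View vectors, and stores FIRE impulse-response data for a reverberant room.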
name = "MultiSpeakerBRIR"
version = "0.3"
def __init__(self):
super().__init__()
self.default_objects["Receiver"]["count"] = 2
#self.default_data["IR"] = 1
self.conditions["must have 2 Receivers"] = lambda name, fixed, variances, count: name != "Receiver" or count == 2
self.conditions["must have Listener Up and View"] = lambda name, fixed, variances, count: name != "Listener" or ("Up" in fixed + variances and "View" in fixed + variances)
self.conditions["must have both Emitter View and Up or neither"] = lambda name, fixed, variances, count: name != "Emitter" or "View" not in fixed + variances or ("Up" in fixed + variances and "View" in fixed + variances)
def add_metadata(self, database):
super().add_metadata(database)
database.Data.Type = "FIRE"
database.Room.Type = "reverberant"
return
|
normal
|
{
"blob_id": "e30bd33ae18881307e7cf4f60d3c60eae91573bc",
"index": 181,
"step-1": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n <mask token>\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-2": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-3": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-4": "from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-5": "# Copyright (c) 2019 Jannika Lossner\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = \"MultiSpeakerBRIR\"\n version = \"0.3\"\n def __init__(self):\n super().__init__()\n self.default_objects[\"Receiver\"][\"count\"] = 2\n\n #self.default_data[\"IR\"] = 1\n\n self.conditions[\"must have 2 Receivers\"] = lambda name, fixed, variances, count: name != \"Receiver\" or count == 2\n self.conditions[\"must have Listener Up and View\"] = lambda name, fixed, variances, count: name != \"Listener\" or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n self.conditions[\"must have both Emitter View and Up or neither\"] = lambda name, fixed, variances, count: name != \"Emitter\" or \"View\" not in fixed + variances or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n\n database.Data.Type = \"FIRE\"\n database.Room.Type = \"reverberant\"\n return\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import re
import cgi
import os
import urllib
import urllib2
from time import sleep
from google.appengine.api import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import backends
from google.appengine.api import logservice
logservice.AUTOFLUSH_EVERY_SECONDS = None
logservice.AUTOFLUSH_EVERY_BYTES = None
logservice.AUTOFLUSH_ENABLED = False
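# Microsoft Web N-Gram service endpoints: REQUESTURL looks up joint probabilities
# (the handlers below treat the returned values as log10 probabilities); GENURL is the generation endpoint.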
MONTH = "jun09"
NGRAM = "3"
PROB = "jp"
DATASET = "bing-body"
REQUESTURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/"+PROB+"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
GENURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
class lexicon0(db.Model):
word = db.StringProperty(required = True)
known = db.StringListProperty(indexed = False)
def lexicon_key(lexicon_name=None):
return db.Key.from_path('lexicon0', lexicon_name or 'default')
def combination(wordlist, t):  # t carries the joint probability of the original query; pass 0 on the first (top-level) call
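    # Greedily merges adjacent word pairs whenever a merged variant outscores the current query,
    # recursing until no merge improves the joint probability.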
tempc = wordlist
combinationqueryset = [listtostr(tempc[:i] +
["%s%s"%(tempc[i],tempc[i+1])] +
tempc[i+2:] ) for i in range(0, len(tempc)-1)]
cquery = listtostr(tempc)
combinationqueryset.append(cquery)
results = getjp1('',combinationqueryset,'')
dictionary = dict(results)
x = results.index((cquery,dictionary[cquery]))
if (t==0): t = dictionary[cquery]
if (results[0][0] == cquery):
return (cquery,results[0][1],t)
else:
dictionary = dict(results)
x = results.index((cquery,dictionary[cquery]))
y = list()
for i in range(x):
y.append(combinationqueryset.index(results[i][0]))
y.sort(reverse = True)
cache = wordlist
for z in y:
cache[z] += cache[z+1]
del cache[z+1]
return combination(cache,t)
def spacesplits(wordlist):
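    # Tries inserting a space at every interior position of every word and recurses on the
    # best-scoring variant until the unsplit query wins.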
temps = wordlist
query = listtostr(temps)
strings = []
for i in range(len(temps)):
for y in range(1,len(temps[i])):
strings.append(listtostr(temps[:i]+list([temps[i][:y],temps[i][y:]])+temps[i+1:]))
strings.append(query)
results = getjp1('',strings,'')
if (results[0][0] == query):
return (query,results[0][1])
else:
return spacesplits(results[0][0].split())
def getjp(before,wordlist,after):
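    # Batch joint-probability lookup; each score is normalised by the number of
    # multi-character words in the candidate (see querylength).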
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before+" "+str(x)+" "+after+"\n"
string = string.strip()
jps = list()
jps = urllib2.urlopen(
urllib2.Request(REQUESTURL,str(string))).read().split()
for i in range(len(jps)):
jps[i] = float(jps[i])/(querylength(wordli[i]))
dictionary = dict(zip(wordli,jps))
return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)
def getjp1(before,wordlist,after):
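    # Same batch lookup as getjp, but without the length normalisation.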
global REQUESTURL
string = ''
for x in wordlist:
string += before+" "+str(x)+" "+after+"\n"
string = string.strip()
jps = list()
jps = urllib2.urlopen(
urllib2.Request(REQUESTURL,str(string))).read().split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist,jps))
return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)
class mainpage(webapp.RequestHandler):
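    # Main speller endpoint: collects candidates from word merging (combination), space splitting
    # (spacesplits) and per-word corrections (getdictionary), then ranks them by joint probability.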
def get(self):
global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/"+PROB+"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
GENURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist,0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try: dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query:0})
try:
if (query != cquery): dictionary.update(getdictionary(cquery.split()))
except: dictionary.update({cquery:0})
try:
if (query != squery): dictionary.update(getdictionary(squery.split()))
except: dictionary.update({squery:0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('',finallist,'')
final = list()
for i in range(len(result)):
final.append(10**((result[i][1])))
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0])+"\t"+printresult[i]+"\n")
except:
self.response.out.write(query+"\t"+str(1))
class maintest(webapp.RequestHandler):
def get(self):
global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(REQUESTURL+"\n")
self.response.out.write(GENURL)
def getdictionary(wordelist):
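    # Fires one asynchronous urlfetch per word at the word-level spell-check backend and
    # expands the returned corrections into full-query candidates.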
global MONTH,DATASET,NGRAM,PROB
dictionaryy = dict()
rpcs = []
for i in range(len(wordelist)):
if i<3: t=0
else: t = i-3
form_fields = {
"word": wordelist[i],
"before": listtostr(wordelist[t:i]),
"after": listtostr(wordelist[i+1:i+4]),
"m": MONTH,
"d": DATASET,
"ng": NGRAM,
"pp": PROB
}
formdata = urllib.urlencode(form_fields)
rpc = urlfetch.create_rpc()
url = "http://timetest.forbackend.appspot.com/wordspellcheck"
#rpc.callback = create_callback(rpc)
urlfetch.make_fetch_call(rpc,
url,
payload = formdata,
method = urlfetch.POST)
rpcs.append(rpc)
resultts = list()
for rpc in rpcs:
result = rpc.get_result()
resultts.append(result.content)
#self.response.out.write(results)
#self.response.out.write(wordee)
dictionaryy[listtostr(wordelist)] = 0
for i in range(len(wordelist)):
if resultts[i] == wordelist[i]: continue
else:
for j in range(i,len(wordelist)+1):
pp = listtostr(wordelist[:i]+resultts[i:j]+wordelist[j:])
dictionaryy[pp] = 0
return dictionaryy
class splittest(webapp.RequestHandler):
def get(self):
query = self.request.get('q')
wordlist = query.split()
splitted = combination(wordlist,0)
self.response.out.write(splitted)
def querylength(query):
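    # Number of words longer than one character (at least 1); used by getjp to normalise scores.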
liste = query.split()
counte = 0
for x in liste:
if len(x)>1: counte += 1
if counte == 0: return 1
else: return counte
def listtostr(wordlist):
string = ''
for word in wordlist:
string += word+" "
string = string.strip()
return string
#def create_callback(rpc):
def normalize(problist):
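    # Rescale the candidate probabilities to sum to 1, rounded to three decimals.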
tot = 0
for x in problist:
tot += x
returnlist = list()
for i in range(len(problist)):
returnlist.append(str(round((problist[i]/tot),3)))
return returnlist
application = webapp.WSGIApplication([
    ('/mainpage', maintest),  # routes are swapped: the real speller (the mainpage handler) is served at /maintest, the officially submitted URL
('/maintest',mainpage),
('/split',splittest)],
debug = True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "c8a6a8633f863e0350157346106a747096d26939",
"index": 9912,
"step-1": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\n<mask token>\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\n<mask token>\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot 
= 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\n<mask token>\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\n<mask token>\n\n\ndef listtostr(wordlist):\n string = ''\n for word in 
wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist, t):\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc\n [i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('', combinationqueryset, '')\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n if t == 0:\n t = dictionary[cquery]\n if results[0][0] == cquery:\n return cquery, results[0][1], t\n else:\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse=True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z + 1]\n del cache[z + 1]\n return combination(cache, t)\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n 
final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\ndef getdictionary(wordelist):\n global MONTH, DATASET, NGRAM, PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i < 3:\n t = 0\n else:\n t = i - 3\n form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[\n t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,\n 'd': DATASET, 'ng': NGRAM, 'pp': PROB}\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = 'http://timetest.forbackend.appspot.com/wordspellcheck'\n urlfetch.make_fetch_call(rpc, url, payload=formdata, method=\n urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]:\n continue\n else:\n for j in range(i, len(wordelist) + 1):\n pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x) > 1:\n counte += 1\n if counte == 0:\n return 1\n else:\n return counte\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-4": "import re\nimport cgi\nimport os\nimport urllib\nimport urllib2\nfrom time import sleep\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import backends\nfrom google.appengine.api import logservice\nlogservice.AUTOFLUSH_EVERY_SECONDS = None\nlogservice.AUTOFLUSH_EVERY_BYTES = None\nlogservice.AUTOFLUSH_ENABLED = False\nMONTH = 'jun09'\nNGRAM = '3'\nPROB = 'jp'\nDATASET = 'bing-body'\nREQUESTURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\nGENURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist, t):\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc\n [i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('', combinationqueryset, '')\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n if t == 0:\n t = dictionary[cquery]\n if results[0][0] == cquery:\n return cquery, results[0][1], t\n else:\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse=True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z + 1]\n del cache[z + 1]\n return combination(cache, t)\n\n\ndef spacesplits(wordlist):\n temps = wordlist\n query = listtostr(temps)\n strings = []\n for i in range(len(temps)):\n for y in range(1, len(temps[i])):\n strings.append(listtostr(temps[:i] + list([temps[i][:y], temps[\n i][y:]]) + temps[i + 1:]))\n strings.append(query)\n results = getjp1('', strings, '')\n if results[0][0] == query:\n return query, results[0][1]\n else:\n return spacesplits(results[0][0].split())\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if 
len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\ndef getdictionary(wordelist):\n global MONTH, DATASET, NGRAM, PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i < 3:\n t = 0\n else:\n t = i - 3\n form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[\n t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,\n 'd': DATASET, 'ng': NGRAM, 'pp': PROB}\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = 'http://timetest.forbackend.appspot.com/wordspellcheck'\n urlfetch.make_fetch_call(rpc, url, payload=formdata, method=\n urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]:\n continue\n else:\n for j in range(i, len(wordelist) + 1):\n pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x) > 1:\n counte += 1\n if counte == 0:\n return 1\n else:\n return counte\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\napplication = 
webapp.WSGIApplication([('/mainpage', maintest), ('/maintest',\n mainpage), ('/split', splittest)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\n\nimport re\nimport cgi\nimport os\nimport urllib\nimport urllib2\n\nfrom time import sleep\n\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import backends\nfrom google.appengine.api import logservice\nlogservice.AUTOFLUSH_EVERY_SECONDS = None\nlogservice.AUTOFLUSH_EVERY_BYTES = None\nlogservice.AUTOFLUSH_ENABLED = False\n\nMONTH = \"jun09\"\nNGRAM = \"3\"\nPROB = \"jp\"\nDATASET = \"bing-body\"\nREQUESTURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/\"+PROB+\"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\nGENURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required = True)\n known = db.StringListProperty(indexed = False)\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist,t):#argument t is to notify that it is the main query while using cobination for first time\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] +\n [\"%s%s\"%(tempc[i],tempc[i+1])] +\n tempc[i+2:] ) for i in range(0, len(tempc)-1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('',combinationqueryset,'')\n dictionary = dict(results)\n x = results.index((cquery,dictionary[cquery]))\n if (t==0): t = dictionary[cquery]\n if (results[0][0] == cquery):\n return (cquery,results[0][1],t)\n else:\n dictionary = dict(results)\n x = results.index((cquery,dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse = True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z+1]\n del cache[z+1]\n return combination(cache,t)\n \ndef spacesplits(wordlist):\n temps = wordlist\n query = listtostr(temps)\n strings = []\n for i in range(len(temps)):\n for y in range(1,len(temps[i])):\n strings.append(listtostr(temps[:i]+list([temps[i][:y],temps[i][y:]])+temps[i+1:]))\n strings.append(query) \n results = getjp1('',strings,'')\n if (results[0][0] == query):\n return (query,results[0][1])\n else:\n return spacesplits(results[0][0].split())\n\n\n\ndef getjp(before,wordlist,after): \n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before+\" \"+str(x)+\" \"+after+\"\\n\"\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(\n urllib2.Request(REQUESTURL,str(string))).read().split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])/(querylength(wordli[i]))\n dictionary = dict(zip(wordli,jps))\n return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)\n\ndef getjp1(before,wordlist,after): \n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before+\" \"+str(x)+\" \"+after+\"\\n\"\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(\n urllib2.Request(REQUESTURL,str(string))).read().split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist,jps))\n return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)\n\nclass mainpage(webapp.RequestHandler):\n def get(self):\n global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL\n if len(self.request.get('m')):\n MONTH = 
str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/\"+PROB+\"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\" \n GENURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist,0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try: dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query:0})\n try:\n if (query != cquery): dictionary.update(getdictionary(cquery.split()))\n except: dictionary.update({cquery:0})\n try:\n if (query != squery): dictionary.update(getdictionary(squery.split()))\n except: dictionary.update({squery:0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('',finallist,'')\n final = list()\n for i in range(len(result)):\n final.append(10**((result[i][1])))\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0])+\"\\t\"+printresult[i]+\"\\n\")\n except:\n self.response.out.write(query+\"\\t\"+str(1))\n \n\n \nclass maintest(webapp.RequestHandler):\n def get(self):\n global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL+\"\\n\")\n self.response.out.write(GENURL)\n \n\n\ndef getdictionary(wordelist):\n global MONTH,DATASET,NGRAM,PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i<3: t=0\n else: t = i-3\n form_fields = {\n \"word\": wordelist[i],\n \"before\": listtostr(wordelist[t:i]),\n \"after\": listtostr(wordelist[i+1:i+4]),\n \"m\": MONTH,\n \"d\": DATASET,\n \"ng\": NGRAM,\n \"pp\": PROB\n }\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = \"http://timetest.forbackend.appspot.com/wordspellcheck\"\n #rpc.callback = create_callback(rpc)\n urlfetch.make_fetch_call(rpc,\n url,\n payload = formdata,\n method = urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n #self.response.out.write(results)\n #self.response.out.write(wordee)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]: continue\n else:\n for j in range(i,len(wordelist)+1):\n pp = listtostr(wordelist[:i]+resultts[i:j]+wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n \nclass splittest(webapp.RequestHandler):\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist,0)\n self.response.out.write(splitted)\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x)>1: counte += 1\n if counte == 0: return 1\n else: return counte\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word+\" \"\n string = string.strip()\n return string\n#def create_callback(rpc):\n \ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in 
range(len(problist)):\n returnlist.append(str(round((problist[i]/tot),3)))\n return returnlist\n \napplication = webapp.WSGIApplication([\n ('/mainpage',maintest),#### the main speller is in main page web handler as i submitted maintest as the official submission i changed this\n ('/maintest',mainpage),\n ('/split',splittest)],\n debug = True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
13,
14,
17,
21,
22
]
}
|
[
13,
14,
17,
21,
22
] |
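For reference, the probability post-processing in the spell-check service above comes down to the `normalize` helper: candidate joint probabilities are rescaled to sum to 1 and rounded to three decimals. A condensed, standalone re-implementation follows; the example scores are made up.

def normalize(problist):
    total = sum(problist)
    return [str(round(p / total, 3)) for p in problist]

# three hypothetical candidate rewrites with joint log10-probabilities -1.2, -2.0, -2.3
scores = [10 ** -1.2, 10 ** -2.0, 10 ** -2.3]
print(normalize(scores))  # ['0.808', '0.128', '0.064']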
import pandas


class _RegressionModelTable(object):
    def __init__(self, regression_models, function_to_evaluate_model=None, function_to_select_model=None):
        if not isinstance(regression_models, list):
            regression_models = [regression_models]
        self._check_model_inputs(regression_models, function_to_evaluate_model, function_to_select_model)
        self._function_to_evaluate_model = function_to_evaluate_model
        self._function_to_select_model = function_to_select_model
        self._regression_model_list = regression_models
        self._table_evaluation_dict = {}
        self._all_fit_models_table_dict = {}
        self._fit_model_table_dict = {}

    @property
    def pandas_table(self):
        model_names = [model.__str__() for model in self._regression_model_list]
        df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)
        df = df.transpose()
        return df

    @classmethod
    def _check_model_inputs(cls, regression_models, function_to_evaluate_model, function_to_select_model):
        if len(regression_models) > 1:
            if function_to_select_model is None or function_to_evaluate_model is None:
                raise ValueError("Functions to evaluate and select regression models must be specified "
                                 "in case of regression model list.")

    def initialize_tables(self, label_names):
        n_models = len(self._regression_model_list)
        self._table_evaluation_dict = {LABEL_NAME: [None]*n_models for LABEL_NAME in label_names}
        self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in label_names}

    def evaluate_label_models(self, x, y, label_name):
        label_tuple_list = list(map(lambda model: self.evaluate_model(model, x, y), self._regression_model_list))
        # print("TUPLES! Window, Model", label_tuple_list[0][0], label_tuple_list[0][0]._model)
        self._all_fit_models_table_dict[label_name] = [T[0] for T in label_tuple_list]
        self._table_evaluation_dict[label_name] = [T[1] for T in label_tuple_list]

    def evaluate_model(self, model, x, y):
        model, value = self._function_to_evaluate_model(model, x, y)
        return model, value

    def return_selected_label_model(self, label_name):
        if len(self._regression_model_list) == 1:
            # print("unique model")
            return self._all_fit_models_table_dict[label_name][0]
        if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):
            raise ValueError("Some models were not evaluated")
        return self._function_to_select_model(self._all_fit_models_table_dict[label_name], self._table_evaluation_dict[label_name])

    @staticmethod
    def _is_any_none_in_list(list_):
        return any(list(map(lambda x: x is None, list_)))

    def set_label_regression_model(self, model, label_name):
        self._fit_model_table_dict[label_name] = model

    def return_label_regression_model(self, label_name):
        return self._fit_model_table_dict[label_name]

    @classmethod
    def _predict_func(cls, model, x_instance, n_samples):
        return model.predict(x_instance, n_samples)
|
normal
|
{
"blob_id": "94264e121bb31a08cbd9766be1ff16173d2838ed",
"index": 5331,
"step-1": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n <mask token>\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n <mask token>\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-3": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list\n ]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-4": "import pandas\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list\n ]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n\n def return_selected_label_model(self, label_name):\n if len(self._regression_model_list) == 1:\n return self._all_fit_models_table_dict[label_name][0]\n if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):\n raise ValueError('Some models were not evaluated')\n return self._function_to_select_model(self.\n _all_fit_models_table_dict[label_name], self.\n _table_evaluation_dict[label_name])\n\n @staticmethod\n def _is_any_none_in_list(list_):\n return any(list(map(lambda x: x is None, list_)))\n\n def set_label_regression_model(self, model, label_name):\n self._fit_model_table_dict[label_name] = model\n\n def return_label_regression_model(self, label_name):\n return self._fit_model_table_dict[label_name]\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-5": "import pandas\n\n\nclass _RegressionModelTable(object):\n def __init__(self, regression_models, function_to_evaluate_model=None, function_to_select_model=None):\n\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n\n self._check_model_inputs(regression_models, function_to_evaluate_model, function_to_select_model)\n\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models, function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if function_to_select_model is None or function_to_evaluate_model is None:\n raise ValueError(\"Functions to evaluate and select regression models must be specified \"\n \"in case of regression model list.\")\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: [None]*n_models for LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model, x, y), self._regression_model_list))\n # print(\"TUPLES! Window, Model\", label_tuple_list[0][0], label_tuple_list[0][0]._model)\n self._all_fit_models_table_dict[label_name] = [T[0] for T in label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n\n def return_selected_label_model(self, label_name):\n if len(self._regression_model_list) == 1:\n # print(\"unique model\")\n return self._all_fit_models_table_dict[label_name][0]\n if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):\n raise ValueError(\"Some models were not evaluated\")\n return self._function_to_select_model(self._all_fit_models_table_dict[label_name], self._table_evaluation_dict[label_name])\n\n @staticmethod\n def _is_any_none_in_list(list_):\n return any(list(map(lambda x: x is None, list_)))\n\n def set_label_regression_model(self, model, label_name):\n self._fit_model_table_dict[label_name] = model\n\n def return_label_regression_model(self, label_name):\n return self._fit_model_table_dict[label_name]\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-ids": [
6,
7,
8,
13,
14
]
}
|
[
6,
7,
8,
13,
14
] |
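A minimal usage sketch for the `_RegressionModelTable` in this row, assuming the class definition above is available in the same session. The `MeanModel`/`LastValueModel` stand-ins, the mean-squared-error evaluator, and the lowest-score selector are all made up for illustration; only the call order (initialize_tables, evaluate_label_models, return_selected_label_model, set_label_regression_model) comes from the class itself.

import numpy as np

class MeanModel(object):                      # hypothetical stand-in regressor
    def fit(self, x, y):
        self._mean = float(np.mean(y))
        return self
    def predict(self, x_instance, n_samples=1):
        return np.full(len(x_instance), self._mean)
    def __str__(self):
        return "MeanModel"

class LastValueModel(object):                 # hypothetical stand-in regressor
    def fit(self, x, y):
        self._last = float(y[-1])
        return self
    def predict(self, x_instance, n_samples=1):
        return np.full(len(x_instance), self._last)
    def __str__(self):
        return "LastValueModel"

def evaluate_by_mse(model, x, y):             # returns (fitted model, score), as the table expects
    fitted = model.fit(x, y)
    mse = float(np.mean((fitted.predict(x) - y) ** 2))
    return fitted, mse

def select_lowest_score(models, scores):      # smaller error wins
    return models[int(np.argmin(scores))]

table = _RegressionModelTable([MeanModel(), LastValueModel()],
                              function_to_evaluate_model=evaluate_by_mse,
                              function_to_select_model=select_lowest_score)
table.initialize_tables(["label_a"])
x, y = np.arange(10.0), 2.0 * np.arange(10.0)
table.evaluate_label_models(x, y, "label_a")
best = table.return_selected_label_model("label_a")
table.set_label_regression_model(best, "label_a")
print(table.pandas_table)                     # one row per label, one column per model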
def filter(txt):  # can be improved using regular expression
    output = []
    for t in txt:
        if t == "(" or t == ")" or t == "[" or t == "]":
            output.append(t)
    return output


# Read lines until a lone "." and record whether each line's brackets are balanced.
result = []
while True:
    raw_input = input()
    line = filter(raw_input)

    if raw_input != ".":
        stack = []
        err = False

        for l in line:
            try:
                if l == "(" or l == "[":
                    stack.append(l)
                elif l == "]":
                    if stack[len(stack) - 1] == "[":
                        stack.pop()
                    else:
                        err = True
                        break
                elif l == ")":
                    if stack[len(stack) - 1] == "(":
                        stack.pop()
                    else:
                        err = True
                        break
            except:
                # peeking an empty stack: a closing bracket had no opener
                err = True
                break
        if err == True or len(stack) != 0:
            result.append("no")
        else:
            result.append("yes")
    else:
        break

for r in result:
    print(r)
|
normal
|
{
"blob_id": "9ca769ae8bbabee20b5dd4d75ab91d3c30e8d1bf",
"index": 8387,
"step-1": "<mask token>\n",
"step-2": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\n",
"step-3": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-4": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\nresult = []\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-5": "def filter(txt): # can be improved using regular expression\n\toutput = []\n\tfor t in txt:\n\t\tif t == \"(\" or t == \")\" or t == \"[\" or t == \"]\":\n\t\t\toutput.append(t)\n\treturn output\n\nresult = []\nwhile True:\n\traw_input = input()\n\tline = filter(raw_input)\n\t\n\tif raw_input != \".\":\n\t\tstack = []\n\t\terr = False\n\t\t\n\t\tfor l in line:\n\t\t\ttry:\n\t\t\t\tif l == \"(\" or l == \"[\":\n\t\t\t\t\tstack.append(l)\n\t\t\t\telif l == \"]\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"[\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\t\telif l == \")\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"(\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\terr = True\n\t\t\t\tbreak\n\t\tif err == True or len(stack) != 0:\n\t\t\tresult.append(\"no\")\n\t\telse:\n\t\t\tresult.append(\"yes\")\n\telse:\n\t\tbreak\n\nfor r in result:\n\tprint(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
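The `filter` helper in this row carries a comment that it "can be improved using regular expression"; here is a minimal sketch of that variant. The name `filter_brackets` is ours, chosen so the Python built-in `filter` is not shadowed.

import re

def filter_brackets(txt):
    # keep only the four bracket characters, in order of appearance
    return re.findall(r"[()\[\]]", txt)

print(filter_brackets("So (this) is a [test]."))  # ['(', ')', '[', ']']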
"""PriceTrail URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from .views import validate_product, display_product
#user related views
from .views import index_view, login_view, register_view, profile_view
#products related views
from .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, \
test_update_prices, test_update_all_prices
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
# implemented views
    url(r'^$', index_view, name='index'),  # this will become the index page
url(r'^login/$', login_view, name='login'),
url(r'^register/$', register_view, name='register'),
url(r'^profile/$', profile_view, name='profile'),
url(r'^my-products/$', my_products_view, name='my-products'),
url(r'^my-products/(?P<filter>[\w-]+)', my_products_view, name='my-products'),
url(r'^delete-product/(?P<id>\d+)/', delete_product, name='delete-product'),
url(r'^add-new-product/$', add_new_product, name='add-new-product'),
url(r'^validate-product/$', validate_product, name='validate-product'),
url(r'^dashboard/$', dashboard_view, name='dashboard'),
url(r'^edit-profile/$', edit_profile_view, name='edit-profile'),
#modal window
url(r'^display-product/(?P<id>\d+)/', display_product, name='display-product'),
url(r'^product-details/(?P<id>\d+)/', product_details_view, name='product-details'),
#superuser endpoints
url(r'^test_notifications/$', test_email_notifications, name='test-view'),
url(r'^test_update_prices/(?P<id>\w+)/', test_update_prices, name='update-prices'),
url(r'^test_update_all_prices/$', test_update_all_prices, name='update-all-prices'),
]
|
normal
|
{
"blob_id": "06627821c09d02543974a3c90664e84e11c980ed",
"index": 7631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^admin/', admin.site.urls), url('^logout/$', auth_views\n .logout, {'next_page': '/'}, name='logout'), url('^$', index_view, name\n ='index'), url('^login/$', login_view, name='login'), url('^register/$',\n register_view, name='register'), url('^profile/$', profile_view, name=\n 'profile'), url('^my-products/$', my_products_view, name='my-products'),\n url('^my-products/(?P<filter>[\\\\w-]+)', my_products_view, name=\n 'my-products'), url('^delete-product/(?P<id>\\\\d+)/', delete_product,\n name='delete-product'), url('^add-new-product/$', add_new_product, name\n ='add-new-product'), url('^validate-product/$', validate_product, name=\n 'validate-product'), url('^dashboard/$', dashboard_view, name=\n 'dashboard'), url('^edit-profile/$', edit_profile_view, name=\n 'edit-profile'), url('^display-product/(?P<id>\\\\d+)/', display_product,\n name='display-product'), url('^product-details/(?P<id>\\\\d+)/',\n product_details_view, name='product-details'), url(\n '^test_notifications/$', test_email_notifications, name='test-view'),\n url('^test_update_prices/(?P<id>\\\\w+)/', test_update_prices, name=\n 'update-prices'), url('^test_update_all_prices/$',\n test_update_all_prices, name='update-all-prices')]\n",
"step-3": "<mask token>\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom .views import validate_product, display_product\nfrom .views import index_view, login_view, register_view, profile_view\nfrom .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, test_update_prices, test_update_all_prices\nurlpatterns = [url('^admin/', admin.site.urls), url('^logout/$', auth_views\n .logout, {'next_page': '/'}, name='logout'), url('^$', index_view, name\n ='index'), url('^login/$', login_view, name='login'), url('^register/$',\n register_view, name='register'), url('^profile/$', profile_view, name=\n 'profile'), url('^my-products/$', my_products_view, name='my-products'),\n url('^my-products/(?P<filter>[\\\\w-]+)', my_products_view, name=\n 'my-products'), url('^delete-product/(?P<id>\\\\d+)/', delete_product,\n name='delete-product'), url('^add-new-product/$', add_new_product, name\n ='add-new-product'), url('^validate-product/$', validate_product, name=\n 'validate-product'), url('^dashboard/$', dashboard_view, name=\n 'dashboard'), url('^edit-profile/$', edit_profile_view, name=\n 'edit-profile'), url('^display-product/(?P<id>\\\\d+)/', display_product,\n name='display-product'), url('^product-details/(?P<id>\\\\d+)/',\n product_details_view, name='product-details'), url(\n '^test_notifications/$', test_email_notifications, name='test-view'),\n url('^test_update_prices/(?P<id>\\\\w+)/', test_update_prices, name=\n 'update-prices'), url('^test_update_all_prices/$',\n test_update_all_prices, name='update-all-prices')]\n",
"step-4": "\"\"\"PriceTrail URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom .views import validate_product, display_product\n\n#user related views\nfrom .views import index_view, login_view, register_view, profile_view\n#products related views\nfrom .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, \\\n test_update_prices, test_update_all_prices\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),\n\n # implemented views\n url(r'^$', index_view, name='index'),#this will became index\n url(r'^login/$', login_view, name='login'),\n url(r'^register/$', register_view, name='register'),\n url(r'^profile/$', profile_view, name='profile'),\n url(r'^my-products/$', my_products_view, name='my-products'),\n url(r'^my-products/(?P<filter>[\\w-]+)', my_products_view, name='my-products'),\n url(r'^delete-product/(?P<id>\\d+)/', delete_product, name='delete-product'),\n url(r'^add-new-product/$', add_new_product, name='add-new-product'),\n url(r'^validate-product/$', validate_product, name='validate-product'),\n url(r'^dashboard/$', dashboard_view, name='dashboard'),\n url(r'^edit-profile/$', edit_profile_view, name='edit-profile'),\n\n #modal window\n url(r'^display-product/(?P<id>\\d+)/', display_product, name='display-product'),\n url(r'^product-details/(?P<id>\\d+)/', product_details_view, name='product-details'),\n\n #superuser endpoints\n url(r'^test_notifications/$', test_email_notifications, name='test-view'),\n url(r'^test_update_prices/(?P<id>\\w+)/', test_update_prices, name='update-prices'),\n url(r'^test_update_all_prices/$', test_update_all_prices, name='update-all-prices'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
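A side note on the urls.py in this row: it targets Django 1.11's `url()`. If the project were later moved to Django 2.0 or newer, the same routes would normally be written with `path()`/`re_path()`; a hypothetical sketch for two of them, with the view imports assumed identical to the row's.

from django.urls import path, re_path

urlpatterns = [
    path('add-new-product/', add_new_product, name='add-new-product'),
    # regex kept as-is ...
    re_path(r'^delete-product/(?P<id>\d+)/', delete_product, name='delete-product'),
    # ... or rewritten with a path converter:
    # path('delete-product/<int:id>/', delete_product, name='delete-product'),
]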
from flask import Flask,Response,render_template,url_for,request,jsonify
from flask_bootstrap import Bootstrap
import pandas as pd
import gpt_2_simple as gpt2
import json
app = Flask(__name__)
Bootstrap(app)
#Main Page
@app.route('/')
def interactive_input():
return render_template('main.html')
#Creating the different routes
@app.route('/food_1_star')
def food_1_star():
return render_template('food_1.html')
@app.route('/food_5_star')
def food_5_star():
return render_template('food_5.html')
@app.route('/general_1_star')
def general_1_star():
return render_template('general_1.html')
@app.route('/general_5_star')
def general_5_star():
return render_template('general_5.html')
# Generates three 1-star food-review continuations for the submitted prompt
@app.route('/food_1')
def food_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')
my_prediction = gpt2.generate(sess, run_name= 'food_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
# Generates three 5-star food-review continuations for the submitted prompt
@app.route('/food_5')
def food_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_5_star_large')
my_prediction = gpt2.generate(sess, run_name= 'food_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
# Generates three 1-star general-review continuations for the submitted prompt
@app.route('/general_1')
def general_1():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_1_star_large')
my_prediction = gpt2.generate(sess, run_name= 'general_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
# Generates three 5-star general-review continuations for the submitted prompt
@app.route('/general_5')
def general_5():
try:
lang = request.args.get('message', 0, type=str)
complexity = request.args.get('complexity', 0, type=str)
complexity = float(complexity)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='general_5_star_large')
my_prediction = gpt2.generate(sess, run_name= 'general_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)
res1 = str(my_prediction[0]).replace('<|endoftext|>', '')
res2 = str(my_prediction[1]).replace('<|endoftext|>', '')
res3 = str(my_prediction[2]).replace('<|endoftext|>', '')
return jsonify(result1 = res1, result2 = res2, result3 = res3)
except Exception as e:
return str(e)
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "1e41cc5d2661f1fb4f3a356318fabcb2b742cbdf",
"index": 1826,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\n<mask token>\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\n<mask token>\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\[email protected]('/food_5_star')\ndef food_5_star():\n return render_template('food_5.html')\n\n\[email protected]('/general_1_star')\ndef general_1_star():\n return render_template('general_1.html')\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, 
result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n",
"step-4": "<mask token>\napp = Flask(__name__)\nBootstrap(app)\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\[email protected]('/food_5_star')\ndef food_5_star():\n return render_template('food_5.html')\n\n\[email protected]('/general_1_star')\ndef general_1_star():\n return render_template('general_1.html')\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = 
str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask,Response,render_template,url_for,request,jsonify\nfrom flask_bootstrap import Bootstrap\nimport pandas as pd \nimport gpt_2_simple as gpt2\nimport json\n\n\napp = Flask(__name__)\nBootstrap(app)\n\n#Main Page\[email protected]('/')\ndef interactive_input():\n\treturn render_template('main.html')\n\n#Creating the different routes\[email protected]('/food_1_star')\ndef food_1_star():\n\treturn render_template('food_1.html')\n\[email protected]('/food_5_star')\ndef food_5_star():\n\treturn render_template('food_5.html')\n\[email protected]('/general_1_star')\ndef general_1_star():\n\treturn render_template('general_1.html')\n\[email protected]('/general_5_star')\ndef general_5_star():\n\treturn render_template('general_5.html')\n\n#Reactive function that will enable the code to run \[email protected]('/food_1')\ndef food_1():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='food_1_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'food_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/food_5')\ndef food_5():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='food_5_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'food_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/general_1')\ndef general_1():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='general_1_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'general_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/general_5')\ndef general_5():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = 
request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='general_5_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'general_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\nif __name__ == '__main__':\n\tapp.run(debug=True)",
"step-ids": [
6,
7,
9,
11,
13
]
}
|
[
6,
7,
9,
11,
13
] |
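One design note on the Flask app in this row: every request opens a new TensorFlow session and reloads the GPT-2 checkpoint, which dominates the response time. Below is a hedged sketch of the usual alternative, loading once at import time and reusing the session. Only gpt_2_simple calls already present in the row are used, the checkpoint name is carried over, the variable names `prompt`/`temperature`/`samples` are ours, and thread-safety of the shared session is not addressed.

from flask import Flask, request, jsonify
import gpt_2_simple as gpt2

app = Flask(__name__)

# load the fine-tuned checkpoint once, not on every request
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, run_name='food_1_star_large')

@app.route('/food_1')
def food_1():
    prompt = request.args.get('message', 0, type=str)
    temperature = float(request.args.get('complexity', 0, type=str))
    samples = gpt2.generate(sess, run_name='food_1_star_large',
                            temperature=temperature, length=15, prefix=prompt,
                            sample_delim='<|endoftext|>', include_prefix=False,
                            nsamples=3, return_as_list=True)
    res1, res2, res3 = [s.replace('<|endoftext|>', '') for s in samples]
    return jsonify(result1=res1, result2=res2, result3=res3)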
#!/usr/bin/env python
#coding:utf-8
"""
Author: Wusf --<[email protected]>
Purpose:
Created: 2016/2/29
"""
import os,sys,sqlite3
MyQtLibPath = os.path.abspath("D:\\MyQuantLib\\")
sys.path.append(MyQtLibPath)
import PCA.PCA_For_Stat_Arb2 as pca
import pandas as pd
import numpy as np
import time
def ComputeZScores(begDate,endDate,computeZScore,paramsDict,mark):
v = paramsDict['EigenNum']
varPecent = paramsDict['VarExplained']
corrSampleDays = paramsDict['CorrMatSampleDays']
regressSampleDays = paramsDict['RegressSampleDays']
ouSampleDays = paramsDict['OUFitSampleDays']
ifDeTrend = paramsDict['IfDeTrend']
ifAdjustReturnByVol = paramsDict['IfAdjRetByVol']
days = []
score=pd.DataFrame()
score_adj = pd.DataFrame()
revsD=pd.DataFrame()
rsqrd=pd.DataFrame()
significantEigNum = []
c = 0
for i in range(600,len(computeZScore.trdDay)):
scoreDate = computeZScore.trdDay[i]
if scoreDate>=begDate and scoreDate<endDate:
c+=1
tm1 = time.time()
k = 1
for i in range(600,len(computeZScore.trdDay)):
scoreDate = computeZScore.trdDay[i]
if scoreDate>=begDate and scoreDate<endDate:
if k==1 or i%1==0:
reEstCorrDay = computeZScore.trdDay[i]
computeZScore.GenEigenPort(reEstCorrDay,v,varPecent,corrSampleDays,0.05)
print "Re estimate correlation matrix and process PCA"
computeZScore.RegressOnEigenFactor(scoreDate,regressSampleDays,ifAdjustReturnByVol,winsorize=0)
res = computeZScore.OUFitAndCalcZScore(ouSampleDays,ifDeTrend)
significantEigNum.append(computeZScore.significantEigNum)
_score = res[0].loc['score'].to_frame(scoreDate).transpose()
_scoreAdj = res[0].loc['score_adj'].to_frame(scoreDate).transpose()
_revsD = res[0].loc['period'].to_frame(scoreDate).transpose()
_rsqrd = res[1].to_frame(scoreDate).transpose()
score = score.append(_score)
score_adj=score_adj.append(_scoreAdj)
revsD = revsD.append(_revsD)
rsqrd = rsqrd.append(_rsqrd)
k+=1
tm2 = time.time()
deltaT = int((tm2-tm1)/k*(c-k))
print "Generating zscore,date:{},paras:{}|{}|{}|{}......{}s left".format(scoreDate,v,varPecent,corrSampleDays,regressSampleDays,deltaT)
score.to_csv("ZScores{}.csv".format(mark))
score_adj.to_csv("ZScores_adj{}.csv".format(mark))
revsD.to_csv("ReversePerid{}.csv".format(mark))
rsqrd.to_csv("RegressionRSquared{}.csv".format(mark))
np.savetxt("sigEigNum{}.csv".format(mark), np.array(significantEigNum), delimiter=",")
if __name__=="__main__":
computeZScore = pca.PCA_For_Stat_Arb("MktData\\MktData_Wind_CICC.db", 1,"20050104")
computeZScore.LoadDataIntoTimeSeries("000300","000300",1)
begDate = "20080101"
endDate = "20160310"
params1 = {}
params1['EigenNum']=0
params1['VarExplained']=0.60
params1['CorrMatSampleDays']=100
params1['RegressSampleDays']=40
params1['OUFitSampleDays']=40
params1['IfDeTrend']=0
params1['IfAdjRetByVol']=0
params2 = {}
params2['EigenNum']=0
params2['VarExplained']=0.60
params2['CorrMatSampleDays']=80
params2['RegressSampleDays']=40
params2['OUFitSampleDays']=40
params2['IfDeTrend']=0
params2['IfAdjRetByVol']=0
params3 = {}
params3['EigenNum']=0
params3['VarExplained']=0.60
params3['CorrMatSampleDays']=60
params3['RegressSampleDays']=40
params3['OUFitSampleDays']=40
params3['IfDeTrend']=0
params3['IfAdjRetByVol']=0
params4 = {}
params4['EigenNum']=0
params4['VarExplained']=0.60
params4['CorrMatSampleDays']=40
params4['RegressSampleDays']=40
params4['OUFitSampleDays']=40
params4['IfDeTrend']=0
params4['IfAdjRetByVol']=0
params5 = {}
params5['EigenNum']=0
params5['VarExplained']=0.65
params5['CorrMatSampleDays']=60
params5['RegressSampleDays']=40
params5['OUFitSampleDays']=40
params5['IfDeTrend']=0
params5['IfAdjRetByVol']=0
params6 = {}
params6['EigenNum']=0
params6['VarExplained']=0.60
params6['CorrMatSampleDays']=60
params6['RegressSampleDays']=40
params6['OUFitSampleDays']=40
params6['IfDeTrend']=1
params6['IfAdjRetByVol']=0
params7 = {}
params7['EigenNum']=0
params7['VarExplained']=0.60
params7['CorrMatSampleDays']=60
params7['RegressSampleDays']=60
params7['OUFitSampleDays']=60
params7['IfDeTrend']=0
params7['IfAdjRetByVol']=0
ComputeZScores(begDate,endDate,computeZScore,params1,'Params301')
ComputeZScores(begDate,endDate,computeZScore,params2,'Params302')
ComputeZScores(begDate,endDate,computeZScore,params3,'Params303')
ComputeZScores(begDate,endDate,computeZScore,params4,'Params304')
ComputeZScores(begDate,endDate,computeZScore,params5,'Params305')
ComputeZScores(begDate,endDate,computeZScore,params6,'Params306')
ComputeZScores(begDate,endDate,computeZScore,params7,'Params307')
|
normal
|
{
"blob_id": "70cda2d6d3928cd8008daf221cd78665a9b05eea",
"index": 7064,
"step-1": "#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n Author: Wusf --<[email protected]>\n Purpose: \n Created: 2016/2/29\n\"\"\"\n\nimport os,sys,sqlite3\nMyQtLibPath = os.path.abspath(\"D:\\\\MyQuantLib\\\\\")\nsys.path.append(MyQtLibPath)\n\nimport PCA.PCA_For_Stat_Arb2 as pca\nimport pandas as pd\nimport numpy as np\nimport time\n\n\ndef ComputeZScores(begDate,endDate,computeZScore,paramsDict,mark):\n\n v = paramsDict['EigenNum']\n varPecent = paramsDict['VarExplained']\n corrSampleDays = paramsDict['CorrMatSampleDays']\n regressSampleDays = paramsDict['RegressSampleDays']\n ouSampleDays = paramsDict['OUFitSampleDays']\n ifDeTrend = paramsDict['IfDeTrend']\n ifAdjustReturnByVol = paramsDict['IfAdjRetByVol']\n\n \n days = []\n score=pd.DataFrame()\n score_adj = pd.DataFrame()\n revsD=pd.DataFrame()\n rsqrd=pd.DataFrame()\n significantEigNum = []\n \n c = 0\n for i in range(600,len(computeZScore.trdDay)):\n scoreDate = computeZScore.trdDay[i]\n if scoreDate>=begDate and scoreDate<endDate: \n c+=1\n \n tm1 = time.time()\n k = 1\n for i in range(600,len(computeZScore.trdDay)):\n scoreDate = computeZScore.trdDay[i]\n if scoreDate>=begDate and scoreDate<endDate:\n if k==1 or i%1==0:\n reEstCorrDay = computeZScore.trdDay[i]\n computeZScore.GenEigenPort(reEstCorrDay,v,varPecent,corrSampleDays,0.05)\n print \"Re estimate correlation matrix and process PCA\"\n computeZScore.RegressOnEigenFactor(scoreDate,regressSampleDays,ifAdjustReturnByVol,winsorize=0)\n res = computeZScore.OUFitAndCalcZScore(ouSampleDays,ifDeTrend)\n significantEigNum.append(computeZScore.significantEigNum)\n _score = res[0].loc['score'].to_frame(scoreDate).transpose()\n _scoreAdj = res[0].loc['score_adj'].to_frame(scoreDate).transpose()\n _revsD = res[0].loc['period'].to_frame(scoreDate).transpose()\n _rsqrd = res[1].to_frame(scoreDate).transpose()\n score = score.append(_score)\n score_adj=score_adj.append(_scoreAdj)\n revsD = revsD.append(_revsD)\n rsqrd = rsqrd.append(_rsqrd)\n k+=1\n tm2 = time.time()\n deltaT = int((tm2-tm1)/k*(c-k))\n print \"Generating zscore,date:{},paras:{}|{}|{}|{}......{}s left\".format(scoreDate,v,varPecent,corrSampleDays,regressSampleDays,deltaT)\n score.to_csv(\"ZScores{}.csv\".format(mark))\n score_adj.to_csv(\"ZScores_adj{}.csv\".format(mark))\n revsD.to_csv(\"ReversePerid{}.csv\".format(mark))\n rsqrd.to_csv(\"RegressionRSquared{}.csv\".format(mark))\n np.savetxt(\"sigEigNum{}.csv\".format(mark), np.array(significantEigNum), delimiter=\",\")\n\n\n\n\nif __name__==\"__main__\":\n computeZScore = pca.PCA_For_Stat_Arb(\"MktData\\\\MktData_Wind_CICC.db\", 1,\"20050104\")\n computeZScore.LoadDataIntoTimeSeries(\"000300\",\"000300\",1) \n begDate = \"20080101\"\n endDate = \"20160310\"\n params1 = {}\n params1['EigenNum']=0\n params1['VarExplained']=0.60\n params1['CorrMatSampleDays']=100\n params1['RegressSampleDays']=40\n params1['OUFitSampleDays']=40\n params1['IfDeTrend']=0\n params1['IfAdjRetByVol']=0\n \n params2 = {}\n params2['EigenNum']=0\n params2['VarExplained']=0.60\n params2['CorrMatSampleDays']=80\n params2['RegressSampleDays']=40\n params2['OUFitSampleDays']=40\n params2['IfDeTrend']=0\n params2['IfAdjRetByVol']=0 \n \n params3 = {}\n params3['EigenNum']=0\n params3['VarExplained']=0.60\n params3['CorrMatSampleDays']=60\n params3['RegressSampleDays']=40\n params3['OUFitSampleDays']=40\n params3['IfDeTrend']=0\n params3['IfAdjRetByVol']=0 \n \n params4 = {}\n params4['EigenNum']=0\n params4['VarExplained']=0.60\n params4['CorrMatSampleDays']=40\n params4['RegressSampleDays']=40\n 
params4['OUFitSampleDays']=40\n params4['IfDeTrend']=0\n params4['IfAdjRetByVol']=0 \n \n params5 = {}\n params5['EigenNum']=0\n params5['VarExplained']=0.65\n params5['CorrMatSampleDays']=60\n params5['RegressSampleDays']=40\n params5['OUFitSampleDays']=40\n params5['IfDeTrend']=0\n params5['IfAdjRetByVol']=0 \n \n params6 = {}\n params6['EigenNum']=0\n params6['VarExplained']=0.60\n params6['CorrMatSampleDays']=60\n params6['RegressSampleDays']=40\n params6['OUFitSampleDays']=40\n params6['IfDeTrend']=1\n params6['IfAdjRetByVol']=0 \n \n params7 = {}\n params7['EigenNum']=0\n params7['VarExplained']=0.60\n params7['CorrMatSampleDays']=60\n params7['RegressSampleDays']=60\n params7['OUFitSampleDays']=60\n params7['IfDeTrend']=0\n params7['IfAdjRetByVol']=0 \n \n \n ComputeZScores(begDate,endDate,computeZScore,params1,'Params301')\n ComputeZScores(begDate,endDate,computeZScore,params2,'Params302')\n ComputeZScores(begDate,endDate,computeZScore,params3,'Params303')\n ComputeZScores(begDate,endDate,computeZScore,params4,'Params304')\n ComputeZScores(begDate,endDate,computeZScore,params5,'Params305')\n ComputeZScores(begDate,endDate,computeZScore,params6,'Params306')\n ComputeZScores(begDate,endDate,computeZScore,params7,'Params307')\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import data_helpers
def write_to_file(file,line):
file.write(line+"\n")
def cat_map():
catmap={}
id=1
f=open("cat")
cat=set([s.strip() for s in list(f.readlines())])
for i in cat:
catmap[i]=id
id=id+1
return catmap
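# Note (added for illustration, not part of the original script): cat_map() builds a
# {category: integer id} lookup from the "cat" file that this script writes below,
# but it is defined and never called here.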
tree = ET.ElementTree(file="test.xml")
root = tree.getroot()
cnn=open("cnn","a")
lstm=open("lstm","a")
cat=open("cat","a")
for vespaadd in root:
document = vespaadd.find("document")
if(document!=None):
subject = document.find("subject")
content = document.find("content")
maincat = document.find("maincat")
if(subject==None):
continue
if(content==None):
content=subject
if(maincat==None):
continue
write_to_file(cnn,data_helpers.clean_str(subject.text))
write_to_file(lstm,data_helpers.clean_str(content.text))
write_to_file(cat,data_helpers.clean_str(maincat.text))
cnn.close()
lstm.close()
cat.close()
|
normal
|
{
"blob_id": "04538cc5c9c68582cc9aa2959faae2d7547ab2ee",
"index": 302,
"step-1": "<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\n<mask token>\n",
"step-2": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\n<mask token>\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-3": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n<mask token>\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-4": "try:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\nimport data_helpers\n\n\ndef write_to_file(file, line):\n file.write(line + '\\n')\n\n\ndef cat_map():\n catmap = {}\n id = 1\n f = open('cat')\n cat = set([s.strip() for s in list(f.readlines())])\n for i in cat:\n catmap[i] = id\n id = id + 1\n return catmap\n\n\ntree = ET.ElementTree(file='test.xml')\nroot = tree.getroot()\ncnn = open('cnn', 'a')\nlstm = open('lstm', 'a')\ncat = open('cat', 'a')\nfor vespaadd in root:\n document = vespaadd.find('document')\n if document != None:\n subject = document.find('subject')\n content = document.find('content')\n maincat = document.find('maincat')\n if subject == None:\n continue\n if content == None:\n content = subject\n if maincat == None:\n continue\n write_to_file(cnn, data_helpers.clean_str(subject.text))\n write_to_file(lstm, data_helpers.clean_str(content.text))\n write_to_file(cat, data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()\n",
"step-5": "try:\n\timport xml.etree.cElementTree as ET\nexcept ImportError:\n\timport xml.etree.ElementTree as ET\nimport data_helpers\n\ndef write_to_file(file,line):\n\tfile.write(line+\"\\n\")\n\ndef cat_map():\n\tcatmap={}\n\tid=1\n\tf=open(\"cat\")\n\tcat=set([s.strip() for s in list(f.readlines())])\n\tfor i in cat:\n\t\tcatmap[i]=id\n\t\tid=id+1\n\treturn catmap\n\ntree = ET.ElementTree(file=\"test.xml\")\nroot = tree.getroot()\ncnn=open(\"cnn\",\"a\")\nlstm=open(\"lstm\",\"a\")\ncat=open(\"cat\",\"a\")\nfor vespaadd in root:\n\tdocument = vespaadd.find(\"document\")\n\tif(document!=None):\n\t\tsubject = document.find(\"subject\")\n\t\tcontent = document.find(\"content\")\n\t\tmaincat = document.find(\"maincat\")\n\t\tif(subject==None):\n\t\t\tcontinue\n\t\tif(content==None):\n\t\t\tcontent=subject\n\t\tif(maincat==None):\n\t\t\tcontinue\n\t\twrite_to_file(cnn,data_helpers.clean_str(subject.text))\n\t\twrite_to_file(lstm,data_helpers.clean_str(content.text))\n\t\twrite_to_file(cat,data_helpers.clean_str(maincat.text))\ncnn.close()\nlstm.close()\ncat.close()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Euclidean distance
# http://en.wikipedia.org/wiki/Euclidean_space
# Computes the distance between two points in a multidimensional space
def euclidean(p,q):
sumSq=0.0
    # Accumulate the squared differences
for i in range(len(p)):
sumSq+=(p[i]-q[i])**2
    # Take the square root
return (sumSq**0.5)
#print euclidean([3,4,5],[4,5,6])
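# Worked example (added for illustration, not part of the original file):
# for p = [0, 0] and q = [3, 4], sumSq = 3**2 + 4**2 = 25, so the function
# returns 25**0.5 = 5.0.
#print euclidean([0,0],[3,4]) # -> 5.0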
|
normal
|
{
"blob_id": "11a7ebac3dad1f91a6d46b62f557b51ded8e3d7a",
"index": 1271,
"step-1": "<mask token>\n",
"step-2": "def euclidean(p, q):\n sumSq = 0.0\n for i in range(len(p)):\n sumSq += (p[i] - q[i]) ** 2\n return sumSq ** 0.5\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# ユークリッド距離\n# http://en.wikipedia.org/wiki/Euclidean_space\n\n# 多次元空間中での 2 点間の距離を探索する\n\ndef euclidean(p,q):\n sumSq=0.0\n # 差の平方を加算\n for i in range(len(p)):\n sumSq+=(p[i]-q[i])**2\n # 平方根\n return (sumSq**0.5)\n\n#print euclidean([3,4,5],[4,5,6])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import torch
class Activation(torch.nn.Module):
def __init__(self):
super().__init__()
self.swish = lambda x: x * torch.sigmoid(x)
self.linear = lambda x: x
self.sigmoid = lambda x: torch.sigmoid(x)
self.neg = lambda x: -x
self.sine = lambda x: torch.sin(x)
self.params = torch.nn.Parameter(torch.zeros(10))
def forward(self, x):
params = torch.sigmoid(self.params)
linear_x = self.linear(x) * params[0]
swish_x = self.swish(x) * params[1]
sigmoid_x = self.sigmoid(x) * params[2]
neg_x = self.neg(x) * params[3]
sine_x = self.sine(x) * params[4]
x = swish_x + linear_x + sigmoid_x + neg_x + sine_x
return x
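# Note (added for illustration, not part of the original file): Activation learns a
# sigmoid-gated mixture of five elementwise functions (linear, swish, sigmoid,
# negation, sine); ten parameters are allocated but only the first five are used.
# Illustrative call, assuming a working torch install:
# Activation()(torch.randn(2, 3)) should return a tensor of the same shape (2, 3).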
class ResizableConv2d(torch.nn.Module):
def __init__(self, state_size, inchan, outchan):
super().__init__()
self.conv = torch.nn.Conv2d(inchan, outchan, 3)
self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)
self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)
self.resize = lambda x: torch.nn.functional.interpolate(x, size=state_size, mode='bicubic', align_corners=True)
self.activation = Activation()
def forward(self, x):
y = self.conv(x)
y = self.conv2(y)
y = self.resize(y)
y = y + self.resize(self.residual_conv(x))
y = self.activation(y)
return y
class ActorNet(torch.nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 6, 3)
self.conv_backwards = ResizableConv2d(state_size, 3, 6)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.conv_resize = ResizableConv2d((8, 8), 3, 3)
self.linears = torch.nn.ModuleList([])
for i in action_size:
self.linears.append(torch.nn.Linear(8*8*3, i))
self.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)
def forward(self, x, goal, time):
x = torch.cat([x, goal], dim=1) + time
x = self.conv(x)
x_ = self.conv_backwards(x)
x = self.conv(x_) + goal
x = x + torch.randn_like(x)
x = self.conv2(x) + time
x = x + torch.randn_like(x)
x = self.conv3(x) + goal
x = x + torch.randn_like(x)
x = self.conv4(x) + goal
x = self.conv_resize(x)
y = x
y = torch.flatten(y, start_dim=1)
y_list = []
for i in self.linears:
iy = i(y)
iy = torch.sigmoid(iy)
y_list.append(iy)
return y_list
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print("Actor Loss:", loss.item())
class GoalkeeperNet(torch.nn.Module):
def __init__(self, state_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 3, 3)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.flatten = torch.nn.Flatten()
self.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)
def forward(self, state):
y = self.conv(state)
goal = self.conv2(y)
goal = self.conv3(goal)
return goal
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print("Goalkeeper Loss:", loss.item())
|
normal
|
{
"blob_id": "850310b6c431981a246832e8a6f5417a88587b99",
"index": 3151,
"step-1": "<mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n <mask token>\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-2": "<mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-3": "<mask token>\n\n\nclass Activation(torch.nn.Module):\n <mask token>\n <mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-4": "<mask token>\n\n\nclass Activation(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n self.swish = lambda x: x * torch.sigmoid(x)\n self.linear = lambda x: x\n self.sigmoid = lambda x: torch.sigmoid(x)\n self.neg = lambda x: -x\n self.sine = lambda x: torch.sin(x)\n self.params = torch.nn.Parameter(torch.zeros(10))\n\n def forward(self, x):\n params = torch.sigmoid(self.params)\n linear_x = self.linear(x) * params[0]\n swish_x = self.swish(x) * params[1]\n sigmoid_x = self.sigmoid(x) * params[2]\n neg_x = self.neg(x) * params[3]\n sine_x = self.sine(x) * params[4]\n x = swish_x + linear_x + sigmoid_x + neg_x + sine_x\n return x\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-5": "import torch\n\nclass Activation(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.swish = lambda x: x * torch.sigmoid(x)\n\t\tself.linear = lambda x: x\n\t\tself.sigmoid = lambda x: torch.sigmoid(x)\n\t\tself.neg = lambda x: -x\n\t\tself.sine = lambda x: torch.sin(x)\n\t\t\n\t\tself.params = torch.nn.Parameter(torch.zeros(10))\n\n\tdef forward(self, x):\n\t\tparams = torch.sigmoid(self.params)\n\t\t\n\t\tlinear_x = self.linear(x) * params[0]\n\t\tswish_x = self.swish(x) * params[1]\n\t\tsigmoid_x = self.sigmoid(x) * params[2]\n\t\tneg_x = self.neg(x) * params[3]\n\t\tsine_x = self.sine(x) * params[4]\n\n\t\tx = swish_x + linear_x + sigmoid_x + neg_x + sine_x\n\t\t\n\t\treturn x\n\nclass ResizableConv2d(torch.nn.Module):\n\tdef __init__(self, state_size, inchan, outchan):\n\t\tsuper().__init__()\n\t\tself.conv = torch.nn.Conv2d(inchan, outchan, 3)\n\t\tself.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n\t\tself.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n\t\tself.resize = lambda x: torch.nn.functional.interpolate(x, size=state_size, mode='bicubic', align_corners=True)\n\t\tself.activation = Activation()\n\tdef forward(self, x):\n\t\ty = self.conv(x)\n\t\ty = self.conv2(y)\n\t\ty = self.resize(y)\n\n\t\ty = y + self.resize(self.residual_conv(x))\n\t\ty = self.activation(y)\n\t\treturn y\n\nclass ActorNet(torch.nn.Module):\n\tdef __init__(self, state_size, action_size):\n\t\tsuper().__init__()\n\t\tself.conv = ResizableConv2d(state_size, 6, 3)\n\t\tself.conv_backwards = ResizableConv2d(state_size, 3, 6)\n\n\t\tself.conv2 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv3 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv4 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv_resize = ResizableConv2d((8, 8), 3, 3)\n\n\t\tself.linears = torch.nn.ModuleList([])\n\n\t\tfor i in action_size:\n\t\t\tself.linears.append(torch.nn.Linear(8*8*3, i))\n\n\t\tself.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)\n\n\tdef forward(self, x, goal, time):\n\t\tx = torch.cat([x, goal], dim=1) + time\n\n\t\tx = self.conv(x)\n\t\tx_ = self.conv_backwards(x)\n\n\t\tx = self.conv(x_) + goal\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv2(x) + time\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv3(x) + goal\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv4(x) + goal\n\n\t\tx = self.conv_resize(x)\n\n\t\ty = x\n\n\t\ty = torch.flatten(y, start_dim=1)\n\n\t\ty_list = []\n\t\tfor i in self.linears:\n\t\t\tiy = i(y)\n\t\t\tiy = torch.sigmoid(iy)\t\n\t\t\ty_list.append(iy)\n\n\t\treturn y_list\n\t\n\tdef optimize(self, loss):\n\t\tself.optim.zero_grad()\n\t\tloss.backward()\n\t\tself.optim.step()\n\t\tprint(\"Actor Loss:\", loss.item())\n\nclass GoalkeeperNet(torch.nn.Module):\n\tdef __init__(self, state_size):\n\t\tsuper().__init__()\n\t\tself.conv = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv2 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv3 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv4 = ResizableConv2d(state_size, 3, 3)\n\t\tself.flatten = torch.nn.Flatten()\n\t\tself.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)\n\n\tdef forward(self, state):\n\t\ty = self.conv(state)\n\t\tgoal = self.conv2(y)\n\t\tgoal = self.conv3(goal)\n\n\t\treturn goal\n\n\n\tdef optimize(self, loss):\n\t\tself.optim.zero_grad()\n\t\tloss.backward()\n\t\tself.optim.step()\n\t\tprint(\"Goalkeeper Loss:\", loss.item())",
"step-ids": [
10,
11,
12,
14,
16
]
}
|
[
10,
11,
12,
14,
16
] |
import numpy as np
import scipy.io as sio
import os
import torch
from torchvision.utils import save_image
from tools import *
def test(config, base, loaders, brief):
compute_and_save_features(base, loaders)
results = evalutate(config, base, brief)
return results
def evalutate(config, base, brief=False):
results = {}
for mode in config.modes:
print(mode)
for number_shot in config.number_shots:
print(number_shot)
cmc, map = evaluate_sysymm01(base.save_features_path, mode, number_shot)
results['{},{}'.format(mode, number_shot)] = [cmc, map]
if brief: break
if brief: break
return results
def compute_and_save_features(base, loaders):
def compute_features(images):
images_f = fliplr(images)
images = images.to(base.device)
images_f = images_f.to(base.device)
features = base.encoder(base.process_images_4_encoder(images, True, True))
features_f = base.encoder(base.process_images_4_encoder(images_f, True, True))
features, _, _, _ = base.embeder(features)
features_f, _, _, _ = base.embeder(features_f)
features = features + features_f
if base.part_num == 1:
features = torch.unsqueeze(features, -1)
return features
def normalize_and_resize_feature(features):
# normlize
norm = torch.norm(features, dim=1, keepdim=True)
features = features / norm.repeat([1, features.size(1), 1])
# resize
features = features.view(features.size(0), -1)
return features
class XX:
def __init__(self):
self.val = {}
def update(self, key, value):
if key not in self.val.keys():
self.val[key] = value
else:
self.val[key] = np.concatenate([self.val[key], value], axis=0)
def get_val(self, key):
if key in self.val.keys():
return self.val[key]
else:
return np.array([[]])
print('Time:{}. Start to compute features'.format(time_now()))
# compute features
# base._resume_model(test_step)
base.set_eval()
features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()
with torch.no_grad():
for i, data in enumerate(loaders.rgb_all_loader):
# load data
images, pids, cids, _ = data
images = base.G_rgb2ir(images.to(base.device)).data.cpu()
# forward
features = compute_features(images)
# meter
features_meter.update(features.data)
pids_meter.update(pids.data)
cids_meter.update(cids.data)
for i, data in enumerate(loaders.ir_all_loader):
# load data
images, pids, cids, _ = data
# forward
features = compute_features(images)
# meter
features_meter.update(features.data)
pids_meter.update(pids.data)
cids_meter.update(cids.data)
print('Time:{}. Start to normalize features.'.format(time_now()))
# normalize features
features = features_meter.get_val()
features = normalize_and_resize_feature(features)
features = features.data.cpu().numpy()
pids = pids_meter.get_val_numpy()
cids = cids_meter.get_val_numpy()
print('Time: {}. Note: Start to save features as .mat file'.format(time_now()))
# save features as .mat file
results = {1: XX(), 2: XX(), 3: XX(), 4: XX(), 5: XX(), 6: XX()}
for i in range(features.shape[0]):
feature = features[i, :]
feature = np.resize(feature, [1, feature.shape[0]])
cid, pid = cids[i], pids[i]
results[cid].update(pid, feature)
pid_num_of_cids = [333, 333, 533, 533, 533, 333]
cids = [1, 2, 3, 4, 5, 6]
for cid in cids:
a_result = results[cid]
xx = []
for pid in range(1, 1+ pid_num_of_cids[cid - 1]):
xx.append([a_result.get_val(pid).astype(np.double)])
xx = np.array(xx)
sio.savemat(os.path.join(base.save_features_path, 'feature_cam{}.mat'.format(cid)), {'feature': xx})
def save_images(base, current_step):
#base.set_eval()
with torch.no_grad():
fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images).detach()
xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images, base.fixed_real_ir_images], dim=0)
save_image((xxxx.data.cpu() + 1.0) / 2.0,
os.path.join(base.save_images_path, 'image_{}.jpg'.format(current_step)), nrow=base.fixed_real_rgb_images.size(0), padding=0)
|
normal
|
{
"blob_id": "b21796a9e10314f80cac3151d1fdbb139966303f",
"index": 5555,
"step-1": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. 
Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\ndef save_images(base, current_step):\n with torch.no_grad():\n fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images\n ).detach()\n xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images,\n base.fixed_real_ir_images], dim=0)\n save_image((xxxx.data.cpu() + 1.0) / 2.0, os.path.join(base.\n save_images_path, 'image_{}.jpg'.format(current_step)), nrow=\n base.fixed_real_rgb_images.size(0), padding=0)\n",
"step-4": "import numpy as np\nimport scipy.io as sio\nimport os\nimport torch\nfrom torchvision.utils import save_image\nfrom tools import *\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. 
Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\ndef save_images(base, current_step):\n with torch.no_grad():\n fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images\n ).detach()\n xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images,\n base.fixed_real_ir_images], dim=0)\n save_image((xxxx.data.cpu() + 1.0) / 2.0, os.path.join(base.\n save_images_path, 'image_{}.jpg'.format(current_step)), nrow=\n base.fixed_real_rgb_images.size(0), padding=0)\n",
"step-5": "import numpy as np\nimport scipy.io as sio\nimport os\n\nimport torch\nfrom torchvision.utils import save_image\n\nfrom tools import *\n\n\n\ndef test(config, base, loaders, brief):\n\n\tcompute_and_save_features(base, loaders)\n\tresults = evalutate(config, base, brief)\n\treturn results\n\n\ndef evalutate(config, base, brief=False):\n\n\tresults = {}\n\tfor mode in config.modes:\n\t\tprint(mode)\n\t\tfor number_shot in config.number_shots:\n\t\t\tprint(number_shot)\n\t\t\tcmc, map = evaluate_sysymm01(base.save_features_path, mode, number_shot)\n\t\t\tresults['{},{}'.format(mode, number_shot)] = [cmc, map]\n\t\t\tif brief: break\n\t\tif brief: break\n\n\treturn results\n\n\ndef compute_and_save_features(base, loaders):\n\n\tdef compute_features(images):\n\t\timages_f = fliplr(images)\n\t\timages = images.to(base.device)\n\t\timages_f = images_f.to(base.device)\n\t\tfeatures = base.encoder(base.process_images_4_encoder(images, True, True))\n\t\tfeatures_f = base.encoder(base.process_images_4_encoder(images_f, True, True))\n\t\tfeatures, _, _, _ = base.embeder(features)\n\t\tfeatures_f, _, _, _ = base.embeder(features_f)\n\t\tfeatures = features + features_f\n\t\tif base.part_num == 1:\n\t\t\tfeatures = torch.unsqueeze(features, -1)\n\t\treturn features\n\n\tdef normalize_and_resize_feature(features):\n\t\t# normlize\n\t\tnorm = torch.norm(features, dim=1, keepdim=True)\n\t\tfeatures = features / norm.repeat([1, features.size(1), 1])\n\t\t# resize\n\t\tfeatures = features.view(features.size(0), -1)\n\t\treturn features\n\n\tclass XX:\n\t\tdef __init__(self):\n\t\t\tself.val = {}\n\t\tdef update(self, key, value):\n\t\t\tif key not in self.val.keys():\n\t\t\t\tself.val[key] = value\n\t\t\telse:\n\t\t\t\tself.val[key] = np.concatenate([self.val[key], value], axis=0)\n\t\tdef get_val(self, key):\n\t\t\tif key in self.val.keys():\n\t\t\t\treturn self.val[key]\n\t\t\telse:\n\t\t\t\treturn np.array([[]])\n\n\n\tprint('Time:{}. Start to compute features'.format(time_now()))\n\t# compute features\n\t# base._resume_model(test_step)\n\tbase.set_eval()\n\tfeatures_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n\n\twith torch.no_grad():\n\t\tfor i, data in enumerate(loaders.rgb_all_loader):\n\t\t\t# load data\n\t\t\timages, pids, cids, _ = data\n\t\t\timages = base.G_rgb2ir(images.to(base.device)).data.cpu()\n\t\t\t# forward\n\t\t\tfeatures = compute_features(images)\n\t\t\t# meter\n\t\t\tfeatures_meter.update(features.data)\n\t\t\tpids_meter.update(pids.data)\n\t\t\tcids_meter.update(cids.data)\n\n\t\tfor i, data in enumerate(loaders.ir_all_loader):\n\t\t\t# load data\n\t\t\timages, pids, cids, _ = data\n\t\t\t# forward\n\t\t\tfeatures = compute_features(images)\n\t\t\t# meter\n\t\t\tfeatures_meter.update(features.data)\n\t\t\tpids_meter.update(pids.data)\n\t\t\tcids_meter.update(cids.data)\n\n\tprint('Time:{}. Start to normalize features.'.format(time_now()))\n\t# normalize features\n\tfeatures = features_meter.get_val()\n\tfeatures = normalize_and_resize_feature(features)\n\tfeatures = features.data.cpu().numpy()\n\tpids = pids_meter.get_val_numpy()\n\tcids = cids_meter.get_val_numpy()\n\n\tprint('Time: {}. 
Note: Start to save features as .mat file'.format(time_now()))\n\t# save features as .mat file\n\tresults = {1: XX(), 2: XX(), 3: XX(), 4: XX(), 5: XX(), 6: XX()}\n\tfor i in range(features.shape[0]):\n\t\tfeature = features[i, :]\n\t\tfeature = np.resize(feature, [1, feature.shape[0]])\n\t\tcid, pid = cids[i], pids[i]\n\t\tresults[cid].update(pid, feature)\n\n\tpid_num_of_cids = [333, 333, 533, 533, 533, 333]\n\tcids = [1, 2, 3, 4, 5, 6]\n\tfor cid in cids:\n\t\ta_result = results[cid]\n\t\txx = []\n\t\tfor pid in range(1, 1+ pid_num_of_cids[cid - 1]):\n\t\t\txx.append([a_result.get_val(pid).astype(np.double)])\n\t\txx = np.array(xx)\n\t\tsio.savemat(os.path.join(base.save_features_path, 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\n\ndef save_images(base, current_step):\n\n\t#base.set_eval()\n\twith torch.no_grad():\n\t\tfixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images).detach()\n\t\txxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images, base.fixed_real_ir_images], dim=0)\n\t\tsave_image((xxxx.data.cpu() + 1.0) / 2.0,\n\t\t os.path.join(base.save_images_path, 'image_{}.jpg'.format(current_step)), nrow=base.fixed_real_rgb_images.size(0), padding=0)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
from django.db import models
from backend.models.account import Account
from string import Template
out = Template("$account: $parts")
class Group(models.Model):
name = models.CharField(max_length=100)
class GroupParticipation(models.Model):
account = models.ForeignKey(Account, related_name='groups')
parts = models.FloatField(default=1.0)
group = models.ForeignKey(Group, related_name='participants')
def __str__(self):
return out.substitute(account=self.account, parts=self.parts)
class Meta:
unique_together = ('account', 'parts', 'group')
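# Illustrative behaviour (added comment, not part of the original module): for a
# hypothetical row whose account renders as "alice" and whose parts is 2.0,
# str(row) substitutes into the template above and yields "alice: 2.0".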
|
normal
|
{
"blob_id": "11337f6f9cf22ba6fbed68dfcb7a07fb6368e94e",
"index": 6350,
"step-1": "<mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-2": "<mask token>\n\n\nclass Group(models.Model):\n <mask token>\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-3": "<mask token>\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-4": "from django.db import models\nfrom backend.models.account import Account\nfrom string import Template\nout = Template('$account: $parts')\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n\n class Meta:\n unique_together = 'account', 'parts', 'group'\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.db import models\n\nfrom backend.models.account import Account\nfrom string import Template\n\n\nout = Template(\"$account: $parts\")\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass GroupParticipation(models.Model):\n account = models.ForeignKey(Account, related_name='groups')\n parts = models.FloatField(default=1.0)\n group = models.ForeignKey(Group, related_name='participants')\n\n def __str__(self):\n return out.substitute(account=self.account, parts=self.parts)\n\n class Meta:\n unique_together = ('account', 'parts', 'group')\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from collections import defaultdict
def k_most_frequent(arr:list, k:int):
    '''Return the k most frequent values in arr, ordered from most to least frequent.'''
    # Count occurrences of each value in arr.
    counts = defaultdict(int)
    for n in arr:
        counts[n] += 1
    # Sort the (value, count) pairs by count, descending, and keep the top k values.
    pairs = [(val, cnt) for val, cnt in counts.items()]
    ordered = list(reversed(sorted(pairs, key=lambda d: d[1])))
    return [o[0] for o in ordered[:k]]
nums = [1,6,2,1,6,1,4,2,6,1]
k_most_frequent(nums, 3)
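# Expected result of the call above (added for illustration): the counts are
# 1 -> 4, 6 -> 3, 2 -> 2, 4 -> 1, so the three most frequent values are [1, 6, 2].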
|
normal
|
{
"blob_id": "1298c2abae519a5365cc0d9d406196db987eb219",
"index": 5923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\n<mask token>\nk_most_frequent(nums, 3)\n",
"step-3": "<mask token>\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\nnums = [1, 6, 2, 1, 6, 1, 4, 2, 6, 1]\nk_most_frequent(nums, 3)\n",
"step-4": "from collections import defaultdict\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\nnums = [1, 6, 2, 1, 6, 1, 4, 2, 6, 1]\nk_most_frequent(nums, 3)\n",
"step-5": "\nfrom collections import defaultdict\n\ndef k_most_frequent(arr:list, k:int):\n ''' '''\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n \n counts = [(k,v) for k,v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\nnums = [1,6,2,1,6,1,4,2,6,1]\nk_most_frequent(nums, 3)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from django.conf.urls import patterns, url
from riskDashboard2 import views
urlpatterns = patterns('',
#url(r'getdata', views.vulnData, name='getdata'),
url(r'appmanagement', views.appmanagement, name='appmanagement'),
url(r'^.*', views.index, name='index'),
)
|
normal
|
{
"blob_id": "3372d98ff91d90558a87293d4032820b1662d60b",
"index": 298,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('appmanagement', views.appmanagement, name=\n 'appmanagement'), url('^.*', views.index, name='index'))\n",
"step-3": "from django.conf.urls import patterns, url\nfrom riskDashboard2 import views\nurlpatterns = patterns('', url('appmanagement', views.appmanagement, name=\n 'appmanagement'), url('^.*', views.index, name='index'))\n",
"step-4": "from django.conf.urls import patterns, url\n\nfrom riskDashboard2 import views\n\nurlpatterns = patterns('',\n #url(r'getdata', views.vulnData, name='getdata'),\n url(r'appmanagement', views.appmanagement, name='appmanagement'),\n url(r'^.*', views.index, name='index'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 08:04:11 2019
@author: yocoy
"""
import serial, time
arduino = serial.Serial('COM7', 9600)
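# give the serial connection a few seconds to settle before reading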
time.sleep(4)
lectura = []
for i in range(100):
lectura.append(arduino.readline())
arduino.close()
print(lectura)
|
normal
|
{
"blob_id": "d514413c303dd174d8f56685158780a1681e1aba",
"index": 7925,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntime.sleep(4)\n<mask token>\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-3": "<mask token>\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\nlectura = []\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-4": "<mask token>\nimport serial, time\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\nlectura = []\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\nprint(lectura)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 2 08:04:11 2019\n\n@author: yocoy\n\"\"\"\n\nimport serial, time\n\narduino = serial.Serial('COM7', 9600)\ntime.sleep(4)\n\nlectura = []\n\nfor i in range(100):\n lectura.append(arduino.readline())\narduino.close()\n\nprint(lectura)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import erfc
import time
from ..core.errors import InvalidConfigError
def compute_integrated_acquisition(acquisition,x):
'''
Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
'''
acqu_x = 0
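    # average the acquisition value over the stored HMC samples of the kernel hyper-parameters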
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
acqu_x += acquisition.acquisition_function(x)
acqu_x = acqu_x/acquisition.model.num_hmc_samples
return acqu_x
def compute_integrated_acquisition_withGradients(acquisition,x):
'''
Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).
:param acquisition: acquisition function with GpyOpt model type GP_MCMC.
:param x: location where the acquisition is evaluated.
'''
acqu_x = 0
d_acqu_x = 0
for i in range(acquisition.model.num_hmc_samples):
acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]
acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)
acqu_x += acqu_x_sample
d_acqu_x += d_acqu_x_sample
acqu_x = acqu_x/acquisition.model.num_hmc_samples
d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples
return acqu_x, d_acqu_x
def best_guess(f,X):
'''
Gets the best current guess from a vector.
:param f: function to evaluate.
:param X: locations.
'''
n = X.shape[0]
xbest = np.zeros(n)
for i in range(n):
ff = f(X[0:(i+1)])
xbest[i] = ff[np.argmin(ff)]
return xbest
def samples_multidimensional_uniform(bounds,num_data):
'''
Generates a multidimensional grid uniformly distributed.
:param bounds: tuple defining the box constraints.
:num_data: number of data points to generate.
'''
dim = len(bounds)
Z_rand = np.zeros(shape=(num_data,dim))
    for k in range(0, dim):
        Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1], size=num_data)
return Z_rand
def reshape(x,input_dim):
'''
Reshapes x into a matrix with input_dim columns
'''
x = np.array(x)
    if x.size == input_dim:
x = x.reshape((1,input_dim))
return x
def get_moments(model,x):
'''
Moments (mean and sdev.) of a GP model at x
'''
input_dim = model.X.shape[1]
x = reshape(x,input_dim)
fmin = min(model.predict(model.X)[0])
m, v = model.predict(x)
s = np.sqrt(np.clip(v, 0, np.inf))
return (m,s, fmin)
def get_d_moments(model,x):
'''
Gradients with respect to x of the moments (mean and sdev.) of the GP
:param model: GPy model.
:param x: location where the gradients are evaluated.
'''
input_dim = model.input_dim
x = reshape(x,input_dim)
_, v = model.predict(x)
dmdx, dvdx = model.predictive_gradients(x)
dmdx = dmdx[:,:,0]
dsdx = dvdx / (2*np.sqrt(v))
return (dmdx, dsdx)
def get_quantiles(acquisition_par, fmin, m, s):
'''
Quantiles of the Gaussian distribution useful to determine the acquisition function values
:param acquisition_par: parameter of the acquisition function
:param fmin: current minimum.
:param m: vector of means.
:param s: vector of standard deviations.
'''
if isinstance(s, np.ndarray):
s[s<1e-10] = 1e-10
elif s< 1e-10:
s = 1e-10
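    # u is the standardised improvement; phi and Phi are the standard normal pdf and cdf evaluated at u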
u = (fmin - m - acquisition_par)/s
phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)
Phi = 0.5 * erfc(-u / np.sqrt(2))
return (phi, Phi, u)
def best_value(Y,sign=1):
'''
Returns a vector whose components i are the minimum (default) or maximum of Y[:i]
'''
n = Y.shape[0]
Y_best = np.ones(n)
for i in range(n):
if sign == 1:
Y_best[i]=Y[:(i+1)].min()
else:
Y_best[i]=Y[:(i+1)].max()
return Y_best
def spawn(f):
'''
Function for parallel evaluation of the acquisition function
'''
def fun(pipe,x):
pipe.send(f(x))
pipe.close()
return fun
def evaluate_function(f,X):
'''
Returns the evaluation of a function *f* and the time per evaluation
'''
num_data, dim_data = X.shape
Y_eval = np.zeros((num_data, dim_data))
Y_time = np.zeros((num_data, 1))
for i in range(num_data):
time_zero = time.time()
Y_eval[i,:] = f(X[i,:])
Y_time[i,:] = time.time() - time_zero
return Y_eval, Y_time
def values_to_array(input_values):
'''
Transforms a values of int, float and tuples to a column vector numpy array
'''
if type(input_values)==tuple:
values = np.array(input_values).reshape(-1,1)
elif type(input_values) == np.ndarray:
values = np.atleast_2d(input_values)
    elif type(input_values) == int or type(input_values) == float or type(input_values) == np.int64:
values = np.atleast_2d(np.array(input_values))
else:
print('Type to transform not recognized')
return values
def merge_values(values1,values2):
'''
Merges two numpy arrays by calculating all possible combinations of rows
'''
array1 = values_to_array(values1)
array2 = values_to_array(values2)
if array1.size == 0:
return array2
if array2.size == 0:
return array1
merged_array = []
for row_array1 in array1:
for row_array2 in array2:
merged_row = np.hstack((row_array1,row_array2))
merged_array.append(merged_row)
return np.atleast_2d(merged_array)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
# Only normalize with non null sdev (divide by zero). For only one
# data point both std and ptp return 0.
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
# A range of [-1, 1] is more natural for a zero-mean GP
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(normalization_type))
return Y_norm
|
normal
|
{
"blob_id": "4e7cfbf51ec9bad691d8dd9f103f22728cf5e952",
"index": 1229,
"step-1": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\n<mask token>\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\n<mask token>\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. 
Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-2": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\n<mask token>\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\n<mask token>\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, 
normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-3": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\n<mask token>\n\n\ndef get_d_moments(model, x):\n \"\"\"\n Gradients with respect to x of the moments (mean and sdev.) 
of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n \"\"\"\n input_dim = model.input_dim\n x = reshape(x, input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:, :, 0]\n dsdx = dvdx / (2 * np.sqrt(v))\n return dmdx, dsdx\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n \"\"\"\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n \"\"\"\n if isinstance(s, np.ndarray):\n s[s < 1e-10] = 1e-10\n elif s < 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par) / s\n phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return phi, Phi, u\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n \"\"\"\n Transforms a values of int, float and tuples to a column vector numpy array\n \"\"\"\n if type(input_values) == tuple:\n values = np.array(input_values).reshape(-1, 1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values) == int or type(input_values) == float or type(np\n .int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-4": "import numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\ndef get_moments(model, x):\n \"\"\"\n Moments (mean and sdev.) of a GP model at x\n\n \"\"\"\n input_dim = model.X.shape[1]\n x = reshape(x, input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return m, s, fmin\n\n\ndef get_d_moments(model, x):\n \"\"\"\n Gradients with respect to x of the moments (mean and sdev.) 
of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n \"\"\"\n input_dim = model.input_dim\n x = reshape(x, input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:, :, 0]\n dsdx = dvdx / (2 * np.sqrt(v))\n return dmdx, dsdx\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n \"\"\"\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n \"\"\"\n if isinstance(s, np.ndarray):\n s[s < 1e-10] = 1e-10\n elif s < 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par) / s\n phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return phi, Phi, u\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\ndef spawn(f):\n \"\"\"\n Function for parallel evaluation of the acquisition function\n \"\"\"\n\n def fun(pipe, x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n \"\"\"\n Transforms a values of int, float and tuples to a column vector numpy array\n \"\"\"\n if type(input_values) == tuple:\n values = np.array(input_values).reshape(-1, 1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values) == int or type(input_values) == float or type(np\n .int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. 
Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-5": "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\ndef compute_integrated_acquisition(acquisition,x):\n '''\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x += acquisition.acquisition_function(x)\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n return acqu_x\n\ndef compute_integrated_acquisition_withGradients(acquisition,x):\n '''\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n d_acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples\n\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f,X):\n '''\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n '''\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:(i+1)])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds,num_data):\n '''\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n '''\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data,dim))\n for k in range(0,dim): Z_rand[:,k] = np.random.uniform(low=bounds[k][0],high=bounds[k][1],size=num_data)\n return Z_rand\n\n\ndef reshape(x,input_dim):\n '''\n Reshapes x into a matrix with input_dim columns\n\n '''\n x = np.array(x)\n if x.size ==input_dim:\n x = x.reshape((1,input_dim))\n return x\n\ndef get_moments(model,x):\n '''\n Moments (mean and sdev.) of a GP model at x\n\n '''\n input_dim = model.X.shape[1]\n x = reshape(x,input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return (m,s, fmin)\n\ndef get_d_moments(model,x):\n '''\n Gradients with respect to x of the moments (mean and sdev.) 
of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n '''\n input_dim = model.input_dim\n x = reshape(x,input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:,:,0]\n dsdx = dvdx / (2*np.sqrt(v))\n return (dmdx, dsdx)\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n '''\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n '''\n if isinstance(s, np.ndarray):\n s[s<1e-10] = 1e-10\n elif s< 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par)/s\n phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return (phi, Phi, u)\n\n\ndef best_value(Y,sign=1):\n '''\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n '''\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i]=Y[:(i+1)].min()\n else:\n Y_best[i]=Y[:(i+1)].max()\n return Y_best\n\ndef spawn(f):\n '''\n Function for parallel evaluation of the acquisition function\n '''\n def fun(pipe,x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f,X):\n '''\n Returns the evaluation of a function *f* and the time per evaluation\n '''\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i,:] = f(X[i,:])\n Y_time[i,:] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n '''\n Transforms a values of int, float and tuples to a column vector numpy array\n '''\n if type(input_values)==tuple:\n values = np.array(input_values).reshape(-1,1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values)==int or type(input_values)==float or type(np.int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1,values2):\n '''\n Merges two numpy arrays by calculating all possible combinations of rows\n '''\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1,row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n\n # Only normalize with non null sdev (divide by zero). 
For only one\n # data point both std and ptp return 0.\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n # A range of [-1, 1] is more natural for a zero-mean GP\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(normalization_type))\n\n return Y_norm\n",
"step-ids": [
7,
9,
12,
15,
16
]
}
|
[
7,
9,
12,
15,
16
] |
import math
def is_prime(n):
# Based on the Sieve of Eratosthenes
if n == 1:
return False
if n < 4:
# 2 and 3 are prime
return True
if n % 2 == 0:
return False
if n < 9:
# 5 and 7 are prime (we have already excluded 4, 6 and 8)
return True
if n % 3 == 0:
return False
root = math.sqrt(n)
f = 5
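    # every prime above 3 has the form 6k - 1 or 6k + 1, so only test f and f + 2 and step by 6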
while f <= root:
if n % f == 0:
return False
if n % (f + 2) == 0:
return False
f += 6
return True
def main():
limit = 10001
# We know that 2 is prime
count = 1
candidate = 1
while count < limit:
candidate += 2
if is_prime(candidate):
count += 1
print(candidate)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "3970c7768e892ad217c193b1d967c1203b7e9a25",
"index": 6512,
"step-1": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import math\n\n\ndef is_prime(n):\n if n == 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n root = math.sqrt(n)\n f = 5\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n return True\n\n\ndef main():\n limit = 10001\n count = 1\n candidate = 1\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import math\n\n\ndef is_prime(n):\n # Based on the Sieve of Eratosthenes\n if n == 1:\n return False\n if n < 4:\n # 2 and 3 are prime\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n # 5 and 7 are prime (we have already excluded 4, 6 and 8)\n return True\n if n % 3 == 0:\n return False\n\n root = math.sqrt(n)\n f = 5\n\n while f <= root:\n if n % f == 0:\n return False\n if n % (f + 2) == 0:\n return False\n f += 6\n\n return True\n\n\ndef main():\n limit = 10001\n # We know that 2 is prime\n count = 1\n candidate = 1\n\n while count < limit:\n candidate += 2\n if is_prime(candidate):\n count += 1\n\n print(candidate)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# coding: utf-8
import re
import numpy as np
from sklearn.manifold import TSNE
import word2vec
from matplotlib import pyplot as plt
from adjustText import adjust_text
import nltk
'''
word2vec.word2phrase('all.txt', 'phrases.txt', verbose=True)
word2vec.word2vec('phrases.txt', 'text.bin', size=100, verbose=True)
word2vec.word2clusters('all.txt', 'clusters.txt', 100, verbose=True)
'''
model = word2vec.load('text.bin')
words = [word for word in model.vocab[:500]]
X = [ model[word] for word in words]
X = np.array(X)
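# reduce the word vectors to two dimensions with t-SNE for plotting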
tsne = TSNE(n_components=2)
X_tsne = tsne.fit_transform(X)
def plot_scatter(x,y,texts,adjust=False):
fig, ax = plt.subplots()
ax.plot(x, y, 'bo')
texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]
if adjust:
plt.title(str( adjust_text(texts, x, y, arrowprops=dict(arrowstyle='->', color='red')))+' iterations')
plt.savefig("500")
pattern = re.compile(r"[,.:;!?“”’]")
X, Y, texts = [], [], []
for i,word in enumerate(words):
if not pattern.findall(word):
tag = nltk.pos_tag([word])
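        # keep only adjectives (JJ) and nouns (NNP, NN, NNS); skip all other parts of speech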
if tag[0][1] != 'JJ' and tag[0][1] != 'NNP' and tag[0][1] != 'NN' and tag[0][1] != 'NNS':
continue
X.append(X_tsne[i][0])
Y.append(X_tsne[i][1])
texts.append(word)
print(len(X))
plot_scatter(X, Y, texts, True)
|
normal
|
{
"blob_id": "31996699bec6507d941eb8a7aaacffbd6248d79c",
"index": 7112,
"step-1": "<mask token>\n\n\ndef plot_scatter(x, y, texts, adjust=False):\n fig, ax = plt.subplots()\n ax.plot(x, y, 'bo')\n texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]\n if adjust:\n plt.title(str(adjust_text(texts, x, y, arrowprops=dict(arrowstyle=\n '->', color='red'))) + ' iterations')\n plt.savefig('500')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_scatter(x, y, texts, adjust=False):\n fig, ax = plt.subplots()\n ax.plot(x, y, 'bo')\n texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]\n if adjust:\n plt.title(str(adjust_text(texts, x, y, arrowprops=dict(arrowstyle=\n '->', color='red'))) + ' iterations')\n plt.savefig('500')\n\n\n<mask token>\nfor i, word in enumerate(words):\n if not pattern.findall(word):\n tag = nltk.pos_tag([word])\n if tag[0][1] != 'JJ' and tag[0][1] != 'NNP' and tag[0][1\n ] != 'NN' and tag[0][1] != 'NNS':\n continue\n X.append(X_tsne[i][0])\n Y.append(X_tsne[i][1])\n texts.append(word)\nprint(len(X))\nplot_scatter(X, Y, texts, True)\n",
"step-3": "<mask token>\nmodel = word2vec.load('text.bin')\nwords = [word for word in model.vocab[:500]]\nX = [model[word] for word in words]\nX = np.array(X)\ntsne = TSNE(n_components=2)\nX_tsne = tsne.fit_transform(X)\n\n\ndef plot_scatter(x, y, texts, adjust=False):\n fig, ax = plt.subplots()\n ax.plot(x, y, 'bo')\n texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]\n if adjust:\n plt.title(str(adjust_text(texts, x, y, arrowprops=dict(arrowstyle=\n '->', color='red'))) + ' iterations')\n plt.savefig('500')\n\n\npattern = re.compile('[,.:;!?“”’]')\nX, Y, texts = [], [], []\nfor i, word in enumerate(words):\n if not pattern.findall(word):\n tag = nltk.pos_tag([word])\n if tag[0][1] != 'JJ' and tag[0][1] != 'NNP' and tag[0][1\n ] != 'NN' and tag[0][1] != 'NNS':\n continue\n X.append(X_tsne[i][0])\n Y.append(X_tsne[i][1])\n texts.append(word)\nprint(len(X))\nplot_scatter(X, Y, texts, True)\n",
"step-4": "import re\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport word2vec\nfrom matplotlib import pyplot as plt\nfrom adjustText import adjust_text\nimport nltk\n<mask token>\nmodel = word2vec.load('text.bin')\nwords = [word for word in model.vocab[:500]]\nX = [model[word] for word in words]\nX = np.array(X)\ntsne = TSNE(n_components=2)\nX_tsne = tsne.fit_transform(X)\n\n\ndef plot_scatter(x, y, texts, adjust=False):\n fig, ax = plt.subplots()\n ax.plot(x, y, 'bo')\n texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]\n if adjust:\n plt.title(str(adjust_text(texts, x, y, arrowprops=dict(arrowstyle=\n '->', color='red'))) + ' iterations')\n plt.savefig('500')\n\n\npattern = re.compile('[,.:;!?“”’]')\nX, Y, texts = [], [], []\nfor i, word in enumerate(words):\n if not pattern.findall(word):\n tag = nltk.pos_tag([word])\n if tag[0][1] != 'JJ' and tag[0][1] != 'NNP' and tag[0][1\n ] != 'NN' and tag[0][1] != 'NNS':\n continue\n X.append(X_tsne[i][0])\n Y.append(X_tsne[i][1])\n texts.append(word)\nprint(len(X))\nplot_scatter(X, Y, texts, True)\n",
"step-5": "# coding: utf-8\nimport re\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport word2vec\nfrom matplotlib import pyplot as plt\nfrom adjustText import adjust_text\nimport nltk\n'''\nword2vec.word2phrase('all.txt', 'phrases.txt', verbose=True)\nword2vec.word2vec('phrases.txt', 'text.bin', size=100, verbose=True)\nword2vec.word2clusters('all.txt', 'clusters.txt', 100, verbose=True)\n'''\nmodel = word2vec.load('text.bin')\nwords = [word for word in model.vocab[:500]]\nX = [ model[word] for word in words]\nX = np.array(X)\ntsne = TSNE(n_components=2)\nX_tsne = tsne.fit_transform(X)\n\n\ndef plot_scatter(x,y,texts,adjust=False):\n\n fig, ax = plt.subplots()\n ax.plot(x, y, 'bo')\n\n texts = [plt.text(x[i], y[i], texts[i]) for i in range(len(x))]\n if adjust:\n plt.title(str( adjust_text(texts, x, y, arrowprops=dict(arrowstyle='->', color='red')))+' iterations')\n plt.savefig(\"500\")\n\npattern = re.compile(r\"[,.:;!?“”’]\")\nX, Y, texts = [], [], []\nfor i,word in enumerate(words):\n if not pattern.findall(word):\n tag = nltk.pos_tag([word])\n if tag[0][1] != 'JJ' and tag[0][1] != 'NNP' and tag[0][1] != 'NN' and tag[0][1] != 'NNS':\n continue\n X.append(X_tsne[i][0])\n Y.append(X_tsne[i][1])\n texts.append(word)\n\nprint(len(X))\nplot_scatter(X, Y, texts, True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
####################################################################
# a COM client coded in Python: talk to MS-Word via its COM object
# model; uses either dynamic dispatch (run-time lookup/binding),
# or the static and faster type-library dispatch if makepy.py has
# been run; install the windows win32all extensions package to use
# this interface; Word runs hidden unless Visible is set to 1 (and
# Visible lets you watch, but impacts interactive Word sessions);
####################################################################
from sys import argv
docdir = 'C:\\temp\\'
if len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\
from win32com.client import Dispatch # early or late binding
word = Dispatch('Word.Application') # connect/start word
word.Visible = 1 # else word runs hidden
# create and save new doc file
newdoc = word.Documents.Add() # call word methods
spot = newdoc.Range(0,0)
spot.InsertBefore('Hello COM client world!') # insert some text
newdoc.SaveAs(docdir + 'pycom.doc') # save in doc file
newdoc.SaveAs(docdir + 'copy.doc')
newdoc.Close()
# open and change a doc file
olddoc = word.Documents.Open(docdir + 'copy.doc')
finder = word.Selection.Find
finder.text = 'COM'
finder.Execute()
word.Selection.TypeText('Automation')
olddoc.Close()
# and so on: see Word's COM interface specs
|
normal
|
{
"blob_id": "df19aa720993c2385a6d025cf7ec8f3935ee4191",
"index": 9343,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\n<mask token>\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-3": "<mask token>\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\n<mask token>\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-4": "from sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2:\n docdir = argv[1]\nfrom win32com.client import Dispatch\nword = Dispatch('Word.Application')\nword.Visible = 1\nnewdoc = word.Documents.Add()\nspot = newdoc.Range(0, 0)\nspot.InsertBefore('Hello COM client world!')\nnewdoc.SaveAs(docdir + 'pycom.doc')\nnewdoc.SaveAs(docdir + 'copy.doc')\nnewdoc.Close()\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n",
"step-5": "####################################################################\n# a COM client coded in Python: talk to MS-Word via its COM object\n# model; uses either dynamic dispatch (run-time lookup/binding), \n# or the static and faster type-library dispatch if makepy.py has \n# been run; install the windows win32all extensions package to use \n# this interface; Word runs hidden unless Visible is set to 1 (and\n# Visible lets you watch, but impacts interactive Word sessions);\n####################################################################\n\nfrom sys import argv\ndocdir = 'C:\\\\temp\\\\'\nif len(argv) == 2: docdir = argv[1] # ex: comclient.py a:\\\n\nfrom win32com.client import Dispatch # early or late binding\nword = Dispatch('Word.Application') # connect/start word\nword.Visible = 1 # else word runs hidden\n\n# create and save new doc file\nnewdoc = word.Documents.Add() # call word methods\nspot = newdoc.Range(0,0)\nspot.InsertBefore('Hello COM client world!') # insert some text\nnewdoc.SaveAs(docdir + 'pycom.doc') # save in doc file\nnewdoc.SaveAs(docdir + 'copy.doc') \nnewdoc.Close()\n\n# open and change a doc file\nolddoc = word.Documents.Open(docdir + 'copy.doc')\nfinder = word.Selection.Find\nfinder.text = 'COM'\nfinder.Execute()\nword.Selection.TypeText('Automation')\nolddoc.Close()\n\n# and so on: see Word's COM interface specs\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Config for a linear regression model evaluated on a diabetes dataset."""
from dbispipeline.evaluators import GridEvaluator
import dbispipeline.result_handlers as result_handlers
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from nlp4musa2020.dataloaders.alf200k import ALF200KLoader
from nlp4musa2020.dataloaders.alf200k import genre_target_labels
from nlp4musa2020.dataloaders.vectorizer import lda
from nlp4musa2020.dataloaders.vectorizer import tfidf
import nlp4musa2020.evaluators as evaluators
from nlp4musa2020.models.simplenn_genre import SimpleGenreNN
dataloader = ALF200KLoader(
path='data/processed/dataset-lfm-genres.pickle',
load_feature_groups=[
'rhymes',
'statistical',
'statistical_time',
'explicitness',
'audio',
],
text_vectorizers=lda() + tfidf(),
target=genre_target_labels(),
)
pipeline = Pipeline([
('scaler', StandardScaler()),
('model', SimpleGenreNN(epochs=50)),
])
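# grid-search over two dense-layer size settings with a single dropout rate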
evaluator = GridEvaluator(
parameters={
'model__dense_sizes': [
(32, 32),
(64, 64),
],
'model__dropout_rate': [0.1],
},
grid_parameters=evaluators.grid_parameters_genres(),
)
result_handlers = [
result_handlers.print_gridsearch_results,
]
|
normal
|
{
"blob_id": "473c653da54ebdb7fe8a9eefc166cab167f43357",
"index": 3994,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-3": "<mask token>\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\ndataloader = ALF200KLoader(path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=['rhymes', 'statistical', 'statistical_time',\n 'explicitness', 'audio'], text_vectorizers=lda() + tfidf(), target=\n genre_target_labels())\npipeline = Pipeline([('scaler', StandardScaler()), ('model', SimpleGenreNN(\n epochs=50))])\nevaluator = GridEvaluator(parameters={'model__dense_sizes': [(32, 32), (64,\n 64)], 'model__dropout_rate': [0.1]}, grid_parameters=evaluators.\n grid_parameters_genres())\nresult_handlers = [result_handlers.print_gridsearch_results]\n",
"step-4": "\"\"\"Config for a linear regression model evaluated on a diabetes dataset.\"\"\"\nfrom dbispipeline.evaluators import GridEvaluator\nimport dbispipeline.result_handlers as result_handlers\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\nfrom nlp4musa2020.dataloaders.alf200k import ALF200KLoader\nfrom nlp4musa2020.dataloaders.alf200k import genre_target_labels\nfrom nlp4musa2020.dataloaders.vectorizer import lda\nfrom nlp4musa2020.dataloaders.vectorizer import tfidf\nimport nlp4musa2020.evaluators as evaluators\nfrom nlp4musa2020.models.simplenn_genre import SimpleGenreNN\n\ndataloader = ALF200KLoader(\n path='data/processed/dataset-lfm-genres.pickle',\n load_feature_groups=[\n 'rhymes',\n 'statistical',\n 'statistical_time',\n 'explicitness',\n 'audio',\n ],\n text_vectorizers=lda() + tfidf(),\n target=genre_target_labels(),\n)\n\npipeline = Pipeline([\n ('scaler', StandardScaler()),\n ('model', SimpleGenreNN(epochs=50)),\n])\n\nevaluator = GridEvaluator(\n parameters={\n 'model__dense_sizes': [\n (32, 32),\n (64, 64),\n ],\n 'model__dropout_rate': [0.1],\n },\n grid_parameters=evaluators.grid_parameters_genres(),\n)\n\nresult_handlers = [\n result_handlers.print_gridsearch_results,\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from abc import ABCMeta, abstractmethod
from datetime import datetime
from enum import Enum
from application.response import ResponseError
class ModelBase:
__metaclass__ = ABCMeta
@classmethod
@abstractmethod
def _get_cls_schema(cls):
pass
def __new__(cls, schema):
if schema is None:
return None
else:
return object.__new__(cls)
def __init__(self, schema):
self.schema = schema
@property
def id(self):
return self.schema.id
@property
def uid(self):
return self.schema.uid
def refresh(self):
self.schema = self._get_cls_schema().query.get(self.schema.id)
def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):
""" 把用 property 装饰的属性封装到一个 dict 中再返回
:param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性
:param exclude_keys, list, 指定需要排除的属性, 默认为 []
:param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层
:param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys
"""
return_dict = {}
attrs = self.__class__.__dict__
include_keys = include_keys or [
name for name in attrs.keys() if not name.startswith("_")
]
exclude_keys = exclude_keys or []
if lite is True:
lite_exclude_keys = getattr(self, "lite_exclude_keys", [])
exclude_keys = exclude_keys + lite_exclude_keys
include_keys = [name for name in include_keys if name not in exclude_keys]
if depth > 1:
return self.uid
for key, value in attrs.items():
if key not in include_keys:
continue
if not isinstance(value, property):
continue
value = getattr(self, key)
if isinstance(value, Enum):
return_dict[key] = value.value
elif isinstance(value, list):
list_values = []
for item in value:
if hasattr(item, "to_dict"):
list_values.append(item.to_dict(depth=depth + 1, lite=True))
else:
list_values.append(item)
return_dict[key] = list_values
elif isinstance(value, dict):
dict_values = {}
for k, v in value.items():
if hasattr(v, "to_dict"):
dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
else:
dict_values[k] = v
return_dict[key] = dict_values
elif isinstance(value, datetime):
return_dict[key] = value.isoformat()
elif hasattr(value, "to_dict"):
return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
else:
return_dict[key] = value
return return_dict
@classmethod
def get_by_id(cls, id):
schema = cls._get_cls_schema().query.get(id)
if schema is None:
            raise ResponseError(info='no record found for the given id')
return cls(schema)
@classmethod
def get_by_uid(cls, uid):
schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
return cls(schema)
|
normal
|
{
"blob_id": "5917c891d2885f779dc33f189f1a875efbd0c302",
"index": 163,
"step-1": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-2": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-3": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-4": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-5": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n\n attrs = self.__class__.__dict__\n\n include_keys = include_keys or [\n name for name in attrs.keys() if not name.startswith(\"_\")\n ]\n exclude_keys = exclude_keys or []\n\n if lite is True:\n lite_exclude_keys = getattr(self, \"lite_exclude_keys\", [])\n exclude_keys = exclude_keys + lite_exclude_keys\n\n include_keys = [name for name in include_keys if name not in exclude_keys]\n\n if depth > 1:\n return self.uid\n\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, \"to_dict\"):\n list_values.append(item.to_dict(depth=depth + 1, lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, \"to_dict\"):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n\n elif hasattr(value, \"to_dict\"):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-ids": [
6,
8,
9,
12,
13
]
}
|
[
6,
8,
9,
12,
13
] |
from django.db import models
from django.contrib.auth.models import User

# Create your models here.
class Post(models.Model):
    title = models.CharField(max_length=40)
    content = models.TextField()
    date_published = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    img = models.ImageField(upload_to='post_img', null=True, blank=True)
    like = models.ManyToManyField(User, related_name='like_user', blank=True)
    dislike = models.ManyToManyField(User, related_name='dislike_user', blank=True)

    def __str__(self):
        return self.title

class Comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    comment_box = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username

class Comment_to_comment(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)
    comment = models.TextField()
    date_comment = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.from_comment.comment_box

class Points(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    point = models.IntegerField(default=0)
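

# Illustrative usage sketch only, not part of the original app: it assumes a configured
# Django project with these models migrated, and shows how the ManyToMany reaction
# fields and the Comment reverse relation might be summarised for a single Post.
def reaction_summary(post):
    """Return like/dislike/comment counts for a Post instance."""
    return {
        "likes": post.like.count(),
        "dislikes": post.dislike.count(),
        "comments": post.comment_set.count(),
    }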
|
normal
|
{
"blob_id": "1257b90781a213ca8e07f67a33b8e847d0525653",
"index": 9354,
"step-1": "<mask token>\n\n\nclass Comment(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n",
"step-3": "<mask token>\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',\n blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',\n blank=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Post(models.Model):\n title = models.CharField(max_length=40)\n content = models.TextField()\n date_published = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n img = models.ImageField(upload_to='post_img', null=True, blank=True)\n like = models.ManyToManyField(User, related_name='like_user', blank=True)\n dislike = models.ManyToManyField(User, related_name='dislike_user',blank=True)\n\n def __str__(self):\n return self.title\n\nclass Comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n comment_box = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.user.username\n\nclass Comment_to_comment(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n from_comment = models.ForeignKey(Comment, on_delete=models.CASCADE)\n comment = models.TextField()\n date_comment = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.from_comment.comment_box\n\nclass Points(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n point = models.IntegerField(default=0)\n ",
"step-ids": [
7,
10,
11,
12,
13
]
}
|
[
7,
10,
11,
12,
13
] |
'''
Write a program that determines which of two points
lies closer to the origin.
(Prompts and messages below are kept in the original Ukrainian.)
'''
import re

# A raw string avoids the invalid-escape warning for \d on newer Python versions.
re_number = re.compile(r"^[-+]?\d+\.?\d*$")


def validator(pattern, promt):
    """Keep re-prompting until the input matches the given pattern."""
    text = input(promt)
    while not bool(pattern.match(text)):
        text = input(promt)
    return text


def number_validator(promt):
    number = float(validator(re_number, promt))
    return number


def len_line(x, y):
    """Euclidean distance of the point (x, y) from the origin."""
    length = (x**2 + y**2)**(1/2)
    return length


x_1 = number_validator("Введіть абсцису точки А: ")
y_1 = number_validator("Введіть ординату точки А: ")
x_2 = number_validator("Введіть абсцису точки В: ")
y_2 = number_validator("Введіть ординату точки В: ")

if len_line(x_1, y_1) > len_line(x_2, y_2):
    print("Точка В лежить ближче до початку координат")
elif len_line(x_1, y_1) < len_line(x_2, y_2):
    print("Точка А лежить ближче до початку координат")
else:
    print("Відстань від точок до початку координат рівні")
|
normal
|
{
"blob_id": "2d5993489ff3120d980d29edbb53422110a5c039",
"index": 3561,
"step-1": "<mask token>\n\n\ndef validator(pattern, promt):\n text = input(promt)\n while not bool(pattern.match(text)):\n text = input(promt)\n return text\n\n\ndef number_validator(promt):\n number = float(validator(re_number, promt))\n return number\n\n\ndef len_line(x, y):\n length = (x ** 2 + y ** 2) ** (1 / 2)\n return length\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validator(pattern, promt):\n text = input(promt)\n while not bool(pattern.match(text)):\n text = input(promt)\n return text\n\n\ndef number_validator(promt):\n number = float(validator(re_number, promt))\n return number\n\n\ndef len_line(x, y):\n length = (x ** 2 + y ** 2) ** (1 / 2)\n return length\n\n\n<mask token>\nif len_line(x_1, y_1) > len_line(x_2, y_2):\n print('Точка В лежить ближче до початку координат')\nelif len_line(x_1, y_1) < len_line(x_2, y_2):\n print('Точка А лежить ближче до початку координат')\nelse:\n print('Відстань від точок до початку координат рівні')\n",
"step-3": "<mask token>\nre_number = re.compile('^[-+]?\\\\d+\\\\.?\\\\d*$')\n\n\ndef validator(pattern, promt):\n text = input(promt)\n while not bool(pattern.match(text)):\n text = input(promt)\n return text\n\n\ndef number_validator(promt):\n number = float(validator(re_number, promt))\n return number\n\n\ndef len_line(x, y):\n length = (x ** 2 + y ** 2) ** (1 / 2)\n return length\n\n\nx_1 = number_validator('Введіть абсцису точки А: ')\ny_1 = number_validator('Введіть ординатуі точки А: ')\nx_2 = number_validator('Введіть абсцису точки В: ')\ny_2 = number_validator('Введіть ординату точки В: ')\nif len_line(x_1, y_1) > len_line(x_2, y_2):\n print('Точка В лежить ближче до початку координат')\nelif len_line(x_1, y_1) < len_line(x_2, y_2):\n print('Точка А лежить ближче до початку координат')\nelse:\n print('Відстань від точок до початку координат рівні')\n",
"step-4": "<mask token>\nimport re\nre_number = re.compile('^[-+]?\\\\d+\\\\.?\\\\d*$')\n\n\ndef validator(pattern, promt):\n text = input(promt)\n while not bool(pattern.match(text)):\n text = input(promt)\n return text\n\n\ndef number_validator(promt):\n number = float(validator(re_number, promt))\n return number\n\n\ndef len_line(x, y):\n length = (x ** 2 + y ** 2) ** (1 / 2)\n return length\n\n\nx_1 = number_validator('Введіть абсцису точки А: ')\ny_1 = number_validator('Введіть ординатуі точки А: ')\nx_2 = number_validator('Введіть абсцису точки В: ')\ny_2 = number_validator('Введіть ординату точки В: ')\nif len_line(x_1, y_1) > len_line(x_2, y_2):\n print('Точка В лежить ближче до початку координат')\nelif len_line(x_1, y_1) < len_line(x_2, y_2):\n print('Точка А лежить ближче до початку координат')\nelse:\n print('Відстань від точок до початку координат рівні')\n",
"step-5": "'''\nНаписати програму, що визначає, яка з двох\nточок знаходиться ближче до початку координат.\n'''\n\nimport re\n\nre_number = re.compile(\"^[-+]?\\d+\\.?\\d*$\")\n\ndef validator(pattern,promt):\n text=input(promt)\n while not bool(pattern.match(text)):\n text = input(promt)\n return text\n\n\ndef number_validator(promt):\n number=float(validator(re_number, promt))\n return number\n\ndef len_line(x,y):\n length=(x**2 + y**2)**(1/2)\n return length\n\nx_1 = number_validator(\"Введіть абсцису точки А: \")\ny_1 = number_validator(\"Введіть ординатуі точки А: \")\nx_2 = number_validator(\"Введіть абсцису точки В: \")\ny_2 = number_validator(\"Введіть ординату точки В: \")\n\nif len_line(x_1,y_1) > len_line(x_2,y_2) :\n print(\"Точка В лежить ближче до початку координат\")\nelif len_line(x_1,y_1) < len_line(x_2,y_2):\n print(\"Точка А лежить ближче до початку координат\")\nelse:\n print(\"Відстань від точок до початку координат рівні\")",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import json
import tempfile
import zipfile
from contextlib import contextmanager
from utils import (
codepipeline_lambda_handler,
create_zip_file,
get_artifact_s3_client,
get_cloudformation_template,
get_input_artifact_location,
get_output_artifact_location,
get_session,
get_user_parameters,
log,
)
@codepipeline_lambda_handler
def lambda_handler(event, context):
"""
Prepares for an AMI deployment.
"""
# Get details from the event.
job = event["CodePipeline.job"]
input_bucket, input_key = get_input_artifact_location(job)
output_bucket, output_key = get_output_artifact_location(job)
user_params = get_user_parameters(job)
assume_role_arn = user_params["AssumeRoleArn"]
image_parameter_name = user_params["ImageParameterName"]
stack_name = user_params["StackName"]
template_filename = user_params["TemplateFilename"]
# Create client in the pipeline account.
pipeline_s3_client = get_artifact_s3_client(job)
# Create clients in the target account.
target_session = get_session(
role_arn=assume_role_arn, session_name="prepare-ami-deployment"
)
target_cfn_client = target_session.client("cloudformation")
target_ssm_client = target_session.client("ssm")
    # Download the input artifact zip file, read imageDetail.json from it,
    # and get the image URI it references.
with download_zip_file(
s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key
) as zip_file:
image_detail_string = zip_file.read("imageDetail.json").decode("utf-8")
log("IMAGE_DETAIL_STRING", image_detail_string)
image_detail = json.loads(image_detail_string)
image = image_detail["ImageURI"]
log("IMAGE", image)
# Update the SSM parameters with the image,
# to be used by the CloudFormation deployment stage of the pipeline.
target_ssm_client.put_parameter(
Name=image_parameter_name, Value=image, Type="String", Overwrite=True
)
# Write the CloudFormation stack's template to the output artifact location,
# to be used by the CloudFormation deployment stage of the pipeline.
template = get_cloudformation_template(
cfn_client=target_cfn_client, stack_name=stack_name
)
with create_zip_file({template_filename: template}) as zip_path:
pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)
@contextmanager
def download_zip_file(s3_client, bucket, key):
"""
Downloads and extracts a zip file from S3.
"""
with tempfile.NamedTemporaryFile() as temp_file:
s3_client.download_file(bucket, key, temp_file.name)
with zipfile.ZipFile(temp_file.name, "r") as zip_file:
yield zip_file
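

# Minimal offline sketch, not part of the deployment Lambda: it exercises
# download_zip_file() against a stubbed S3 client so the zip handling can be checked
# without AWS access. The bucket/key names and the image URI are made up.
if __name__ == "__main__":
    import io

    payload = io.BytesIO()
    with zipfile.ZipFile(payload, "w") as archive:
        archive.writestr("imageDetail.json", json.dumps({"ImageURI": "example/image:latest"}))

    class _StubS3Client:
        def download_file(self, bucket, key, filename):
            # Write the in-memory zip to the path created by download_zip_file().
            with open(filename, "wb") as handle:
                handle.write(payload.getvalue())

    with download_zip_file(s3_client=_StubS3Client(), bucket="demo-bucket", key="demo-key") as zip_file:
        print(zip_file.read("imageDetail.json").decode("utf-8"))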
|
normal
|
{
"blob_id": "4c59e5fab2469af3f40cafaac226a993f6628290",
"index": 3624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, 'r') as zip_file:\n yield zip_file\n",
"step-4": "import json\nimport tempfile\nimport zipfile\nfrom contextlib import contextmanager\nfrom utils import codepipeline_lambda_handler, create_zip_file, get_artifact_s3_client, get_cloudformation_template, get_input_artifact_location, get_output_artifact_location, get_session, get_user_parameters, log\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, 'r') as zip_file:\n yield zip_file\n",
"step-5": "import json\nimport tempfile\nimport zipfile\nfrom contextlib import contextmanager\n\nfrom utils import (\n codepipeline_lambda_handler,\n create_zip_file,\n get_artifact_s3_client,\n get_cloudformation_template,\n get_input_artifact_location,\n get_output_artifact_location,\n get_session,\n get_user_parameters,\n log,\n)\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n\n # Get details from the event.\n job = event[\"CodePipeline.job\"]\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params[\"AssumeRoleArn\"]\n image_parameter_name = user_params[\"ImageParameterName\"]\n stack_name = user_params[\"StackName\"]\n template_filename = user_params[\"TemplateFilename\"]\n\n # Create client in the pipeline account.\n pipeline_s3_client = get_artifact_s3_client(job)\n\n # Create clients in the target account.\n target_session = get_session(\n role_arn=assume_role_arn, session_name=\"prepare-ami-deployment\"\n )\n target_cfn_client = target_session.client(\"cloudformation\")\n target_ssm_client = target_session.client(\"ssm\")\n\n # Download the input artifact zip file, read manifest.json from it,\n # and get the AMI it references. Also look up the associated image name.\n with download_zip_file(\n s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key\n ) as zip_file:\n image_detail_string = zip_file.read(\"imageDetail.json\").decode(\"utf-8\")\n log(\"IMAGE_DETAIL_STRING\", image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail[\"ImageURI\"]\n log(\"IMAGE\", image)\n\n # Update the SSM parameters with the image,\n # to be used by the CloudFormation deployment stage of the pipeline.\n target_ssm_client.put_parameter(\n Name=image_parameter_name, Value=image, Type=\"String\", Overwrite=True\n )\n\n # Write the CloudFormation stack's template to the output artifact location,\n # to be used by the CloudFormation deployment stage of the pipeline.\n template = get_cloudformation_template(\n cfn_client=target_cfn_client, stack_name=stack_name\n )\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, \"r\") as zip_file:\n yield zip_file\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import struct
def parse(message):
return IGENMessage.from_bytes(message)
class IGENMessage(object):
def __init__(self):
self.serial = None
self.temperature = None
self.pv1 = 0
self.pv2 = 0
self.pv3 = 0
self.pa1 = 0
self.pa2 = 0
self.pa3 = 0
self.ov1 = 0
self.ov2 = 0
self.ov3 = 0
self.oa1 = 0
self.oa2 = 0
self.oa3 = 0
self.oHz = 0
self.op1 = 0
self.op2 = 0
self.op3 = 0
self.energy_today = None
self.energy_overall = None
self.operational_hours = None
@classmethod
def from_bytes(cls, data):
if len(data) != 103:
raise Exception('Packet should be exactly 103 bytes')
self = cls()
parsed = struct.unpack('!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)
self.serial = parsed[0].decode('ascii')
self.temperature = parsed[1] / 10
self.pv1 = parsed[2] / 10
self.pv2 = parsed[3] / 10
self.pv3 = parsed[4] / 10
self.pa1 = parsed[5] / 10
self.pa2 = parsed[6] / 10
self.pa3 = parsed[7] / 10
self.oa1 = parsed[8] / 10
self.oa2 = parsed[9] / 10
self.oa3 = parsed[10] / 10
self.ov1 = parsed[11] / 10
self.ov2 = parsed[12] / 10
self.ov3 = parsed[13] / 10
self.oHz = parsed[14] / 100
self.op1 = parsed[15]
self.op2 = parsed[16]
self.op3 = parsed[17]
self.energy_today = parsed[18] / 100
self.energy_overall = parsed[19] / 10
self.operational_hours = parsed[20]
return self
def outputs(self):
return [
(self.ov1, self.oa1, self.op1),
(self.ov2, self.oa2, self.op2),
(self.ov3, self.oa3, self.op3)
]
def inputs(self):
return [
(self.pv1, self.pa1),
(self.pv2, self.pa2),
(self.pv3, self.pa3)
]
def report(self):
print("Logger: {}".format(self.serial))
print("Temperature: {} degrees celcius".format(self.temperature))
print()
print("Inputs: ")
print(" Channel 1: {:6.2f} V {:5.2f} A".format(self.pv1, self.pa1))
print(" Channel 2: {:6.2f} V {:5.2f} A".format(self.pv2, self.pa2))
print(" Channel 3: {:6.2f} V {:5.2f} A".format(self.pv3, self.pa3))
print()
print("Outputs: ({} Hz)".format(self.oHz))
print(" L1: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov1, self.oa1, self.op1))
print(" L2: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov2, self.oa2, self.op2))
print(" L3: {:6.2f} V {:5.2f} A {:5.0f} W".format(self.ov3, self.oa3, self.op3))
print()
print("Energy today: {:8.1f} kWh".format(self.energy_today))
print("Energy overall: {:8.1f} kWh".format(self.energy_overall))
print("Operational hours: {}".format(self.operational_hours))
def __repr__(self):
total_power = self.op1 + self.op2 + self.op3
return "<IGENMessage {} watt ({} kWh today)>".format(total_power, self.energy_today)
|
normal
|
{
"blob_id": "5df42a024e1edbe5cc977a814efe580db04b8b76",
"index": 2386,
"step-1": "<mask token>\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n <mask token>\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-2": "<mask token>\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-3": "<mask token>\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-4": "import struct\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n self = cls()\n parsed = struct.unpack(\n '!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n self.temperature = parsed[1] / 10\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n self.oHz = parsed[14] / 100\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n self.operational_hours = parsed[20]\n return self\n\n def outputs(self):\n return [(self.ov1, self.oa1, self.op1), (self.ov2, self.oa2, self.\n op2), (self.ov3, self.oa3, self.op3)]\n\n def inputs(self):\n return [(self.pv1, self.pa1), (self.pv2, self.pa2), (self.pv3, self\n .pa3)]\n\n def report(self):\n print('Logger: {}'.format(self.serial))\n print('Temperature: {} degrees celcius'.format(self.temperature))\n print()\n print('Inputs: ')\n print(' Channel 1: {:6.2f} V {:5.2f} A'.format(self.pv1, self.pa1))\n print(' Channel 2: {:6.2f} V {:5.2f} A'.format(self.pv2, self.pa2))\n print(' Channel 3: {:6.2f} V {:5.2f} A'.format(self.pv3, self.pa3))\n print()\n print('Outputs: ({} Hz)'.format(self.oHz))\n print(' L1: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov1,\n self.oa1, self.op1))\n print(' L2: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov2,\n self.oa2, self.op2))\n print(' L3: {:6.2f} V {:5.2f} A {:5.0f} W'.format(self.ov3,\n self.oa3, self.op3))\n print()\n print('Energy today: {:8.1f} kWh'.format(self.energy_today))\n print('Energy overall: {:8.1f} kWh'.format(self.energy_overall))\n print('Operational hours: {}'.format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return '<IGENMessage {} watt ({} kWh today)>'.format(total_power,\n self.energy_today)\n",
"step-5": "import struct\n\n\ndef parse(message):\n return IGENMessage.from_bytes(message)\n\n\nclass IGENMessage(object):\n def __init__(self):\n self.serial = None\n self.temperature = None\n self.pv1 = 0\n self.pv2 = 0\n self.pv3 = 0\n self.pa1 = 0\n self.pa2 = 0\n self.pa3 = 0\n self.ov1 = 0\n self.ov2 = 0\n self.ov3 = 0\n self.oa1 = 0\n self.oa2 = 0\n self.oa3 = 0\n self.oHz = 0\n self.op1 = 0\n self.op2 = 0\n self.op3 = 0\n self.energy_today = None\n self.energy_overall = None\n self.operational_hours = None\n\n @classmethod\n def from_bytes(cls, data):\n if len(data) != 103:\n raise Exception('Packet should be exactly 103 bytes')\n\n self = cls()\n\n parsed = struct.unpack('!17x 14s H HHH HHH HHH HHH H HHH 4x H 2x H 2x H 24x', data)\n self.serial = parsed[0].decode('ascii')\n\n self.temperature = parsed[1] / 10\n\n self.pv1 = parsed[2] / 10\n self.pv2 = parsed[3] / 10\n self.pv3 = parsed[4] / 10\n\n self.pa1 = parsed[5] / 10\n self.pa2 = parsed[6] / 10\n self.pa3 = parsed[7] / 10\n\n self.oa1 = parsed[8] / 10\n self.oa2 = parsed[9] / 10\n self.oa3 = parsed[10] / 10\n\n self.ov1 = parsed[11] / 10\n self.ov2 = parsed[12] / 10\n self.ov3 = parsed[13] / 10\n\n self.oHz = parsed[14] / 100\n\n self.op1 = parsed[15]\n self.op2 = parsed[16]\n self.op3 = parsed[17]\n\n self.energy_today = parsed[18] / 100\n self.energy_overall = parsed[19] / 10\n\n self.operational_hours = parsed[20]\n\n return self\n\n def outputs(self):\n return [\n (self.ov1, self.oa1, self.op1),\n (self.ov2, self.oa2, self.op2),\n (self.ov3, self.oa3, self.op3)\n ]\n\n def inputs(self):\n return [\n (self.pv1, self.pa1),\n (self.pv2, self.pa2),\n (self.pv3, self.pa3)\n ]\n\n def report(self):\n print(\"Logger: {}\".format(self.serial))\n print(\"Temperature: {} degrees celcius\".format(self.temperature))\n print()\n print(\"Inputs: \")\n print(\" Channel 1: {:6.2f} V {:5.2f} A\".format(self.pv1, self.pa1))\n print(\" Channel 2: {:6.2f} V {:5.2f} A\".format(self.pv2, self.pa2))\n print(\" Channel 3: {:6.2f} V {:5.2f} A\".format(self.pv3, self.pa3))\n print()\n print(\"Outputs: ({} Hz)\".format(self.oHz))\n print(\" L1: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov1, self.oa1, self.op1))\n print(\" L2: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov2, self.oa2, self.op2))\n print(\" L3: {:6.2f} V {:5.2f} A {:5.0f} W\".format(self.ov3, self.oa3, self.op3))\n print()\n print(\"Energy today: {:8.1f} kWh\".format(self.energy_today))\n print(\"Energy overall: {:8.1f} kWh\".format(self.energy_overall))\n print(\"Operational hours: {}\".format(self.operational_hours))\n\n def __repr__(self):\n total_power = self.op1 + self.op2 + self.op3\n return \"<IGENMessage {} watt ({} kWh today)>\".format(total_power, self.energy_today)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
from django.db import models
from NavigantAnalyzer.common import convert_datetime_string
import json
# A custom view-based model for flat outputs - RÖ - 2018-10-24
# Don't add, change or delete fields without editing the view in the Db
class Results_flat(models.Model):
race_id = models.IntegerField()
race_name = models.CharField(max_length=127)
race_serie = models.CharField(max_length=127, blank=True)
race_begin = models.DateTimeField(blank=True, null=True)
result_start_time = models.DateTimeField(blank=True, null=True)
runner_last_name = models.CharField(max_length=63, blank=True)
runner_first_name = models.CharField(max_length=63, blank=True)
result_emit = models.CharField(max_length=12, blank=True)
course_name = models.CharField(max_length=63)
course_length = models.IntegerField(blank=True, null=True)
course_num_participants = models.IntegerField(blank=True, null=True)
course_min_time = models.IntegerField(blank=True, null=True)
course_mean_time = models.IntegerField(blank=True, null=True)
course_min_puistotime = models.IntegerField(blank=True, null=True)
course_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_min_time = models.IntegerField(blank=True, null=True)
visit_mean_time = models.IntegerField(blank=True, null=True)
visit_min_puistotime = models.IntegerField(blank=True, null=True)
visit_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_puistoman_time = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_min_time = models.IntegerField(blank=True, null=True)
leg_mean_time = models.IntegerField(blank=True, null=True)
leg_min_puistotime = models.IntegerField(blank=True, null=True)
leg_mean_puistotime = models.IntegerField(blank=True, null=True)
visit_order = models.IntegerField()
visit_code = models.IntegerField()
visit_time = models.IntegerField()
visit_position = models.IntegerField(blank=True)
visit_puistoposition = models.IntegerField(blank=True)
leg_time = models.IntegerField(blank=True)
leg_position = models.IntegerField(blank=True)
leg_puistoposition = models.IntegerField(blank=True)
visit_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08
leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
leg_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
leg_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08
result_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08
result_puisto_max_level = models.FloatField(null=True) # Since 2019-12-08
result_puisto_success = models.FloatField(null=True) # Since 2019-12-08
result_puisto_optimum = models.IntegerField(null=True) # Since 2019-12-08
result_puisto_mistakes = models.IntegerField(null=True) # Since 2019-12-08
class Meta:
managed = False
db_table = 'NavigantAnalyzer_results_flat'
def get_fields(self):
result = dict()
datetime_fields = ['race_begin', 'result_start_time']
for field in Results_flat._meta.fields:
value = field.value_to_string(self)
if value.isdigit():
value = int(value)
if field.name in datetime_fields:
value = convert_datetime_string(value)
result[field.name] = value
return json.dumps(result)
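

# Illustrative sketch only, assuming a configured Django project and at least one row
# in the NavigantAnalyzer_results_flat view: get_fields() returns a JSON string, so
# callers typically decode it straight back into a dict.
def first_result_as_dict():
    row = Results_flat.objects.first()
    return json.loads(row.get_fields()) if row is not None else None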
|
normal
|
{
"blob_id": "802eb0502c5eddcabd41b2d438bf53a5d6fb2c82",
"index": 8368,
"step-1": "<mask token>\n\n\nclass Results_flat(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Results_flat(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-3": "<mask token>\n\n\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True)\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puistoperc_time_pm = models.FloatField(null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puisto_success = models.FloatField(null=True)\n result_puistoperc_time_l = models.FloatField(null=True)\n result_puistoperc_time_pm = models.FloatField(null=True)\n result_puisto_max_level = models.FloatField(null=True)\n result_puisto_success = models.FloatField(null=True)\n result_puisto_optimum = models.IntegerField(null=True)\n result_puisto_mistakes = models.IntegerField(null=True)\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-4": "from django.db import models\nfrom NavigantAnalyzer.common import convert_datetime_string\nimport json\n\n\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True)\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True)\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puistoperc_time_pm = models.FloatField(null=True)\n leg_puistoperc_time_l = models.FloatField(null=True)\n leg_puisto_success = models.FloatField(null=True)\n result_puistoperc_time_l = models.FloatField(null=True)\n result_puistoperc_time_pm = models.FloatField(null=True)\n result_puisto_max_level = models.FloatField(null=True)\n result_puisto_success = models.FloatField(null=True)\n result_puisto_optimum = models.IntegerField(null=True)\n result_puisto_mistakes = models.IntegerField(null=True)\n\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-5": "from django.db import models\nfrom NavigantAnalyzer.common import convert_datetime_string\nimport json\n\n# A custom view-based model for flat outputs - RÖ - 2018-10-24\n# Don't add, change or delete fields without editing the view in the Db\nclass Results_flat(models.Model):\n race_id = models.IntegerField()\n race_name = models.CharField(max_length=127)\n race_serie = models.CharField(max_length=127, blank=True)\n race_begin = models.DateTimeField(blank=True, null=True)\n result_start_time = models.DateTimeField(blank=True, null=True)\n runner_last_name = models.CharField(max_length=63, blank=True)\n runner_first_name = models.CharField(max_length=63, blank=True)\n result_emit = models.CharField(max_length=12, blank=True)\n course_name = models.CharField(max_length=63)\n course_length = models.IntegerField(blank=True, null=True)\n course_num_participants = models.IntegerField(blank=True, null=True)\n course_min_time = models.IntegerField(blank=True, null=True)\n course_mean_time = models.IntegerField(blank=True, null=True)\n course_min_puistotime = models.IntegerField(blank=True, null=True)\n course_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_min_time = models.IntegerField(blank=True, null=True)\n visit_mean_time = models.IntegerField(blank=True, null=True)\n visit_min_puistotime = models.IntegerField(blank=True, null=True)\n visit_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_puistoman_time = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_min_time = models.IntegerField(blank=True, null=True)\n leg_mean_time = models.IntegerField(blank=True, null=True)\n leg_min_puistotime = models.IntegerField(blank=True, null=True)\n leg_mean_puistotime = models.IntegerField(blank=True, null=True)\n visit_order = models.IntegerField()\n visit_code = models.IntegerField()\n visit_time = models.IntegerField()\n visit_position = models.IntegerField(blank=True)\n visit_puistoposition = models.IntegerField(blank=True)\n leg_time = models.IntegerField(blank=True)\n leg_position = models.IntegerField(blank=True)\n leg_puistoposition = models.IntegerField(blank=True)\n visit_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n visit_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistodiff_time_l = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistodiff_time_pm = models.IntegerField(blank=True, null=True) # Since 2019-12-08\n leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n leg_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08\n leg_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n leg_puisto_success = models.FloatField(null=True) # Since 2019-12-08\n result_puistoperc_time_l = models.FloatField(null=True) # Since 2019-12-08\n result_puistoperc_time_pm = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_max_level = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_success = models.FloatField(null=True) # Since 2019-12-08\n result_puisto_optimum = models.IntegerField(null=True) # Since 2019-12-08\n result_puisto_mistakes = models.IntegerField(null=True) # Since 2019-12-08\n\n class Meta:\n managed = False\n db_table = 'NavigantAnalyzer_results_flat'\n\n def get_fields(self):\n result = dict()\n datetime_fields = ['race_begin', 'result_start_time']\n for field in Results_flat._meta.fields:\n value = field.value_to_string(self)\n if 
value.isdigit():\n value = int(value)\n if field.name in datetime_fields:\n value = convert_datetime_string(value)\n result[field.name] = value\n return json.dumps(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
sys.path.append("/home/mccann/bin/python/obsolete")
from minuit import *
execfile("/home/mccann/antithesis/utilities.py")
nobeam = getsb("cos")
ebeam = getsb("bge")
pbeam = getsb("bgp")
import gbwkf
import gbwkftau
runstart = pickle.load(open("/home/mccann/antithesis/old_dotps/runstart.p"))
runend = pickle.load(open("/home/mccann/antithesis/old_dotps/runend.p"))
import time
bsbha = pickle.load(open("/home/mccann/synthesis/run/bsbha.p"))
nbish2nb = 23.0481
bhabha_interference = 1. # this is a multiplier: 0. to turn off
class FitRecord: pass
ggfits = pickle.load(open("/home/mccann/antithesis/fit_results/octoberfits_fixen_0_1.0.p"))
# I learned this from Matt and the beam energy program logs
runsummary[123828].energy = 4.72992
runsummary[123832].energy = 4.72990
def run_date(r):
if r in runstart and r in runend:
return (runstart[r] + runend[r])/2.
elif r in runstart:
return runstart[r]
elif r in runend:
return runend[r]
else:
raise Exception
# The 48-hour limit is built into setup_runs
def setup_runs(res, low, high):
beginning = run_date(low)
tmpruns = []
for r in initialrunlist:
if r not in mybadruns and low <= r <= high and runsummary[r].res == res:
if runsummary[r].kind == 's' or runsummary[r].kind == 'p':
if run_date(r) < beginning + 48.*60.*60:
tmpruns.append(r)
return tmpruns
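# mygbwkf / mygbwkftau wrap the gbwkf beam-smeared resonance line shapes for the
# hadronic and tau-pair final states; far above the resonance (w > mass + 200 MeV)
# they fall back to a simple 0.076/(w-mass) radiative-tail approximation.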
def mygbwkf(mass, fullgam, rmsbeam, yint, phi, w):
"yint = 0.018, 0.018, 0.018; phi=0"
if w > mass + 200.:
return 0.076/(w-mass)
return gbwkf.gbwkf(mass, fullgam, rmsbeam, yint, phi, w-mass)
def mygbwkftau(mass, fullgam, rmsbeam, yint, phi, w):
"yint = 0.20, 0.37, 0.27; phi = 0"
if w > mass + 200.:
return 0.076/(w-mass)
return gbwkftau.gbwkf(mass, fullgam, rmsbeam, yint, phi, w-mass)
def background(w):
tmp = 0.
tmp += 9.35538858434 * (1.-0.0792) * 9000.**2 / w**2
tmp += 9.35538858434 * 0.0792 * log(w**2/9000.**2)
return tmp
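# u1func/u2func/u3func model the hadronic cross-section near the Upsilon(1S),
# Upsilon(2S) and Upsilon(3S) (masses 9460.30, 10023.26 and 10355.2 MeV): a
# resonance term (mygbwkf), a tau-pair contribution (mygbwkftau), a 1/w**2
# continuum term and a log(w**2) two-photon term.  u2func and u3func also add
# the tails of the lower resonance(s) via the u1area/u2area parameters.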
def u1func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, w):
tmp = 0.
tmp += area * 0.9793 * mygbwkf(9460.30, fullgam, rmsbeam, yint, phi, w)
tmp += area * 0.578 * btautau * mygbwkftau(9460.30, fullgam, rmsbeam, tauyint, tauphi, w)
tmp += back * (1.-twophofrac) * 9000.**2 / w**2
tmp += back * twophofrac * log(w**2/9000.**2)
return tmp
def u2func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, w):
tmp = 0.
tmp += area * 0.9618 * mygbwkf(10023.26, fullgam, rmsbeam, yint, phi, w)
tmp += area * 0.578 * btautau * mygbwkftau(10023.26, fullgam, rmsbeam, tauyint, tauphi, w)
tmp += back * (1.-twophofrac) * 9000.**2 / w**2
tmp += back * twophofrac * log(w**2/9000.**2)
tmp += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)
return tmp
def u3func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, u2area, w):
tmp = 0.
tmp += area * 0.9641 * mygbwkf(10355.2, fullgam, rmsbeam, yint, phi, w)
tmp += area * 0.578 * btautau * mygbwkftau(10355.2, fullgam, rmsbeam, tauyint, tauphi, w)
tmp += back * (1.-twophofrac) * 9000.**2 / w**2
tmp += back * twophofrac * log(w**2/9000.**2)
tmp += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)
tmp += u2area * mygbwkf(10023.26, 0., 0., 0., 0., w)
return tmp
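# whichamiin(r) returns (resonance number, scan-week label) for run r, or
# (resonance number, None) when the run is not part of a named scan week.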
def whichamiin(r):
if runsummary[r].res == 1:
for s in ["jan16", "jan30", "feb06", "feb13", "feb20", "feb27", "mar06", "mar13", "apr03", "apr08", "apr09", "apr10"]:
if r in u1runs[s]:
return 1, s
elif runsummary[r].res == 2:
for s in ["may29", "jun11", "jun12", "jul10", "jul24", "aug07"]:
if r in u2runs[s]:
return 2, s
elif runsummary[r].res == 3:
for s in ["nov28", "dec05", "dec12", "dec19", "dec26", "jan02", "jan09"]:
if r in u3runs[s]:
return 3, s
return runsummary[r].res, None
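# get_run(r) evaluates two luminosity sources (0 = gamma gamma counts,
# 3 = Bhabha counts from bsbha) and returns the run energy, the fitted
# week-by-week energy shift, the gamma-gamma and Bhabha luminosities with
# errors, and the background-subtracted hadron count with its error.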
def get_run(r):
gamgam_lumi = None
gamgam_lumi_err = None
bhabha_lumi = None
bhabha_lumi_err = None
num_hadrons = None
num_hadrons_err = None
the_energy = None
the_shift = None
therun = getdb(r)
for lumisource in (0, 3):
g = 0.
h = 0.
e = 0.
p = 0.
c = 0.
ngg = therun.gamgam
if r in mycarefulscan: ngg = therun.gamgam_vstime.sum(0.,0.99)
fitrec = pickle.load(open("/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p"))
if runsummary[r].res == 1:
myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values
elif runsummary[r].res == 2:
myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values
elif runsummary[r].res == 3:
myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values
whichres, whichweek = whichamiin(r)
thisshift = 0.
if whichweek != None:
thisshift = eval("my"+whichweek)
the_energy = runsummary[r].energy*2000.
the_shift = thisshift
if runsummary[r].res == 1:
myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = ggfits[1].values
elif runsummary[r].res == 2:
myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = ggfits[2].values
elif runsummary[r].res == 3:
myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = ggfits[3].values
whichres, whichweek = whichamiin(r)
thisrmsbeam = myrmsbeam
if whichres == 1:
if whichweek != None:
if whichweek in ["jan16", "jan30", "feb06", "feb13", "feb20"]: thisrmsbeam = myrjan
if whichweek in ["feb27", "mar06", "mar13"]: thisrmsbeam = myrfeb
if whichweek in ["apr03", "apr08", "apr09"]: thisrmsbeam = myrapr1
if whichweek in ["apr10"]: thisrmsbeam = myrapr2
if whichres == 3:
if whichweek != None:
thisrmsbeam = eval("myr"+whichweek)
thisshift = 0.
if whichweek != None:
thisshift = 0. - eval("my"+whichweek)
if r in mycarefulscan:
h += therun.hadroncool_vstime.sum(0.,0.99)
e += therun.beamgase_vstime.sum(0.,0.99)
p += therun.beamgasp_vstime.sum(0.,0.99)
c += therun.cosmic_vstime.sum(0.,0.99)
if lumisource == 0:
g += therun.gamgam_vstime.sum(0.,0.99)
elif lumisource == 1:
g += therun.bhabha_cosp.sum(0., 0.6) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
if runsummary[r].kind != 'c':
# eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
elif lumisource == 2:
g += therun.bhabha_cosp.sum(0.6, 0.8) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
if runsummary[r].kind != 'c':
# eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
elif lumisource == 3:
g += 1.*bsbha[r] * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha
if runsummary[r].kind != 'c':
# eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
else:
h += therun.hadroncool
e += therun.beamgase
p += therun.beamgasp
c += therun.cosmic
if lumisource == 0:
g += therun.gamgam
elif lumisource == 1:
g += therun.bhabha_cosp.sum(0., 0.6)
if runsummary[r].kind != 'c':
# e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
elif lumisource == 2:
g += therun.bhabha_cosp.sum(0.6, 0.8)
if runsummary[r].kind != 'c':
# e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
elif lumisource == 3:
g += 1.*bsbha[r]
if runsummary[r].kind != 'c':
# e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range
if runsummary[r].res == 1:
eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 2:
eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
if runsummary[r].res == 3:
eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667
g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb
average_energy = runsummary[r].energy
ebkgnd = 1. * (ebeam.hadroncool - 1.*nobeam.hadroncool*ebeam.cosmic/nobeam.cosmic) * e / ebeam.beamgase
pbkgnd = 1. * (pbeam.hadroncool - 1.*nobeam.hadroncool*pbeam.cosmic/nobeam.cosmic) * p / pbeam.beamgasp
cbkgnd = 1. * nobeam.hadroncool * c / nobeam.cosmic
hadrons = h - ebkgnd/2. - pbkgnd/2. - cbkgnd
hadrons_err = sqrt(h + c * (1.*nobeam.hadroncool/nobeam.cosmic)**2 + ebkgnd/2. + pbkgnd/2.)
num_hadrons = hadrons
num_hadrons_err = hadrons_err
if lumisource == 3:
if whichres == 1:
cs = hadrons / g / average_energy**2 * 199.5 # these differences are due to different efficiencies, as predicted by the MC
bhabha_lumi = g * average_energy**2 / 199.5
bhabha_lumi_err = sqrt(g) * average_energy**2 / 199.5
elif whichres == 2:
cs = hadrons / g / average_energy**2 * 197.4 # and verified by my lumi counts
bhabha_lumi = g * average_energy**2 / 197.4
bhabha_lumi_err = sqrt(g) * average_energy**2 / 197.4
elif whichres == 3:
cs = hadrons / g / average_energy**2 * 196.0 # (I totally believe this.)
bhabha_lumi = g * average_energy**2 / 196.0
bhabha_lumi_err = sqrt(g) * average_energy**2 / 196.0
cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)
else:
cs = hadrons / g / average_energy**2 * nbish2nb
cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)
gamgam_lumi = g * average_energy**2 / nbish2nb
gamgam_lumi_err = sqrt(g) * average_energy**2 / nbish2nb
if lumisource == 1:
cs /= 0.23684
cs_err /= 0.23684
if lumisource == 2:
cs /= 0.118999
cs_err /= 0.118999
return float(the_energy), float(the_shift), float(gamgam_lumi), float(gamgam_lumi_err), float(bhabha_lumi), float(bhabha_lumi_err), float(num_hadrons), float(num_hadrons_err)
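# ARun bundles the per-run numbers from get_run; getfval() evaluates the fitted
# cross-section function and a one-sided numerical derivative (0.1 MeV step) at
# this run's energy, using the appropriate week-dependent beam spread.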
class ARun:
def __init__(self, r):
self.run = r
self.en, self.shift, self.gg, self.gg_err, self.bb, self.bb_err, self.had, self.had_err = get_run(r)
def getfval(self):
fitrec = pickle.load(open("/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p"))
whichres, whichweek = whichamiin(self.run)
if whichres == 1:
myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values
thisrmsbeam = myrmsbeam
if whichweek != None:
if whichweek in ["jan16", "jan30", "feb06", "feb13", "feb20"]: thisrmsbeam = myrjan
if whichweek in ["feb27", "mar06", "mar13"]: thisrmsbeam = myrfeb
if whichweek in ["apr03", "apr08", "apr09"]: thisrmsbeam = myrapr1
if whichweek in ["apr10"]: thisrmsbeam = myrapr2
else:
if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
raise Exception
self.func = u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift)
self.deriv = (self.func - u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift-0.1))/0.1
elif whichres == 2:
myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values
self.func = u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift)
self.deriv = (self.func - u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift-0.1))/0.1
elif whichres == 3:
myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values
thisrmsbeam = myrmsbeam
if whichres == 3:
if whichweek != None:
thisrmsbeam = eval("myr"+whichweek)
else:
if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
raise Exception
self.func = u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift)
self.deriv = (self.func - u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift-0.1))/0.1
else:
if runsummary[self.run].kind != "c" and runsummary[self.run].kind != "h":
raise Exception
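# Scan-week run lists per resonance (setup_runs keeps scan/peak runs within 48
# hours of the first run of the week), plus "cont" (kind 'c') and "high"
# (kind 'h') collections filled from the full run list below.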
u1runs = {}
u2runs = {}
u3runs = {}
u1runs["cont"] = []
u2runs["cont"] = []
u3runs["cont"] = []
u1runs["high"] = []
u2runs["high"] = []
u3runs["high"] = []
u1runs["jan16"] = setup_runs(1, 123164, 123178)
u1runs["jan30"] = setup_runs(1, 123596, 123718)
u1runs["feb06"] = setup_runs(1, 123781, 123893)
u1runs["feb13"] = setup_runs(1, 124080, 124092)
u1runs["feb20"] = setup_runs(1, 124102, 124214)
u1runs["feb27"] = setup_runs(1, 124279, 124394)
u1runs["mar06"] = setup_runs(1, 124436, 124519)
u1runs["mar13"] = setup_runs(1, 124625, 124736)
u1runs["apr03"] = setup_runs(1, 125119, 125127)
u1runs["apr08"] = setup_runs(1, 125254, 125262)
u1runs["apr09"] = setup_runs(1, 125285, 125295)
u1runs["apr10"] = setup_runs(1, 125303, 125416)
u2runs["may29"] = setup_runs(2, 126449, 126568)
u2runs["jun11"] = setup_runs(2, 126776, 126783)
u2runs["jun12"] = setup_runs(2, 126814, 126915)
u2runs["jul10"] = setup_runs(2, 127588, 127615)
u2runs["jul24"] = setup_runs(2, 127924, 127933)
u2runs["aug07"] = setup_runs(2, 128303, 128316)
u3runs["nov28"] = setup_runs(3, 121884, 122007)
u3runs["dec05"] = setup_runs(3, 122069, 122178)
u3runs["dec12"] = setup_runs(3, 122245, 122326)
u3runs["dec19"] = setup_runs(3, 122409, 122527)
u3runs["dec26"] = setup_runs(3, 122535, 122757)
u3runs["jan02"] = setup_runs(3, 122766, 122881)
u3runs["jan09"] = setup_runs(3, 122993, 123101)
for r in initialrunlist:
if r not in mybadruns:
if runsummary[r].res == 1 and runsummary[r].kind == 'c':
u1runs["cont"].append(r)
if runsummary[r].res == 2 and runsummary[r].kind == 'c':
u2runs["cont"].append(r)
if runsummary[r].res == 3 and runsummary[r].kind == 'c':
u3runs["cont"].append(r)
for r in initialrunlist:
if r not in mybadruns:
if runsummary[r].res == 1 and runsummary[r].kind == 'h':
u1runs["high"].append(r)
if runsummary[r].res == 2 and runsummary[r].kind == 'h':
u2runs["high"].append(r)
if runsummary[r].res == 3 and runsummary[r].kind == 'h':
u3runs["high"].append(r)
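# Build an ARun for every selected run, then evaluate the fit function (and its
# derivative) for the Upsilon(1S), (2S) and (3S) samples in turn.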
data = {}
for un in (u1runs, u2runs, u3runs):
for s in un:
for r in un[s]:
print "arun", r
data[r] = ARun(r)
for r in data:
if runsummary[r].res == 1:
print "u1", r
data[r].getfval()
for r in data:
if runsummary[r].res == 2:
print "u2", r
data[r].getfval()
for r in data:
if runsummary[r].res == 3:
print "u3", r
data[r].getfval()
# x = []
# y = []
# dy = []
# for r in data:
# if runsummary[r].res == 1:
# x.append(data[r].en + data[r].shift)
# y.append(data[r].had/data[r].bb - data[r].func)
# dy.append(data[r].had/data[r].bb*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2))
# p = biggles.FramedPlot()
# p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
# p.add(biggles.SymmetricErrorBarsY(x, y, dy))
# p.show()
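# Pull distributions and per-run chi^2 contributions, first binned by the
# derivative of the fit function (nb/MeV) and then by the absolute cross-section.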
histneg = hist.h1(20, -4, 4)
histpos = hist.h1(20, -4, 4)
histpeak = hist.h1(20, -4, 4)
histcont = hist.h1(20, -4, 4)
histtail = hist.h1(20, -4, 4)
profile = hist.prof(20, -4, 4)
x = []
y = []
for r in data:
crosssec = data[r].had/data[r].bb
crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)
pull = (crosssec - data[r].func)/crosssec_err
x.append(data[r].deriv)
y.append(pull**2)
profile.fill(x[-1], y[-1])
if x[-1] < -1:
histneg.fill(pull)
if x[-1] > 1:
histpos.fill(pull)
if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "p":
histpeak.fill(pull)
if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "c":
histcont.fill(pull)
if -0.1 < x[-1] < 0.1 and runsummary[r].kind == "h":
histtail.fill(pull)
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.y1.range = 0, 10
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Function derivative (nb/MeV)"
p.show()
p.write_eps("residualntuple_1.eps")
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.y1.range = 0, 5
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Function derivative (nb/MeV)"
p.show()
p.write_eps("residualntuple_2.eps")
histneg.rootn()
histpos.rootn()
histpeak.rootn()
histcont.rootn()
histtail.rootn()
p = (histneg / histneg.sum()).plot()
p.add((histpos / histpos.sum()).steps(linecolor="red"))
p.add((histpeak / histpeak.sum()).steps(linecolor="blue"))
p.add((histcont / histcont.sum()).steps(linecolor="green"))
p.add((histtail / histtail.sum()).steps(linecolor="purple"))
p.add((histneg / histneg.sum()).errorbars())
p.add((histpos / histpos.sum()).errorbars(linecolor="red"))
p.add((histpeak / histpeak.sum()).errorbars(linecolor="blue"))
p.add((histcont / histcont.sum()).errorbars(linecolor="green"))
p.add((histtail / histtail.sum()).errorbars(linecolor="purple"))
p.x1.range = 5, 30
p.y1.range = 0, 0.4
p.x1.label = r"Pull distributions of different types of datasets"
p.show()
p.write_eps("residualntuple_3.eps")
x = []
y = []
profile = hist.prof(20, 5, 30)
for r in data:
crosssec = data[r].had/data[r].bb
crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)
x.append(crosssec)
y.append((crosssec - data[r].func)**2/crosssec_err**2)
profile.fill(x[-1], y[-1])
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.x1.range = 5, 30
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_4.eps")
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.x1.range = 5, 30
p.y1.range = 0, 5
p.y1.label = r"Contribution to $\chi^2$"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_5.eps")
x = []
y = []
dy = []
for r in data:
ratio = data[r].bb/data[r].gg
ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
x.append(data[r].had/data[r].bb)
y.append(ratio)
dy.append(ratio_err)
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.add(biggles.LineY(1.))
p.x1.range = 5, 30
p.y1.label = r"Bhabha luminosity / gamgam luminosity"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_6.eps")
x = []
y = []
profile = hist.prof(20, 5, 30)
for r in data:
ratio = data[r].bb/data[r].gg
ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
x.append(data[r].had/data[r].bb)
y.append((ratio-1)/ratio_err)
profile.fill(x[-1], y[-1])
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.LineY(0.))
p.x1.range = 5, 30
p.y1.label = r"BB/GG sigmas"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_7.eps")
profile.update()
x = profile.frame + (profile.high - profile.frame[-1])/2.
y = profile.vals
dy = profile.errs
p = biggles.FramedPlot()
p.add(biggles.Points(x, y, symboltype="filled circle", symbolsize=0.5))
p.add(biggles.SymmetricErrorBarsY(x, y, dy))
p.add(biggles.LineY(0.))
p.x1.range = 5, 30
p.y1.range = -3, 3
p.y1.label = r"BB/GG sigmas"
p.x1.label = r"Absolute cross-section (nb)"
p.show()
p.write_eps("residualntuple_8.eps")
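# Compare Bhabha and gamma-gamma luminosities: collect the bb/gg ratio separately
# for off-resonance (kind 'c') and on-resonance (kind 's') runs of each Upsilon
# and print their means (jt.wmean presumably returns a weighted mean and error).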
offres = []
on1 = []
on2 = []
on3 = []
off1 = []
off2 = []
off3 = []
for r in data:
ratio = data[r].bb/data[r].gg
ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)
if runsummary[r].kind == "c":
offres.append((ratio, ratio_err))
if runsummary[r].res == 1:
off1.append((ratio, ratio_err))
elif runsummary[r].res == 2:
off2.append((ratio, ratio_err))
elif runsummary[r].res == 3:
off3.append((ratio, ratio_err))
elif runsummary[r].kind == "s" and runsummary[r].res == 1:
on1.append((ratio, ratio_err))
elif runsummary[r].kind == "s" and runsummary[r].res == 2:
on2.append((ratio, ratio_err))
elif runsummary[r].kind == "s" and runsummary[r].res == 3:
on3.append((ratio, ratio_err))
print jt.wmean(offres)
print jt.wmean(on1)
print jt.wmean(on2)
print jt.wmean(on3)
print jt.wmean(off1)
print jt.wmean(off2)
print jt.wmean(off3)
|
normal
|
{
"blob_id": "51cd74bff5a0883a7bee2b61b152aecb2c5ccc66",
"index": 6263,
"step-1": "import sys\nsys.path.append(\"/home/mccann/bin/python/obsolete\")\n\nfrom minuit import *\nexecfile(\"/home/mccann/antithesis/utilities.py\")\nnobeam = getsb(\"cos\")\nebeam = getsb(\"bge\")\npbeam = getsb(\"bgp\")\nimport gbwkf\nimport gbwkftau\nrunstart = pickle.load(open(\"/home/mccann/antithesis/old_dotps/runstart.p\"))\nrunend = pickle.load(open(\"/home/mccann/antithesis/old_dotps/runend.p\"))\nimport time\nbsbha = pickle.load(open(\"/home/mccann/synthesis/run/bsbha.p\"))\n\nnbish2nb = 23.0481\nbhabha_interference = 1. # this is a multiplier: 0. to turn off\n\nclass FitRecord: pass\nggfits = pickle.load(open(\"/home/mccann/antithesis/fit_results/octoberfits_fixen_0_1.0.p\"))\n\n# I learned this from Matt and the beam energy program logs\nrunsummary[123828].energy = 4.72992\nrunsummary[123832].energy = 4.72990\n\ndef run_date(r):\n if r in runstart and r in runend:\n return (runstart[r] + runend[r])/2.\n elif r in runstart:\n return runstart[r]\n elif r in runend:\n return runend[r]\n else:\n raise Exception\n\n# The 48-hour limit is built into setup_runs\ndef setup_runs(res, low, high):\n beginning = run_date(low)\n\n tmpruns = []\n for r in initialrunlist:\n if r not in mybadruns and low <= r <= high and runsummary[r].res == res:\n if runsummary[r].kind == 's' or runsummary[r].kind == 'p':\n if run_date(r) < beginning + 48.*60.*60:\n tmpruns.append(r)\n return tmpruns\n\ndef mygbwkf(mass, fullgam, rmsbeam, yint, phi, w):\n \"yint = 0.018, 0.018, 0.018; phi=0\"\n if w > mass + 200.:\n return 0.076/(w-mass)\n return gbwkf.gbwkf(mass, fullgam, rmsbeam, yint, phi, w-mass)\n\ndef mygbwkftau(mass, fullgam, rmsbeam, yint, phi, w):\n \"yint = 0.20, 0.37, 0.27; phi = 0\"\n if w > mass + 200.:\n return 0.076/(w-mass)\n return gbwkftau.gbwkf(mass, fullgam, rmsbeam, yint, phi, w-mass)\n\ndef background(w):\n tmp = 0.\n tmp += 9.35538858434 * (1.-0.0792) * 9000.**2 / w**2\n tmp += 9.35538858434 * 0.0792 * log(w**2/9000.**2)\n return tmp\n\ndef u1func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, w):\n tmp = 0.\n tmp += area * 0.9793 * mygbwkf(9460.30, fullgam, rmsbeam, yint, phi, w)\n tmp += area * 0.578 * btautau * mygbwkftau(9460.30, fullgam, rmsbeam, tauyint, tauphi, w)\n tmp += back * (1.-twophofrac) * 9000.**2 / w**2\n tmp += back * twophofrac * log(w**2/9000.**2)\n return tmp\n\ndef u2func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, w):\n tmp = 0.\n tmp += area * 0.9618 * mygbwkf(10023.26, fullgam, rmsbeam, yint, phi, w)\n tmp += area * 0.578 * btautau * mygbwkftau(10023.26, fullgam, rmsbeam, tauyint, tauphi, w)\n tmp += back * (1.-twophofrac) * 9000.**2 / w**2\n tmp += back * twophofrac * log(w**2/9000.**2)\n tmp += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)\n return tmp\n\ndef u3func(area, rmsbeam, back, fullgam, yint, phi, btautau, tauyint, tauphi, twophofrac, u1area, u2area, w):\n tmp = 0.\n tmp += area * 0.9641 * mygbwkf(10355.2, fullgam, rmsbeam, yint, phi, w)\n tmp += area * 0.578 * btautau * mygbwkftau(10355.2, fullgam, rmsbeam, tauyint, tauphi, w)\n tmp += back * (1.-twophofrac) * 9000.**2 / w**2\n tmp += back * twophofrac * log(w**2/9000.**2)\n tmp += u1area * mygbwkf(9460.30, 0., 0., 0., 0., w)\n tmp += u2area * mygbwkf(10023.26, 0., 0., 0., 0., w)\n return tmp\n\ndef whichamiin(r):\n if runsummary[r].res == 1:\n for s in [\"jan16\", \"jan30\", \"feb06\", \"feb13\", \"feb20\", \"feb27\", \"mar06\", \"mar13\", \"apr03\", \"apr08\", \"apr09\", \"apr10\"]:\n if r in u1runs[s]:\n return 1, s\n\n 
elif runsummary[r].res == 2:\n for s in [\"may29\", \"jun11\", \"jun12\", \"jul10\", \"jul24\", \"aug07\"]:\n if r in u2runs[s]:\n return 2, s\n\n elif runsummary[r].res == 3:\n for s in [\"nov28\", \"dec05\", \"dec12\", \"dec19\", \"dec26\", \"jan02\", \"jan09\"]:\n if r in u3runs[s]:\n return 3, s\n\n return runsummary[r].res, None\n\ndef get_run(r):\n gamgam_lumi = None\n gamgam_lumi_err = None\n bhabha_lumi = None\n bhabha_lumi_err = None\n num_hadrons = None\n num_hadrons_err = None\n the_energy = None\n the_shift = None\n\n therun = getdb(r)\n for lumisource in (0, 3):\n g = 0.\n h = 0.\n e = 0.\n p = 0.\n c = 0.\n\n ngg = therun.gamgam\n if r in mycarefulscan: ngg = therun.gamgam_vstime.sum(0.,0.99)\n\n fitrec = pickle.load(open(\"/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p\"))\n\n if runsummary[r].res == 1:\n myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values\n elif runsummary[r].res == 2:\n myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values\n elif runsummary[r].res == 3:\n myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values\n\n whichres, whichweek = whichamiin(r)\n thisshift = 0.\n if whichweek != None:\n thisshift = eval(\"my\"+whichweek)\n\n the_energy = runsummary[r].energy*2000.\n the_shift = thisshift\n\n if runsummary[r].res == 1:\n myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = ggfits[1].values\n elif runsummary[r].res == 2:\n myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = ggfits[2].values\n elif runsummary[r].res == 3:\n myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = ggfits[3].values\n\n whichres, whichweek = whichamiin(r)\n\n thisrmsbeam = myrmsbeam\n if whichres == 1:\n if whichweek != None:\n if whichweek in [\"jan16\", \"jan30\", \"feb06\", \"feb13\", \"feb20\"]: thisrmsbeam = myrjan\n if whichweek in [\"feb27\", \"mar06\", \"mar13\"]: thisrmsbeam = myrfeb\n if whichweek in [\"apr03\", \"apr08\", \"apr09\"]: thisrmsbeam = myrapr1\n if whichweek in [\"apr10\"]: thisrmsbeam = myrapr2\n if whichres == 3:\n if whichweek != None:\n thisrmsbeam = eval(\"myr\"+whichweek)\n\n thisshift = 0.\n if whichweek != None:\n thisshift = 0. 
- eval(\"my\"+whichweek)\n\n if r in mycarefulscan:\n h += therun.hadroncool_vstime.sum(0.,0.99)\n e += therun.beamgase_vstime.sum(0.,0.99)\n p += therun.beamgasp_vstime.sum(0.,0.99)\n c += therun.cosmic_vstime.sum(0.,0.99)\n\n if lumisource == 0:\n g += therun.gamgam_vstime.sum(0.,0.99)\n elif lumisource == 1:\n g += therun.bhabha_cosp.sum(0., 0.6) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha\n\n if runsummary[r].kind != 'c':\n # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n elif lumisource == 2:\n g += therun.bhabha_cosp.sum(0.6, 0.8) * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha\n\n if runsummary[r].kind != 'c':\n # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n elif lumisource == 3:\n g += 1.*bsbha[r] * therun.bhabha_vstime.sum(0.,0.99) / therun.bhabha\n\n if runsummary[r].kind != 'c':\n # eecs = e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n else:\n h += therun.hadroncool\n e += therun.beamgase\n p += therun.beamgasp\n c += therun.cosmic\n\n if lumisource == 0:\n g += therun.gamgam\n\n elif lumisource == 1:\n g += 
therun.bhabha_cosp.sum(0., 0.6)\n\n if runsummary[r].kind != 'c':\n # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * inner range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.417*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.672/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.613*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.672/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.486*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.672/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n elif lumisource == 2:\n g += therun.bhabha_cosp.sum(0.6, 0.8)\n\n if runsummary[r].kind != 'c':\n # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * outer range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.588*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 0.298667/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.864*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 0.298667/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.686*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 0.298667/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n elif lumisource == 3:\n g += 1.*bsbha[r]\n\n if runsummary[r].kind != 'c':\n # e+e- cross-section = hadronic area / (1 - 3 Bmm) * Bmm * whole range\n if runsummary[r].res == 1:\n eecs = myarea * mygbwkf(9460.30+thisshift, myfullgam, thisrmsbeam, 0.597*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9253 * 0.0249 * 1.73933/2.66667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 2:\n eecs = myarea * mygbwkf(10023.26+thisshift, myfullgam, thisrmsbeam, 0.873*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9391 * 0.0203 * 1.73933/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n if runsummary[r].res == 3:\n eecs = myarea * mygbwkf(10355.2+thisshift, myfullgam, thisrmsbeam, 0.691*bhabha_interference, 0., 2000.*runsummary[r].energy) / 0.9283 * 0.0239 * 1.73933/2.6667\n g -= eecs * float(therun.gamgam) * runsummary[r].energy**2 / nbish2nb\n\n average_energy = runsummary[r].energy\n\n ebkgnd = 1. * (ebeam.hadroncool - 1.*nobeam.hadroncool*ebeam.cosmic/nobeam.cosmic) * e / ebeam.beamgase\n pbkgnd = 1. * (pbeam.hadroncool - 1.*nobeam.hadroncool*pbeam.cosmic/nobeam.cosmic) * p / pbeam.beamgasp\n cbkgnd = 1. * nobeam.hadroncool * c / nobeam.cosmic\n\n hadrons = h - ebkgnd/2. - pbkgnd/2. - cbkgnd\n hadrons_err = sqrt(h + c * (1.*nobeam.hadroncool/nobeam.cosmic)**2 + ebkgnd/2. 
+ pbkgnd/2.)\n\n num_hadrons = hadrons\n num_hadrons_err = hadrons_err\n\n if lumisource == 3:\n if whichres == 1:\n cs = hadrons / g / average_energy**2 * 199.5 # these differences are due to different efficiencies, as predicted by the MC\n bhabha_lumi = g * average_energy**2 / 199.5\n bhabha_lumi_err = sqrt(g) * average_energy**2 / 199.5\n elif whichres == 2:\n cs = hadrons / g / average_energy**2 * 197.4 # and verified by my lumi counts\n bhabha_lumi = g * average_energy**2 / 197.4\n bhabha_lumi_err = sqrt(g) * average_energy**2 / 197.4\n elif whichres == 3:\n cs = hadrons / g / average_energy**2 * 196.0 # (I totally believe this.)\n bhabha_lumi = g * average_energy**2 / 196.0\n bhabha_lumi_err = sqrt(g) * average_energy**2 / 196.0\n\n cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)\n\n else:\n cs = hadrons / g / average_energy**2 * nbish2nb\n cs_err = cs * sqrt((1.*hadrons_err / hadrons)**2 + 1./g)\n gamgam_lumi = g * average_energy**2 / nbish2nb\n gamgam_lumi_err = sqrt(g) * average_energy**2 / nbish2nb\n\n if lumisource == 1:\n cs /= 0.23684\n cs_err /= 0.23684\n\n if lumisource == 2:\n cs /= 0.118999\n cs_err /= 0.118999\n\n return float(the_energy), float(the_shift), float(gamgam_lumi), float(gamgam_lumi_err), float(bhabha_lumi), float(bhabha_lumi_err), float(num_hadrons), float(num_hadrons_err)\n\nclass ARun:\n def __init__(self, r):\n self.run = r\n self.en, self.shift, self.gg, self.gg_err, self.bb, self.bb_err, self.had, self.had_err = get_run(r)\n\n def getfval(self):\n fitrec = pickle.load(open(\"/home/mccann/antithesis/fit_results/novemberfits_lastever_3_1.0.p\"))\n\n whichres, whichweek = whichamiin(self.run)\n if whichres == 1:\n myarea, myrmsbeam, myback, myjan16, myjan30, myfeb06, myfeb13, myfeb20, myfeb27, mymar06, mymar13, myapr03, myapr08, myapr09, myapr10, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myrjan, myrfeb, myrapr1, myrapr2 = fitrec[1].values\n thisrmsbeam = myrmsbeam\n if whichweek != None:\n if whichweek in [\"jan16\", \"jan30\", \"feb06\", \"feb13\", \"feb20\"]: thisrmsbeam = myrjan\n if whichweek in [\"feb27\", \"mar06\", \"mar13\"]: thisrmsbeam = myrfeb\n if whichweek in [\"apr03\", \"apr08\", \"apr09\"]: thisrmsbeam = myrapr1\n if whichweek in [\"apr10\"]: thisrmsbeam = myrapr2\n else:\n if runsummary[self.run].kind != \"c\" and runsummary[self.run].kind != \"h\":\n raise Exception\n self.func = u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift)\n self.deriv = (self.func - u1func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, self.en+self.shift-0.1))/0.1\n\n elif whichres == 2:\n myarea, myrmsbeam, myback, mymay29, myjun11, myjun12, myjul10, myjul24, myaug07, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area = fitrec[2].values\n self.func = u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift)\n self.deriv = (self.func - u2func(myarea, myrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, self.en+self.shift-0.1))/0.1\n\n elif whichres == 3:\n myarea, myrmsbeam, myback, mynov28, mydec05, mydec12, mydec19, mydec26, myjan02, myjan09, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, myrnov28, myrdec05, myrdec12, myrdec19, myrdec26, myrjan02, myrjan09 = fitrec[3].values\n thisrmsbeam = 
myrmsbeam\n if whichres == 3:\n if whichweek != None:\n thisrmsbeam = eval(\"myr\"+whichweek)\n else:\n if runsummary[self.run].kind != \"c\" and runsummary[self.run].kind != \"h\":\n raise Exception\n self.func = u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift)\n self.deriv = (self.func - u3func(myarea, thisrmsbeam, myback, myfullgam, myyint, myphi, mybtautau, mytauyint, mytauphi, mytwophofrac, myu1area, myu2area, self.en+self.shift-0.1))/0.1\n else:\n if runsummary[self.run].kind != \"c\" and runsummary[self.run].kind != \"h\":\n raise Exception\n\nu1runs = {}\nu2runs = {}\nu3runs = {}\n\nu1runs[\"cont\"] = []\nu2runs[\"cont\"] = []\nu3runs[\"cont\"] = []\nu1runs[\"high\"] = []\nu2runs[\"high\"] = []\nu3runs[\"high\"] = []\n\nu1runs[\"jan16\"] = setup_runs(1, 123164, 123178)\nu1runs[\"jan30\"] = setup_runs(1, 123596, 123718)\nu1runs[\"feb06\"] = setup_runs(1, 123781, 123893)\nu1runs[\"feb13\"] = setup_runs(1, 124080, 124092)\nu1runs[\"feb20\"] = setup_runs(1, 124102, 124214)\nu1runs[\"feb27\"] = setup_runs(1, 124279, 124394)\nu1runs[\"mar06\"] = setup_runs(1, 124436, 124519)\nu1runs[\"mar13\"] = setup_runs(1, 124625, 124736)\nu1runs[\"apr03\"] = setup_runs(1, 125119, 125127)\nu1runs[\"apr08\"] = setup_runs(1, 125254, 125262)\nu1runs[\"apr09\"] = setup_runs(1, 125285, 125295)\nu1runs[\"apr10\"] = setup_runs(1, 125303, 125416)\n\nu2runs[\"may29\"] = setup_runs(2, 126449, 126568)\nu2runs[\"jun11\"] = setup_runs(2, 126776, 126783)\nu2runs[\"jun12\"] = setup_runs(2, 126814, 126915)\nu2runs[\"jul10\"] = setup_runs(2, 127588, 127615)\nu2runs[\"jul24\"] = setup_runs(2, 127924, 127933)\nu2runs[\"aug07\"] = setup_runs(2, 128303, 128316)\n\nu3runs[\"nov28\"] = setup_runs(3, 121884, 122007)\nu3runs[\"dec05\"] = setup_runs(3, 122069, 122178)\nu3runs[\"dec12\"] = setup_runs(3, 122245, 122326)\nu3runs[\"dec19\"] = setup_runs(3, 122409, 122527)\nu3runs[\"dec26\"] = setup_runs(3, 122535, 122757)\nu3runs[\"jan02\"] = setup_runs(3, 122766, 122881)\nu3runs[\"jan09\"] = setup_runs(3, 122993, 123101)\n\nfor r in initialrunlist:\n if r not in mybadruns:\n if runsummary[r].res == 1 and runsummary[r].kind == 'c':\n u1runs[\"cont\"].append(r)\n if runsummary[r].res == 2 and runsummary[r].kind == 'c':\n u2runs[\"cont\"].append(r)\n if runsummary[r].res == 3 and runsummary[r].kind == 'c':\n u3runs[\"cont\"].append(r)\n\nfor r in initialrunlist:\n if r not in mybadruns:\n if runsummary[r].res == 1 and runsummary[r].kind == 'h':\n u1runs[\"high\"].append(r)\n if runsummary[r].res == 2 and runsummary[r].kind == 'h':\n u2runs[\"high\"].append(r)\n if runsummary[r].res == 3 and runsummary[r].kind == 'h':\n u3runs[\"high\"].append(r)\n\ndata = {}\nfor un in (u1runs, u2runs, u3runs):\n for s in un:\n for r in un[s]:\n print \"arun\", r\n data[r] = ARun(r)\nfor r in data:\n if runsummary[r].res == 1:\n print \"u1\", r\n data[r].getfval()\nfor r in data:\n if runsummary[r].res == 2:\n print \"u2\", r\n data[r].getfval()\nfor r in data:\n if runsummary[r].res == 3:\n print \"u3\", r\n data[r].getfval()\n\n# x = []\n# y = []\n# dy = []\n# for r in data:\n# if runsummary[r].res == 1:\n# x.append(data[r].en + data[r].shift)\n# y.append(data[r].had/data[r].bb - data[r].func)\n# dy.append(data[r].had/data[r].bb*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2))\n# p = biggles.FramedPlot()\n# p.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\n# p.add(biggles.SymmetricErrorBarsY(x, y, 
dy))\n# p.show()\n\nhistneg = hist.h1(20, -4, 4)\nhistpos = hist.h1(20, -4, 4)\nhistpeak = hist.h1(20, -4, 4)\nhistcont = hist.h1(20, -4, 4)\nhisttail = hist.h1(20, -4, 4)\nprofile = hist.prof(20, -4, 4)\nx = []\ny = []\nfor r in data:\n crosssec = data[r].had/data[r].bb\n crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)\n pull = (crosssec - data[r].func)/crosssec_err\n x.append(data[r].deriv)\n y.append(pull**2)\n profile.fill(x[-1], y[-1])\n if x[-1] < -1:\n histneg.fill(pull)\n if x[-1] > 1:\n histpos.fill(pull)\n if -0.1 < x[-1] < 0.1 and runsummary[r].kind == \"p\":\n histpeak.fill(pull)\n if -0.1 < x[-1] < 0.1 and runsummary[r].kind == \"c\":\n histcont.fill(pull)\n if -0.1 < x[-1] < 0.1 and runsummary[r].kind == \"h\":\n histtail.fill(pull)\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.y1.range = 0, 10\np.y1.label = r\"Contribution to $\\chi^2$\"\np.x1.label = r\"Function derivative (nb/MeV)\"\np.show()\np.write_eps(\"residualntuple_1.eps\")\n\nprofile.update()\nx = profile.frame + (profile.high - profile.frame[-1])/2.\ny = profile.vals\ndy = profile.errs\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.add(biggles.SymmetricErrorBarsY(x, y, dy))\np.y1.range = 0, 5\np.y1.label = r\"Contribution to $\\chi^2$\"\np.x1.label = r\"Function derivative (nb/MeV)\"\np.show()\np.write_eps(\"residualntuple_2.eps\")\n\nhistneg.rootn()\nhistpos.rootn()\nhistpeak.rootn()\nhistcont.rootn()\nhisttail.rootn()\n\np = (histneg / histneg.sum()).plot()\np.add((histpos / histpos.sum()).steps(linecolor=\"red\"))\np.add((histpeak / histpeak.sum()).steps(linecolor=\"blue\"))\np.add((histcont / histcont.sum()).steps(linecolor=\"green\"))\np.add((histtail / histtail.sum()).steps(linecolor=\"purple\"))\np.add((histneg / histneg.sum()).errorbars())\np.add((histpos / histpos.sum()).errorbars(linecolor=\"red\"))\np.add((histpeak / histpeak.sum()).errorbars(linecolor=\"blue\"))\np.add((histcont / histcont.sum()).errorbars(linecolor=\"green\"))\np.add((histtail / histtail.sum()).errorbars(linecolor=\"purple\"))\np.x1.range = 5, 30\np.y1.range = 0, 0.4\np.x1.label = r\"Pull distributions of different types of datasets\"\np.show()\np.write_eps(\"residualntuple_3.eps\")\n\nx = []\ny = []\nprofile = hist.prof(20, 5, 30)\nfor r in data:\n crosssec = data[r].had/data[r].bb\n crosssec_err = crosssec*sqrt((data[r].had_err/data[r].had)**2 + (data[r].bb_err/data[r].bb)**2)\n x.append(crosssec)\n y.append((crosssec - data[r].func)**2/crosssec_err**2)\n profile.fill(x[-1], y[-1])\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.x1.range = 5, 30\np.y1.label = r\"Contribution to $\\chi^2$\"\np.x1.label = r\"Absolute cross-section (nb)\"\np.show()\np.write_eps(\"residualntuple_4.eps\")\n\nprofile.update()\nx = profile.frame + (profile.high - profile.frame[-1])/2.\ny = profile.vals\ndy = profile.errs\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.add(biggles.SymmetricErrorBarsY(x, y, dy))\np.x1.range = 5, 30\np.y1.range = 0, 5\np.y1.label = r\"Contribution to $\\chi^2$\"\np.x1.label = r\"Absolute cross-section (nb)\"\np.show()\np.write_eps(\"residualntuple_5.eps\")\n\nx = []\ny = []\ndy = []\nfor r in data:\n ratio = data[r].bb/data[r].gg\n ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)\n x.append(data[r].had/data[r].bb)\n 
y.append(ratio)\n dy.append(ratio_err)\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.add(biggles.SymmetricErrorBarsY(x, y, dy))\np.add(biggles.LineY(1.))\np.x1.range = 5, 30\np.y1.label = r\"Bhabha luminosity / gamgam luminosity\"\np.x1.label = r\"Absolute cross-section (nb)\"\np.show()\np.write_eps(\"residualntuple_6.eps\")\n\nx = []\ny = []\nprofile = hist.prof(20, 5, 30)\nfor r in data:\n ratio = data[r].bb/data[r].gg\n ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)\n x.append(data[r].had/data[r].bb)\n y.append((ratio-1)/ratio_err)\n profile.fill(x[-1], y[-1])\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.add(biggles.LineY(0.))\np.x1.range = 5, 30\np.y1.label = r\"BB/GG sigmas\"\np.x1.label = r\"Absolute cross-section (nb)\"\np.show()\np.write_eps(\"residualntuple_7.eps\")\n\nprofile.update()\nx = profile.frame + (profile.high - profile.frame[-1])/2.\ny = profile.vals\ndy = profile.errs\np = biggles.FramedPlot()\np.add(biggles.Points(x, y, symboltype=\"filled circle\", symbolsize=0.5))\np.add(biggles.SymmetricErrorBarsY(x, y, dy))\np.add(biggles.LineY(0.))\np.x1.range = 5, 30\np.y1.range = -3, 3\np.y1.label = r\"BB/GG sigmas\"\np.x1.label = r\"Absolute cross-section (nb)\"\np.show()\np.write_eps(\"residualntuple_8.eps\")\n\noffres = []\non1 = []\non2 = []\non3 = []\noff1 = []\noff2 = []\noff3 = []\nfor r in data:\n ratio = data[r].bb/data[r].gg\n ratio_err = ratio*sqrt((data[r].bb_err/data[r].bb)**2 + (data[r].gg_err/data[r].gg)**2)\n if runsummary[r].kind == \"c\":\n offres.append((ratio, ratio_err))\n if runsummary[r].res == 1:\n off1.append((ratio, ratio_err))\n elif runsummary[r].res == 2:\n off2.append((ratio, ratio_err))\n elif runsummary[r].res == 3:\n off3.append((ratio, ratio_err))\n elif runsummary[r].kind == \"s\" and runsummary[r].res == 1:\n on1.append((ratio, ratio_err))\n elif runsummary[r].kind == \"s\" and runsummary[r].res == 2:\n on2.append((ratio, ratio_err))\n elif runsummary[r].kind == \"s\" and runsummary[r].res == 3:\n on3.append((ratio, ratio_err))\n\nprint jt.wmean(offres)\nprint jt.wmean(on1)\nprint jt.wmean(on2)\nprint jt.wmean(on3)\nprint jt.wmean(off1)\nprint jt.wmean(off2)\nprint jt.wmean(off3)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 10:18:11 2017
@author: Duong
"""
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from pandas.core.frame import DataFrame
# Connect to the database
database = psycopg2.connect(database="TeamYellow_election", user="student", password="password", host="agdbs-edu01.imp.fu-berlin.de", port="5432")
# SQL query
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')
result = cursor.fetchall()
# Build the DataFrame
data=DataFrame(result, columns=['tweet_date', 'count'])
# Convert the tweet_date column to a datetime type
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday']= data['tweet_date_with_time'].dt.dayofweek
# Group the individual counts by calendar week
data2=data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
# Build a DataFrame from the values printed for data2 above (counts per weekday for each calendar week)
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],
'KW02': [3, 1, 7, 1, 0, 1, 0],
'KW03': [0, 2, 6, 1, 11, 3, 2],
'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4],
'KW06': [2, 6, 1, 2, 1, 5, 0],
'KW07': [1, 3, 5, 2, 5, 2, 1],
'KW08': [2, 7, 1, 3, 5, 1, 3],
'KW09': [3, 10, 9, 3, 3, 6, 2],
'KW10': [0, 1, 2, 0, 2, 4, 0],
'KW11': [2, 3, 8, 0, 3, 10, 5],
'KW12': [0, 11, 4, 1, 0, 0, 0],
'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2],
'KW15': [2, 4, 1, 2, 0, 4, 2],
'KW16': [0, 11, 4, 2, 3, 4, 1],
'KW17': [2, 6, 0, 1, 1, 0, 0],
'KW18': [4, 8, 0, 1, 1, 0, 0],
'KW19': [2, 8, 3, 0, 0, 0, 0],
'KW20': [1, 1, 1, 0, 5, 0, 1],
'KW21': [0, 0, 2, 1, 1, 0, 0],
'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0],
'KW24': [0, 0, 3, 0, 1, 4, 1],
'KW25': [0, 0, 1, 10, 0, 0, 0],
'KW26': [1, 1, 0, 0, 2, 3, 0],
'KW27': [1, 0, 0, 2, 0, 0, 0],
'KW28': [1, 2, 2, 1, 0, 1, 0],
'KW29': [0, 1, 2, 7, 2, 1, 0],
'KW30': [1, 3, 3, 4, 0, 1, 1],
'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0],
'KW33': [0, 0, 4, 0, 1, 1, 0],
'KW34': [1, 0, 1, 2, 1, 2, 1],
'KW35': [2, 0, 1, 3, 1, 0, 0],
'KW36': [1, 1, 2, 2, 2, 0, 0],
'KW37': [0, 1, 1, 2, 4, 0, 0],
'KW38': [0, 3, 0, 2, 1, 1, 0],
'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4= data3.transpose()
data4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
data4['Kalendarwoche']=data4.index
############################# Building a stacked bar chart ############################################
# Basic figure and axes for the bar chart
f, ax1 = plt.subplots(1, figsize=(25,20))
# Bar width
bar_width = 0.75
# Bar positions, starting from the left
bar_l = [i+1 for i in range(len(data4['Montag']))]
# Positions of the x-axis tick labels
tick_pos = [i+(bar_width/2) for i in bar_l]
# Build the stacked bars, one weekday at a time
ax1.bar(bar_l,
data4['Montag'],
width=bar_width,
label='Montag',
alpha=0.5,
color='#1858ef')
ax1.bar(bar_l,
data4['Dienstag'],
width=bar_width,
bottom=data4['Montag'],
label='Dienstag',
alpha=0.5,
color='#6618ef')
ax1.bar(bar_l,
data4['Mittwoch'],
width=bar_width,
bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],
label='Mittwoch',
alpha=0.5,
color='#ef1829')
ax1.bar(bar_l,
data4['Donnerstag'],
width=bar_width,
bottom=[i+j+k for i,j,k in zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag',
alpha=0.5,
color='#ef7c18')
ax1.bar(bar_l,
data4['Freitag'],
width=bar_width,
bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'])],
label='Freitag',
alpha=0.5,
color='#efc718')
ax1.bar(bar_l,
data4['Samstag'],
width=bar_width,
bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],
label='Samstag',
alpha=0.5,
color='#63ef18')
ax1.bar(bar_l,
data4['Sonntag'],
width=bar_width,
bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],
data4['Samstag'])],
label='Sonntag',
alpha=0.5,
color='#18efa3')
# Put the calendar-week labels on the x-axis
plt.xticks(tick_pos, data4['Kalendarwoche'])
# Axis labels and legend
ax1.set_ylabel("Häufigkeit")
ax1.set_xlabel("Kalendarwoche")
plt.legend(loc='upper left')
# Leave some space around the bars on the x-axis
plt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])
############### Bar chart by calendar week #########################################
kw = lambda x: x.isocalendar()[1]
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})
grouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',
'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',
'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')
# Bar chart of all hashtags per calendar week
grouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), fontsize=10)
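# A minimal sketch (not in the original script) of the same weekly aggregation using the
# ISO-calendar accessor available in newer pandas versions instead of mapping a lambda;
# it assumes the data frame built above.
weekly_counts = data.groupby(data['tweet_date_with_time'].dt.isocalendar().week)['count'].sum()
print(weekly_counts.head())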
############## Bar chart of all hashtags per day #####################################
data5 = data[['tweet_date', 'count']].copy()
# Bar chart of all hashtags per day
data5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)
|
normal
|
{
"blob_id": "076b852010ddcea69a294f9f2a653bb2fa2f2676",
"index": 3531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\n<mask token>\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\n<mask token>\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\n<mask token>\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\n<mask token>\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-3": "<mask token>\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, 
color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport psycopg2\nfrom pandas.core.frame import DataFrame\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 
'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 1 10:18:11 2017\r\n\r\n@author: Duong\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport psycopg2\r\nfrom pandas.core.frame import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n# DBS verbinden\r\ndatabase = psycopg2.connect(database=\"TeamYellow_election\", user=\"student\", password=\"password\", host=\"agdbs-edu01.imp.fu-berlin.de\", port=\"5432\")\r\n\r\n# SQl-Abfrage\r\ncursor = database.cursor()\r\ncursor.execute(\r\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')\r\nresult = cursor.fetchall()\r\n\r\n# Dataframe erstellen\r\ndata=DataFrame(result, columns=['tweet_date', 'count'])\r\n\r\n\r\n#Umwandlung des Datentyp der Spalte tweet_date\r\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\r\ndata['week_number'] = data['tweet_date_with_time'].dt.week\r\ndata['weekday']= data['tweet_date_with_time'].dt.dayofweek\r\n\r\n\r\n# Gruppierung der Kalendarwochen mit einzelnen Counts\r\ndata2=data.copy()\r\ndel data2['tweet_date']\r\ndel data2['tweet_date_with_time']\r\ndel data2['weekday']\r\n\r\nprint(data2.groupby('week_number')['count'].apply(list))\r\n\r\n# Aufbau Dataframe auf Erkenntnisse aus data2-Prints\r\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],\r\n 'KW02': [3, 1, 7, 1, 0, 1, 0],\r\n 'KW03': [0, 2, 6, 1, 11, 3, 2],\r\n 'KW04': [13, 5, 1, 3, 6, 2, 1],\r\n 'KW05': [0, 1, 2, 0, 4, 3, 4],\r\n 'KW06': [2, 6, 1, 2, 1, 5, 0],\r\n 'KW07': [1, 3, 5, 2, 5, 2, 1],\r\n 'KW08': [2, 7, 1, 3, 5, 1, 3],\r\n 'KW09': [3, 10, 9, 3, 3, 6, 2],\r\n 'KW10': [0, 1, 2, 0, 2, 4, 0],\r\n 'KW11': [2, 3, 8, 0, 3, 10, 5],\r\n 'KW12': [0, 11, 4, 1, 0, 0, 0],\r\n 'KW13': [1, 0, 3, 2, 1, 6, 5],\r\n 'KW14': [4, 5, 0, 0, 1, 1, 2],\r\n 'KW15': [2, 4, 1, 2, 0, 4, 2],\r\n 'KW16': [0, 11, 4, 2, 3, 4, 1],\r\n 'KW17': [2, 6, 0, 1, 1, 0, 0],\r\n 'KW18': [4, 8, 0, 1, 1, 0, 0],\r\n 'KW19': [2, 8, 3, 0, 0, 0, 0],\r\n 'KW20': [1, 1, 1, 0, 5, 0, 1],\r\n 'KW21': [0, 0, 2, 1, 1, 0, 0],\r\n 'KW22': [0, 0, 1, 4, 2, 3, 0],\r\n 'KW23': [0, 0, 1, 0, 1, 2, 0],\r\n 'KW24': [0, 0, 3, 0, 1, 4, 1],\r\n 'KW25': [0, 0, 1, 10, 0, 0, 0],\r\n 'KW26': [1, 1, 0, 0, 2, 3, 0],\r\n 'KW27': [1, 0, 0, 2, 0, 0, 0],\r\n 'KW28': [1, 2, 2, 1, 0, 1, 0],\r\n 'KW29': [0, 1, 2, 7, 2, 1, 0],\r\n 'KW30': [1, 3, 3, 4, 0, 1, 1],\r\n 'KW31': [3, 2, 2, 0, 1, 4, 1],\r\n 'KW32': [1, 6, 0, 0, 0, 1, 0],\r\n 'KW33': [0, 0, 4, 0, 1, 1, 0],\r\n 'KW34': [1, 0, 1, 2, 1, 2, 1],\r\n 'KW35': [2, 0, 1, 3, 1, 0, 0],\r\n 'KW36': [1, 1, 2, 2, 2, 0, 0],\r\n 'KW37': [0, 1, 1, 2, 4, 0, 0],\r\n 'KW38': [0, 3, 0, 2, 1, 1, 0],\r\n 'KW39': [3, 18, 0, 0, 0, 0, 0]})\r\n\r\n\r\ndata4= data3.transpose()\r\ndata4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']\r\ndata4['Kalendarwoche']=data4.index\r\n\r\n############################# Bau eines Stacked Bar Chart ############################################\r\n\r\n#Grundgerüst des Balkendiagramms\r\nf, ax1 = plt.subplots(1, figsize=(25,20))\r\n\r\n# Balkengröße\r\nbar_width = 0.75\r\n\r\n# Balken fangen von links an\r\nbar_l = [i+1 for i in range(len(data4['Montag']))]\r\n\r\n# Position der X-Achsen Werte\r\ntick_pos = [i+(bar_width/2) for i in bar_l]\r\n\r\n# Beginn der Erstellung der Balken nach Wochentagen\r\nax1.bar(bar_l,\r\n data4['Montag'],\r\n width=bar_width,\r\n label='Montag',\r\n alpha=0.5,\r\n 
color='#1858ef')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Dienstag'],\r\n width=bar_width,\r\n bottom=data4['Montag'],\r\n label='Dienstag',\r\n alpha=0.5,\r\n color='#6618ef')\r\n\r\nax1.bar(bar_l,\r\n data4['Mittwoch'],\r\n width=bar_width,\r\n bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],\r\n label='Mittwoch',\r\n alpha=0.5,\r\n color='#ef1829')\r\n\r\nax1.bar(bar_l,\r\n data4['Donnerstag'],\r\n width=bar_width,\r\n bottom=[i+j+k for i,j,k in zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],\r\n label='Donnerstag',\r\n alpha=0.5,\r\n color='#ef7c18')\r\n\r\nax1.bar(bar_l,\r\n data4['Freitag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'])],\r\n label='Freitag',\r\n alpha=0.5,\r\n color='#efc718')\r\n\r\nax1.bar(bar_l,\r\n data4['Samstag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],\r\n label='Samstag',\r\n alpha=0.5,\r\n color='#63ef18')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Sonntag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],\r\n data4['Samstag'])],\r\n label='Sonntag',\r\n alpha=0.5,\r\n color='#18efa3')\r\n\r\n# X-Achse mit Werte versehen\r\nplt.xticks(tick_pos, data4['Kalendarwoche'])\r\n\r\n#Legende\r\nax1.set_ylabel(\"Häufigkeit\")\r\nax1.set_xlabel(\"Kalendarwoche\")\r\nplt.legend(loc='upper left')\r\n\r\n# Zwischen den Diagrammen Platz lassen\r\nplt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])\r\n\r\n############### Balkendiagramm nach Kalendarwoche#########################################\r\n\r\nkw = lambda x: x.isocalendar()[1]\r\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})\r\n\r\ngrouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',\r\n 'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',\r\n 'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')\r\n\r\n\r\n\r\n#Balkendiagramm für alle Hashtag in Kalendarwoche\r\ngrouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), fontsize=10)\r\n\r\n############## Balkendiagramm für alle Hashtag pro Tag #####################################\r\ndata5=data[['tweet_date','count']].copy()\r\n#Balkendiagramm für alle Hashtag in Tagen\r\ndata5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = 'Administrator'
import unittest
class CouchTests2(unittest.TestCase):
def test_foo(self):
self.assertEqual(1, 1)
def test_bar(self):
self.assertEqual(1, 1)
|
normal
|
{
"blob_id": "cd4f22b8e2188e8019e7324e80d64a7b95f8f956",
"index": 1961,
"step-1": "<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n <mask token>\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-2": "<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-3": "__author__ = 'Administrator'\n<mask token>\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-4": "__author__ = 'Administrator'\nimport unittest\n\n\nclass CouchTests2(unittest.TestCase):\n\n def test_foo(self):\n self.assertEqual(1, 1)\n\n def test_bar(self):\n self.assertEqual(1, 1)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# KeyLogger.py
# show a character key when pressed without using Enter key
# hide the Tkinter GUI window, only console shows
import Tkinter as tk
def key(event):
if event.keysym == 'Escape':
root.destroy()
print event.char, event.keysym
root = tk.Tk()
print "Press a key (Escape key to exit):"
root.bind_all('<Key>', key)
# don't show the tk window
root.withdraw()
root.mainloop()
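# A hypothetical Python 3 equivalent of the snippet above (the original targets Python 2,
# hence the capitalized Tkinter module name and the print statements); behaviour is the same.
import tkinter as tk3

def key_py3(event):
    if event.keysym == 'Escape':
        root_py3.destroy()
    print(event.char, event.keysym)

root_py3 = tk3.Tk()
print("Press a key (Escape key to exit):")
root_py3.bind_all('<Key>', key_py3)
# don't show the tk window
root_py3.withdraw()
root_py3.mainloop()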
|
normal
|
{
"blob_id": "368151a134f987ed78c8048521137672530b5cce",
"index": 1022,
"step-1": "# KeyLogger.py\n# show a character key when pressed without using Enter key\n# hide the Tkinter GUI window, only console shows\n\nimport Tkinter as tk\n\ndef key(event):\n if event.keysym == 'Escape':\n root.destroy()\n print event.char, event.keysym\n\nroot = tk.Tk()\nprint \"Press a key (Escape key to exit):\"\nroot.bind_all('<Key>', key)\n# don't show the tk window\nroot.withdraw()\nroot.mainloop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import pytest
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver import Firefox
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def create_gecko_driver():
home_dir = os.getenv('HOME')
return Firefox(executable_path=os.path.join(home_dir, 'bin', 'geckodriver'))
@pytest.fixture
def driver(request):
firefox = create_gecko_driver()
request.addfinalizer(firefox.quit)
return firefox
def test_successful_login(driver: WebDriver): # type hint for IDE
driver.get("http://localhost:8080/litecart/admin/login.php")
driver.find_element_by_name("username").send_keys('admin', Keys.TAB)
driver.find_element_by_name("password").send_keys('admin', Keys.ENTER)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'sidebar')))
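# A hypothetical variant of the fixture above (not in the original file) that uses pytest's
# yield-style teardown instead of request.addfinalizer; the effect is equivalent.
@pytest.fixture
def driver_with_yield():
    firefox = create_gecko_driver()
    yield firefox
    firefox.quit()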
|
normal
|
{
"blob_id": "b6e28f29edd0c4659ab992b45861c4c31a57e7fd",
"index": 8920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_gecko_driver():\n home_dir = os.getenv('HOME')\n return Firefox(executable_path=os.path.join(home_dir, 'bin', 'geckodriver')\n )\n\n\[email protected]\ndef driver(request):\n firefox = create_gecko_driver()\n request.addfinalizer(firefox.quit)\n return firefox\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_gecko_driver():\n home_dir = os.getenv('HOME')\n return Firefox(executable_path=os.path.join(home_dir, 'bin', 'geckodriver')\n )\n\n\[email protected]\ndef driver(request):\n firefox = create_gecko_driver()\n request.addfinalizer(firefox.quit)\n return firefox\n\n\ndef test_successful_login(driver: WebDriver):\n driver.get('http://localhost:8080/litecart/admin/login.php')\n driver.find_element_by_name('username').send_keys('admin', Keys.TAB)\n driver.find_element_by_name('password').send_keys('admin', Keys.ENTER)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,\n 'sidebar')))\n",
"step-4": "import os\nimport pytest\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\ndef create_gecko_driver():\n home_dir = os.getenv('HOME')\n return Firefox(executable_path=os.path.join(home_dir, 'bin', 'geckodriver')\n )\n\n\[email protected]\ndef driver(request):\n firefox = create_gecko_driver()\n request.addfinalizer(firefox.quit)\n return firefox\n\n\ndef test_successful_login(driver: WebDriver):\n driver.get('http://localhost:8080/litecart/admin/login.php')\n driver.find_element_by_name('username').send_keys('admin', Keys.TAB)\n driver.find_element_by_name('password').send_keys('admin', Keys.ENTER)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,\n 'sidebar')))\n",
"step-5": "import os\nimport pytest\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\ndef create_gecko_driver():\n home_dir = os.getenv('HOME')\n return Firefox(executable_path=os.path.join(home_dir, 'bin', 'geckodriver'))\n\n\[email protected]\ndef driver(request):\n firefox = create_gecko_driver()\n request.addfinalizer(firefox.quit)\n return firefox\n\n\ndef test_successful_login(driver: WebDriver): # type hint for IDE\n driver.get(\"http://localhost:8080/litecart/admin/login.php\")\n driver.find_element_by_name(\"username\").send_keys('admin', Keys.TAB)\n driver.find_element_by_name(\"password\").send_keys('admin', Keys.ENTER)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'sidebar')))\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# Read an integer and print the sum of its decimal digits
a = int(input())
s = 0
t = 0
while a != 0:
    t = a % 10   # take the last digit
    s = s + t    # add it to the running sum
    a = a // 10  # drop the last digit
print(s)
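# A hypothetical reusable version of the same digit-sum loop (not part of the original
# snippet); divmod returns the quotient and the remainder in a single call.
def digit_sum(n: int) -> int:
    total = 0
    n = abs(n)
    while n:
        n, digit = divmod(n, 10)
        total += digit
    return total

# Example: digit_sum(1234) == 10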
|
normal
|
{
"blob_id": "6050e83e73faaf40cbd5455efd3ad01e4e131188",
"index": 2587,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile a != 0:\n t = a % 10\n s = s + t\n a = a // 10\nprint(s)\n",
"step-3": "a = int(input())\ns = 0\nt = 0\nwhile a != 0:\n t = a % 10\n s = s + t\n a = a // 10\nprint(s)\n",
"step-4": "a=int(input())\ns=0\nt=0\nwhile(a!=0):\n t=a%10\n s=s+t\n a=a//10\nprint(s)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Class for managing tables in Storage and BigQuery
"""
# pylint: disable=invalid-name, too-many-locals, too-many-branches, too-many-arguments,line-too-long,R0801,consider-using-f-string
from pathlib import Path
import json
from copy import deepcopy
import textwrap
import inspect
from io import StringIO
from loguru import logger
from google.cloud import bigquery
import ruamel.yaml as ryaml
import requests
import pandas as pd
import google.api_core.exceptions
from basedosdados.upload.base import Base
from basedosdados.upload.storage import Storage
from basedosdados.upload.dataset import Dataset
from basedosdados.upload.datatypes import Datatype
from basedosdados.upload.metadata import Metadata
from basedosdados.exceptions import BaseDosDadosException
class Table(Base):
"""
Manage tables in Google Cloud Storage and BigQuery.
"""
def __init__(self, dataset_id, table_id, **kwargs):
super().__init__(**kwargs)
self.table_id = table_id.replace("-", "_")
self.dataset_id = dataset_id.replace("-", "_")
self.dataset_folder = Path(self.metadata_path / self.dataset_id)
self.table_folder = self.dataset_folder / table_id
self.table_full_name = dict(
prod=f"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}",
staging=f"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}",
)
self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))
self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)
@property
def table_config(self):
"""
Load table_config.yaml
"""
return self._load_yaml(self.table_folder / "table_config.yaml")
def _get_table_obj(self, mode):
"""
Get table object from BigQuery
"""
return self.client[f"bigquery_{mode}"].get_table(self.table_full_name[mode])
def _is_partitioned(self):
"""
Check if table is partitioned
"""
        # check whether the table is partitioned; the extra handling is needed because the partitions type changed in pydantic
partitions = self.table_config["partitions"]
if partitions is None or len(partitions) == 0:
return False
if isinstance(partitions, list):
            # check whether any item in the list is None:
            # False in that case, e.g. [None, 'partition']
            # True otherwise, e.g. ['partition1', 'partition2']
return all(item is not None for item in partitions)
raise ValueError("Partitions must be a list or None")
def _load_schema(self, mode="staging"):
"""Load schema from table_config.yaml
Args:
mode (bool): Which dataset to create [prod|staging].
"""
self._check_mode(mode)
json_path = self.table_folder / f"schema-{mode}.json"
columns = self.table_config["columns"]
if mode == "staging":
new_columns = []
for c in columns:
                # if is_in_staging is None, default to True
is_in_staging = (
True if c.get("is_in_staging") is None else c["is_in_staging"]
)
# append columns declared in table_config.yaml to schema only if is_in_staging: True
if is_in_staging and not c.get("is_partition"):
c["type"] = "STRING"
new_columns.append(c)
del columns
columns = new_columns
elif mode == "prod":
schema = self._get_table_obj(mode).schema
# get field names for fields at schema and at table_config.yaml
column_names = [c["name"] for c in columns]
schema_names = [s.name for s in schema]
# check if there are mismatched fields
not_in_columns = [name for name in schema_names if name not in column_names]
not_in_schema = [name for name in column_names if name not in schema_names]
# raise if field is not in table_config
if not_in_columns:
raise BaseDosDadosException(
"Column {error_columns} was not found in table_config.yaml. Are you sure that "
"all your column names between table_config.yaml, publish.sql and "
"{project_id}.{dataset_id}.{table_id} are the same?".format(
error_columns=not_in_columns,
project_id=self.table_config["project_id_prod"],
dataset_id=self.table_config["dataset_id"],
table_id=self.table_config["table_id"],
)
)
# raise if field is not in schema
if not_in_schema:
raise BaseDosDadosException(
"Column {error_columns} was not found in publish.sql. Are you sure that "
"all your column names between table_config.yaml, publish.sql and "
"{project_id}.{dataset_id}.{table_id} are the same?".format(
error_columns=not_in_schema,
project_id=self.table_config["project_id_prod"],
dataset_id=self.table_config["dataset_id"],
table_id=self.table_config["table_id"],
)
)
# if field is in schema, get field_type and field_mode
for c in columns:
for s in schema:
if c["name"] == s.name:
c["type"] = s.field_type
c["mode"] = s.mode
break
        # force utf-8 and write schema-{mode}.json
json.dump(columns, (json_path).open("w", encoding="utf-8"))
# load new created schema
return self.client[f"bigquery_{mode}"].schema_from_json(str(json_path))
def _make_publish_sql(self):
"""Create publish.sql with columns and bigquery_type"""
### publish.sql header and instructions
publish_txt = """
/*
Query para publicar a tabela.
Esse é o lugar para:
- modificar nomes, ordem e tipos de colunas
- dar join com outras tabelas
- criar colunas extras (e.g. logs, proporções, etc.)
Qualquer coluna definida aqui deve também existir em `table_config.yaml`.
# Além disso, sinta-se à vontade para alterar alguns nomes obscuros
# para algo um pouco mais explícito.
TIPOS:
- Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.
- Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`
- Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types
*/
"""
# remove triple quotes extra space
publish_txt = inspect.cleandoc(publish_txt)
publish_txt = textwrap.dedent(publish_txt)
# add create table statement
project_id_prod = self.client["bigquery_prod"].project
publish_txt += f"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n"
# sort columns by is_partition, partitions_columns come first
if self._is_partitioned():
columns = sorted(
self.table_config["columns"],
key=lambda k: (k["is_partition"] is not None, k["is_partition"]),
reverse=True,
)
else:
columns = self.table_config["columns"]
# add columns in publish.sql
for col in columns:
name = col["name"]
bigquery_type = (
"STRING"
if col["bigquery_type"] is None
else col["bigquery_type"].upper()
)
publish_txt += f"SAFE_CAST({name} AS {bigquery_type}) {name},\n"
## remove last comma
publish_txt = publish_txt[:-2] + "\n"
# add from statement
project_id_staging = self.client["bigquery_staging"].project
publish_txt += (
f"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t"
)
# save publish.sql in table_folder
(self.table_folder / "publish.sql").open("w", encoding="utf-8").write(
publish_txt
)
def _make_template(self, columns, partition_columns, if_table_config_exists, force_columns):
# create table_config.yaml with metadata
self.metadata.create(
if_exists=if_table_config_exists,
columns=partition_columns + columns,
partition_columns=partition_columns,
force_columns=force_columns,
table_only=False,
)
self._make_publish_sql()
@staticmethod
def _sheet_to_df(columns_config_url_or_path):
"""
Convert sheet to dataframe
"""
url = columns_config_url_or_path.replace("edit#gid=", "export?format=csv&gid=")
try:
return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode("utf-8")))
except Exception as e:
raise BaseDosDadosException(
"Check if your google sheet Share are: Anyone on the internet with this link can view"
) from e
def table_exists(self, mode):
"""Check if table exists in BigQuery.
Args:
mode (str): Which dataset to check [prod|staging].
"""
try:
ref = self._get_table_obj(mode=mode)
except google.api_core.exceptions.NotFound:
ref = None
return bool(ref)
def update_columns(self, columns_config_url_or_path=None):
"""
        Fills columns in table_config.yaml automatically using a public Google Sheets URL or a local file.
        It also regenerates publish.sql and autofills each column type using bigquery_type.
The sheet must contain the columns:
- name: column name
- description: column description
- bigquery_type: column bigquery type
            - measurement_unit: column measurement unit
- covered_by_dictionary: column related dictionary
- directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>
- temporal_coverage: column temporal coverage
- has_sensitive_data: the column has sensitive data
- observations: column observations
Args:
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
"""
ruamel = ryaml.YAML()
ruamel.preserve_quotes = True
ruamel.indent(mapping=4, sequence=6, offset=4)
table_config_yaml = ruamel.load(
(self.table_folder / "table_config.yaml").open(encoding="utf-8")
)
if "https://docs.google.com/spreadsheets/d/" in columns_config_url_or_path:
if (
"edit#gid=" not in columns_config_url_or_path
or "https://docs.google.com/spreadsheets/d/"
not in columns_config_url_or_path
or not columns_config_url_or_path.split("=")[1].isdigit()
):
raise BaseDosDadosException(
"The Google sheet url not in correct format."
"The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>"
)
df = self._sheet_to_df(columns_config_url_or_path)
else:
file_type = columns_config_url_or_path.split(".")[-1]
if file_type == "csv":
df = pd.read_csv(columns_config_url_or_path, encoding="utf-8")
elif file_type in ["xls", "xlsx", "xlsm", "xlsb", "odf", "ods", "odt"]:
df = pd.read_excel(columns_config_url_or_path)
else:
raise BaseDosDadosException(
"File not suported. Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported."
)
df = df.fillna("NULL")
required_columns = [
"name",
"bigquery_type",
"description",
"temporal_coverage",
"covered_by_dictionary",
"directory_column",
"measurement_unit",
"has_sensitive_data",
"observations",
]
not_found_columns = required_columns.copy()
for sheet_column in df.columns.tolist():
for required_column in required_columns:
if sheet_column == required_column:
not_found_columns.remove(required_column)
if not_found_columns:
raise BaseDosDadosException(
f"The following required columns are not found: {', '.join(not_found_columns)}."
)
columns_parameters = zip(
*[df[required_column].tolist() for required_column in required_columns]
)
for (
name,
bigquery_type,
description,
temporal_coverage,
covered_by_dictionary,
directory_column,
measurement_unit,
has_sensitive_data,
observations,
) in columns_parameters:
for col in table_config_yaml["columns"]:
if col["name"] == name:
col["bigquery_type"] = (
col["bigquery_type"]
if bigquery_type == "NULL"
else bigquery_type.lower()
)
col["description"] = (
col["description"] if description == "NULL" else description
)
col["temporal_coverage"] = (
col["temporal_coverage"]
if temporal_coverage == "NULL"
else [temporal_coverage]
)
col["covered_by_dictionary"] = (
"no"
if covered_by_dictionary == "NULL"
else covered_by_dictionary
)
dataset = directory_column.split(".")[0]
col["directory_column"]["dataset_id"] = (
col["directory_column"]["dataset_id"]
if dataset == "NULL"
else dataset
)
table = directory_column.split(".")[-1].split(":")[0]
col["directory_column"]["table_id"] = (
col["directory_column"]["table_id"]
if table == "NULL"
else table
)
column = directory_column.split(".")[-1].split(":")[-1]
col["directory_column"]["column_name"] = (
col["directory_column"]["column_name"]
if column == "NULL"
else column
)
col["measurement_unit"] = (
col["measurement_unit"]
if measurement_unit == "NULL"
else measurement_unit
)
col["has_sensitive_data"] = (
"no" if has_sensitive_data == "NULL" else has_sensitive_data
)
col["observations"] = (
col["observations"] if observations == "NULL" else observations
)
with open(self.table_folder / "table_config.yaml", "w", encoding="utf-8") as f:
ruamel.dump(table_config_yaml, f)
# regenerate publish.sql
self._make_publish_sql()
def init(
self,
data_sample_path=None,
if_folder_exists="raise",
if_table_config_exists="raise",
source_format="csv",
force_columns = False,
columns_config_url_or_path=None,
): # sourcery skip: low-code-quality
"""Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.
The folder should contain:
* `table_config.yaml`
* `publish.sql`
You can also point to a sample of the data to auto complete columns names.
Args:
data_sample_path (str, pathlib.PosixPath): Optional.
                Data sample path to auto-complete column names.
It supports Comma Delimited CSV, Apache Avro and
Apache Parquet.
if_folder_exists (str): Optional.
What to do if table folder exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace folder
* 'pass' : Do nothing
if_table_config_exists (str): Optional
What to do if table_config.yaml and publish.sql exists
* 'raise' : Raises FileExistsError
* 'replace' : Replace files with blank template
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
            force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
Raises:
FileExistsError: If folder exists and replace is False.
NotImplementedError: If data sample is not in supported type or format.
"""
if not self.dataset_folder.exists():
raise FileExistsError(
f"Dataset folder {self.dataset_folder} folder does not exists. "
"Create a dataset before adding tables."
)
try:
self.table_folder.mkdir(exist_ok=(if_folder_exists == "replace"))
except FileExistsError as e:
if if_folder_exists == "raise":
raise FileExistsError(
f"Table folder already exists for {self.table_id}. "
) from e
if if_folder_exists == "pass":
return self
if not data_sample_path and if_table_config_exists != "pass":
raise BaseDosDadosException(
"You must provide a path to correctly create config files"
)
partition_columns = []
if isinstance(
data_sample_path,
(
str,
Path,
),
):
# Check if partitioned and get data sample and partition columns
data_sample_path = Path(data_sample_path)
if data_sample_path.is_dir():
data_sample_path = [
f
for f in data_sample_path.glob("**/*")
if f.is_file() and f.suffix == f".{source_format}"
][0]
partition_columns = [
k.split("=")[0]
for k in data_sample_path.as_posix().split("/")
if "=" in k
]
columns = Datatype(self, source_format).header(data_sample_path)
else:
columns = ["column_name"]
if if_table_config_exists == "pass":
# Check if config files exists before passing
if (
Path(self.table_folder / "table_config.yaml").is_file()
and Path(self.table_folder / "publish.sql").is_file()
):
pass
# Raise if no sample to determine columns
elif not data_sample_path:
raise BaseDosDadosException(
"You must provide a path to correctly create config files"
)
else:
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
elif if_table_config_exists == "raise":
# Check if config files already exist
if (
Path(self.table_folder / "table_config.yaml").is_file()
and Path(self.table_folder / "publish.sql").is_file()
):
raise FileExistsError(
f"table_config.yaml and publish.sql already exists at {self.table_folder}"
)
# if config files don't exist, create them
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
else:
# Raise: without a path to data sample, should not replace config files with empty template
self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)
if columns_config_url_or_path is not None:
self.update_columns(columns_config_url_or_path)
return self
def create(
self,
path=None,
force_dataset=True,
if_table_exists="raise",
if_storage_data_exists="raise",
if_table_config_exists="raise",
source_format="csv",
force_columns=False,
columns_config_url_or_path=None,
dataset_is_public=True,
location=None,
chunk_size=None,
):
"""Creates BigQuery table at staging dataset.
If you add a path, it automatically saves the data in the storage,
creates a datasets folder and BigQuery location, besides creating the
table and its configuration files.
The new table should be located at `<dataset_id>_staging.<table_id>` in BigQuery.
It looks for data saved in Storage at `<bucket_name>/staging/<dataset_id>/<table_id>/*`
and builds the table.
It currently supports the types:
- Comma Delimited CSV
- Apache Avro
- Apache Parquet
Data can also be partitioned following the hive partitioning scheme
`<key1>=<value1>/<key2>=<value2>` - for instance,
        `year=2012/country=BR`. The partition is automatically detected
by searching for `partitions` on the `table_config.yaml`.
Args:
path (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
job_config_params (dict): Optional.
Job configuration params from bigquery
if_table_exists (str): Optional
What to do if table exists
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
force_dataset (bool): Creates `<dataset_id>` folder and BigQuery Dataset if it doesn't exists.
if_table_config_exists (str): Optional.
What to do if config files already exist
                * 'raise': Raises FileExistsError
                * 'replace': Replace with blank template
                * 'pass': Do nothing
if_storage_data_exists (str): Optional.
What to do if data already exists on your bucket:
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
source_format (str): Optional
Data source format. Only 'csv', 'avro' and 'parquet'
are supported. Defaults to 'csv'.
            force_columns (bool): Optional.
                If set to `True`, overwrite CKAN's columns with the ones provided.
                If set to `False`, keep CKAN's columns instead of the ones provided.
            columns_config_url_or_path (str): Path to the local architecture file or a public Google Sheets URL.
                Path only supports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.
Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.
dataset_is_public (bool): Control if prod dataset is public or not. By default staging datasets like `dataset_id_staging` are not public.
location (str): Optional. Location of dataset data.
List of possible region names locations: https://cloud.google.com/bigquery/docs/locations
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if path is None:
# Look if table data already exists at Storage
data = self.client["storage_staging"].list_blobs(
self.bucket_name, prefix=f"staging/{self.dataset_id}/{self.table_id}"
)
# Raise: Cannot create table without external data
if not data:
raise BaseDosDadosException(
"You must provide a path for uploading data"
)
# Add data to storage
if isinstance(
path,
(
str,
Path,
),
):
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
path,
mode="staging",
if_exists=if_storage_data_exists,
chunk_size=chunk_size,
)
# Create Dataset if it doesn't exist
if force_dataset:
dataset_obj = Dataset(self.dataset_id, **self.main_vars)
try:
dataset_obj.init()
except FileExistsError:
pass
dataset_obj.create(
if_exists="pass", location=location, dataset_is_public=dataset_is_public
)
self.init(
data_sample_path=path,
if_folder_exists="replace",
if_table_config_exists=if_table_config_exists,
columns_config_url_or_path=columns_config_url_or_path,
source_format=source_format,
force_columns=force_columns
)
table = bigquery.Table(self.table_full_name["staging"])
table.external_data_configuration = Datatype(
self, source_format, "staging", partitioned=self._is_partitioned()
).external_config
        # Look up whether the table already exists
table_ref = None
try:
table_ref = self.client["bigquery_staging"].get_table(
self.table_full_name["staging"]
)
except google.api_core.exceptions.NotFound:
pass
if isinstance(table_ref, google.cloud.bigquery.table.Table):
if if_table_exists == "pass":
return None
if if_table_exists == "raise":
raise FileExistsError(
"Table already exists, choose replace if you want to overwrite it"
)
if if_table_exists == "replace":
self.delete(mode="staging")
self.client["bigquery_staging"].create_table(table)
logger.success(
"{object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="created",
)
return None
def update(self, mode="all"):
"""Updates BigQuery schema and description.
Args:
mode (str): Optional.
                Which table to update [prod|staging|all]
not_found_ok (bool): Optional.
What to do if table is not found
"""
self._check_mode(mode)
mode = ["prod", "staging"] if mode == "all" else [mode]
for m in mode:
try:
table = self._get_table_obj(m)
except google.api_core.exceptions.NotFound:
continue
# if m == "staging":
table.description = self._render_template(
Path("table/table_description.txt"), self.table_config
)
# save table description
with open(
self.metadata_path
/ self.dataset_id
/ self.table_id
/ "table_description.txt",
"w",
encoding="utf-8",
) as f:
f.write(table.description)
# when mode is staging the table schema already exists
table.schema = self._load_schema(m)
fields = ["description", "schema"] if m == "prod" else ["description"]
self.client[f"bigquery_{m}"].update_table(table, fields=fields)
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="updated",
)
def publish(self, if_exists="raise"):
"""Creates BigQuery table at production dataset.
Table should be located at `<dataset_id>.<table_id>`.
It creates a view that uses the query from
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.
        Make sure that all columns from the query also exist at
        `<metadata_path>/<dataset_id>/<table_id>/table_config.yaml`, including
the partitions.
Args:
if_exists (str): Optional.
What to do if table exists.
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
Todo:
* Check if all required fields are filled
"""
if if_exists == "replace":
self.delete(mode="prod")
self.client["bigquery_prod"].query(
(self.table_folder / "publish.sql").open("r", encoding="utf-8").read()
).result()
self.update()
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="published",
)
def delete(self, mode):
"""Deletes table in BigQuery.
Args:
            mode (str): Which table to delete [prod|staging]
"""
self._check_mode(mode)
if mode == "all":
for m, n in self.table_full_name[mode].items():
self.client[f"bigquery_{m}"].delete_table(n, not_found_ok=True)
logger.info(
" {object} {object_id}_{mode} was {action}!",
object_id=self.table_id,
mode=mode,
object="Table",
action="deleted",
)
else:
self.client[f"bigquery_{mode}"].delete_table(
self.table_full_name[mode], not_found_ok=True
)
logger.info(
" {object} {object_id}_{mode} was {action}!",
object_id=self.table_id,
mode=mode,
object="Table",
action="deleted",
)
def append(
self,
filepath,
partitions=None,
if_exists="replace",
chunk_size=None,
**upload_args,
):
"""Appends new data to existing BigQuery table.
As long as the data has the same schema. It appends the data in the
filepath to the existing table.
Args:
filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with
partitions (str, pathlib.PosixPath, dict): Optional.
Hive structured partition as a string or dict
* str : `<key>=<value>/<key2>=<value2>`
* dict: `dict(key=value, key2=value2)`
            if_exists (str): Optional.
What to do if data with same name exists in storage
* 'raise' : Raises Conflict exception
* 'replace' : Replace table
* 'pass' : Do nothing
chunk_size (int): Optional
The size of a chunk of data whenever iterating (in bytes).
This must be a multiple of 256 KB per the API specification.
If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.
"""
if not self.table_exists("staging"):
raise BaseDosDadosException(
"You cannot append to a table that does not exist"
)
Storage(self.dataset_id, self.table_id, **self.main_vars).upload(
filepath,
mode="staging",
partitions=partitions,
if_exists=if_exists,
chunk_size=chunk_size,
**upload_args,
)
logger.success(
" {object} {object_id} was {action}!",
object_id=self.table_id,
object="Table",
action="appended",
)
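# A hypothetical usage sketch (not part of this module) showing how the class above is
# typically driven; dataset_id, table_id and the file paths are placeholder values, and
# valid Google Cloud credentials plus an initialized metadata folder are assumed.
if __name__ == "__main__":
    tb = Table(dataset_id="example_dataset", table_id="example_table")
    # upload the sample data, build the config files and create the staging table
    tb.create(
        path="data/example_table.csv",
        if_table_exists="replace",
        if_storage_data_exists="replace",
        if_table_config_exists="pass",
    )
    # create the production view defined by publish.sql
    tb.publish(if_exists="replace")
    # later, append more data with the same schema to the staging table
    tb.append(filepath="data/example_table_update.csv", if_exists="replace")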
|
normal
|
{
"blob_id": "da218e6d9ee311eefb8e9ae4dac5053793eb5514",
"index": 9369,
"step-1": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n <mask token>\n <mask token>\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. 
logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n <mask token>\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n <mask token>\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. 
It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
"step-2": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n <mask token>\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n <mask token>\n <mask token>\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = 
self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
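This step introduces _is_partitioned, which treats a missing or empty partitions entry as "not partitioned" and rejects lists containing None. Below is the same check extracted as a standalone helper purely for illustration; the function name is hypothetical and the inputs are made up.

# Sketch of the partition check performed by Table._is_partitioned.
def is_partitioned(partitions):
    """Return True only for a non-empty list of non-None partition names."""
    if partitions is None or len(partitions) == 0:
        return False
    if isinstance(partitions, list):
        # [None, "ano"] -> False; ["ano", "sigla_uf"] -> True
        return all(item is not None for item in partitions)
    raise ValueError("Partitions must be a list or None")

assert is_partitioned(None) is False
assert is_partitioned([]) is False
assert is_partitioned([None, "ano"]) is False
assert is_partitioned(["ano", "sigla_uf"]) is True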
"step-3": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n\n def _make_template(self, columns, partition_columns,\n if_table_config_exists, force_columns):\n self.metadata.create(if_exists=if_table_config_exists, columns=\n partition_columns + columns, partition_columns=\n partition_columns, force_columns=force_columns, table_only=False)\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load((self.table_folder /\n 'table_config.yaml').open(encoding='utf-8'))\n if ('https://docs.google.com/spreadsheets/d/' in\n columns_config_url_or_path):\n if ('edit#gid=' not in columns_config_url_or_path or \n 'https://docs.google.com/spreadsheets/d/' not in\n columns_config_url_or_path or not\n columns_config_url_or_path.split('=')[1].isdigit()):\n raise BaseDosDadosException(\n 'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split('.')[-1]\n if file_type == 'csv':\n df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')\n elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',\n 'odt']:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n 'File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'\n )\n df = df.fillna('NULL')\n required_columns = ['name', 'bigquery_type', 'description',\n 'temporal_coverage', 'covered_by_dictionary',\n 'directory_column', 'measurement_unit', 'has_sensitive_data',\n 'observations']\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n columns_parameters = zip(*[df[required_column].tolist() for\n required_column in required_columns])\n for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:\n for col in table_config_yaml['columns']:\n if col['name'] == name:\n col['bigquery_type'] = col['bigquery_type'\n ] if bigquery_type == 'NULL' else bigquery_type.lower()\n col['description'] = col['description'\n ] if description == 'NULL' else description\n col['temporal_coverage'] = col['temporal_coverage'\n ] if temporal_coverage == 'NULL' else [\n temporal_coverage]\n col['covered_by_dictionary'] = ('no' if \n covered_by_dictionary == 'NULL' else\n covered_by_dictionary)\n dataset = directory_column.split('.')[0]\n col['directory_column']['dataset_id'] = col[\n 'directory_column']['dataset_id'\n ] if dataset == 'NULL' else dataset\n table = directory_column.split('.')[-1].split(':')[0]\n col['directory_column']['table_id'] = col[\n 'directory_column']['table_id'\n ] if table == 'NULL' else table\n column = directory_column.split('.')[-1].split(':')[-1]\n col['directory_column']['column_name'] = col[\n 'directory_column']['column_name'\n ] if column == 'NULL' else column\n col['measurement_unit'] = col['measurement_unit'\n ] if measurement_unit == 'NULL' else measurement_unit\n col['has_sensitive_data'] = ('no' if has_sensitive_data ==\n 'NULL' else has_sensitive_data)\n col['observations'] = col['observations'\n ] if observations == 'NULL' else observations\n with open(self.table_folder / 'table_config.yaml', 'w', encoding=\n 'utf-8') as f:\n ruamel.dump(table_config_yaml, f)\n self._make_publish_sql()\n\n def init(self, data_sample_path=None, if_folder_exists='raise',\n if_table_config_exists='raise', source_format='csv', force_columns=\n False, columns_config_url_or_path=None):\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. 
Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n raise FileExistsError(\n f'Dataset folder {self.dataset_folder} folder does not exists. Create a dataset before adding tables.'\n )\n try:\n self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')\n except FileExistsError as e:\n if if_folder_exists == 'raise':\n raise FileExistsError(\n f'Table folder already exists for {self.table_id}. '\n ) from e\n if if_folder_exists == 'pass':\n return self\n if not data_sample_path and if_table_config_exists != 'pass':\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n partition_columns = []\n if isinstance(data_sample_path, (str, Path)):\n data_sample_path = Path(data_sample_path)\n if data_sample_path.is_dir():\n data_sample_path = [f for f in data_sample_path.glob('**/*'\n ) if f.is_file() and f.suffix == f'.{source_format}'][0]\n partition_columns = [k.split('=')[0] for k in\n data_sample_path.as_posix().split('/') if '=' in k]\n columns = Datatype(self, source_format).header(data_sample_path)\n else:\n columns = ['column_name']\n if if_table_config_exists == 'pass':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n pass\n elif not data_sample_path:\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n elif if_table_config_exists == 'raise':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n raise FileExistsError(\n f'table_config.yaml and publish.sql already exists at {self.table_folder}'\n )\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n return self\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n 
self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n <mask token>\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
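This step adds init, which, when given a hive-partitioned sample directory, derives the partition column names from the path segments that contain "=". A minimal sketch of that extraction follows; the sample path is invented for the example.

# Sketch of the partition-column extraction done by Table.init on a sample path.
from pathlib import Path

data_sample_path = Path("data/meu_dataset/minha_tabela/ano=2012/sigla_uf=BR/part-0.csv")

partition_columns = [
    segment.split("=")[0]
    for segment in data_sample_path.as_posix().split("/")
    if "=" in segment
]
print(partition_columns)  # ['ano', 'sigla_uf']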
"step-4": "<mask token>\n\n\nclass Table(Base):\n <mask token>\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n self.table_id = table_id.replace('-', '_')\n self.dataset_id = dataset_id.replace('-', '_')\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(prod=\n f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\"\n , staging=\n f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\"\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / 'table_config.yaml')\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f'bigquery_{mode}'].get_table(self.\n table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n partitions = self.table_config['partitions']\n if partitions is None or len(partitions) == 0:\n return False\n if isinstance(partitions, list):\n return all(item is not None for item in partitions)\n raise ValueError('Partitions must be a list or None')\n\n def _load_schema(self, mode='staging'):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n self._check_mode(mode)\n json_path = self.table_folder / f'schema-{mode}.json'\n columns = self.table_config['columns']\n if mode == 'staging':\n new_columns = []\n for c in columns:\n is_in_staging = True if c.get('is_in_staging') is None else c[\n 'is_in_staging']\n if is_in_staging and not c.get('is_partition'):\n c['type'] = 'STRING'\n new_columns.append(c)\n del columns\n columns = new_columns\n elif mode == 'prod':\n schema = self._get_table_obj(mode).schema\n column_names = [c['name'] for c in columns]\n schema_names = [s.name for s in schema]\n not_in_columns = [name for name in schema_names if name not in\n column_names]\n not_in_schema = [name for name in column_names if name not in\n schema_names]\n if not_in_columns:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in table_config.yaml. Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_columns, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n if not_in_schema:\n raise BaseDosDadosException(\n 'Column {error_columns} was not found in publish.sql. 
Are you sure that all your column names between table_config.yaml, publish.sql and {project_id}.{dataset_id}.{table_id} are the same?'\n .format(error_columns=not_in_schema, project_id=self.\n table_config['project_id_prod'], dataset_id=self.\n table_config['dataset_id'], table_id=self.table_config[\n 'table_id']))\n for c in columns:\n for s in schema:\n if c['name'] == s.name:\n c['type'] = s.field_type\n c['mode'] = s.mode\n break\n json.dump(columns, json_path.open('w', encoding='utf-8'))\n return self.client[f'bigquery_{mode}'].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n project_id_prod = self.client['bigquery_prod'].project\n publish_txt += f\"\"\"\n\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\nSELECT \n\"\"\"\n if self._is_partitioned():\n columns = sorted(self.table_config['columns'], key=lambda k: (k\n ['is_partition'] is not None, k['is_partition']), reverse=True)\n else:\n columns = self.table_config['columns']\n for col in columns:\n name = col['name']\n bigquery_type = 'STRING' if col['bigquery_type'] is None else col[\n 'bigquery_type'].upper()\n publish_txt += f'SAFE_CAST({name} AS {bigquery_type}) {name},\\n'\n publish_txt = publish_txt[:-2] + '\\n'\n project_id_staging = self.client['bigquery_staging'].project\n publish_txt += (\n f'FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t'\n )\n (self.table_folder / 'publish.sql').open('w', encoding='utf-8').write(\n publish_txt)\n\n def _make_template(self, columns, partition_columns,\n if_table_config_exists, force_columns):\n self.metadata.create(if_exists=if_table_config_exists, columns=\n partition_columns + columns, partition_columns=\n partition_columns, force_columns=force_columns, table_only=False)\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace('edit#gid=',\n 'export?format=csv&gid=')\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).\n content.decode('utf-8')))\n except Exception as e:\n raise BaseDosDadosException(\n 'Check if your google sheet Share are: Anyone on the internet with this link can view'\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load((self.table_folder /\n 'table_config.yaml').open(encoding='utf-8'))\n if ('https://docs.google.com/spreadsheets/d/' in\n columns_config_url_or_path):\n if ('edit#gid=' not in columns_config_url_or_path or \n 'https://docs.google.com/spreadsheets/d/' not in\n columns_config_url_or_path or not\n columns_config_url_or_path.split('=')[1].isdigit()):\n raise BaseDosDadosException(\n 'The Google sheet url not in correct format.The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>'\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split('.')[-1]\n if file_type == 'csv':\n df = pd.read_csv(columns_config_url_or_path, encoding='utf-8')\n elif file_type in ['xls', 'xlsx', 'xlsm', 'xlsb', 'odf', 'ods',\n 'odt']:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n 'File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.'\n )\n df = df.fillna('NULL')\n required_columns = ['name', 'bigquery_type', 'description',\n 'temporal_coverage', 'covered_by_dictionary',\n 'directory_column', 'measurement_unit', 'has_sensitive_data',\n 'observations']\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n columns_parameters = zip(*[df[required_column].tolist() for\n required_column in required_columns])\n for name, bigquery_type, description, temporal_coverage, covered_by_dictionary, directory_column, measurement_unit, has_sensitive_data, observations in columns_parameters:\n for col in table_config_yaml['columns']:\n if col['name'] == name:\n col['bigquery_type'] = col['bigquery_type'\n ] if bigquery_type == 'NULL' else bigquery_type.lower()\n col['description'] = col['description'\n ] if description == 'NULL' else description\n col['temporal_coverage'] = col['temporal_coverage'\n ] if temporal_coverage == 'NULL' else [\n temporal_coverage]\n col['covered_by_dictionary'] = ('no' if \n covered_by_dictionary == 'NULL' else\n covered_by_dictionary)\n dataset = directory_column.split('.')[0]\n col['directory_column']['dataset_id'] = col[\n 'directory_column']['dataset_id'\n ] if dataset == 'NULL' else dataset\n table = directory_column.split('.')[-1].split(':')[0]\n col['directory_column']['table_id'] = col[\n 'directory_column']['table_id'\n ] if table == 'NULL' else table\n column = directory_column.split('.')[-1].split(':')[-1]\n col['directory_column']['column_name'] = col[\n 'directory_column']['column_name'\n ] if column == 'NULL' else column\n col['measurement_unit'] = col['measurement_unit'\n ] if measurement_unit == 'NULL' else measurement_unit\n col['has_sensitive_data'] = ('no' if has_sensitive_data ==\n 'NULL' else has_sensitive_data)\n col['observations'] = col['observations'\n ] if observations == 'NULL' else observations\n with open(self.table_folder / 'table_config.yaml', 'w', encoding=\n 'utf-8') as f:\n ruamel.dump(table_config_yaml, f)\n self._make_publish_sql()\n\n def init(self, data_sample_path=None, if_folder_exists='raise',\n if_table_config_exists='raise', source_format='csv', force_columns=\n False, columns_config_url_or_path=None):\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. 
Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n raise FileExistsError(\n f'Dataset folder {self.dataset_folder} folder does not exists. Create a dataset before adding tables.'\n )\n try:\n self.table_folder.mkdir(exist_ok=if_folder_exists == 'replace')\n except FileExistsError as e:\n if if_folder_exists == 'raise':\n raise FileExistsError(\n f'Table folder already exists for {self.table_id}. '\n ) from e\n if if_folder_exists == 'pass':\n return self\n if not data_sample_path and if_table_config_exists != 'pass':\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n partition_columns = []\n if isinstance(data_sample_path, (str, Path)):\n data_sample_path = Path(data_sample_path)\n if data_sample_path.is_dir():\n data_sample_path = [f for f in data_sample_path.glob('**/*'\n ) if f.is_file() and f.suffix == f'.{source_format}'][0]\n partition_columns = [k.split('=')[0] for k in\n data_sample_path.as_posix().split('/') if '=' in k]\n columns = Datatype(self, source_format).header(data_sample_path)\n else:\n columns = ['column_name']\n if if_table_config_exists == 'pass':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n pass\n elif not data_sample_path:\n raise BaseDosDadosException(\n 'You must provide a path to correctly create config files')\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n elif if_table_config_exists == 'raise':\n if Path(self.table_folder / 'table_config.yaml').is_file(\n ) and Path(self.table_folder / 'publish.sql').is_file():\n raise FileExistsError(\n f'table_config.yaml and publish.sql already exists at {self.table_folder}'\n )\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n else:\n self._make_template(columns, partition_columns,\n if_table_config_exists, force_columns=force_columns)\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n return self\n <mask token>\n\n def update(self, mode='all'):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n self._check_mode(mode)\n mode = ['prod', 'staging'] if mode == 'all' else [mode]\n for m in mode:\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n table.description = self._render_template(Path(\n 'table/table_description.txt'), self.table_config)\n with open(self.metadata_path / self.dataset_id / self.table_id /\n 'table_description.txt', 'w', encoding='utf-8') as f:\n f.write(table.description)\n table.schema = self._load_schema(m)\n fields = ['description', 'schema'] if m == 'prod' else [\n 'description']\n 
self.client[f'bigquery_{m}'].update_table(table, fields=fields)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='updated')\n\n def publish(self, if_exists='raise'):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n `<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n if if_exists == 'replace':\n self.delete(mode='prod')\n self.client['bigquery_prod'].query((self.table_folder /\n 'publish.sql').open('r', encoding='utf-8').read()).result()\n self.update()\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='published')\n\n def delete(self, mode):\n \"\"\"Deletes table in BigQuery.\n\n Args:\n mode (str): Table of which table to delete [prod|staging]\n \"\"\"\n self._check_mode(mode)\n if mode == 'all':\n for m, n in self.table_full_name[mode].items():\n self.client[f'bigquery_{m}'].delete_table(n, not_found_ok=True)\n logger.info(' {object} {object_id}_{mode} was {action}!',\n object_id=self.table_id, mode=mode, object='Table', action=\n 'deleted')\n else:\n self.client[f'bigquery_{mode}'].delete_table(self.\n table_full_name[mode], not_found_ok=True)\n logger.info(' {object} {object_id}_{mode} was {action}!',\n object_id=self.table_id, mode=mode, object='Table', action=\n 'deleted')\n\n def append(self, filepath, partitions=None, if_exists='replace',\n chunk_size=None, **upload_args):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists('staging'):\n raise BaseDosDadosException(\n 'You cannot append to a table that does not exist')\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath, mode='staging', partitions=partitions, if_exists=\n if_exists, chunk_size=chunk_size, **upload_args)\n logger.success(' {object} {object_id} was {action}!', object_id=\n self.table_id, object='Table', action='appended')\n",
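With create, publish, append and delete all visible by this step, the typical lifecycle of a Table object can be sketched end to end. This is an illustrative flow only: it assumes Google Cloud credentials and the basedosdados config/metadata_path are already set up, the import path reflects the package's public API, and the dataset/table ids and CSV paths are made up.

# Illustrative usage of the Table class defined in the strings above.
from basedosdados import Table  # assumed public import path for this sketch

tb = Table(dataset_id="meu_dataset", table_id="minha_tabela")

# Build config files, upload the sample to storage and create the staging table.
tb.create(
    path="data/minha_tabela.csv",
    if_table_exists="replace",
    if_storage_data_exists="replace",
    if_table_config_exists="replace",
)

# Create/refresh the prod view from publish.sql, then append more data later.
tb.publish(if_exists="replace")
tb.append(filepath="data/minha_tabela_2013.csv", if_exists="replace")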
"step-5": "\"\"\"\nClass for manage tables in Storage and Big Query\n\"\"\"\n# pylint: disable=invalid-name, too-many-locals, too-many-branches, too-many-arguments,line-too-long,R0801,consider-using-f-string\nfrom pathlib import Path\nimport json\nfrom copy import deepcopy\nimport textwrap\nimport inspect\nfrom io import StringIO\n\nfrom loguru import logger\nfrom google.cloud import bigquery\nimport ruamel.yaml as ryaml\nimport requests\nimport pandas as pd\nimport google.api_core.exceptions\n\nfrom basedosdados.upload.base import Base\nfrom basedosdados.upload.storage import Storage\nfrom basedosdados.upload.dataset import Dataset\nfrom basedosdados.upload.datatypes import Datatype\nfrom basedosdados.upload.metadata import Metadata\nfrom basedosdados.exceptions import BaseDosDadosException\n\n\nclass Table(Base):\n \"\"\"\n Manage tables in Google Cloud Storage and BigQuery.\n \"\"\"\n\n def __init__(self, dataset_id, table_id, **kwargs):\n super().__init__(**kwargs)\n\n self.table_id = table_id.replace(\"-\", \"_\")\n self.dataset_id = dataset_id.replace(\"-\", \"_\")\n self.dataset_folder = Path(self.metadata_path / self.dataset_id)\n self.table_folder = self.dataset_folder / table_id\n self.table_full_name = dict(\n prod=f\"{self.client['bigquery_prod'].project}.{self.dataset_id}.{self.table_id}\",\n staging=f\"{self.client['bigquery_staging'].project}.{self.dataset_id}_staging.{self.table_id}\",\n )\n self.table_full_name.update(dict(all=deepcopy(self.table_full_name)))\n self.metadata = Metadata(self.dataset_id, self.table_id, **kwargs)\n\n @property\n def table_config(self):\n \"\"\"\n Load table_config.yaml\n \"\"\"\n return self._load_yaml(self.table_folder / \"table_config.yaml\")\n\n def _get_table_obj(self, mode):\n \"\"\"\n Get table object from BigQuery\n \"\"\"\n return self.client[f\"bigquery_{mode}\"].get_table(self.table_full_name[mode])\n\n def _is_partitioned(self):\n \"\"\"\n Check if table is partitioned\n \"\"\"\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")\n\n def _load_schema(self, mode=\"staging\"):\n \"\"\"Load schema from table_config.yaml\n\n Args:\n mode (bool): Which dataset to create [prod|staging].\n \"\"\"\n\n self._check_mode(mode)\n\n json_path = self.table_folder / f\"schema-{mode}.json\"\n columns = self.table_config[\"columns\"]\n\n if mode == \"staging\":\n new_columns = []\n for c in columns:\n # case is_in_staging are None then must be True\n is_in_staging = (\n True if c.get(\"is_in_staging\") is None else c[\"is_in_staging\"]\n )\n # append columns declared in table_config.yaml to schema only if is_in_staging: True\n if is_in_staging and not c.get(\"is_partition\"):\n c[\"type\"] = \"STRING\"\n new_columns.append(c)\n\n del columns\n columns = new_columns\n\n elif mode == \"prod\":\n schema = self._get_table_obj(mode).schema\n\n # get field names for fields at schema and at table_config.yaml\n column_names = [c[\"name\"] for c in columns]\n schema_names = [s.name for s in schema]\n\n # check if there are mismatched fields\n not_in_columns = [name for name in schema_names if name not 
in column_names]\n not_in_schema = [name for name in column_names if name not in schema_names]\n\n # raise if field is not in table_config\n if not_in_columns:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in table_config.yaml. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_columns,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # raise if field is not in schema\n if not_in_schema:\n raise BaseDosDadosException(\n \"Column {error_columns} was not found in publish.sql. Are you sure that \"\n \"all your column names between table_config.yaml, publish.sql and \"\n \"{project_id}.{dataset_id}.{table_id} are the same?\".format(\n error_columns=not_in_schema,\n project_id=self.table_config[\"project_id_prod\"],\n dataset_id=self.table_config[\"dataset_id\"],\n table_id=self.table_config[\"table_id\"],\n )\n )\n\n # if field is in schema, get field_type and field_mode\n for c in columns:\n for s in schema:\n if c[\"name\"] == s.name:\n c[\"type\"] = s.field_type\n c[\"mode\"] = s.mode\n break\n ## force utf-8, write schema_{mode}.json\n json.dump(columns, (json_path).open(\"w\", encoding=\"utf-8\"))\n\n # load new created schema\n return self.client[f\"bigquery_{mode}\"].schema_from_json(str(json_path))\n\n def _make_publish_sql(self):\n \"\"\"Create publish.sql with columns and bigquery_type\"\"\"\n\n ### publish.sql header and instructions\n publish_txt = \"\"\"\n /*\n Query para publicar a tabela.\n\n Esse é o lugar para:\n - modificar nomes, ordem e tipos de colunas\n - dar join com outras tabelas\n - criar colunas extras (e.g. 
logs, proporções, etc.)\n\n Qualquer coluna definida aqui deve também existir em `table_config.yaml`.\n\n # Além disso, sinta-se à vontade para alterar alguns nomes obscuros\n # para algo um pouco mais explícito.\n\n TIPOS:\n - Para modificar tipos de colunas, basta substituir STRING por outro tipo válido.\n - Exemplo: `SAFE_CAST(column_name AS NUMERIC) column_name`\n - Mais detalhes: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types\n */\n \"\"\"\n\n # remove triple quotes extra space\n publish_txt = inspect.cleandoc(publish_txt)\n publish_txt = textwrap.dedent(publish_txt)\n\n # add create table statement\n project_id_prod = self.client[\"bigquery_prod\"].project\n publish_txt += f\"\\n\\nCREATE VIEW {project_id_prod}.{self.dataset_id}.{self.table_id} AS\\nSELECT \\n\"\n\n # sort columns by is_partition, partitions_columns come first\n\n if self._is_partitioned():\n columns = sorted(\n self.table_config[\"columns\"],\n key=lambda k: (k[\"is_partition\"] is not None, k[\"is_partition\"]),\n reverse=True,\n )\n else:\n columns = self.table_config[\"columns\"]\n\n # add columns in publish.sql\n for col in columns:\n name = col[\"name\"]\n bigquery_type = (\n \"STRING\"\n if col[\"bigquery_type\"] is None\n else col[\"bigquery_type\"].upper()\n )\n\n publish_txt += f\"SAFE_CAST({name} AS {bigquery_type}) {name},\\n\"\n ## remove last comma\n publish_txt = publish_txt[:-2] + \"\\n\"\n\n # add from statement\n project_id_staging = self.client[\"bigquery_staging\"].project\n publish_txt += (\n f\"FROM {project_id_staging}.{self.dataset_id}_staging.{self.table_id} AS t\"\n )\n\n # save publish.sql in table_folder\n (self.table_folder / \"publish.sql\").open(\"w\", encoding=\"utf-8\").write(\n publish_txt\n )\n\n def _make_template(self, columns, partition_columns, if_table_config_exists, force_columns):\n # create table_config.yaml with metadata\n self.metadata.create(\n if_exists=if_table_config_exists,\n columns=partition_columns + columns,\n partition_columns=partition_columns,\n force_columns=force_columns,\n table_only=False,\n )\n\n self._make_publish_sql()\n\n @staticmethod\n def _sheet_to_df(columns_config_url_or_path):\n \"\"\"\n Convert sheet to dataframe\n \"\"\"\n url = columns_config_url_or_path.replace(\"edit#gid=\", \"export?format=csv&gid=\")\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode(\"utf-8\")))\n except Exception as e:\n raise BaseDosDadosException(\n \"Check if your google sheet Share are: Anyone on the internet with this link can view\"\n ) from e\n\n def table_exists(self, mode):\n \"\"\"Check if table exists in BigQuery.\n\n Args:\n mode (str): Which dataset to check [prod|staging].\n \"\"\"\n\n try:\n ref = self._get_table_obj(mode=mode)\n except google.api_core.exceptions.NotFound:\n ref = None\n\n return bool(ref)\n\n def update_columns(self, columns_config_url_or_path=None):\n \"\"\"\n Fills columns in table_config.yaml automatically using a public google sheets URL or a local file. 
Also regenerate\n publish.sql and autofill type using bigquery_type.\n\n The sheet must contain the columns:\n - name: column name\n - description: column description\n - bigquery_type: column bigquery type\n - measurement_unit: column mesurement unit\n - covered_by_dictionary: column related dictionary\n - directory_column: column related directory in the format <dataset_id>.<table_id>:<column_name>\n - temporal_coverage: column temporal coverage\n - has_sensitive_data: the column has sensitive data\n - observations: column observations\n Args:\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n \"\"\"\n ruamel = ryaml.YAML()\n ruamel.preserve_quotes = True\n ruamel.indent(mapping=4, sequence=6, offset=4)\n table_config_yaml = ruamel.load(\n (self.table_folder / \"table_config.yaml\").open(encoding=\"utf-8\")\n )\n\n if \"https://docs.google.com/spreadsheets/d/\" in columns_config_url_or_path:\n if (\n \"edit#gid=\" not in columns_config_url_or_path\n or \"https://docs.google.com/spreadsheets/d/\"\n not in columns_config_url_or_path\n or not columns_config_url_or_path.split(\"=\")[1].isdigit()\n ):\n raise BaseDosDadosException(\n \"The Google sheet url not in correct format.\"\n \"The url must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>\"\n )\n df = self._sheet_to_df(columns_config_url_or_path)\n else:\n file_type = columns_config_url_or_path.split(\".\")[-1]\n if file_type == \"csv\":\n df = pd.read_csv(columns_config_url_or_path, encoding=\"utf-8\")\n elif file_type in [\"xls\", \"xlsx\", \"xlsm\", \"xlsb\", \"odf\", \"ods\", \"odt\"]:\n df = pd.read_excel(columns_config_url_or_path)\n else:\n raise BaseDosDadosException(\n \"File not suported. 
Only csv, xls, xlsx, xlsm, xlsb, odf, ods, odt are supported.\"\n )\n\n df = df.fillna(\"NULL\")\n\n required_columns = [\n \"name\",\n \"bigquery_type\",\n \"description\",\n \"temporal_coverage\",\n \"covered_by_dictionary\",\n \"directory_column\",\n \"measurement_unit\",\n \"has_sensitive_data\",\n \"observations\",\n ]\n\n not_found_columns = required_columns.copy()\n for sheet_column in df.columns.tolist():\n for required_column in required_columns:\n if sheet_column == required_column:\n not_found_columns.remove(required_column)\n if not_found_columns:\n raise BaseDosDadosException(\n f\"The following required columns are not found: {', '.join(not_found_columns)}.\"\n )\n\n columns_parameters = zip(\n *[df[required_column].tolist() for required_column in required_columns]\n )\n for (\n name,\n bigquery_type,\n description,\n temporal_coverage,\n covered_by_dictionary,\n directory_column,\n measurement_unit,\n has_sensitive_data,\n observations,\n ) in columns_parameters:\n for col in table_config_yaml[\"columns\"]:\n if col[\"name\"] == name:\n col[\"bigquery_type\"] = (\n col[\"bigquery_type\"]\n if bigquery_type == \"NULL\"\n else bigquery_type.lower()\n )\n\n col[\"description\"] = (\n col[\"description\"] if description == \"NULL\" else description\n )\n\n col[\"temporal_coverage\"] = (\n col[\"temporal_coverage\"]\n if temporal_coverage == \"NULL\"\n else [temporal_coverage]\n )\n\n col[\"covered_by_dictionary\"] = (\n \"no\"\n if covered_by_dictionary == \"NULL\"\n else covered_by_dictionary\n )\n\n dataset = directory_column.split(\".\")[0]\n col[\"directory_column\"][\"dataset_id\"] = (\n col[\"directory_column\"][\"dataset_id\"]\n if dataset == \"NULL\"\n else dataset\n )\n\n table = directory_column.split(\".\")[-1].split(\":\")[0]\n col[\"directory_column\"][\"table_id\"] = (\n col[\"directory_column\"][\"table_id\"]\n if table == \"NULL\"\n else table\n )\n\n column = directory_column.split(\".\")[-1].split(\":\")[-1]\n col[\"directory_column\"][\"column_name\"] = (\n col[\"directory_column\"][\"column_name\"]\n if column == \"NULL\"\n else column\n )\n col[\"measurement_unit\"] = (\n col[\"measurement_unit\"]\n if measurement_unit == \"NULL\"\n else measurement_unit\n )\n\n col[\"has_sensitive_data\"] = (\n \"no\" if has_sensitive_data == \"NULL\" else has_sensitive_data\n )\n\n col[\"observations\"] = (\n col[\"observations\"] if observations == \"NULL\" else observations\n )\n\n with open(self.table_folder / \"table_config.yaml\", \"w\", encoding=\"utf-8\") as f:\n ruamel.dump(table_config_yaml, f)\n\n # regenerate publish.sql\n self._make_publish_sql()\n\n def init(\n self,\n data_sample_path=None,\n if_folder_exists=\"raise\",\n if_table_config_exists=\"raise\",\n source_format=\"csv\",\n force_columns = False,\n columns_config_url_or_path=None,\n ): # sourcery skip: low-code-quality\n \"\"\"Initialize table folder at metadata_path at `metadata_path/<dataset_id>/<table_id>`.\n\n The folder should contain:\n\n * `table_config.yaml`\n * `publish.sql`\n\n You can also point to a sample of the data to auto complete columns names.\n\n Args:\n data_sample_path (str, pathlib.PosixPath): Optional.\n Data sample path to auto complete columns names\n It supports Comma Delimited CSV, Apache Avro and\n Apache Parquet.\n if_folder_exists (str): Optional.\n What to do if table folder exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace folder\n * 'pass' : Do nothing\n if_table_config_exists (str): Optional\n What to do if table_config.yaml and publish.sql 
exists\n\n * 'raise' : Raises FileExistsError\n * 'replace' : Replace files with blank template\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n Raises:\n FileExistsError: If folder exists and replace is False.\n NotImplementedError: If data sample is not in supported type or format.\n \"\"\"\n if not self.dataset_folder.exists():\n\n raise FileExistsError(\n f\"Dataset folder {self.dataset_folder} folder does not exists. \"\n \"Create a dataset before adding tables.\"\n )\n\n try:\n self.table_folder.mkdir(exist_ok=(if_folder_exists == \"replace\"))\n except FileExistsError as e:\n if if_folder_exists == \"raise\":\n raise FileExistsError(\n f\"Table folder already exists for {self.table_id}. \"\n ) from e\n if if_folder_exists == \"pass\":\n return self\n\n if not data_sample_path and if_table_config_exists != \"pass\":\n raise BaseDosDadosException(\n \"You must provide a path to correctly create config files\"\n )\n\n partition_columns = []\n if isinstance(\n data_sample_path,\n (\n str,\n Path,\n ),\n ):\n # Check if partitioned and get data sample and partition columns\n data_sample_path = Path(data_sample_path)\n\n if data_sample_path.is_dir():\n\n data_sample_path = [\n f\n for f in data_sample_path.glob(\"**/*\")\n if f.is_file() and f.suffix == f\".{source_format}\"\n ][0]\n\n partition_columns = [\n k.split(\"=\")[0]\n for k in data_sample_path.as_posix().split(\"/\")\n if \"=\" in k\n ]\n\n columns = Datatype(self, source_format).header(data_sample_path)\n\n else:\n\n columns = [\"column_name\"]\n\n if if_table_config_exists == \"pass\":\n # Check if config files exists before passing\n if (\n Path(self.table_folder / \"table_config.yaml\").is_file()\n and Path(self.table_folder / \"publish.sql\").is_file()\n ):\n pass\n # Raise if no sample to determine columns\n elif not data_sample_path:\n raise BaseDosDadosException(\n \"You must provide a path to correctly create config files\"\n )\n else:\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n elif if_table_config_exists == \"raise\":\n\n # Check if config files already exist\n if (\n Path(self.table_folder / \"table_config.yaml\").is_file()\n and Path(self.table_folder / \"publish.sql\").is_file()\n ):\n\n raise FileExistsError(\n f\"table_config.yaml and publish.sql already exists at {self.table_folder}\"\n )\n # if config files don't exist, create them\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n else:\n # Raise: without a path to data sample, should not replace config files with empty template\n self._make_template(columns, partition_columns, if_table_config_exists, force_columns=force_columns)\n\n if columns_config_url_or_path is not None:\n self.update_columns(columns_config_url_or_path)\n\n return self\n\n def create(\n self,\n path=None,\n force_dataset=True,\n if_table_exists=\"raise\",\n if_storage_data_exists=\"raise\",\n 
if_table_config_exists=\"raise\",\n source_format=\"csv\",\n force_columns=False,\n columns_config_url_or_path=None,\n dataset_is_public=True,\n location=None,\n chunk_size=None,\n ):\n \"\"\"Creates BigQuery table at staging dataset.\n\n If you add a path, it automatically saves the data in the storage,\n creates a datasets folder and BigQuery location, besides creating the\n table and its configuration files.\n\n The new table should be located at `<dataset_id>_staging.<table_id>` in BigQuery.\n\n It looks for data saved in Storage at `<bucket_name>/staging/<dataset_id>/<table_id>/*`\n and builds the table.\n\n It currently supports the types:\n\n - Comma Delimited CSV\n - Apache Avro\n - Apache Parquet\n\n Data can also be partitioned following the hive partitioning scheme\n `<key1>=<value1>/<key2>=<value2>` - for instance,\n `year=2012/country=BR`. The partition is automatcally detected\n by searching for `partitions` on the `table_config.yaml`.\n\n Args:\n path (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n job_config_params (dict): Optional.\n Job configuration params from bigquery\n if_table_exists (str): Optional\n What to do if table exists\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n force_dataset (bool): Creates `<dataset_id>` folder and BigQuery Dataset if it doesn't exists.\n if_table_config_exists (str): Optional.\n What to do if config files already exist\n\n * 'raise': Raises FileExistError\n * 'replace': Replace with blank template\n * 'pass'; Do nothing\n if_storage_data_exists (str): Optional.\n What to do if data already exists on your bucket:\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n source_format (str): Optional\n Data source format. Only 'csv', 'avro' and 'parquet'\n are supported. Defaults to 'csv'.\n force_columns (bool): Optional.\n If set to `True`, overwrite CKAN's columns with the ones provi\n ded.\n If set to `False`, keep CKAN's columns instead of the ones pro\n vided.\n columns_config_url_or_path (str): Path to the local architeture file or a public google sheets URL.\n Path only suports csv, xls, xlsx, xlsm, xlsb, odf, ods, odt formats.\n Google sheets URL must be in the format https://docs.google.com/spreadsheets/d/<table_key>/edit#gid=<table_gid>.\n\n dataset_is_public (bool): Control if prod dataset is public or not. By default staging datasets like `dataset_id_staging` are not public.\n\n location (str): Optional. Location of dataset data.\n List of possible region names locations: https://cloud.google.com/bigquery/docs/locations\n\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. 
If that is not specified, a default value of 40 MB is used.\n \"\"\"\n\n if path is None:\n\n # Look if table data already exists at Storage\n data = self.client[\"storage_staging\"].list_blobs(\n self.bucket_name, prefix=f\"staging/{self.dataset_id}/{self.table_id}\"\n )\n\n # Raise: Cannot create table without external data\n if not data:\n raise BaseDosDadosException(\n \"You must provide a path for uploading data\"\n )\n\n # Add data to storage\n if isinstance(\n path,\n (\n str,\n Path,\n ),\n ):\n\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n path,\n mode=\"staging\",\n if_exists=if_storage_data_exists,\n chunk_size=chunk_size,\n )\n\n # Create Dataset if it doesn't exist\n if force_dataset:\n\n dataset_obj = Dataset(self.dataset_id, **self.main_vars)\n\n try:\n dataset_obj.init()\n except FileExistsError:\n pass\n\n dataset_obj.create(\n if_exists=\"pass\", location=location, dataset_is_public=dataset_is_public\n )\n\n self.init(\n data_sample_path=path,\n if_folder_exists=\"replace\",\n if_table_config_exists=if_table_config_exists,\n columns_config_url_or_path=columns_config_url_or_path,\n source_format=source_format,\n force_columns=force_columns\n )\n\n table = bigquery.Table(self.table_full_name[\"staging\"])\n table.external_data_configuration = Datatype(\n self, source_format, \"staging\", partitioned=self._is_partitioned()\n ).external_config\n\n # Lookup if table alreay exists\n table_ref = None\n try:\n table_ref = self.client[\"bigquery_staging\"].get_table(\n self.table_full_name[\"staging\"]\n )\n\n except google.api_core.exceptions.NotFound:\n pass\n\n if isinstance(table_ref, google.cloud.bigquery.table.Table):\n\n if if_table_exists == \"pass\":\n\n return None\n\n if if_table_exists == \"raise\":\n\n raise FileExistsError(\n \"Table already exists, choose replace if you want to overwrite it\"\n )\n\n if if_table_exists == \"replace\":\n\n self.delete(mode=\"staging\")\n\n self.client[\"bigquery_staging\"].create_table(table)\n\n logger.success(\n \"{object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"created\",\n )\n return None\n\n def update(self, mode=\"all\"):\n \"\"\"Updates BigQuery schema and description.\n Args:\n mode (str): Optional.\n Table of which table to update [prod|staging|all]\n not_found_ok (bool): Optional.\n What to do if table is not found\n \"\"\"\n\n self._check_mode(mode)\n\n mode = [\"prod\", \"staging\"] if mode == \"all\" else [mode]\n for m in mode:\n\n try:\n table = self._get_table_obj(m)\n except google.api_core.exceptions.NotFound:\n continue\n\n # if m == \"staging\":\n\n table.description = self._render_template(\n Path(\"table/table_description.txt\"), self.table_config\n )\n\n # save table description\n with open(\n self.metadata_path\n / self.dataset_id\n / self.table_id\n / \"table_description.txt\",\n \"w\",\n encoding=\"utf-8\",\n ) as f:\n f.write(table.description)\n\n # when mode is staging the table schema already exists\n table.schema = self._load_schema(m)\n fields = [\"description\", \"schema\"] if m == \"prod\" else [\"description\"]\n self.client[f\"bigquery_{m}\"].update_table(table, fields=fields)\n\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"updated\",\n )\n\n def publish(self, if_exists=\"raise\"):\n \"\"\"Creates BigQuery table at production dataset.\n\n Table should be located at `<dataset_id>.<table_id>`.\n\n It creates a view that uses the query from\n 
`<metadata_path>/<dataset_id>/<table_id>/publish.sql`.\n\n Make sure that all columns from the query also exists at\n `<metadata_path>/<dataset_id>/<table_id>/table_config.sql`, including\n the partitions.\n\n Args:\n if_exists (str): Optional.\n What to do if table exists.\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n\n Todo:\n\n * Check if all required fields are filled\n \"\"\"\n\n if if_exists == \"replace\":\n self.delete(mode=\"prod\")\n\n self.client[\"bigquery_prod\"].query(\n (self.table_folder / \"publish.sql\").open(\"r\", encoding=\"utf-8\").read()\n ).result()\n\n self.update()\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"published\",\n )\n\n def delete(self, mode):\n \"\"\"Deletes table in BigQuery.\n\n Args:\n mode (str): Table of which table to delete [prod|staging]\n \"\"\"\n\n self._check_mode(mode)\n\n if mode == \"all\":\n for m, n in self.table_full_name[mode].items():\n self.client[f\"bigquery_{m}\"].delete_table(n, not_found_ok=True)\n logger.info(\n \" {object} {object_id}_{mode} was {action}!\",\n object_id=self.table_id,\n mode=mode,\n object=\"Table\",\n action=\"deleted\",\n )\n else:\n self.client[f\"bigquery_{mode}\"].delete_table(\n self.table_full_name[mode], not_found_ok=True\n )\n\n logger.info(\n \" {object} {object_id}_{mode} was {action}!\",\n object_id=self.table_id,\n mode=mode,\n object=\"Table\",\n action=\"deleted\",\n )\n\n def append(\n self,\n filepath,\n partitions=None,\n if_exists=\"replace\",\n chunk_size=None,\n **upload_args,\n ):\n \"\"\"Appends new data to existing BigQuery table.\n\n As long as the data has the same schema. It appends the data in the\n filepath to the existing table.\n\n Args:\n filepath (str or pathlib.PosixPath): Where to find the file that you want to upload to create a table with\n partitions (str, pathlib.PosixPath, dict): Optional.\n Hive structured partition as a string or dict\n\n * str : `<key>=<value>/<key2>=<value2>`\n * dict: `dict(key=value, key2=value2)`\n if_exists (str): 0ptional.\n What to do if data with same name exists in storage\n\n * 'raise' : Raises Conflict exception\n * 'replace' : Replace table\n * 'pass' : Do nothing\n chunk_size (int): Optional\n The size of a chunk of data whenever iterating (in bytes).\n This must be a multiple of 256 KB per the API specification.\n If not specified, the chunk_size of the blob itself is used. If that is not specified, a default value of 40 MB is used.\n \"\"\"\n if not self.table_exists(\"staging\"):\n raise BaseDosDadosException(\n \"You cannot append to a table that does not exist\"\n )\n Storage(self.dataset_id, self.table_id, **self.main_vars).upload(\n filepath,\n mode=\"staging\",\n partitions=partitions,\n if_exists=if_exists,\n chunk_size=chunk_size,\n **upload_args,\n )\n logger.success(\n \" {object} {object_id} was {action}!\",\n object_id=self.table_id,\n object=\"Table\",\n action=\"appended\",\n )\n",
"step-ids": [
8,
12,
15,
16,
20
]
}
|
[
8,
12,
15,
16,
20
] |
class Image:
    def __init__(self, **kwargs):
        self.ClientID = kwargs['ClientID']
        self.DealerID = kwargs['DealerID']
        self.VIN = kwargs['VIN']
        self.UrlVdp = None
        self.PhotoURL = kwargs['PhotoURL']
        self.VdpActive = None

    def __repr__(self):
        return f"{self.DealerID} {self.VIN} {self.UrlVdp}"


class VehiclePhoto:
    def __init__(self, **kwargs):
        self.ClientID = kwargs['ClientID']
        self.DealerID = kwargs['DealerID']
        self.Domain = kwargs['Domain']
        self.VehiclePhotoID = kwargs['VehiclePhotoID']
        self.VIN = kwargs['VIN']
        self.UrlVdp = kwargs['UrlVdp']
        self.UrlImage = kwargs['UrlImage']

    def __repr__(self):
        return f"{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}"
|
normal
|
{
"blob_id": "3dc4e10145ad42c0168fec3462da0f87c1e661a5",
"index": 8701,
"step-1": "<mask token>\n\n\nclass VehiclePhoto:\n <mask token>\n\n def __repr__(self):\n return f'{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}'\n",
"step-2": "class Image:\n <mask token>\n <mask token>\n\n\nclass VehiclePhoto:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.Domain = kwargs['Domain']\n self.VehiclePhotoID = kwargs['VehiclePhotoID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = kwargs['UrlVdp']\n self.UrlImage = kwargs['UrlImage']\n\n def __repr__(self):\n return f'{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}'\n",
"step-3": "class Image:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = None\n self.PhotoURL = kwargs['PhotoURL']\n self.VdpActive = None\n <mask token>\n\n\nclass VehiclePhoto:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.Domain = kwargs['Domain']\n self.VehiclePhotoID = kwargs['VehiclePhotoID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = kwargs['UrlVdp']\n self.UrlImage = kwargs['UrlImage']\n\n def __repr__(self):\n return f'{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}'\n",
"step-4": "class Image:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = None\n self.PhotoURL = kwargs['PhotoURL']\n self.VdpActive = None\n\n def __repr__(self):\n return f'{self.DealerID} {self.VIN} {self.UrlVdp}'\n\n\nclass VehiclePhoto:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.Domain = kwargs['Domain']\n self.VehiclePhotoID = kwargs['VehiclePhotoID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = kwargs['UrlVdp']\n self.UrlImage = kwargs['UrlImage']\n\n def __repr__(self):\n return f'{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}'\n",
"step-5": "class Image:\n\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = None\n self.PhotoURL = kwargs['PhotoURL']\n self.VdpActive = None\n \n def __repr__(self):\n return f\"{self.DealerID} {self.VIN} {self.UrlVdp}\"\n\nclass VehiclePhoto:\n def __init__(self, **kwargs):\n self.ClientID = kwargs['ClientID']\n self.DealerID = kwargs['DealerID']\n self.Domain = kwargs['Domain']\n self.VehiclePhotoID = kwargs['VehiclePhotoID']\n self.VIN = kwargs['VIN']\n self.UrlVdp = kwargs['UrlVdp']\n self.UrlImage = kwargs['UrlImage']\n \n def __repr__(self):\n return f\"{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}\"",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import sys


def main():
    lines = [line.strip() for line in sys.stdin.readlines()]
    h = lines.index("")              # grid height: number of rows before the first blank line
    w = len(lines[0].split()[0])     # grid width (parsed but not used below)
    start = 0
    grids = set()
    while start < len(lines):
        grid = tuple(x.split()[0] for x in lines[start:start + h])
        if len(grid) == h:
            grids.add(grid)
        start += h + 1               # skip past the grid plus its blank separator line
    print(len(grids), file=sys.stderr)   # Python 3 form of `print >> sys.stderr, ...`
    for grid in grids:
        for line in grid:
            print(line)
        print()


main()
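
# Illustrative only -- the input format is inferred from the parsing above, not
# documented anywhere in this snippet: stdin is assumed to hold fixed-height grids
# whose rows are the first whitespace-separated token on each line, with grids
# separated by blank lines. For example, redirecting stdin before calling main()
# like this would report 2 unique grids on stderr:
#
#     import io
#     sys.stdin = io.StringIO("ab x\ncd x\n\nef y\ngh y\n\nab z\ncd z\n\n")
#     main()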
|
normal
|
{
"blob_id": "6ef8a174dcce633b526ce7d6fdb6ceb11089b177",
"index": 3652,
"step-1": "import sys\n\ndef main():\n lines = [line.strip() for line in sys.stdin.readlines()]\n h = lines.index(\"\")\n w = len(lines[0].split()[0])\n start = 0\n grids = set()\n while start < len(lines):\n grid = tuple(x.split()[0] for x in lines[start:start + h])\n if len(grid) == h:\n grids.add(grid)\n start += h + 1\n print >> sys.stderr, len(grids)\n for grid in grids:\n for line in grid:\n print line\n print\n\nmain()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import unittest
from unittest.mock import patch
from fsqlfly.db_helper import *
from fsqlfly.tests.base_test import FSQLFlyTestCase
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
def get_create_object(self):
connection = Connection(name='a', url='#', type='hive', connector='text')
schema = SchemaEvent(name='test', connection=connection, version=1)
schema2 = SchemaEvent(name='test2', connection=connection, version=2)
r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)
t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,
schema_version=schema)
v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,
resource_name=r_name, schema_version=schema)
return connection, schema, schema2, r_name, t_name, v_name
def test_positive_delete_connection(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.session.delete(connection)
self.session.commit()
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_connection_by_db_helper(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
DBSession.init_engine(self.engine)
with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
res = DBDao.delete('connection', pk=connection.id)
self.assertEqual(res.success, True)
self.session.close()
self.session = self.get_session()
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = """
[jdbc]
insert_primary_key = false
"""
resource_name_config = """
[jdbc]
insert_primary_key = true
"""
connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,
config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "abbefb1e426408b32fa9e125c78b572de22dbb8c",
"index": 7493,
"step-1": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n <mask token>\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', 
type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-4": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = 
false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector='text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,\n resource_name=r_name, schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(schema)\n\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = \"\"\"\n[jdbc]\ninsert_primary_key = false\n\n \"\"\"\n 
resource_name_config = \"\"\"\n[jdbc]\ninsert_primary_key = true\n \"\"\"\n connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,\n config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
################################################################################
# run_experiment.py #
# Ian Marci 2017 #
# Defines knn classifier and runs 4-fold cross validation on data in #
# Data/NetworkInput folder. #
# Prints accuracy for each fold as well as confusion matrix. #
################################################################################
# Imports
import tensorflow as tf
import numpy as np
from classifier_input_functions import choose_test_set, get_network_input
# Path and placeholder definitions
train_path = 'Data/NetworkTrain/'
test_path = 'Data/NetworkTest/'
x_train = tf.placeholder('float', [None, 200])
x_test = tf.placeholder('float', [200])
# Distance to decide nearest neighbor
distance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),
reduction_indices=1)
# Prediction chooses lowest distance
pred = tf.argmin(distance, 0)
################################
# 4-fold cross validation loop #
################################
init = tf.global_variables_initializer()
with tf.Session() as sess:
predictions = []
labels = []
accuracies = []
for i in range(4):
sess.run(init)
choice = i + 1
choose_test_set(str(choice))
train_data, train_labels = get_network_input(train_path)
test_data, test_labels = get_network_input(test_path)
fold_accuracy = 0
        for j in range(len(test_data)):  # j indexes the test fold; avoids shadowing the fold counter i
            nn_index = sess.run(pred, feed_dict={x_train: train_data,
                                                 x_test: test_data[j, :]})
            predictions.append(np.argmax(train_labels[nn_index]))
            labels.append(np.argmax(test_labels[j]))
if predictions[-1] == labels[-1]:
fold_accuracy += 1./len(test_data)
accuracies.append(fold_accuracy)
overall_accuracy = np.mean(accuracies)
print('Average accuracy over 4 folds:', overall_accuracy)
confusion = tf.confusion_matrix(labels=labels, predictions=predictions)
print(confusion.eval())
|
normal
|
{
"blob_id": "dbc599a03d91f369d862f6cc90c31221747ead80",
"index": 2811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-3": "<mask token>\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\npred = tf.argmin(distance, 0)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nfrom classifier_input_functions import choose_test_set, get_network_input\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\npred = tf.argmin(distance, 0)\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n choice = i + 1\n choose_test_set(str(choice))\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n fold_accuracy = 0\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1.0 / len(test_data)\n accuracies.append(fold_accuracy)\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-5": "################################################################################\n# run_experiment.py #\n# Ian Marci 2017 #\n# Defines knn classifier and runs 4-fold cross validation on data in #\n# Data/NetworkInput folder. #\n# Prints accuracy for each fold as well as confusion matrix. #\n################################################################################\n\n# Imports\nimport tensorflow as tf\nimport numpy as np\nfrom classifier_input_functions import choose_test_set, get_network_input\n\n# Path and placeholder definitions\ntrain_path = 'Data/NetworkTrain/'\ntest_path = 'Data/NetworkTest/'\n\nx_train = tf.placeholder('float', [None, 200])\nx_test = tf.placeholder('float', [200])\n\n# Distance to decide nearest neighbor\ndistance = tf.reduce_sum(tf.abs(tf.add(x_train, tf.negative(x_test))),\n reduction_indices=1)\n# Prediction chooses lowest distance\npred = tf.argmin(distance, 0)\n\n################################\n# 4-fold cross validation loop #\n################################\n\ninit = tf.global_variables_initializer()\nwith tf.Session() as sess:\n predictions = []\n labels = []\n accuracies = []\n for i in range(4):\n sess.run(init)\n\n choice = i + 1\n choose_test_set(str(choice))\n\n train_data, train_labels = get_network_input(train_path)\n test_data, test_labels = get_network_input(test_path)\n\n fold_accuracy = 0\n\n for i in range(len(test_data)):\n nn_index = sess.run(pred, feed_dict={x_train: train_data,\n x_test: test_data[i, :]})\n predictions.append(np.argmax(train_labels[nn_index]))\n labels.append(np.argmax(test_labels[i]))\n\n if predictions[-1] == labels[-1]:\n fold_accuracy += 1./len(test_data)\n accuracies.append(fold_accuracy)\n\n overall_accuracy = np.mean(accuracies)\n print('Average accuracy over 4 folds:', overall_accuracy)\n confusion = tf.confusion_matrix(labels=labels, predictions=predictions)\n print(confusion.eval())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pet import Pet
class Ninja:
def __init__(self, first_name, last_name, treats, pet_food, pet):
self.first_name = first_name
self.last_name = last_name
self.treats = treats
self.pet_food = pet_food
self.pet = pet
def walk(self):
self.pet.play()
def feed(self):
self.pet.eat()
def bathe(self):
self.pet.noise()
Fox = Pet("Ninetailed Fox", "Fox", "Fire-Breathing")
Naruto = Ninja("Naruto", "Izumaki", "Rice Balls", "Ground Beef", Fox)
Naruto.feed()
print(Naruto.pet.energy)
print(Naruto.pet.health)
Naruto.bathe()
Naruto.walk()
print(Naruto.pet.energy)
print(Naruto.pet.health)
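
# The Pet class imported from pet.py is not shown in this snippet. The sketch below
# is a hypothetical minimal version, inferred only from what Ninja uses -- play(),
# eat(), noise(), and the energy/health attributes printed above. The real class and
# its exact point values may differ.
class PetSketch:
    def __init__(self, name, pet_type, tricks, health=100, energy=100):
        self.name = name
        self.pet_type = pet_type
        self.tricks = tricks
        self.health = health
        self.energy = energy

    def eat(self):
        # feeding restores some energy and health
        self.energy += 5
        self.health += 10

    def play(self):
        # walking/playing spends energy
        self.energy -= 5
        self.health += 5

    def noise(self):
        print(f"{self.name} makes a noise!")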
|
normal
|
{
"blob_id": "b210784a198eaa3e57b5a65ec182a746aecc0e2b",
"index": 1695,
"step-1": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n <mask token>\n <mask token>\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\n<mask token>\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\n",
"step-4": "<mask token>\n\n\nclass Ninja:\n\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n def walk(self):\n self.pet.play()\n\n def feed(self):\n self.pet.eat()\n\n def bathe(self):\n self.pet.noise()\n\n\nFox = Pet('Ninetailed Fox', 'Fox', 'Fire-Breathing')\nNaruto = Ninja('Naruto', 'Izumaki', 'Rice Balls', 'Ground Beef', Fox)\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\n",
"step-5": "from pet import Pet \n\nclass Ninja:\n def __init__(self, first_name, last_name, treats, pet_food, pet):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n\n\n def walk(self):\n self.pet.play()\n\n\n def feed(self):\n self.pet.eat()\n\n\n def bathe(self):\n self.pet.noise()\n\n\n\nFox = Pet(\"Ninetailed Fox\", \"Fox\", \"Fire-Breathing\")\nNaruto = Ninja(\"Naruto\", \"Izumaki\", \"Rice Balls\", \"Ground Beef\", Fox)\n\n\nNaruto.feed()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)\nNaruto.bathe()\nNaruto.walk()\nprint(Naruto.pet.energy)\nprint(Naruto.pet.health)",
"step-ids": [
3,
5,
6,
7,
9
]
}
|
[
3,
5,
6,
7,
9
] |
#!/usr/bin/python
import glob
import pandas as pd
import numpy as np

manifest = pd.read_csv('./manifest.csv', sep=',', names=['projectId', 'records'], skiprows=[0])
mailTypes = pd.read_csv('./mail_types.csv', sep=',', names=['typeId', 'typeName'], skiprows=[0])

# ----- mailTypes['typeId'] = pd.to_numeric(mailTypes['typeId'], errors='coerce')
# mailTypes['typeId'] = mailTypes['typeId'].astype(str).astype(int)
# print(mailTypes.dtypes)

mail_columns = ['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId',
                'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName',
                'responseRequiredByDate']

path = './correspondence/'  # use your path
allFiles = glob.glob(path + "*.csv")

frames = []
counter = 0
for file_ in allFiles:
    counter += 1
    print('files remaining: ' + str(len(allFiles) - counter))

    correspond = pd.read_csv(file_, sep=',', header='infer')
    mail = pd.merge(correspond, mailTypes, how='left', left_on=['correspondenceTypeId'], right_on=['typeId'])
    mail.drop('typeId', axis=1, inplace=True)
    mail.columns = mail_columns
    frames.append(mail)

# DataFrame.append() was removed in pandas 2.x; build the final frame once with concat
mailAll_df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame(columns=mail_columns)
mailAll_df = mailAll_df[mail_columns]
mailAll_df.to_csv('mailAll.csv', sep=',')
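
# A minimal, self-contained sketch of the merge step above using hypothetical sample
# rows (the real correspondence CSVs are not shown here, so the column names and
# order below are assumptions): one correspondence record is left-joined against
# mail_types so the type name comes along, then the redundant typeId is dropped.
sample_types = pd.DataFrame({'typeId': [1], 'typeName': ['Letter']})
sample_corr = pd.DataFrame([{
    'projectId': 101, 'correspondenceId': 5001, 'sentDate': '2017-01-01',
    'fromOrganizationId': 7, 'fromUserId': 42, 'correspondenceTypeId': 1,
    'responseRequiredByDate': '2017-01-15',
}])
demo = pd.merge(sample_corr, sample_types, how='left',
                left_on=['correspondenceTypeId'], right_on=['typeId']).drop('typeId', axis=1)
print(demo.columns.tolist())  # the seven sample columns plus 'typeName'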
|
normal
|
{
"blob_id": "2ea33fd06be888db5cda86b345f535532d2a05b5",
"index": 4268,
"step-1": "#!/usr/bin/python \n\nimport glob\nimport pandas as pd\nimport numpy as np\n\nmanifest = pd.read_csv('./manifest.csv', sep=',', names=['projectId','records'], skiprows=[0])\nmailTypes = pd.read_csv('./mail_types.csv', sep=',', names=['typeId','typeName'], skiprows=[0])\n\n#----- mailTypes['typeId'] = pd.to_numeric(mailTypes['typeId'], errors='coerce')\n#mailTypes['typeId'] = mailTypes['typeId'].astype(str).astype(int)\n#print mailTypes.dtypes\n\nmailAll = pd.DataFrame(columns=['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId', \n 'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate'])\n\npath = './correspondence/' # use your path\nallFiles = glob.glob(path + \"*.csv\")\n\ncounter = 0\nfor file_ in allFiles :\n counter+=1\n print 'files remaining: ' + str(len(allFiles) - counter)\n\n correspond = pd.read_csv(file_, sep=',', header='infer')\n mail = pd.merge(correspond, mailTypes, how='left', left_on=['correspondenceTypeId'], right_on=['typeId'])\n mail.drop('typeId', axis=1, inplace=True)\n mail.columns = ['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId', 'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate']\n mailAll = mailAll.append(mail)\n \nmailAll_df = pd.DataFrame.from_dict(mailAll)\nmailAll_df = mailAll_df[['projectId', 'correspondenceId', 'sentDate', 'fromOrganizationId', 'fromUserId', 'correspondenceTypeId', 'correspondenceTypeName', 'responseRequiredByDate']]\nmailAll_df.to_csv('mailAll.csv', sep=',')\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Tienda:
    def __init__(self, nombre_tienda, lista_productos=None):
        self.nombre_tienda = nombre_tienda
        # use a fresh list per store instead of a shared mutable default argument
        self.lista_productos = lista_productos if lista_productos is not None else []

    def __str__(self):
        return f"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n"

    def anhadir_producto(self, producto_nuevo):
        self.lista_productos.append(producto_nuevo)
        print("# # # # # # # PRODUCTO ANHADIDO # # # # # # #")
        producto_nuevo.producto_info()
        return self

    def vender_producto(self, id):
        print("\n# # # # # # # PRODUCTO VENDIDO # # # # # # #")
        self.lista_productos.pop(id).producto_info()
        return self

    def inflacion(self, porcentaje_incremento):
        a = 0
        for pro in self.lista_productos:
            a += 1
            print(f"=================Producto 0{a}:=================")
            pro.producto_info()
            print("AUMENTA su precio a: ")
            pro.actualizar_precio(porcentaje_incremento, True).producto_info()
        return self

    def descuentazo(self, categoria, descuentazo_porcentaje):
        a = 0
        for product in self.lista_productos:
            a += 1
            if product.cat_producto == categoria:
                print(f"=================Producto 0{a}:=================")
                product.producto_info()
                print("Se REMATA, y su nuevo precio de remate es: ")
                product.actualizar_precio(descuentazo_porcentaje, False).producto_info()
        print(f"Descuento de precios a toda la categoria {categoria}, realizado")
        return self
#########################################################
##### coso = Tienda("VERDULERIA")
##### print(coso)
##### print("anhadir_P")
##### pera = ("PERA", 1000, "FRUTAS")
##### coco = ("COCO", 1511, "FRUTAS")
##### coso.anhadir_producto(pera)
##### coso.anhadir_producto(coco)
##### print(coso)
##### print("#############################")
##### coso.vender_producto(1)
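
# A minimal runnable sketch of the commented-out example above. Tienda expects product
# objects exposing producto_info(), actualizar_precio(porcentaje, aumentar) and a
# cat_producto attribute (a Producto class that is not shown here); _ProductoDemo is a
# hypothetical stand-in with made-up price math, used only to exercise the store.
class _ProductoDemo:
    def __init__(self, nombre, precio, cat_producto):
        self.nombre = nombre
        self.precio = precio
        self.cat_producto = cat_producto

    def producto_info(self):
        print(f"{self.nombre} | {self.precio} | {self.cat_producto}")

    def actualizar_precio(self, porcentaje, aumentar):
        factor = (1 + porcentaje / 100) if aumentar else (1 - porcentaje / 100)
        self.precio = round(self.precio * factor, 2)
        return self


if __name__ == "__main__":
    tienda = Tienda("VERDULERIA")
    tienda.anhadir_producto(_ProductoDemo("PERA", 1000, "FRUTAS"))
    tienda.anhadir_producto(_ProductoDemo("COCO", 1511, "FRUTAS"))
    tienda.inflacion(10)              # raise every price by 10%
    tienda.descuentazo("FRUTAS", 25)  # discount the FRUTAS category by 25%
    tienda.vender_producto(0)         # sell (pop) the first product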
|
normal
|
{
"blob_id": "0ae5d20b78bf7c23418de55ffd4d81cd5284c6d5",
"index": 8912,
"step-1": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n <mask token>\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-2": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-3": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-4": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f'=================Producto 0{a}:=================')\n product.producto_info()\n print('Se REMATA, y su nuevo precio de remate es: ')\n product.actualizar_precio(descuentazo_porcentaje, False\n ).producto_info()\n print(\n f'Descuento de precios a toda la categoria {categoria}, realizado')\n return self\n",
"step-5": "class Tienda:\n def __init__(self, nombre_tienda, lista_productos = []):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"Nombre de la Tienda: {self.nombre_tienda}\\nLista de Productos: {self.lista_productos}\\n\"\n \n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print(\"# # # # # # # PRODUCTO ANHADIDO # # # # # # #\")\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print(\"\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #\")\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f\"=================Producto 0{a}:=================\")\n pro.producto_info()\n print(\"AUMENTA su precio a: \")\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f\"=================Producto 0{a}:=================\")\n product.producto_info()\n print(\"Se REMATA, y su nuevo precio de remate es: \")\n product.actualizar_precio(descuentazo_porcentaje, False).producto_info()\n print(f\"Descuento de precios a toda la categoria {categoria}, realizado\")\n return self\n\n#########################################################\n##### coso = Tienda(\"VERDULERIA\")\n##### print(coso)\n##### print(\"anhadir_P\")\n##### pera = (\"PERA\", 1000, \"FRUTAS\")\n##### coco = (\"COCO\", 1511, \"FRUTAS\")\n##### coso.anhadir_producto(pera)\n##### coso.anhadir_producto(coco)\n##### print(coso)\n##### print(\"#############################\")\n##### coso.vender_producto(1)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from colander_validators import (
email,
url)
def test_url():
assert url("ixmat.us") == True
assert url("http://bleh.net") == True
assert type(url("://ixmat.us")) == str
assert type(url("ixmat")) == str
def test_email():
assert email("[email protected]") == True
assert email("[email protected]") == True
assert type(email("barney")) == str
assert type(email("barney@dino")) == str
|
normal
|
{
"blob_id": "40637c7a5e45d0fe4184478a1be2e08e5040c93b",
"index": 8931,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-3": "<mask token>\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-4": "from colander_validators import email, url\n\n\ndef test_url():\n assert url('ixmat.us') == True\n assert url('http://bleh.net') == True\n assert type(url('://ixmat.us')) == str\n assert type(url('ixmat')) == str\n\n\ndef test_email():\n assert email('[email protected]') == True\n assert email('[email protected]') == True\n assert type(email('barney')) == str\n assert type(email('barney@dino')) == str\n",
"step-5": "from colander_validators import (\n email,\n url)\n\n\ndef test_url():\n\n assert url(\"ixmat.us\") == True\n assert url(\"http://bleh.net\") == True\n assert type(url(\"://ixmat.us\")) == str\n assert type(url(\"ixmat\")) == str\n\n\ndef test_email():\n\n assert email(\"[email protected]\") == True\n assert email(\"[email protected]\") == True\n assert type(email(\"barney\")) == str\n assert type(email(\"barney@dino\")) == str\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .hacker import HackerRegistrationPage
from .judge import JudgeRegistrationPage
from .mentor import MentorRegistrationPage
from .organizer import OrganizerRegistrationPage
from .user import UserRegistrationPage
|
normal
|
{
"blob_id": "34f3212b0254cbcb5e1ca535a29d4fe820dcaad8",
"index": 2978,
"step-1": "<mask token>\n",
"step-2": "from .hacker import HackerRegistrationPage\nfrom .judge import JudgeRegistrationPage\nfrom .mentor import MentorRegistrationPage\nfrom .organizer import OrganizerRegistrationPage\nfrom .user import UserRegistrationPage\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.db.backends.base.base import BaseDatabaseWrapper as BaseDatabaseWrapper
from typing import Any, Optional
def wrap_oracle_errors() -> None: ...
class _UninitializedOperatorsDescriptor:
def __get__(self, instance: Any, cls: Optional[Any] = ...): ...
class DatabaseWrapper(BaseDatabaseWrapper):
vendor: str = ...
display_name: str = ...
data_types: Any = ...
data_type_check_constraints: Any = ...
operators: Any = ...
pattern_esc: str = ...
Database: Any = ...
SchemaEditorClass: Any = ...
client_class: Any = ...
creation_class: Any = ...
features_class: Any = ...
introspection_class: Any = ...
ops_class: Any = ...
validation_class: Any = ...
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
def get_connection_params(self): ...
def get_new_connection(self, conn_params: Any): ...
pattern_ops: Any = ...
def init_connection_state(self) -> None: ...
def create_cursor(self, name: Optional[Any] = ...): ...
def check_constraints(self, table_names: Optional[Any] = ...) -> None: ...
def is_usable(self): ...
def cx_oracle_version(self): ...
def oracle_version(self): ...
class OracleParam:
force_bytes: Any = ...
input_size: Any = ...
def __init__(self, param: Any, cursor: Any, strings_only: bool = ...) -> None: ...
class VariableWrapper:
var: Any = ...
def __init__(self, var: Any) -> None: ...
def bind_parameter(self, cursor: Any): ...
def __getattr__(self, key: Any): ...
def __setattr__(self, key: Any, value: Any) -> None: ...
class FormatStylePlaceholderCursor:
charset: str = ...
cursor: Any = ...
def __init__(self, connection: Any) -> None: ...
def execute(self, query: Any, params: Optional[Any] = ...): ...
def executemany(self, query: Any, params: Optional[Any] = ...): ...
def close(self) -> None: ...
def var(self, *args: Any): ...
def arrayvar(self, *args: Any): ...
def __getattr__(self, attr: Any): ...
def __iter__(self) -> Any: ...
|
normal
|
{
"blob_id": "829b8cd0b648d39c07c20fd1c401bf717ed5b9c4",
"index": 9682,
"step-1": "<mask token>\n\n\nclass OracleParam:\n force_bytes: Any = ...\n input_size: Any = ...\n\n def __init__(self, param: Any, cursor: Any, strings_only: bool=...) ->None:\n ...\n\n\nclass VariableWrapper:\n var: Any = ...\n\n def __init__(self, var: Any) ->None:\n ...\n\n def bind_parameter(self, cursor: Any):\n ...\n\n def __getattr__(self, key: Any):\n ...\n\n def __setattr__(self, key: Any, value: Any) ->None:\n ...\n\n\nclass FormatStylePlaceholderCursor:\n charset: str = ...\n cursor: Any = ...\n\n def __init__(self, connection: Any) ->None:\n ...\n\n def execute(self, query: Any, params: Optional[Any]=...):\n ...\n\n def executemany(self, query: Any, params: Optional[Any]=...):\n ...\n\n def close(self) ->None:\n ...\n\n def var(self, *args: Any):\n ...\n\n def arrayvar(self, *args: Any):\n ...\n\n def __getattr__(self, attr: Any):\n ...\n\n def __iter__(self) ->Any:\n ...\n",
"step-2": "<mask token>\n\n\nclass DatabaseWrapper(BaseDatabaseWrapper):\n vendor: str = ...\n display_name: str = ...\n data_types: Any = ...\n data_type_check_constraints: Any = ...\n operators: Any = ...\n pattern_esc: str = ...\n Database: Any = ...\n SchemaEditorClass: Any = ...\n client_class: Any = ...\n creation_class: Any = ...\n features_class: Any = ...\n introspection_class: Any = ...\n ops_class: Any = ...\n validation_class: Any = ...\n <mask token>\n <mask token>\n\n def get_new_connection(self, conn_params: Any):\n ...\n pattern_ops: Any = ...\n <mask token>\n <mask token>\n\n def check_constraints(self, table_names: Optional[Any]=...) ->None:\n ...\n <mask token>\n <mask token>\n\n def oracle_version(self):\n ...\n\n\nclass OracleParam:\n force_bytes: Any = ...\n input_size: Any = ...\n\n def __init__(self, param: Any, cursor: Any, strings_only: bool=...) ->None:\n ...\n\n\nclass VariableWrapper:\n var: Any = ...\n\n def __init__(self, var: Any) ->None:\n ...\n\n def bind_parameter(self, cursor: Any):\n ...\n\n def __getattr__(self, key: Any):\n ...\n\n def __setattr__(self, key: Any, value: Any) ->None:\n ...\n\n\nclass FormatStylePlaceholderCursor:\n charset: str = ...\n cursor: Any = ...\n\n def __init__(self, connection: Any) ->None:\n ...\n\n def execute(self, query: Any, params: Optional[Any]=...):\n ...\n\n def executemany(self, query: Any, params: Optional[Any]=...):\n ...\n\n def close(self) ->None:\n ...\n\n def var(self, *args: Any):\n ...\n\n def arrayvar(self, *args: Any):\n ...\n\n def __getattr__(self, attr: Any):\n ...\n\n def __iter__(self) ->Any:\n ...\n",
"step-3": "<mask token>\n\n\nclass DatabaseWrapper(BaseDatabaseWrapper):\n vendor: str = ...\n display_name: str = ...\n data_types: Any = ...\n data_type_check_constraints: Any = ...\n operators: Any = ...\n pattern_esc: str = ...\n Database: Any = ...\n SchemaEditorClass: Any = ...\n client_class: Any = ...\n creation_class: Any = ...\n features_class: Any = ...\n introspection_class: Any = ...\n ops_class: Any = ...\n validation_class: Any = ...\n\n def __init__(self, *args: Any, **kwargs: Any) ->None:\n ...\n\n def get_connection_params(self):\n ...\n\n def get_new_connection(self, conn_params: Any):\n ...\n pattern_ops: Any = ...\n <mask token>\n\n def create_cursor(self, name: Optional[Any]=...):\n ...\n\n def check_constraints(self, table_names: Optional[Any]=...) ->None:\n ...\n\n def is_usable(self):\n ...\n\n def cx_oracle_version(self):\n ...\n\n def oracle_version(self):\n ...\n\n\nclass OracleParam:\n force_bytes: Any = ...\n input_size: Any = ...\n\n def __init__(self, param: Any, cursor: Any, strings_only: bool=...) ->None:\n ...\n\n\nclass VariableWrapper:\n var: Any = ...\n\n def __init__(self, var: Any) ->None:\n ...\n\n def bind_parameter(self, cursor: Any):\n ...\n\n def __getattr__(self, key: Any):\n ...\n\n def __setattr__(self, key: Any, value: Any) ->None:\n ...\n\n\nclass FormatStylePlaceholderCursor:\n charset: str = ...\n cursor: Any = ...\n\n def __init__(self, connection: Any) ->None:\n ...\n\n def execute(self, query: Any, params: Optional[Any]=...):\n ...\n\n def executemany(self, query: Any, params: Optional[Any]=...):\n ...\n\n def close(self) ->None:\n ...\n\n def var(self, *args: Any):\n ...\n\n def arrayvar(self, *args: Any):\n ...\n\n def __getattr__(self, attr: Any):\n ...\n\n def __iter__(self) ->Any:\n ...\n",
"step-4": "<mask token>\n\n\nclass _UninitializedOperatorsDescriptor:\n <mask token>\n\n\nclass DatabaseWrapper(BaseDatabaseWrapper):\n vendor: str = ...\n display_name: str = ...\n data_types: Any = ...\n data_type_check_constraints: Any = ...\n operators: Any = ...\n pattern_esc: str = ...\n Database: Any = ...\n SchemaEditorClass: Any = ...\n client_class: Any = ...\n creation_class: Any = ...\n features_class: Any = ...\n introspection_class: Any = ...\n ops_class: Any = ...\n validation_class: Any = ...\n\n def __init__(self, *args: Any, **kwargs: Any) ->None:\n ...\n\n def get_connection_params(self):\n ...\n\n def get_new_connection(self, conn_params: Any):\n ...\n pattern_ops: Any = ...\n\n def init_connection_state(self) ->None:\n ...\n\n def create_cursor(self, name: Optional[Any]=...):\n ...\n\n def check_constraints(self, table_names: Optional[Any]=...) ->None:\n ...\n\n def is_usable(self):\n ...\n\n def cx_oracle_version(self):\n ...\n\n def oracle_version(self):\n ...\n\n\nclass OracleParam:\n force_bytes: Any = ...\n input_size: Any = ...\n\n def __init__(self, param: Any, cursor: Any, strings_only: bool=...) ->None:\n ...\n\n\nclass VariableWrapper:\n var: Any = ...\n\n def __init__(self, var: Any) ->None:\n ...\n\n def bind_parameter(self, cursor: Any):\n ...\n\n def __getattr__(self, key: Any):\n ...\n\n def __setattr__(self, key: Any, value: Any) ->None:\n ...\n\n\nclass FormatStylePlaceholderCursor:\n charset: str = ...\n cursor: Any = ...\n\n def __init__(self, connection: Any) ->None:\n ...\n\n def execute(self, query: Any, params: Optional[Any]=...):\n ...\n\n def executemany(self, query: Any, params: Optional[Any]=...):\n ...\n\n def close(self) ->None:\n ...\n\n def var(self, *args: Any):\n ...\n\n def arrayvar(self, *args: Any):\n ...\n\n def __getattr__(self, attr: Any):\n ...\n\n def __iter__(self) ->Any:\n ...\n",
"step-5": "from django.db.backends.base.base import BaseDatabaseWrapper as BaseDatabaseWrapper\nfrom typing import Any, Optional\n\ndef wrap_oracle_errors() -> None: ...\n\nclass _UninitializedOperatorsDescriptor:\n def __get__(self, instance: Any, cls: Optional[Any] = ...): ...\n\nclass DatabaseWrapper(BaseDatabaseWrapper):\n vendor: str = ...\n display_name: str = ...\n data_types: Any = ...\n data_type_check_constraints: Any = ...\n operators: Any = ...\n pattern_esc: str = ...\n Database: Any = ...\n SchemaEditorClass: Any = ...\n client_class: Any = ...\n creation_class: Any = ...\n features_class: Any = ...\n introspection_class: Any = ...\n ops_class: Any = ...\n validation_class: Any = ...\n def __init__(self, *args: Any, **kwargs: Any) -> None: ...\n def get_connection_params(self): ...\n def get_new_connection(self, conn_params: Any): ...\n pattern_ops: Any = ...\n def init_connection_state(self) -> None: ...\n def create_cursor(self, name: Optional[Any] = ...): ...\n def check_constraints(self, table_names: Optional[Any] = ...) -> None: ...\n def is_usable(self): ...\n def cx_oracle_version(self): ...\n def oracle_version(self): ...\n\nclass OracleParam:\n force_bytes: Any = ...\n input_size: Any = ...\n def __init__(self, param: Any, cursor: Any, strings_only: bool = ...) -> None: ...\n\nclass VariableWrapper:\n var: Any = ...\n def __init__(self, var: Any) -> None: ...\n def bind_parameter(self, cursor: Any): ...\n def __getattr__(self, key: Any): ...\n def __setattr__(self, key: Any, value: Any) -> None: ...\n\nclass FormatStylePlaceholderCursor:\n charset: str = ...\n cursor: Any = ...\n def __init__(self, connection: Any) -> None: ...\n def execute(self, query: Any, params: Optional[Any] = ...): ...\n def executemany(self, query: Any, params: Optional[Any] = ...): ...\n def close(self) -> None: ...\n def var(self, *args: Any): ...\n def arrayvar(self, *args: Any): ...\n def __getattr__(self, attr: Any): ...\n def __iter__(self) -> Any: ...\n",
"step-ids": [
16,
20,
25,
27,
31
]
}
|
[
16,
20,
25,
27,
31
] |
class Solution(object):
def maxDistToClosest(self, seats):
"""
:type seats: List[int]
:rtype: int
"""
start = 0
end = 0
length = len(seats)
max_distance = 0
for i in range(len(seats)):
seat = seats[i]
if seat == 1:
if start == 0 or end == length - 1:
max_distance = max(max_distance, end - start + 1)
else:
                    # ceil(gap / 2): use integer division so the distance stays an int
                    max_distance = max(max_distance, (end - start + 1) // 2 +
                        (end - start + 1) % 2)
if i + 1 < length:
start = end = i + 1
else:
end = i
if start == 0 or end == length - 1:
max_distance = max(max_distance, end - start + 1)
else:
            max_distance = max(max_distance, (end - start + 1) // 2 + (end -
                start + 1) % 2)
return max_distance
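
# Quick sanity check (assumed example inputs, not part of the original solution):
# in [1, 0, 0, 0, 1, 0, 1] the best empty seat is index 2, two seats from the nearest
# occupied seat; in [1, 0, 0, 0] the best seat is the last one, three seats away.
if __name__ == "__main__":
    assert Solution().maxDistToClosest([1, 0, 0, 0, 1, 0, 1]) == 2
    assert Solution().maxDistToClosest([1, 0, 0, 0]) == 3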
|
normal
|
{
"blob_id": "2b8b502381e35ef8e56bc150114a8a4831782c5a",
"index": 3819,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def maxDistToClosest(self, seats):\n \"\"\"\n :type seats: List[int]\n :rtype: int\n \"\"\"\n start = 0\n end = 0\n length = len(seats)\n max_distance = 0\n for i in range(len(seats)):\n seat = seats[i]\n if seat == 1:\n if start == 0 or end == length - 1:\n max_distance = max(max_distance, end - start + 1)\n else:\n max_distance = max(max_distance, (end - start + 1) / 2 +\n (end - start + 1) % 2)\n if i + 1 < length:\n start = end = i + 1\n else:\n end = i\n if start == 0 or end == length - 1:\n max_distance = max(max_distance, end - start + 1)\n else:\n max_distance = max(max_distance, (end - start + 1) / 2 + (end -\n start + 1) % 2)\n return max_distance\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
str = 'Hello world'

print("string length : %d" % (len(str)))
print("string length 444:", len(str))
print(str)
print(str[0])
print(str[1:5])
print(str[:len(str)])
print(str[1:] * 3)
print(str[1:] * 5)

print('string concatenation')

print("Hello" + "world")

# print("python : str.join Test")
str1 = "-"

print(str1.join(str))

list = [1, 2, 3, 4]

for a in str:
    print("current letter:", a)
n = 0
for s in list:
    print("list[%d] : %d" % (n, s))  # Python has no ++ operator; increment explicitly
    n += 1
|
normal
|
{
"blob_id": "77b7a0ae115aa063512ea7d6e91811470a4cf9d0",
"index": 2187,
"step-1": "\nstr = 'Hello world'\n\nprint (\"字符串长度 : %d\" %(len(str)))\nprint(\"字符串的长度 444:\",len(str))\nprint (str)\nprint (str[0])\nprint (str[1:5])\nprint (str[:len(str)])\nprint (str[1:]*3)\nprint (str[1:]*5)\n\nprint ('字符串拼接')\n\nprint (\"Hello\" + \"world\")\n\n\n#print (\"python : str.join Test\")\nstr1 = \"-\"\n\nprint (str1.join(str))\n\n\nlist = [1,2,3,4]\n\n\nfor a in str :\n print (\"当前字母:\",a)\n\nn = 0\nfor s in list :\n print (\"list[%d] :%d\" %(n++,s));\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import json
import requests
import itertools
import logging
from shared_code.config.setting import Settings
from TailwindTraderFunc.cognitiveservices import CognitiveServices
from shared_code.storage.storage import BlobStorageService
class TailwindTraders():
def __init__(self, req):
self._settings = Settings()
self._cs = CognitiveServices()
self._storage = BlobStorageService(self._settings.get_storage_connection_string())
self._reqbody = req.get_json()
def readRequest(self):
content = self._reqbody["values"][0]["data"]["content"]
return content
def getBlobUrlById(self, image_id):
image = list(self._storage.list_blobs(self._settings.get_storage_container_name(),
prefix=f'{image_id}.jpg'))
image_url = self._storage.make_blob_url(self._settings.get_storage_container_name(),
image[0].name)
return image_url
def getVisualFeaturesByImage(self, image_url):
response_analyze = self._cs.getVisualFeaturesByImage(image_url, "analyze", {'visualFeatures': 'Description, Tags'})
response_ocr = self._cs.getOCRByImage(image_url, "recognizeText")
return {"analyze":response_analyze, "ocr":response_ocr}
def updateItemField(self, item, content):
item["Tags"] = content["analyze"]["tags"]
item["VisualDetail"] = content["analyze"]["description"]
recognition_result = content["ocr"]["recognitionResult"]
item["OCRText"] = [line["text"] for line in recognition_result["lines"]]
def generateResult(self, content):
result = {"values": [{"recordId": self._reqbody["values"][0]["recordId"],
"data" : {"Items": content["Items"]}}]}
result = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
return result
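
# Assumed payload shapes, inferred from readRequest()/generateResult() above
# (not confirmed by the original source):
#
#   incoming request body:
#     {"values": [{"recordId": "r1", "data": {"content": {"Items": [ ... ]}}}]}
#
#   result returned by generateResult(content):
#     {"values": [{"recordId": "r1", "data": {"Items": [ ... ]}}]}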
|
normal
|
{
"blob_id": "75ba2448897bed8388a7b8d876827461e1bc9dd7",
"index": 2809,
"step-1": "<mask token>\n\n\nclass TailwindTraders:\n\n def __init__(self, req):\n self._settings = Settings()\n self._cs = CognitiveServices()\n self._storage = BlobStorageService(self._settings.\n get_storage_connection_string())\n self._reqbody = req.get_json()\n <mask token>\n\n def getBlobUrlById(self, image_id):\n image = list(self._storage.list_blobs(self._settings.\n get_storage_container_name(), prefix=f'{image_id}.jpg'))\n image_url = self._storage.make_blob_url(self._settings.\n get_storage_container_name(), image[0].name)\n return image_url\n\n def getVisualFeaturesByImage(self, image_url):\n response_analyze = self._cs.getVisualFeaturesByImage(image_url,\n 'analyze', {'visualFeatures': 'Description, Tags'})\n response_ocr = self._cs.getOCRByImage(image_url, 'recognizeText')\n return {'analyze': response_analyze, 'ocr': response_ocr}\n\n def updateItemField(self, item, content):\n item['Tags'] = content['analyze']['tags']\n item['VisualDetail'] = content['analyze']['description']\n recognition_result = content['ocr']['recognitionResult']\n item['OCRText'] = [line['text'] for line in recognition_result['lines']\n ]\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TailwindTraders:\n\n def __init__(self, req):\n self._settings = Settings()\n self._cs = CognitiveServices()\n self._storage = BlobStorageService(self._settings.\n get_storage_connection_string())\n self._reqbody = req.get_json()\n\n def readRequest(self):\n content = self._reqbody['values'][0]['data']['content']\n return content\n\n def getBlobUrlById(self, image_id):\n image = list(self._storage.list_blobs(self._settings.\n get_storage_container_name(), prefix=f'{image_id}.jpg'))\n image_url = self._storage.make_blob_url(self._settings.\n get_storage_container_name(), image[0].name)\n return image_url\n\n def getVisualFeaturesByImage(self, image_url):\n response_analyze = self._cs.getVisualFeaturesByImage(image_url,\n 'analyze', {'visualFeatures': 'Description, Tags'})\n response_ocr = self._cs.getOCRByImage(image_url, 'recognizeText')\n return {'analyze': response_analyze, 'ocr': response_ocr}\n\n def updateItemField(self, item, content):\n item['Tags'] = content['analyze']['tags']\n item['VisualDetail'] = content['analyze']['description']\n recognition_result = content['ocr']['recognitionResult']\n item['OCRText'] = [line['text'] for line in recognition_result['lines']\n ]\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TailwindTraders:\n\n def __init__(self, req):\n self._settings = Settings()\n self._cs = CognitiveServices()\n self._storage = BlobStorageService(self._settings.\n get_storage_connection_string())\n self._reqbody = req.get_json()\n\n def readRequest(self):\n content = self._reqbody['values'][0]['data']['content']\n return content\n\n def getBlobUrlById(self, image_id):\n image = list(self._storage.list_blobs(self._settings.\n get_storage_container_name(), prefix=f'{image_id}.jpg'))\n image_url = self._storage.make_blob_url(self._settings.\n get_storage_container_name(), image[0].name)\n return image_url\n\n def getVisualFeaturesByImage(self, image_url):\n response_analyze = self._cs.getVisualFeaturesByImage(image_url,\n 'analyze', {'visualFeatures': 'Description, Tags'})\n response_ocr = self._cs.getOCRByImage(image_url, 'recognizeText')\n return {'analyze': response_analyze, 'ocr': response_ocr}\n\n def updateItemField(self, item, content):\n item['Tags'] = content['analyze']['tags']\n item['VisualDetail'] = content['analyze']['description']\n recognition_result = content['ocr']['recognitionResult']\n item['OCRText'] = [line['text'] for line in recognition_result['lines']\n ]\n\n def generateResult(self, content):\n result = {'values': [{'recordId': self._reqbody['values'][0][\n 'recordId'], 'data': {'Items': content['Items']}}]}\n result = json.dumps(result, sort_keys=True, indent=4, separators=(\n ',', ': '))\n return result\n",
"step-4": "import json\nimport requests\nimport itertools\nimport logging\nfrom shared_code.config.setting import Settings\nfrom TailwindTraderFunc.cognitiveservices import CognitiveServices\nfrom shared_code.storage.storage import BlobStorageService\n\n\nclass TailwindTraders:\n\n def __init__(self, req):\n self._settings = Settings()\n self._cs = CognitiveServices()\n self._storage = BlobStorageService(self._settings.\n get_storage_connection_string())\n self._reqbody = req.get_json()\n\n def readRequest(self):\n content = self._reqbody['values'][0]['data']['content']\n return content\n\n def getBlobUrlById(self, image_id):\n image = list(self._storage.list_blobs(self._settings.\n get_storage_container_name(), prefix=f'{image_id}.jpg'))\n image_url = self._storage.make_blob_url(self._settings.\n get_storage_container_name(), image[0].name)\n return image_url\n\n def getVisualFeaturesByImage(self, image_url):\n response_analyze = self._cs.getVisualFeaturesByImage(image_url,\n 'analyze', {'visualFeatures': 'Description, Tags'})\n response_ocr = self._cs.getOCRByImage(image_url, 'recognizeText')\n return {'analyze': response_analyze, 'ocr': response_ocr}\n\n def updateItemField(self, item, content):\n item['Tags'] = content['analyze']['tags']\n item['VisualDetail'] = content['analyze']['description']\n recognition_result = content['ocr']['recognitionResult']\n item['OCRText'] = [line['text'] for line in recognition_result['lines']\n ]\n\n def generateResult(self, content):\n result = {'values': [{'recordId': self._reqbody['values'][0][\n 'recordId'], 'data': {'Items': content['Items']}}]}\n result = json.dumps(result, sort_keys=True, indent=4, separators=(\n ',', ': '))\n return result\n",
"step-5": "import json\nimport requests\nimport itertools\nimport logging\nfrom shared_code.config.setting import Settings\nfrom TailwindTraderFunc.cognitiveservices import CognitiveServices\nfrom shared_code.storage.storage import BlobStorageService\n\n\nclass TailwindTraders():\n\n def __init__(self, req):\n self._settings = Settings()\n self._cs = CognitiveServices()\n self._storage = BlobStorageService(self._settings.get_storage_connection_string())\n self._reqbody = req.get_json()\n\n def readRequest(self):\n content = self._reqbody[\"values\"][0][\"data\"][\"content\"]\n return content\n\n def getBlobUrlById(self, image_id):\n image = list(self._storage.list_blobs(self._settings.get_storage_container_name(),\n prefix=f'{image_id}.jpg'))\n image_url = self._storage.make_blob_url(self._settings.get_storage_container_name(),\n image[0].name)\n return image_url\n\n def getVisualFeaturesByImage(self, image_url):\n response_analyze = self._cs.getVisualFeaturesByImage(image_url, \"analyze\", {'visualFeatures': 'Description, Tags'})\n response_ocr = self._cs.getOCRByImage(image_url, \"recognizeText\")\n return {\"analyze\":response_analyze, \"ocr\":response_ocr}\n \n def updateItemField(self, item, content):\n item[\"Tags\"] = content[\"analyze\"][\"tags\"]\n item[\"VisualDetail\"] = content[\"analyze\"][\"description\"]\n recognition_result = content[\"ocr\"][\"recognitionResult\"]\n item[\"OCRText\"] = [line[\"text\"] for line in recognition_result[\"lines\"]]\n\n def generateResult(self, content):\n result = {\"values\": [{\"recordId\": self._reqbody[\"values\"][0][\"recordId\"],\n \"data\" : {\"Items\": content[\"Items\"]}}]}\n result = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))\n return result",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# 001. Sending a request to a web server and receiving the response
# What you learn: understand how to connect to a web server, request a web page, and receive a response object from the server.
# Hint: pass the address (URL) of the web page you want to reach to the get() function of the requests module.

import requests

url = "https://www.python.org/"
resp = requests.get(url)
print(resp)  # 200, works normally

url2 = "https://www.python.org/1"
resp2 = requests.get(url2)
print(resp2)  # 404 error, the requested page could not be found
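
# A small follow-up sketch (standard requests attributes only) showing how to inspect
# the response object rather than just printing its repr:
for r in (resp, resp2):
    print(r.url, r.status_code, r.ok)  # roughly: 200/True for resp, 404/False for resp2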
|
normal
|
{
"blob_id": "1af73c0ca38ea32119f622dc14741c0bb0aa08fd",
"index": 6344,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(resp)\n<mask token>\nprint(resp2)\n",
"step-3": "<mask token>\nurl = 'https://www.python.org/'\nresp = requests.get(url)\nprint(resp)\nurl2 = 'https://www.python.org/1'\nresp2 = requests.get(url2)\nprint(resp2)\n",
"step-4": "import requests\nurl = 'https://www.python.org/'\nresp = requests.get(url)\nprint(resp)\nurl2 = 'https://www.python.org/1'\nresp2 = requests.get(url2)\nprint(resp2)\n",
"step-5": "# 001. 웹 서버에 요청하고 응답받기\n# 학습 내용 : 웹 서버에 접속하여 웹 페이지 정보를 요청하고 서버로부터 응답 객체를 받는 과정을 이해한다.\n# 힌트 내용 : requests 모듈의 get() 함수에 접속하려는 웹 페이지의 주소(URL)를 입력한다.\n\nimport requests\n\nurl = \"https://www.python.org/\"\nresp = requests.get(url)\nprint(resp) # 200, 정상 동작\n\nurl2 = \"https://www.python.org/1\"\nresp2 = requests.get(url2)\nprint(resp2) # 404 error, 해당 페이지를 찾을 수 없음",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |